From 62967503fa4240bd8074078e1e3816504d971c4c Mon Sep 17 00:00:00 2001 From: Partho Date: Sat, 7 Oct 2023 19:39:15 +0530 Subject: [PATCH] update doc `llama`->`llama2` --- _snippets/default_models_table.mdx | 2 +- get_started/quickstart/quickstart.mdx | 2 +- reference/getting_started.mdx | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/_snippets/default_models_table.mdx b/_snippets/default_models_table.mdx index 9ecad6f..7f965e0 100644 --- a/_snippets/default_models_table.mdx +++ b/_snippets/default_models_table.mdx @@ -4,7 +4,7 @@ |```'gpt'``` |[OpenAI](https://platform.openai.com/) | [text-davinci-003](https://platform.openai.com/docs/models/gpt-3) |N/A| |```'bloom'``` |[BigScience](https://huggingface.co/bigscience) | [bloom-560](https://huggingface.co/bigscience/bloom-560m) |1.12 GB| |```'flan'``` |[Google](https://huggingface.co/google) | [flan-t5-small](https://huggingface.co/google/flan-t5-small) |308 MB| -|```'llama'``` |[Openlm-Research](https://huggingface.co/openlm-research) | [open_llama_3b](https://huggingface.co/openlm-research/open_llama_3b) |6.85 GB| +|```'llama2'``` |[Openlm-Research](https://huggingface.co/openlm-research) | [open_llama_3b](https://huggingface.co/openlm-research/open_llama_3b) |6.85 GB| |```'neo'``` |[ElutherAI](https://huggingface.co/EleutherAI) | [gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) |5.31 GB| |```'opt'``` |[Facebook](https://huggingface.co/facebook) | [opt-350m](https://huggingface.co/facebook/opt-350m) |662 MB| |```'pythia'``` |[ElutherAI](https://huggingface.co/EleutherAI) | [pythia-70m-deduped](https://huggingface.co/EleutherAI/pythia-70m-deduped)|166 MB| diff --git a/get_started/quickstart/quickstart.mdx b/get_started/quickstart/quickstart.mdx index 9148121..8258084 100644 --- a/get_started/quickstart/quickstart.mdx +++ b/get_started/quickstart/quickstart.mdx @@ -47,7 +47,7 @@ print(response) Be sure to check our [System Requirements](system) to make sure you can use your 
desired model. ```python Supported_Models=[ # Use these strings to call the model - 'chat_gpt', 'gpt', 'neo', 'llama', + 'chat_gpt', 'gpt', 'neo', 'llama2', 'bloom', 'opt', 'pythia'] ``` diff --git a/reference/getting_started.mdx b/reference/getting_started.mdx index 2b1f62e..91da35d 100644 --- a/reference/getting_started.mdx +++ b/reference/getting_started.mdx @@ -37,8 +37,8 @@ print(response) # import our client from llm_vm.client import Client -# Select the LlaMA model -client = Client(big_model = 'llama2') +# Select the LLaMA 2 model +client = Client(big_model = 'llama2') # Put in your prompt and go! response = client.complete(prompt = 'What is Anarchy?', context = '')