diff --git a/README.md b/README.md
index 0913f1b..551901d 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ api_key =
 
 If you are self-hosting, my recommendation is to use
 [Ollama](https://github.com/ollama/ollama) with
-[Llama 3.1 70B](https://ollama.com/library/llama3.1). An out of the box
+[Llama 3.3 70B](https://ollama.com/library/llama3.3). An out of the box
 configuration running on `localhost` could then look something like this:
 
@@ -79,7 +79,7 @@ configuration = local-llama
 
 [local-llama]
 provider = self-hosted
-model = llama3.1
+model = llama3.3
 server = http://localhost:11434/v1
 ```
@@ -119,7 +119,7 @@ configuration = huggingface
 provider = huggingface
 email = 
 password = 
-model = meta-llama/Meta-Llama-3.1-70B-Instruct
+model = meta-llama/Llama-3.3-70B-Instruct
 ```
 
 Available models are listed [here](https://huggingface.co/chat/models).
diff --git a/pyproject.toml b/pyproject.toml
index 7959703..3e5671e 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "fish_ai"
-version = "1.0.0"
+version = "1.0.1"
 authors = [{ name = "Bastian Fredriksson", email = "realiserad@gmail.com" }]
 description = "Provides core functionality for fish-ai, an AI plugin for the fish shell."
 readme = "README.md"
diff --git a/src/fish_ai/engine.py b/src/fish_ai/engine.py
index 7f07851..7d993fd 100644
--- a/src/fish_ai/engine.py
+++ b/src/fish_ai/engine.py
@@ -215,7 +215,7 @@ def get_response(messages):
             cookies=cookies.get_dict(),
             system_prompt=create_system_prompt(messages),
             default_llm=get_config('model') or
-            'meta-llama/Meta-Llama-3.1-70B-Instruct')
+            'meta-llama/Llama-3.3-70B-Instruct')
         response = bot.chat(
             messages[-1].get('content')).wait_until_done()