From 705ca9803cc921e8f9882a459e9238d630c38b63 Mon Sep 17 00:00:00 2001
From: Richard Anthony Hein
Date: Sun, 8 Sep 2024 11:25:25 +0000
Subject: [PATCH] set default model properly

---
 playground/agents/use_cases/weather/main.py      | 2 +-
 playground/demos/chatbot/server/dockerRunVllm.sh | 2 +-
 playground/demos/chatbot/server/server_models.py | 4 ++--
 playground/demos/vLLM/vLLM_example.py            | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/playground/agents/use_cases/weather/main.py b/playground/agents/use_cases/weather/main.py
index 0683aa5b..4ccdccce 100644
--- a/playground/agents/use_cases/weather/main.py
+++ b/playground/agents/use_cases/weather/main.py
@@ -12,7 +12,7 @@ weather_api_key= "af6ef989b5c50a91ca068cc00df125b7", # Replace with your weathe
 llm = OpenAIChatLLM(
     base_url=api_base,
     api_key=api_key,
-    model="NousResearch/Meta-Llama-3.1-8B-Instruct",
+    model="NousResearch/Meta-Llama-3-8B-Instruct",
     temperature=0,
     streaming=False
 )
diff --git a/playground/demos/chatbot/server/dockerRunVllm.sh b/playground/demos/chatbot/server/dockerRunVllm.sh
index 7e346b4d..ace21b2a 100644
--- a/playground/demos/chatbot/server/dockerRunVllm.sh
+++ b/playground/demos/chatbot/server/dockerRunVllm.sh
@@ -5,4 +5,4 @@ docker run --runtime nvidia --gpus all \
     --network=host \
     --name vllm \
     vllm/vllm-openai:latest \
-    --model NousResearch/Meta-Llama-3.1-8B-Instruct
\ No newline at end of file
+    --model NousResearch/Meta-Llama-3-8B-Instruct
\ No newline at end of file
diff --git a/playground/demos/chatbot/server/server_models.py b/playground/demos/chatbot/server/server_models.py
index d7ade77c..49e54533 100644
--- a/playground/demos/chatbot/server/server_models.py
+++ b/playground/demos/chatbot/server/server_models.py
@@ -44,8 +44,8 @@ class ChatRequest(BaseModel):
     """ The model for a ChatRequest for theChatbot Chat POST endpoint"""
     id: str
     model: AIModel = AIModel(
-        id="NousResearch/Meta-Llama-3.1-8B-Instruct",
-        name="NousResearch/Meta-Llama-3.1-8B-Instruct",
+        id="NousResearch/Meta-Llama-3-8B-Instruct",
+        name="NousResearch/Meta-Llama-3-8B-Instruct",
         maxLength=2048,
         tokenLimit=2048,
     )
diff --git a/playground/demos/vLLM/vLLM_example.py b/playground/demos/vLLM/vLLM_example.py
index 090b902d..e4805838 100644
--- a/playground/demos/vLLM/vLLM_example.py
+++ b/playground/demos/vLLM/vLLM_example.py
@@ -11,7 +11,7 @@ api_base = os.getenv("OPENAI_API_BASE") or "http://localhost:8000/v1" # for vllm
 
 # Create an instance of the OpenAIChat class
 model = OpenAIChat(
-    base_url=api_base, api_key=api_key, model="NousResearch/Meta-Llama-3.1-8B-Instruct", temperature=0.5, streaming=True, verbose=True
+    base_url=api_base, api_key=api_key, model="NousResearch/Meta-Llama-3-8B-Instruct", temperature=0.5, streaming=True, verbose=True
 )
 
 # Initialize the agent