# NOTE(review): this file was recovered from mangled diff residue — the -/+
# markers were stripped, leaving duplicated old/new line pairs and stray hunk
# headers ("@ -8,7 +8,7 @@", "@ -30,21 +30,19 @@"). The reconstruction below
# keeps only the post-change ("new") side of the diff: the example was
# migrated from swarm_models (OpenAIChat / OpenAIFunctionCaller) to the
# swarms LiteLLM wrapper with provider-prefixed model names.

import os
from typing import List

from dotenv import load_dotenv
from pydantic import BaseModel

from swarms.utils.litellm_wrapper import LiteLLM

# Load environment variables (GROQ_API_KEY, OPENAI_API_KEY, ...) from .env.
load_dotenv()

# Kept from the original context lines; the LiteLLM wrapper reads
# GROQ_API_KEY from the environment, so this module-level read is
# presumably informational / legacy — TODO confirm it is still needed.
api_key = os.getenv("GROQ_API_KEY")

# Initialize the model.
# The diff replaced OpenAIChat(openai_api_base="https://api.groq.com/openai/v1",
# model_name="llama-3.1-70b-versatile", ...) with LiteLLM using the
# provider-prefixed model name, which routes to Groq without an explicit
# base URL or key argument.
model = LiteLLM(
    model_name="groq/llama-3.1-70b-versatile",
    temperature=0.1,
)

# NOTE(review): BaseModel / List are imported for CollegesRecommendation,
# a pydantic schema defined in a portion of the original file that the diff
# hunks do not show — confirm against the full source before running.
#
# Final decision maker: the diff swapped OpenAIFunctionCaller for LiteLLM,
# dropped the explicit openai_api_key kwarg, and replaced
# response_format=CollegesRecommendation with base_model=CollegesRecommendation
# (kwarg pairing inferred from old/new line ordering in the residue —
# TODO confirm against the LiteLLM wrapper's signature).
function_caller = LiteLLM(
    model_name="gpt-4o",
    system_prompt="""You are a college selection final decision maker. Your role is to:
    - Balance all relevant factors and stakeholder input.
    - Only return the output in the schema format.
    """,
    temperature=0.1,
    base_model=CollegesRecommendation,
    # parallel_tool_calls=True,
)