diff --git a/.gitignore b/.gitignore
index 82e7f4b5..50ad6dc1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@ chroma
 Accounting Assistant_state.json
 Unit Testing Agent_state.json
 Devin_state.json
+hire_researchers
 json_logs
 Medical Image Diagnostic Agent_state.json
 flight agent_state.json
diff --git a/swarms/schemas/agent_input_schema.py b/swarms/schemas/agent_input_schema.py
new file mode 100644
index 00000000..1c34719f
--- /dev/null
+++ b/swarms/schemas/agent_input_schema.py
@@ -0,0 +1,143 @@
+from typing import Any, Callable, Dict, List, Optional
+
+from pydantic import BaseModel, Field, field_validator
+
+
+class AgentSchema(BaseModel):
+    llm: Any = Field(..., description="The language model to use")
+    max_tokens: int = Field(
+        ..., description="The maximum number of tokens", ge=1
+    )
+    context_window: int = Field(
+        ..., description="The context window size", ge=1
+    )
+    user_name: str = Field(..., description="The user name")
+    agent_name: str = Field(..., description="The name of the agent")
+    system_prompt: str = Field(..., description="The system prompt")
+    template: Optional[str] = Field(default=None)
+    max_loops: Optional[int] = Field(default=1, ge=1)
+    stopping_condition: Optional[Callable[[str], bool]] = Field(
+        default=None
+    )
+    loop_interval: Optional[int] = Field(default=0, ge=0)
+    retry_attempts: Optional[int] = Field(default=3, ge=0)
+    retry_interval: Optional[int] = Field(default=1, ge=0)
+    return_history: Optional[bool] = Field(default=False)
+    stopping_token: Optional[str] = Field(default=None)
+    dynamic_loops: Optional[bool] = Field(default=False)
+    interactive: Optional[bool] = Field(default=False)
+    dashboard: Optional[bool] = Field(default=False)
+    agent_description: Optional[str] = Field(default=None)
+    tools: Optional[List[Callable]] = Field(default=None)
+    dynamic_temperature_enabled: Optional[bool] = Field(default=False)
+    sop: Optional[str] = Field(default=None)
+    sop_list: Optional[List[str]] = Field(default=None)
+    saved_state_path: Optional[str] = Field(default=None)
+    autosave: Optional[bool] = Field(default=False)
+    self_healing_enabled: Optional[bool] = Field(default=False)
+    code_interpreter: Optional[bool] = Field(default=False)
+    multi_modal: Optional[bool] = Field(default=False)
+    pdf_path: Optional[str] = Field(default=None)
+    list_of_pdf: Optional[str] = Field(default=None)
+    tokenizer: Optional[Any] = Field(default=None)
+    long_term_memory: Optional[Any] = Field(default=None)
+    preset_stopping_token: Optional[bool] = Field(default=False)
+    traceback: Optional[Any] = Field(default=None)
+    traceback_handlers: Optional[Any] = Field(default=None)
+    streaming_on: Optional[bool] = Field(default=False)
+    docs: Optional[List[str]] = Field(default=None)
+    docs_folder: Optional[str] = Field(default=None)
+    verbose: Optional[bool] = Field(default=False)
+    parser: Optional[Callable] = Field(default=None)
+    best_of_n: Optional[int] = Field(default=None)
+    callback: Optional[Callable] = Field(default=None)
+    metadata: Optional[Dict[str, Any]] = Field(default=None)
+    callbacks: Optional[List[Callable]] = Field(default=None)
+    logger_handler: Optional[Any] = Field(default=None)
+    search_algorithm: Optional[Callable] = Field(default=None)
+    logs_to_filename: Optional[str] = Field(default=None)
+    evaluator: Optional[Callable] = Field(default=None)
+    output_json: Optional[bool] = Field(default=False)
+    stopping_func: Optional[Callable] = Field(default=None)
+    custom_loop_condition: Optional[Callable] = Field(default=None)
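For orientation, a minimal sketch of how the new schema might be exercised, modeled on the commented-out example at the bottom of the file; only the required fields are shown, and ValidationError is pydantic's standard exception:

from pydantic import ValidationError

from swarms.schemas.agent_input_schema import AgentSchema

try:
    agent = AgentSchema(
        llm="OpenAIChat",  # the field is typed Any, so any object is accepted
        max_tokens=4096,
        context_window=8192,
        user_name="Human",
        agent_name="test-agent",
        system_prompt="Custom system prompt",
    )
    print(agent.agent_name)
except ValidationError as e:
    # the ge=1 bounds on max_tokens/context_window surface here
    print(e)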
+    sentiment_threshold: Optional[float] = Field(default=None)
+    custom_exit_command: Optional[str] = Field(default="exit")
+    sentiment_analyzer: Optional[Callable] = Field(default=None)
+    limit_tokens_from_string: Optional[Callable] = Field(default=None)
+    custom_tools_prompt: Optional[Callable] = Field(default=None)
+    tool_schema: Optional[Any] = Field(default=None)
+    output_type: Optional[Any] = Field(default=None)
+    function_calling_type: Optional[str] = Field(default="json")
+    output_cleaner: Optional[Callable] = Field(default=None)
+    function_calling_format_type: Optional[str] = Field(default="OpenAI")
+    list_base_models: Optional[List[Any]] = Field(default=None)
+    metadata_output_type: Optional[str] = Field(default="json")
+    state_save_file_type: Optional[str] = Field(default="json")
+    chain_of_thoughts: Optional[bool] = Field(default=False)
+    algorithm_of_thoughts: Optional[bool] = Field(default=False)
+    tree_of_thoughts: Optional[bool] = Field(default=False)
+    tool_choice: Optional[str] = Field(default="auto")
+    execute_tool: Optional[bool] = Field(default=False)
+    rules: Optional[str] = Field(default=None)
+    planning: Optional[bool] = Field(default=False)
+    planning_prompt: Optional[str] = Field(default=None)
+    device: Optional[str] = Field(default=None)
+    custom_planning_prompt: Optional[str] = Field(default=None)
+    memory_chunk_size: Optional[int] = Field(default=2000, ge=0)
+    agent_ops_on: Optional[bool] = Field(default=False)
+    log_directory: Optional[str] = Field(default=None)
+    project_path: Optional[str] = Field(default=None)
+    tool_system_prompt: Optional[str] = Field(default="tool_sop_prompt()")
+    top_p: Optional[float] = Field(default=0.9, ge=0, le=1)
+    top_k: Optional[int] = Field(default=None)
+    frequency_penalty: Optional[float] = Field(default=0.0, ge=0, le=1)
+    presence_penalty: Optional[float] = Field(default=0.0, ge=0, le=1)
+    temperature: Optional[float] = Field(default=0.1, ge=0, le=1)
+
+    @field_validator(
+        "tools",
+        "docs",
+        "sop_list",
+        "callbacks",
+        "list_base_models",
+    )
+    @classmethod
+    def check_list_items_not_none(cls, v):
+        if v is not None and any(item is None for item in v):
+            raise ValueError("List items must not be None")
+        return v
+
+    @field_validator(
+        "tokenizer",
+        "long_term_memory",
+        "traceback",
+        "traceback_handlers",
+        "parser",
+        "callback",
+        "search_algorithm",
+        "evaluator",
+        "stopping_func",
+        "custom_loop_condition",
+        "sentiment_analyzer",
+        "limit_tokens_from_string",
+        "custom_tools_prompt",
+        "output_cleaner",
+    )
+    @classmethod
+    def check_optional_callable_not_none(cls, v):
+        if v is not None and not callable(v):
+            raise ValueError(f"{v} must be a callable")
+        return v
+
+
+# # Example of how to use the schema
+# agent_data = {
+#     "llm": "OpenAIChat",
+#     "max_tokens": 4096,
+#     "context_window": 8192,
+#     "user_name": "Human",
+#     "agent_name": "test-agent",
+#     "system_prompt": "Custom system prompt",
+# }

+# agent = AgentSchema(**agent_data)
+# print(agent)
diff --git a/swarms/schemas/schemas.py b/swarms/schemas/schemas.py
index eee8db07..cc3961b5 100644
--- a/swarms/schemas/schemas.py
+++ b/swarms/schemas/schemas.py
@@ -16,35 +16,6 @@ class TaskInput(BaseModel):
     )
 
 
-class Artifact(BaseModel):
-    """
-    Represents an artifact.
-
-    Attributes:
-        artifact_id (str): Id of the artifact.
-        file_name (str): Filename of the artifact.
-        relative_path (str, optional): Relative path of the artifact in the agent's workspace.
-    """
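With Artifact deleted and Task commented out, ArtifactUpload is the remaining artifact-related model in this file. A minimal sketch of constructing one, using the two fields visible in the surviving class (the byte payload and path below are illustrative):

from swarms.schemas.schemas import ArtifactUpload

upload = ArtifactUpload(
    file=b"print('hello')",        # raw bytes of the file to upload
    relative_path="python/code/",  # optional path in the agent's workspace
)
print(upload.relative_path)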
- """ - - artifact_id: str = Field( - ..., - description="Id of the artifact", - examples=["b225e278-8b4c-4f99-a696-8facf19f0e56"], - ) - file_name: str = Field( - ..., - description="Filename of the artifact", - examples=["main.py"], - ) - relative_path: str | None = Field( - None, - description=( - "Relative path of the artifact in the agent's workspace" - ), - examples=["python/code/"], - ) - - class ArtifactUpload(BaseModel): file: bytes = Field(..., description="File to upload") relative_path: str | None = Field( @@ -86,22 +57,22 @@ class TaskRequestBody(BaseModel): additional_input: TaskInput | None = None -class Task(TaskRequestBody): - task_id: str = Field( - ..., - description="The ID of the task.", - examples=["50da533e-3904-4401-8a07-c49adf88b5eb"], - ) - artifacts: list[Artifact] = Field( - [], - description="A list of artifacts that the task has produced.", - examples=[ - [ - "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e", - "ab7b4091-2560-4692-a4fe-d831ea3ca7d6", - ] - ], - ) +# class Task(TaskRequestBody): +# task_id: str = Field( +# ..., +# description="The ID of the task.", +# examples=["50da533e-3904-4401-8a07-c49adf88b5eb"], +# ) +# artifacts: list[Artifact] = Field( +# [], +# description="A list of artifacts that the task has produced.", +# examples=[ +# [ +# "7a49f31c-f9c6-4346-a22c-e32bc5af4d8e", +# "ab7b4091-2560-4692-a4fe-d831ea3ca7d6", +# ] +# ], +# ) class StepRequestBody(BaseModel): @@ -144,7 +115,7 @@ class Step(BaseModel): " activate agentops - self.activate_agentops() + if agent_ops_on is True: + self.activate_agentops() def set_system_prompt(self, system_prompt: str): """Set the system prompt""" @@ -483,19 +486,19 @@ class Agent(BaseStructure): self.feedback.append(feedback) logging.info(f"Feedback received: {feedback}") - def initialize_llm(self, llm: Any) -> None: - return llm( - system_prompt=self.system_prompt, - max_tokens=self.max_tokens, - context_length=self.context_length, - temperature=self.temperature, - top_p=self.top_p, - top_k=self.top_k, - frequency_penalty=self.frequency_penalty, - presence_penalty=self.presence_penalty, - stop=self.stopping_token, - engine=self.engine, - ) + # TODO: Implement the function + # def initialize_llm(self, llm: Any) -> None: + # return llm( + # system_prompt=self.system_prompt, + # max_tokens=self.max_tokens, + # context_length=self.context_length, + # temperature=self.temperature, + # top_p=self.top_p, + # top_k=self.top_k, + # frequency_penalty=self.frequency_penalty, + # presence_penalty=self.presence_penalty, + # stop=self.stopping_token, + # ) def agent_initialization(self): try: @@ -718,8 +721,8 @@ class Agent(BaseStructure): response = None all_responses = [] - if self.tokenizer is not None: - self.check_available_tokens() + # if self.tokenizer is not None: + # self.check_available_tokens() while self.max_loops == "auto" or loop_count < self.max_loops: loop_count += 1 @@ -733,6 +736,7 @@ class Agent(BaseStructure): # Task prompt task_prompt = self.short_memory.return_history_as_string() + # Parameters attempt = 0 success = False while attempt < self.retry_attempts and not success: @@ -743,6 +747,14 @@ class Agent(BaseStructure): task, *args, **kwargs ) ) + + if exists(self.tokenizer): + task_prompt = ( + self.count_and_shorten_context_window( + memory_retrieval + ) + ) + # Merge the task prompt with the memory retrieval task_prompt = f"{task_prompt} Documents: Available {memory_retrieval}" @@ -758,14 +770,6 @@ class Agent(BaseStructure): all_responses.append(response) else: - - if exists(self.tokenizer): - 
-                            task_prompt = (
-                                self.count_and_shorten_context_window(
-                                    task_prompt
-                                )
-                            )
-
                         response_args = (
                             (task_prompt, *args)
                             if img is None
@@ -1996,3 +2000,23 @@ class Agent(BaseStructure):
                 f"Error with the base models, check the base model types and make sure they are initialized {error}"
             )
             raise error
+
+    async def count_tokens_and_subtract_from_context_window(
+        self, response: str, *args, **kwargs
+    ):
+        """
+        Count the number of tokens in the response and subtract it from the context window.
+
+        Args:
+            response (str): The response to count the tokens from.
+
+        Returns:
+            str: The response after counting the tokens and subtracting it from the context window.
+        """
+        # Count the number of tokens in the response
+        tokens = self.tokenizer.count_tokens(response)
+
+        # Subtract the token count from the context window
+        self.context_length -= tokens
+
+        return response
diff --git a/test.py b/test.py
new file mode 100644
index 00000000..87d40a4e
--- /dev/null
+++ b/test.py
@@ -0,0 +1,16 @@
+import requests
+
+url = "https://linkedin-api8.p.rapidapi.com/linkedin-to-email"
+
+querystring = {
+    "url": "https://www.linkedin.com/in/nicolas-nahas-3ba227170/"
+}
+
+headers = {
+    "x-rapidapi-key": "8c6cd073d2msh9fc7d37c26ce73bp1dea6ajsn81819935da85",
+    "x-rapidapi-host": "linkedin-api8.p.rapidapi.com",
+}
+
+response = requests.get(url, headers=headers, params=querystring)
+
+print(response.json())
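test.py commits a RapidAPI key in cleartext, and a key committed this way should be treated as compromised. A variant of the same request that reads the key from the environment instead (the RAPIDAPI_KEY variable name is an assumption):

import os

import requests

url = "https://linkedin-api8.p.rapidapi.com/linkedin-to-email"

querystring = {
    "url": "https://www.linkedin.com/in/nicolas-nahas-3ba227170/"
}

headers = {
    # Keep the secret out of source control
    "x-rapidapi-key": os.environ["RAPIDAPI_KEY"],
    "x-rapidapi-host": "linkedin-api8.p.rapidapi.com",
}

response = requests.get(url, headers=headers, params=querystring)
response.raise_for_status()  # surface HTTP errors before parsing the body
print(response.json())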