diff --git a/swarms/agents/models/prompts/agent_prompt_auto.py b/swarms/agents/models/prompts/agent_prompt_auto.py
index 088a4012..a75d0189 100644
--- a/swarms/agents/models/prompts/agent_prompt_auto.py
+++ b/swarms/agents/models/prompts/agent_prompt_auto.py
@@ -1,6 +1,6 @@
 import time
 from typing import Any, Callable, List
-from pydantic import BaseModel
+from swarms.agents.models.prompts.agent_prompt_generator import get_prompt
 
 class TokenUtils:
     @staticmethod
@@ -33,7 +33,7 @@ class PromptConstructor:
         return full_prompt
 
 
-class Message(BaseModel):
+class Message:
     content: str
 
     def count_tokens(self) -> int:
diff --git a/swarms/agents/models/prompts/chains/llm.py b/swarms/agents/models/prompts/chains/llm.py
deleted file mode 100644
index 4326bc79..00000000
--- a/swarms/agents/models/prompts/chains/llm.py
+++ /dev/null
@@ -1,339 +0,0 @@
-"""Chain that just formats a prompt and calls an LLM."""
-from __future__ import annotations
-
-import warnings
-from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
-
-from langchain.base_language import BaseLanguageModel
-from langchain.callbacks.manager import (
-    AsyncCallbackManager,
-    AsyncCallbackManagerForChainRun,
-    CallbackManager,
-    CallbackManagerForChainRun,
-    Callbacks,
-)
-from langchain.chains.base import Chain
-from langchain.input import get_colored_text
-from langchain.load.dump import dumpd
-from langchain.prompts.prompt import PromptTemplate
-from langchain.schema import (
-    BaseLLMOutputParser,
-    BasePromptTemplate,
-    LLMResult,
-    NoOpOutputParser,
-    PromptValue,
-)
-from pydantic import Extra, Field
-
-
-class LLMChain(Chain):
-    """Chain to run queries against LLMs.
-
-    Example:
-        .. code-block:: python
-
-            from langchain import LLMChain, OpenAI, PromptTemplate
-            prompt_template = "Tell me a {adjective} joke"
-            prompt = PromptTemplate(
-                input_variables=["adjective"], template=prompt_template
-            )
-            llm = LLMChain(llm=OpenAI(), prompt=prompt)
-    """
-
-    @property
-    def lc_serializable(self) -> bool:
-        return True
-
-    prompt: BasePromptTemplate
-    """Prompt object to use."""
-    llm: BaseLanguageModel
-    """Language model to call."""
-    output_key: str = "text"  #: :meta private:
-    output_parser: BaseLLMOutputParser = Field(default_factory=NoOpOutputParser)
-    """Output parser to use.
-    Defaults to one that takes the most likely string but does not change it
-    otherwise."""
-    return_final_only: bool = True
-    """Whether to return only the final parsed result. Defaults to True.
-    If false, will return a bunch of extra information about the generation."""
-    llm_kwargs: dict = Field(default_factory=dict)
-
-    class Config:
-        """Configuration for this pydantic object."""
-
-        extra = Extra.forbid
-        arbitrary_types_allowed = True
-
-    @property
-    def input_keys(self) -> List[str]:
-        """Will be whatever keys the prompt expects.
-
-        :meta private:
-        """
-        return self.prompt.input_variables
-
-    @property
-    def output_keys(self) -> List[str]:
-        """Will always return text key.
-
-        :meta private:
-        """
-        if self.return_final_only:
-            return [self.output_key]
-        else:
-            return [self.output_key, "full_generation"]
-
-    def _call(
-        self,
-        inputs: Dict[str, Any],
-        run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
-        response = self.generate([inputs], run_manager=run_manager)
-        return self.create_outputs(response)[0]
-
-    def generate(
-        self,
-        input_list: List[Dict[str, Any]],
-        run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> LLMResult:
-        """Generate LLM result from inputs."""
-        prompts, stop = self.prep_prompts(input_list, run_manager=run_manager)
-        return self.llm.generate_prompt(
-            prompts,
-            stop,
-            callbacks=run_manager.get_child() if run_manager else None,
-            **self.llm_kwargs,
-        )
-
-    async def agenerate(
-        self,
-        input_list: List[Dict[str, Any]],
-        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> LLMResult:
-        """Generate LLM result from inputs."""
-        prompts, stop = await self.aprep_prompts(input_list, run_manager=run_manager)
-        return await self.llm.agenerate_prompt(
-            prompts,
-            stop,
-            callbacks=run_manager.get_child() if run_manager else None,
-            **self.llm_kwargs,
-        )
-
-    def prep_prompts(
-        self,
-        input_list: List[Dict[str, Any]],
-        run_manager: Optional[CallbackManagerForChainRun] = None,
-    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
-        """Prepare prompts from inputs."""
-        stop = None
-        if "stop" in input_list[0]:
-            stop = input_list[0]["stop"]
-        prompts = []
-        for inputs in input_list:
-            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
-            prompt = self.prompt.format_prompt(**selected_inputs)
-            _colored_text = get_colored_text(prompt.to_string(), "green")
-            _text = "Prompt after formatting:\n" + _colored_text
-            if run_manager:
-                run_manager.on_text(_text, end="\n", verbose=self.verbose)
-            if "stop" in inputs and inputs["stop"] != stop:
-                raise ValueError(
-                    "If `stop` is present in any inputs, should be present in all."
-                )
-            prompts.append(prompt)
-        return prompts, stop
-
-    async def aprep_prompts(
-        self,
-        input_list: List[Dict[str, Any]],
-        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Tuple[List[PromptValue], Optional[List[str]]]:
-        """Prepare prompts from inputs."""
-        stop = None
-        if "stop" in input_list[0]:
-            stop = input_list[0]["stop"]
-        prompts = []
-        for inputs in input_list:
-            selected_inputs = {k: inputs[k] for k in self.prompt.input_variables}
-            prompt = self.prompt.format_prompt(**selected_inputs)
-            _colored_text = get_colored_text(prompt.to_string(), "green")
-            _text = "Prompt after formatting:\n" + _colored_text
-            if run_manager:
-                await run_manager.on_text(_text, end="\n", verbose=self.verbose)
-            if "stop" in inputs and inputs["stop"] != stop:
-                raise ValueError(
-                    "If `stop` is present in any inputs, should be present in all."
-                )
-            prompts.append(prompt)
-        return prompts, stop
-
-    def apply(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> List[Dict[str, str]]:
-        """Utilize the LLM generate method for speed gains."""
-        callback_manager = CallbackManager.configure(
-            callbacks, self.callbacks, self.verbose
-        )
-        run_manager = callback_manager.on_chain_start(
-            dumpd(self),
-            {"input_list": input_list},
-        )
-        try:
-            response = self.generate(input_list, run_manager=run_manager)
-        except (KeyboardInterrupt, Exception) as e:
-            run_manager.on_chain_error(e)
-            raise e
-        outputs = self.create_outputs(response)
-        run_manager.on_chain_end({"outputs": outputs})
-        return outputs
-
-    async def aapply(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> List[Dict[str, str]]:
-        """Utilize the LLM generate method for speed gains."""
-        callback_manager = AsyncCallbackManager.configure(
-            callbacks, self.callbacks, self.verbose
-        )
-        run_manager = await callback_manager.on_chain_start(
-            dumpd(self),
-            {"input_list": input_list},
-        )
-        try:
-            response = await self.agenerate(input_list, run_manager=run_manager)
-        except (KeyboardInterrupt, Exception) as e:
-            await run_manager.on_chain_error(e)
-            raise e
-        outputs = self.create_outputs(response)
-        await run_manager.on_chain_end({"outputs": outputs})
-        return outputs
-
-    @property
-    def _run_output_key(self) -> str:
-        return self.output_key
-
-    def create_outputs(self, llm_result: LLMResult) -> List[Dict[str, Any]]:
-        """Create outputs from response."""
-        result = [
-            # Get the text of the top generated string.
-            {
-                self.output_key: self.output_parser.parse_result(generation),
-                "full_generation": generation,
-            }
-            for generation in llm_result.generations
-        ]
-        if self.return_final_only:
-            result = [{self.output_key: r[self.output_key]} for r in result]
-        return result
-
-    async def _acall(
-        self,
-        inputs: Dict[str, Any],
-        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
-    ) -> Dict[str, str]:
-        response = await self.agenerate([inputs], run_manager=run_manager)
-        return self.create_outputs(response)[0]
-
-    def predict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
-        """Format prompt with kwargs and pass to LLM.
-
-        Args:
-            callbacks: Callbacks to pass to LLMChain
-            **kwargs: Keys to pass to prompt template.
-
-        Returns:
-            Completion from LLM.
-
-        Example:
-            .. code-block:: python
-
-                completion = llm.predict(adjective="funny")
-        """
-        return self(kwargs, callbacks=callbacks)[self.output_key]
-
-    async def apredict(self, callbacks: Callbacks = None, **kwargs: Any) -> str:
-        """Format prompt with kwargs and pass to LLM.
-
-        Args:
-            callbacks: Callbacks to pass to LLMChain
-            **kwargs: Keys to pass to prompt template.
-
-        Returns:
-            Completion from LLM.
-
-        Example:
-            .. code-block:: python
-
-                completion = llm.predict(adjective="funny")
-        """
-        return (await self.acall(kwargs, callbacks=callbacks))[self.output_key]
-
-    def predict_and_parse(
-        self, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Union[str, List[str], Dict[str, Any]]:
-        """Call predict and then parse the results."""
-        warnings.warn(
-            "The predict_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
-        )
-        result = self.predict(callbacks=callbacks, **kwargs)
-        if self.prompt.output_parser is not None:
-            return self.prompt.output_parser.parse(result)
-        else:
-            return result
-
-    async def apredict_and_parse(
-        self, callbacks: Callbacks = None, **kwargs: Any
-    ) -> Union[str, List[str], Dict[str, str]]:
-        """Call apredict and then parse the results."""
-        warnings.warn(
-            "The apredict_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
-        )
-        result = await self.apredict(callbacks=callbacks, **kwargs)
-        if self.prompt.output_parser is not None:
-            return self.prompt.output_parser.parse(result)
-        else:
-            return result
-
-    def apply_and_parse(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
-        """Call apply and then parse the results."""
-        warnings.warn(
-            "The apply_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
-        )
-        result = self.apply(input_list, callbacks=callbacks)
-        return self._parse_generation(result)
-
-    def _parse_generation(
-        self, generation: List[Dict[str, str]]
-    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
-        if self.prompt.output_parser is not None:
-            return [
-                self.prompt.output_parser.parse(res[self.output_key])
-                for res in generation
-            ]
-        else:
-            return generation
-
-    async def aapply_and_parse(
-        self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
-    ) -> Sequence[Union[str, List[str], Dict[str, str]]]:
-        """Call apply and then parse the results."""
-        warnings.warn(
-            "The aapply_and_parse method is deprecated, "
-            "instead pass an output parser directly to LLMChain."
-        )
-        result = await self.aapply(input_list, callbacks=callbacks)
-        return self._parse_generation(result)
-
-    @property
-    def _chain_type(self) -> str:
-        return "llm_chain"
-
-    @classmethod
-    def from_string(cls, llm: BaseLanguageModel, template: str) -> LLMChain:
-        """Create LLMChain from LLM and template."""
-        prompt_template = PromptTemplate.from_template(template)
-        return cls(llm=llm, prompt=prompt_template)
\ No newline at end of file
diff --git a/swarms/agents/models/prompts/chains/__init__.py b/swarms/agents/models/prompts/prebuild/__init__.py
similarity index 100%
rename from swarms/agents/models/prompts/chains/__init__.py
rename to swarms/agents/models/prompts/prebuild/__init__.py
diff --git a/swarms/agents/models/prompts/prompts.py b/swarms/agents/models/prompts/prebuild/multi_modal_prompts.py
similarity index 100%
rename from swarms/agents/models/prompts/prompts.py
rename to swarms/agents/models/prompts/prebuild/multi_modal_prompts.py
diff --git a/swarms/agents/models/prompts/project_manager.py b/swarms/agents/models/prompts/prebuild/project_manager.py
similarity index 100%
rename from swarms/agents/models/prompts/project_manager.py
rename to swarms/agents/models/prompts/prebuild/project_manager.py
diff --git a/swarms/agents/models/prompts/sales.py b/swarms/agents/models/prompts/prebuild/sales_prompts.py
similarity index 100%
rename from swarms/agents/models/prompts/sales.py
rename to swarms/agents/models/prompts/prebuild/sales_prompts.py
diff --git a/swarms/agents/models/prompts/summaries.py b/swarms/agents/models/prompts/prebuild/summaries_prompts.py
similarity index 100%
rename from swarms/agents/models/prompts/summaries.py
rename to swarms/agents/models/prompts/prebuild/summaries_prompts.py
diff --git a/swarms/agents/tools/models.py b/swarms/agents/tools/models.py
index 23f714f9..e8aaff6b 100644
--- a/swarms/agents/tools/models.py
+++ b/swarms/agents/tools/models.py
@@ -18,7 +18,7 @@ from transformers import (
     CLIPSegProcessor,
 )
 
-from swarms.agents.models.prompts.prompts import IMAGE_PROMPT
+from swarms.agents.models.prompts.prebuild.multi_modal_prompts import IMAGE_PROMPT
 from swarms.agents.tools.base import tool
 from swarms.agents.tools.main import BaseToolSet
 from swarms.utils.logger import logger
diff --git a/swarms/agents/utils/ConversationalChatAgent.py b/swarms/agents/utils/ConversationalChatAgent.py
index 54215506..fe29101e 100644
--- a/swarms/agents/utils/ConversationalChatAgent.py
+++ b/swarms/agents/utils/ConversationalChatAgent.py
@@ -21,7 +21,7 @@ from langchain.schema import (
 )
 from langchain.tools.base import BaseTool
 
-from swarms.agents.models.prompts.prompts import EVAL_TOOL_RESPONSE
+from swarms.agents.models.prompts.prebuild.multi_modal_prompts import EVAL_TOOL_RESPONSE
 from swarms.agents.utils.Agent import Agent
 
 logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
diff --git a/swarms/agents/utils/agent_creator.py b/swarms/agents/utils/agent_creator.py
index cc0ad0c2..fc7bacfd 100644
--- a/swarms/agents/utils/agent_creator.py
+++ b/swarms/agents/utils/agent_creator.py
@@ -8,7 +8,6 @@ from langchain.memory.chat_memory import BaseChatMemory
 from swarms.agents.tools.main import BaseToolSet, ToolsFactory
 from swarms.agents.utils.agent_setup import AgentSetup
-# from .callback import EVALCallbackHandler, ExecutionTracingCallbackHandler
 from swarms.agents.utils.Calback import EVALCallbackHandler, ExecutionTracingCallbackHandler
 
 
 callback_manager_instance = CallbackManager(EVALCallbackHandler())
diff --git a/swarms/agents/utils/agent_setup.py b/swarms/agents/utils/agent_setup.py
index c78c2a14..ad6b4c44 100644
--- a/swarms/agents/utils/agent_setup.py
+++ b/swarms/agents/utils/agent_setup.py
@@ -7,7 +7,7 @@ from langchain.chat_models import ChatOpenAI
 from langchain.chat_models.base import BaseChatModel
 from langchain.schema import BaseOutputParser
 
-from swarms.agents.models.prompts.prompts import EVAL_PREFIX, EVAL_SUFFIX
+from swarms.agents.models.prompts.prebuild.multi_modal_prompts import EVAL_PREFIX, EVAL_SUFFIX
 from swarms.agents.tools.main import BaseToolSet, ToolsFactory
 
 from .ConversationalChatAgent import ConversationalChatAgent
diff --git a/swarms/agents/utils/output_parser.py b/swarms/agents/utils/output_parser.py
index 96cb6176..9c4205e2 100644
--- a/swarms/agents/utils/output_parser.py
+++ b/swarms/agents/utils/output_parser.py
@@ -5,7 +5,7 @@ from typing import Dict, NamedTuple
 
 from langchain.schema import BaseOutputParser
 
-from swarms.agents.models.prompts.prompts import EVAL_FORMAT_INSTRUCTIONS
+from swarms.agents.models.prompts.prebuild.multi_modal_prompts import EVAL_FORMAT_INSTRUCTIONS
 
 
 class EvalOutputParser(BaseOutputParser):
diff --git a/swarms/swarms/base.py b/swarms/swarms/base.py
index 8630baae..2f408641 100644
--- a/swarms/swarms/base.py
+++ b/swarms/swarms/base.py
@@ -16,9 +16,5 @@ class AbstractSwarm(ABC):
         pass
 
     @abstractmethod
-    def process(self):
-        pass
-
-    @abstractmethod
-    def solve(self):
+    def run(self):
         pass
\ No newline at end of file
diff --git a/swarms/utils/main.py b/swarms/utils/main.py
index 704634b6..3f8ff0b6 100644
--- a/swarms/utils/main.py
+++ b/swarms/utils/main.py
@@ -400,7 +400,7 @@ class FileHandler:
 
 #############===========================>
-from swarms.agents.models.prompts.prompts import DATAFRAME_PROMPT
+from swarms.agents.models.prompts.prebuild.multi_modal_prompts import DATAFRAME_PROMPT
 import pandas as pd
 
 
 class CsvToDataframe(BaseHandler):