diff --git a/example.py b/example.py
index d9ba8f1c..6f77e07d 100644
--- a/example.py
+++ b/example.py
@@ -1,6 +1,7 @@
+"""Example of using the swarms package to run a workflow."""
 from swarms import Agent, OpenAIChat
 
-## Initialize the workflow
+# Initialize the workflow
 agent = Agent(
     llm=OpenAIChat(),
     max_loops="auto",
diff --git a/swarms/models/anthropic.py b/swarms/models/anthropic.py
index 7cf5bc6e..cf29aaba 100644
--- a/swarms/models/anthropic.py
+++ b/swarms/models/anthropic.py
@@ -25,7 +25,9 @@ from langchain_community.callbacks.manager import (
 )
 from langchain_community.llms.base import LLM
 from pydantic import Field, SecretStr, root_validator
-from langchain_community.schema.language_model import BaseLanguageModel
+from langchain_community.schema.language_model import (
+    BaseLanguageModel,
+)
 from langchain_community.schema.output import GenerationChunk
 from langchain_community.schema.prompt import PromptValue
 from langchain_community.utils import (
diff --git a/swarms/models/kosmos_two.py b/swarms/models/kosmos_two.py
index 6bc4d810..f1aa35a8 100644
--- a/swarms/models/kosmos_two.py
+++ b/swarms/models/kosmos_two.py
@@ -88,9 +88,10 @@ class Kosmos(BaseMultiModalModel):
             skip_special_tokens=True,
         )[0]
 
-        processed_text, entities = (
-            self.processor.post_process_generation(generated_texts)
-        )
+        (
+            processed_text,
+            entities,
+        ) = self.processor.post_process_generation(generated_texts)
 
         return processed_text, entities
 
diff --git a/swarms/models/medical_sam.py b/swarms/models/medical_sam.py
index 8d096ba5..8877bb22 100644
--- a/swarms/models/medical_sam.py
+++ b/swarms/models/medical_sam.py
@@ -115,12 +115,13 @@ class MedicalSAM:
         if len(box_torch.shape) == 2:
             box_torch = box_torch[:, None, :]
 
-        sparse_embeddings, dense_embeddings = (
-            self.model.prompt_encoder(
-                points=None,
-                boxes=box_torch,
-                masks=None,
-            )
+        (
+            sparse_embeddings,
+            dense_embeddings,
+        ) = self.model.prompt_encoder(
+            points=None,
+            boxes=box_torch,
+            masks=None,
         )
 
         low_res_logits, _ = self.model.mask_decoder(
diff --git a/swarms/models/openai_embeddings.py b/swarms/models/openai_embeddings.py
index 796362ab..6eab327f 100644
--- a/swarms/models/openai_embeddings.py
+++ b/swarms/models/openai_embeddings.py
@@ -208,9 +208,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
     """Maximum number of texts to embed in each batch"""
     max_retries: int = 6
     """Maximum number of retries to make when generating."""
-    request_timeout: Optional[Union[float, Tuple[float, float]]] = (
-        None
-    )
+    request_timeout: Optional[
+        Union[float, Tuple[float, float]]
+    ] = None
     """Timeout in seconds for the OpenAPI request."""
     headers: Any = None
     tiktoken_model_name: Optional[str] = None
diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py
index 8fb124f4..80c59ad7 100644
--- a/swarms/models/openai_models.py
+++ b/swarms/models/openai_models.py
@@ -244,9 +244,9 @@ class BaseOpenAI(BaseLLM):
         attributes["openai_api_base"] = self.openai_api_base
 
         if self.openai_organization != "":
-            attributes["openai_organization"] = (
-                self.openai_organization
-            )
+            attributes[
+                "openai_organization"
+            ] = self.openai_organization
 
         if self.openai_proxy != "":
             attributes["openai_proxy"] = self.openai_proxy
@@ -287,9 +287,9 @@ class BaseOpenAI(BaseLLM):
     openai_proxy: Optional[str] = None
     batch_size: int = 20
     """Batch size to use when passing multiple documents to generate."""
-    request_timeout: Optional[Union[float, Tuple[float, float]]] = (
-        None
-    )
+    request_timeout: Optional[
+        Union[float, Tuple[float, float]]
+    ] = None
     """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
     logit_bias: Optional[Dict[str, float]] = Field(
         default_factory=dict
diff --git a/swarms/models/palm.py b/swarms/models/palm.py
index 5960aaa2..204b1768 100644
--- a/swarms/models/palm.py
+++ b/swarms/models/palm.py
@@ -3,7 +3,9 @@ from __future__ import annotations
 import logging
 from typing import Any, Callable, Dict, List, Optional
 
-from langchain_community.callbacks.manager import CallbackManagerForLLMRun
+from langchain_community.callbacks.manager import (
+    CallbackManagerForLLMRun,
+)
 from langchain_community.llms import BaseLLM
 from langchain_community.pydantic_v1 import BaseModel, root_validator
 from langchain_community.schema import Generation, LLMResult
diff --git a/swarms/prompts/base.py b/swarms/prompts/base.py
index c9152e83..0c18eb7b 100644
--- a/swarms/prompts/base.py
+++ b/swarms/prompts/base.py
@@ -86,7 +86,9 @@ class BaseMessage(Serializable):
         return True
 
     def __add__(self, other: Any) -> ChatPromptTemplate:
-        from langchain_community.prompts.chat import ChatPromptTemplate
+        from langchain_community.prompts.chat import (
+            ChatPromptTemplate,
+        )
 
         prompt = ChatPromptTemplate(messages=[self])
         return prompt + other
diff --git a/swarms/prompts/worker_prompt.py b/swarms/prompts/worker_prompt.py
index 165fa058..cc3265fb 100644
--- a/swarms/prompts/worker_prompt.py
+++ b/swarms/prompts/worker_prompt.py
@@ -62,6 +62,8 @@ def worker_tools_sop_promp(name: str, memory: str):
     [{memory}]
 
     Human: Determine which next command to use, and respond using the format specified above:
-    """.format(name=name, memory=memory, time=time)
+    """.format(
+        name=name, memory=memory, time=time
+    )
 
     return str(out)
diff --git a/swarms/structs/concurrent_workflow.py b/swarms/structs/concurrent_workflow.py
index 8aa5399b..7e3c817e 100644
--- a/swarms/structs/concurrent_workflow.py
+++ b/swarms/structs/concurrent_workflow.py
@@ -36,9 +36,9 @@ class ConcurrentWorkflow(BaseStructure):
     max_loops: int = 1
     max_workers: int = 5
     autosave: bool = False
-    saved_state_filepath: Optional[str] = (
-        "runs/concurrent_workflow.json"
-    )
+    saved_state_filepath: Optional[
+        str
+    ] = "runs/concurrent_workflow.json"
     print_results: bool = False
     return_results: bool = False
     use_processes: bool = False
diff --git a/swarms/structs/multi_agent_collab.py b/swarms/structs/multi_agent_collab.py
index 39af96b7..8187e5fa 100644
--- a/swarms/structs/multi_agent_collab.py
+++ b/swarms/structs/multi_agent_collab.py
@@ -317,9 +317,9 @@ class MultiAgentCollaboration:
         """Tracks and reports the performance of each agent"""
         performance_data = {}
         for agent in self.agents:
-            performance_data[agent.name] = (
-                agent.get_performance_metrics()
-            )
+            performance_data[
+                agent.name
+            ] = agent.get_performance_metrics()
         return performance_data
 
     def set_interaction_rules(self, rules):
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
index 6b1d7c06..b49ccc00 100644
--- a/swarms/structs/sequential_workflow.py
+++ b/swarms/structs/sequential_workflow.py
@@ -42,9 +42,9 @@ class SequentialWorkflow:
     task_pool: List[Task] = field(default_factory=list)
     max_loops: int = 1
     autosave: bool = False
-    saved_state_filepath: Optional[str] = (
-        "sequential_workflow_state.json"
-    )
+    saved_state_filepath: Optional[
+        str
+    ] = "sequential_workflow_state.json"
     restore_state_filepath: Optional[str] = None
     dashboard: bool = False
 
diff --git a/swarms/structs/swarm_redis_registry.py b/swarms/structs/swarm_redis_registry.py
index a17549cd..2db57108 100644
--- a/swarms/structs/swarm_redis_registry.py
+++ b/swarms/structs/swarm_redis_registry.py
@@ -84,7 +84,9 @@ class RedisSwarmRegistry(AbstractSwarm):
         query = f"""
         {match_query}
         CREATE (a)-[r:joined]->(b)
         RETURN r
-        """.replace("\n", "")
+        """.replace(
+            "\n", ""
+        )
 
         self.redis_graph.query(query)
diff --git a/swarms/structs/swarming_architectures.py b/swarms/structs/swarming_architectures.py
index ad3ad4ed..8621c356 100644
--- a/swarms/structs/swarming_architectures.py
+++ b/swarms/structs/swarming_architectures.py
@@ -105,7 +105,7 @@ def prime_swarm(agents: List[Agent], tasks: List[str]):
 
 
 def power_swarm(agents: List[str], tasks: List[str]):
-    powers = [2**i for i in range(int(len(agents) ** 0.5))]
+    powers = [2 ** i for i in range(int(len(agents) ** 0.5))]
     for power in powers:
         if power < len(agents) and tasks:
             task = tasks.pop(0)
@@ -114,14 +114,14 @@ def power_swarm(agents: List[str], tasks: List[str]):
 
 def log_swarm(agents: List[Agent], tasks: List[str]):
     for i in range(len(agents)):
-        if 2**i < len(agents) and tasks:
+        if 2 ** i < len(agents) and tasks:
             task = tasks.pop(0)
-            agents[2**i].run(task)
+            agents[2 ** i].run(task)
 
 
 def exponential_swarm(agents: List[Agent], tasks: List[str]):
     for i in range(len(agents)):
-        index = min(int(2**i), len(agents) - 1)
+        index = min(int(2 ** i), len(agents) - 1)
         if tasks:
             task = tasks.pop(0)
             agents[index].run(task)
@@ -130,7 +130,7 @@ def exponential_swarm(agents: List[Agent], tasks: List[str]):
 def geometric_swarm(agents, tasks):
     ratio = 2
     for i in range(range(len(agents))):
-        index = min(int(ratio**2), len(agents) - 1)
+        index = min(int(ratio ** 2), len(agents) - 1)
         if tasks:
             task = tasks.pop(0)
             agents[index].run(task)
diff --git a/swarms/telemetry/sys_info.py b/swarms/telemetry/sys_info.py
index a4857e11..011e60ce 100644
--- a/swarms/telemetry/sys_info.py
+++ b/swarms/telemetry/sys_info.py
@@ -48,9 +48,9 @@ def get_cpu_info():
 
 def get_ram_info():
     vm = psutil.virtual_memory()
-    used_ram_gb = vm.used / (1024**3)
-    free_ram_gb = vm.free / (1024**3)
-    total_ram_gb = vm.total / (1024**3)
+    used_ram_gb = vm.used / (1024 ** 3)
+    free_ram_gb = vm.free / (1024 ** 3)
+    total_ram_gb = vm.total / (1024 ** 3)
     return (
         f"{total_ram_gb:.2f} GB, used: {used_ram_gb:.2f}, free:"
         f" {free_ram_gb:.2f}"
diff --git a/swarms/tools/tool.py b/swarms/tools/tool.py
index 2f88b6fc..4e1dcf61 100644
--- a/swarms/tools/tool.py
+++ b/swarms/tools/tool.py
@@ -902,9 +902,9 @@ def tool(
 
             coroutine = ainvoke_wrapper
             func = invoke_wrapper
-            schema: Optional[Type[BaseModel]] = (
-                runnable.input_schema
-            )
+            schema: Optional[
+                Type[BaseModel]
+            ] = runnable.input_schema
             description = repr(runnable)
         elif inspect.iscoroutinefunction(dec_func):
             coroutine = dec_func
diff --git a/swarms/utils/load_environment.py b/swarms/utils/load_environment.py
index 7b318193..a2a4dd56 100644
--- a/swarms/utils/load_environment.py
+++ b/swarms/utils/load_environment.py
@@ -8,4 +8,3 @@ def load_environment():
 
     api_key = os.environ.get("OPENAI_API_KEY")
     return api_key, os.environ
-