openai models

Former-commit-id: 7e92a9f02c
grit/923f7c6f-0958-480b-8748-ea6bbf1c2084
Kye 1 year ago
parent 4d34d879da
commit ff0314b7f5

@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "2.2.7"
description = "Swarms - Pytorch"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]

@ -103,7 +103,7 @@ def _create_retry_decorator(
errors = [
openai.Timeout,
openai.APIError,
openai.error.APIConnectionError,
openai.APIConnectionError,
openai.error.RateLimitError,
openai.error.ServiceUnavailableError,
]

@ -79,24 +79,24 @@ def _streaming_response_template() -> Dict[str, Any]:
}
def _create_retry_decorator(
    llm: Union[BaseOpenAI, OpenAIChat],
    run_manager: Optional[
        Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
    ] = None,
) -> Callable[[Any], Any]:
    """Build a tenacity-based retry decorator for transient OpenAI failures.

    Args:
        llm: The LLM wrapper; its ``max_retries`` bounds the retry attempts.
        run_manager: Optional callback manager forwarded to the base decorator.

    Returns:
        A decorator that retries the wrapped call on the listed error types.
    """
    import openai

    # openai>=1.0 moved error classes from ``openai.error.*`` to the top-level
    # module and renamed Timeout/ServiceUnavailableError. The original list
    # mixed both APIs (``openai.Timeout``, ``openai.error.APIError``, ...) and
    # would raise AttributeError at call time on a v1 client; the rest of this
    # file targets the v1 client API, so use the v1 names throughout.
    errors = [
        openai.APITimeoutError,
        openai.APIError,
        openai.APIConnectionError,
        openai.RateLimitError,
        openai.InternalServerError,
    ]
    return create_base_retry_decorator(
        error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
    )
# def _create_retry_decorator(
# llm: Union[BaseOpenAI, OpenAIChat],
# run_manager: Optional[
# Union[AsyncCallbackManagerForLLMRun, CallbackManagerForLLMRun]
# ] = None,
# ) -> Callable[[Any], Any]:
# import openai
# errors = [
# openai.Timeout,
# openai.APIError,
# openai.error.APIConnectionError,
# openai.error.RateLimitError,
# openai.error.ServiceUnavailableError,
# ]
# return create_base_retry_decorator(
# error_types=errors, max_retries=llm.max_retries, run_manager=run_manager
# )
def completion_with_retry(
@ -105,9 +105,9 @@ def completion_with_retry(
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
# retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
# @retry_decorator
def _completion_with_retry(**kwargs: Any) -> Any:
return llm.client.create(**kwargs)
@ -120,9 +120,9 @@ async def acompletion_with_retry(
**kwargs: Any,
) -> Any:
"""Use tenacity to retry the async completion call."""
retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
# retry_decorator = _create_retry_decorator(llm, run_manager=run_manager)
@retry_decorator
# @retry_decorator
async def _completion_with_retry(**kwargs: Any) -> Any:
# Use OpenAI's async api https://github.com/openai/openai-python#async-api
return await llm.client.acreate(**kwargs)
@ -500,10 +500,11 @@ class BaseOpenAI(BaseLLM):
if self.openai_proxy:
import openai
openai.proxy = {
"http": self.openai_proxy,
"https": self.openai_proxy,
} # type: ignore[assignment] # noqa: E501
# raise Exception("The 'openai.proxy' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g.",
# 'OpenAI(proxy={
# "http": self.openai_proxy,
# "https": self.openai_proxy,
# })'") # type: ignore[assignment] # noqa: E501
return {**openai_creds, **self._default_params}
@property
@ -787,11 +788,6 @@ class OpenAIChat(BaseLLM):
raise Exception("The 'openai.api_base' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(api_base=openai_api_base)'")
if openai_organization:
raise Exception("The 'openai.organization' option isn't read in the client API. You will need to pass it when you instantiate the client, e.g. 'OpenAI(organization=openai_organization)'")
if openai_proxy:
openai.proxy = {
"http": openai_proxy,
"https": openai_proxy,
} # type: ignore[assignment] # noqa: E501
except ImportError:
raise ImportError(
"Could not import openai python package. "

@ -119,12 +119,13 @@ class Flow:
retry_attempts: int = 3,
retry_interval: int = 1,
return_history: bool = False,
stopping_token: str = None,
dynamic_loops: Optional[bool] = False,
interactive: bool = False,
dashboard: bool = False,
agent_name: str = "Flow agent",
system_prompt: str = FLOW_SYSTEM_PROMPT,
tools: List[Any] = None,
# tools: List[Any] = None,
dynamic_temperature: bool = False,
saved_state_path: Optional[str] = "flow_state.json",
autosave: bool = False,
@ -141,16 +142,17 @@ class Flow:
self.feedback = []
self.memory = []
self.task = None
self.stopping_token = "<DONE>"
self.stopping_token = stopping_token or "<DONE>"
self.interactive = interactive
self.dashboard = dashboard
self.return_history = return_history
self.dynamic_temperature = dynamic_temperature
self.dynamic_loops = dynamic_loops
self.user_name = user_name
# The max_loops will be set dynamically if the dynamic_loop
if self.dynamic_loops:
self.max_loops = "auto"
self.tools = tools or []
# self.tools = tools or []
self.system_prompt = system_prompt
self.agent_name = agent_name
self.saved_state_path = saved_state_path
@ -206,72 +208,72 @@ class Flow:
return "\n".join(params_str_list)
def parse_tool_command(self, text: str):
    """Parse ``text`` for a tool-usage command.

    Placeholder: not yet implemented, currently a no-op that returns ``None``.
    """
    # Parse the text for tool usage
    pass
def get_tool_description(self):
    """Return one ``name: description`` line per registered tool."""
    return "\n".join(
        f"{tool.name}: {tool.description}" for tool in self.tools
    )
def find_tool_by_name(self, name: str):
    """Return the first registered tool called ``name``, or ``None``."""
    return next((tool for tool in self.tools if tool.name == name), None)
def construct_dynamic_prompt(self):
    """Build the dynamic tool-usage prompt from the registered tools."""
    descriptions = self.get_tool_description()
    return DYNAMICAL_TOOL_USAGE.format(tools=descriptions)
def extract_tool_commands(self, text: str):
    """
    Extract the tool commands from the text.

    Args:
        text: Model output possibly containing fenced JSON commands.

    Returns:
        A list of the successfully parsed JSON commands (empty if none).

    Example:
    ```json
    {
        "tool": "tool_name",
        "params": {
            "tool1": "inputs",
            "param2": "value2"
        }
    }
    ```
    """
    # Regex to find JSON like strings
    pattern = r"```json(.+?)```"
    matches = re.findall(pattern, text, re.DOTALL)
    json_commands = []
    for match in matches:
        try:
            # Fixed: the original rebound ``json_commands`` to the parsed
            # object and appended it to itself, and never returned the list.
            command = json.loads(match)
            json_commands.append(command)
        except Exception as error:
            # Best-effort: skip malformed blocks, as before.
            print(f"Error parsing JSON command: {error}")
    return json_commands
def parse_and_execute_tools(self, response):
    """Parse tool commands out of ``response`` and execute each one.

    Args:
        response: Model output possibly containing fenced JSON tool commands.
    """
    # ``or []`` guards against an extractor that returns None.
    json_commands = self.extract_tool_commands(response) or []
    for command in json_commands:
        tool_name = command.get("tool")
        # Fixed: the original read the misspelled key "parmas"; the documented
        # command format uses "params".
        params = command.get("params", {})
        # Fixed: the original called self.execute_tool, which does not exist;
        # the executor method is named execute_tools.
        self.execute_tools(tool_name, params)
def execute_tools(self, tool_name, params):
    """Execute the tool with the provided params.

    Args:
        tool_name: Name of the registered tool to run.
        params: Keyword arguments passed to the tool's ``run``.

    Unknown tool names are silently ignored, as in the original.
    """
    # Fixed: the original called self.tool_find_by_name, which does not
    # exist; the lookup helper defined on this class is find_tool_by_name.
    tool = self.find_tool_by_name(tool_name)
    if tool:
        # Execute the tool with the provided parameters
        tool_result = tool.run(**params)
        print(tool_result)
# def parse_tool_command(self, text: str):
# # Parse the text for tool usage
# pass
# def get_tool_description(self):
# """Get the tool description"""
# tool_descriptions = []
# for tool in self.tools:
# description = f"{tool.name}: {tool.description}"
# tool_descriptions.append(description)
# return "\n".join(tool_descriptions)
# def find_tool_by_name(self, name: str):
# """Find a tool by name"""
# for tool in self.tools:
# if tool.name == name:
# return tool
# return None
# def construct_dynamic_prompt(self):
# """Construct the dynamic prompt"""
# tools_description = self.get_tool_description()
# return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
# def extract_tool_commands(self, text: str):
# """
# Extract the tool commands from the text
# Example:
# ```json
# {
# "tool": "tool_name",
# "params": {
# "tool1": "inputs",
# "param2": "value2"
# }
# }
# ```
# """
# # Regex to find JSON like strings
# pattern = r"```json(.+?)```"
# matches = re.findall(pattern, text, re.DOTALL)
# json_commands = []
# for match in matches:
# try:
# json_commands = json.loads(match)
# json_commands.append(json_commands)
# except Exception as error:
# print(f"Error parsing JSON command: {error}")
# def parse_and_execute_tools(self, response):
# """Parse and execute the tools"""
# json_commands = self.extract_tool_commands(response)
# for command in json_commands:
# tool_name = command.get("tool")
# params = command.get("parmas", {})
# self.execute_tool(tool_name, params)
# def execute_tools(self, tool_name, params):
# """Execute the tool with the provided params"""
# tool = self.tool_find_by_name(tool_name)
# if tool:
# # Execute the tool with the provided parameters
# tool_result = tool.run(**params)
# print(tool_result)
def truncate_history(self):
"""
@ -367,13 +369,13 @@ class Flow:
5. Repeat until stopping condition is met or max_loops is reached
"""
dynamic_prompt = self.construct_dynamic_prompt()
combined_prompt = f"{dynamic_prompt}\n{task}"
# dynamic_prompt = self.construct_dynamic_prompt()
# combined_prompt = f"{dynamic_prompt}\n{task}"
# Activate Autonomous agent message
self.activate_autonomous_agent()
response = combined_prompt # or task
response = task # or combined_prompt
history = [f"{self.user_name}: {task}"]
# If dashboard = True then print the dashboard

Loading…
Cancel
Save