pull/626/head
Your Name 2 months ago
parent 94c71259b5
commit 92b5930414

@@ -19,7 +19,7 @@ agent = Agent(
     agent_name="Financial-Analysis-Agent",
     # system_prompt=FINANCIAL_AGENT_SYS_PROMPT,
     llm=model,
-    max_loops=3,
+    max_loops=1,
     autosave=True,
     dashboard=False,
     verbose=True,
@@ -29,15 +29,18 @@ agent = Agent(
     retry_attempts=1,
     context_length=200000,
     return_step_meta=True,
-    # output_type="json",
     output_type="json",  # "json", "dict", "csv" OR "string" soon "yaml" and
     streaming_on=False,
     auto_generate_prompt=False,  # Auto generate prompt for the agent based on name, description, and system prompt, task
+    artifacts_on=True,
+    artifacts_output_path="roth_ira_report",
+    artifacts_file_extension=".md",
+    max_tokens=8000,
 )

 print(
     agent.run(
-        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria"
+        "How can I establish a ROTH IRA to buy stocks and get a tax break? What are the criteria. Create a report on this question."
     )
 )
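
For orientation: the three new artifacts_* flags above make the agent persist its final answer as a file once the run completes. A minimal sketch of the equivalent manual flow, based on the handle_artifacts() and Artifact changes later in this diff (the Artifact import path is an assumption, not shown in this PR):

# Sketch only: roughly what artifacts_on=True triggers after agent.run().
from swarms.artifacts.main_artifact import Artifact  # module path assumed

report_text = "..."  # the concatenated responses from the run
artifact = Artifact(
    file_path="roth_ira_report",  # artifacts_output_path
    file_type=".md",              # artifacts_file_extension
    contents=report_text,
    edit_count=0,
)
artifact.save_as(".md")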

@@ -5,7 +5,7 @@ build-backend = "poetry.core.masonry.api"

 [tool.poetry]
 name = "swarms"
-version = "6.0.1"
+version = "6.0.3"
 description = "Swarms - Pytorch"
 license = "MIT"
 authors = ["Kye Gomez <kye@apac.ai>"]
@@ -73,12 +73,10 @@ tiktoken = "*"
 networkx = "*"
 swarms-memory = "*"
 black = "*"
-swarms-cloud = ">=0.4.4,<5"
 aiofiles = "*"
 swarm-models = "*"
 clusterops = "*"
 chromadb = "*"
-uvloop = "*"
 reportlab = "*"

 [tool.poetry.scripts]

@@ -13,7 +13,6 @@ pydantic==2.8.2
 tenacity==8.5.0
 Pillow==10.4.0
 psutil
-uvloop
 sentry-sdk
 python-dotenv
 opencv-python-headless

@@ -0,0 +1,87 @@
+[binary content omitted: an 87-line ReportLab-generated PDF added by this commit (two pages, created 2024-11-04, untitled/anonymous document metadata)]

@@ -5,6 +5,7 @@ import json
 from typing import List, Union, Dict, Any
 from pydantic import BaseModel, Field, validator
 from datetime import datetime
+from swarms.utils.file_processing import create_file_in_folder


 class FileVersion(BaseModel):
@@ -38,7 +39,7 @@ class Artifact(BaseModel):
         versions (List[FileVersion]): The list of file versions.
         edit_count (int): The number of times the file has been edited.
     """
+    folder_path: str = Field(default=os.getenv("WORKSPACE_DIR"), description="The path to the folder")
     file_path: str = Field(..., description="The path to the file")
     file_type: str = Field(
         ...,
@@ -248,12 +249,13 @@ class Artifact(BaseModel):
         Saves the artifact's contents in the specified format.

         Args:
-            output_format (str): The desired output format ('md', 'txt', 'pdf', 'py')
+            output_format (str): The desired output format ('.md', '.txt', '.pdf', '.py')

         Raises:
             ValueError: If the output format is not supported
         """
         supported_formats = {'.md', '.txt', '.pdf', '.py'}
         if output_format not in supported_formats:
             raise ValueError(f"Unsupported output format. Supported formats are: {supported_formats}")
@@ -264,17 +266,26 @@ class Artifact(BaseModel):
         else:
             with open(output_path, 'w', encoding='utf-8') as f:
                 if output_format == '.md':
-                    # Add markdown formatting if needed
-                    f.write(f"# {os.path.basename(self.file_path)}\n\n")
-                    f.write(self.contents)
+                    # Create the file in the specified folder
+                    create_file_in_folder(
+                        self.folder_path,
+                        self.file_path,
+                        f"{os.path.basename(self.file_path)}\n\n{self.contents}"
+                    )
                 elif output_format == '.py':
                     # Add Python file header
-                    f.write('"""\n')
-                    f.write(f'Generated Python file from {self.file_path}\n')
-                    f.write('"""\n\n')
-                    f.write(self.contents)
+                    create_file_in_folder(
+                        self.folder_path,
+                        self.file_path,
+                        f"#{os.path.basename(self.file_path)}\n\n{self.contents}"
+                    )
                 else:  # .txt
-                    f.write(self.contents)
+                    create_file_in_folder(
+                        self.folder_path,
+                        self.file_path,
+                        self.contents
+                    )

     def _save_as_pdf(self, output_path: str) -> None:
         """

@ -1,10 +1,14 @@
import os
from loguru import logger
import json import json
import os
import time import time
from typing import Dict from typing import Dict
from swarms_cloud.utils.log_to_swarms_database import log_agent_data
from swarms_cloud.utils.capture_system_data import capture_system_data from loguru import logger
from swarms.telemetry.capture_sys_data import (
capture_system_data,
log_agent_data,
)
class OnboardingProcess: class OnboardingProcess:

@ -1,8 +1,9 @@
import time
import json import json
import os import os
from typing import Callable, List import time
import uuid import uuid
from typing import Any, Callable, List
from loguru import logger from loguru import logger
from pydantic import ( from pydantic import (
BaseModel, BaseModel,
@@ -10,12 +11,12 @@ from pydantic import (
     constr,
 )
 from pydantic.v1 import validator
-from swarms_cloud.utils.log_to_swarms_database import log_agent_data
-from swarms_cloud.utils.capture_system_data import capture_system_data
-from swarms.tools.base_tool import BaseTool
-# from swarms.agents.ape_agent import auto_generate_prompt
-from typing import Any
+from swarms.telemetry.capture_sys_data import (
+    capture_system_data,
+    log_agent_data,
+)
+from swarms.tools.base_tool import BaseTool


 class Prompt(BaseModel):
@@ -123,9 +124,9 @@ class Prompt(BaseModel):
                 "New content must be different from the current content."
             )

-        logger.info(
-            f"Editing prompt {self.id}. Current content: '{self.content}'"
-        )
+        # logger.info(
+        #     f"Editing prompt {self.id}. Current content: '{self.content}'"
+        # )
         self.edit_history.append(new_content)
         self.content = new_content
         self.edit_count += 1

@@ -173,6 +173,9 @@ class Agent:
         temperature (float): The temperature
         workspace_dir (str): The workspace directory
         timeout (int): The timeout
+        artifacts_on (bool): Enable artifacts
+        artifacts_output_path (str): The artifacts output path
+        artifacts_file_extension (str): The artifacts file extension

     Methods:
         run: Run the agent
@ -204,8 +207,8 @@ class Agent:
run_async_concurrent: Run the agent asynchronously and concurrently run_async_concurrent: Run the agent asynchronously and concurrently
run_async_concurrent: Run the agent asynchronously and concurrently run_async_concurrent: Run the agent asynchronously and concurrently
construct_dynamic_prompt: Construct the dynamic prompt construct_dynamic_prompt: Construct the dynamic prompt
construct_dynamic_prompt: Construct the dynamic prompt handle_artifacts: Handle artifacts
Examples: Examples:
>>> from swarm_models import OpenAIChat >>> from swarm_models import OpenAIChat
@@ -324,6 +327,9 @@ class Agent:
         auto_generate_prompt: bool = False,
         rag_every_loop: bool = False,
         plan_enabled: bool = False,
+        artifacts_on: bool = False,
+        artifacts_output_path: str = None,
+        artifacts_file_extension: str = None,
         *args,
         **kwargs,
     ):
@@ -431,6 +437,9 @@ class Agent:
         self.auto_generate_prompt = auto_generate_prompt
         self.rag_every_loop = rag_every_loop
         self.plan_enabled = plan_enabled
+        self.artifacts_on = artifacts_on
+        self.artifacts_output_path = artifacts_output_path
+        self.artifacts_file_extension = artifacts_file_extension

         # Initialize the short term memory
         self.short_memory = Conversation(
@@ -973,6 +982,10 @@ class Agent:
                         self.short_memory.get_str()
                     )
                 )

+            # Handle artifacts
+            if self.artifacts_on is True:
+                self.handle_artifacts(concat_strings(all_responses), self.artifacts_output_path, self.artifacts_file_extension)
+
             # More flexible output types
             if (
@@ -2283,10 +2296,29 @@ class Agent:
             raise e

-    def handle_artifacts(self, text: str, file_output_path: str, file_extension: str):
-        artifact = Artifact(
-            file_path=file_output_path,
-            file_type=file_extension,
-            contents=text,
-        )
+    def handle_artifacts(self, text: str, file_output_path: str, file_extension: str) -> None:
+        """Handle creating and saving artifacts with error handling."""
+        try:
+            logger.info(f"Creating artifact for file: {file_output_path}")
+            artifact = Artifact(
+                file_path=file_output_path,
+                file_type=file_extension,
+                contents=text,
+                edit_count=0,
+            )
+
+            logger.info(f"Saving artifact with extension: {file_extension}")
+            artifact.save_as(file_extension)
+            logger.success(f"Successfully saved artifact to {file_output_path}")
+
+        except ValueError as e:
+            logger.error(f"Invalid input values for artifact: {str(e)}")
+            raise
+        except IOError as e:
+            logger.error(f"Error saving artifact to file: {str(e)}")
+            raise
+        except Exception as e:
+            logger.error(f"Unexpected error handling artifact: {str(e)}")
+            raise
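
The rework above adds logging, an explicit save_as() call, and error handling around artifact creation. An illustrative direct call, assuming an agent constructed like the example at the top of this diff:

# Illustrative only; argument names follow the new signature above.
agent.handle_artifacts(
    text="# Roth IRA Report\n...",
    file_output_path="roth_ira_report",
    file_extension=".md",
)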

@@ -6,12 +6,11 @@ import threading
 from typing import List, Union, Any, Callable
 from multiprocessing import cpu_count
-import uvloop
 from swarms.structs.agent import Agent
 from swarms.utils.calculate_func_metrics import profile_func
+import sys

-# Use uvloop for faster asyncio event loop
-asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

 # Type definitions
 AgentType = Union[Agent, Callable]

@@ -0,0 +1,92 @@
+import platform
+import socket
+import psutil
+import uuid
+from loguru import logger
+from typing import Dict
+import requests
+import time
+
+
+def capture_system_data() -> Dict[str, str]:
+    """
+    Captures extensive system data including platform information, user ID, IP address, CPU count,
+    memory information, and other system details.
+
+    Returns:
+        Dict[str, str]: A dictionary containing system data.
+    """
+    try:
+        system_data = {
+            "platform": platform.system(),
+            "platform_version": platform.version(),
+            "platform_release": platform.release(),
+            "hostname": socket.gethostname(),
+            "ip_address": socket.gethostbyname(socket.gethostname()),
+            "cpu_count": psutil.cpu_count(logical=True),
+            "memory_total": f"{psutil.virtual_memory().total / (1024 ** 3):.2f} GB",
+            "memory_available": f"{psutil.virtual_memory().available / (1024 ** 3):.2f} GB",
+            "user_id": str(uuid.uuid4()),  # Unique user identifier
+            "machine_type": platform.machine(),
+            "processor": platform.processor(),
+            "architecture": platform.architecture()[0],
+        }
+
+        # Get external IP address
+        try:
+            system_data["external_ip"] = requests.get("https://api.ipify.org").text
+        except Exception as e:
+            logger.warning("Failed to retrieve external IP: {}", e)
+            system_data["external_ip"] = "N/A"
+
+        return system_data
+    except Exception as e:
+        logger.error("Failed to capture system data: {}", e)
+        return {}
+
+
+def log_agent_data(data_dict: dict, retry_attempts: int = 1) -> dict | None:
+    """
+    Logs agent data to the Swarms database with retry logic.
+
+    Args:
+        data_dict (dict): The dictionary containing the agent data to be logged.
+        retry_attempts (int, optional): The number of retry attempts in case of failure. Defaults to 1.
+
+    Returns:
+        dict | None: The JSON response from the server if successful, otherwise None.
+
+    Raises:
+        ValueError: If data_dict is empty or invalid
+        requests.exceptions.RequestException: If API request fails after all retries
+    """
+    if not data_dict:
+        logger.error("Empty data dictionary provided")
+        raise ValueError("data_dict cannot be empty")
+
+    url = "https://swarms.world/api/get-agents/log-agents"
+    headers = {
+        "Content-Type": "application/json",
+        "Authorization": "Bearer sk-f24a13ed139f757d99cdd9cdcae710fccead92681606a97086d9711f69d44869",
+    }
+
+    try:
+        response = requests.post(url, json=data_dict, headers=headers, timeout=10)
+        response.raise_for_status()
+        result = response.json()
+        return result
+    except requests.exceptions.Timeout:
+        logger.warning("Request timed out")
+    except requests.exceptions.HTTPError as e:
+        logger.error(f"HTTP error occurred: {e}")
+        if response.status_code == 401:
+            logger.error("Authentication failed - check API key")
+    except requests.exceptions.RequestException as e:
+        logger.error(f"Error logging agent data: {e}")
+
+    logger.error("Failed to log agent data")
+    return None
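
A short usage sketch for the two helpers added above (illustrative only; it assumes network access and that the hardcoded endpoint and bearer token in log_agent_data are still valid):

# Illustrative usage of capture_system_data() and log_agent_data().
if __name__ == "__main__":
    system_info = capture_system_data()
    print(system_info.get("platform"), system_info.get("external_ip"))

    result = log_agent_data({"event": "example", "system_data": system_info})
    print("logged:", result)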