pull/703/head
Kye Gomez 6 months ago committed by mike dupont
parent 7f377bdb65
commit 8623a09e41

@ -0,0 +1,96 @@
This Dockerfile sets up a Python environment with numerous libraries for various applications,
including web frameworks, machine learning tools, and data manipulation. Here are some observations
and suggestions:
*** Key Points:
1. *Base Image*: Uses an official Python 3.11 slim image.
2. *Environment Variables*: ~PYTHONDONTWRITEBYTECODE~ and ~PYTHONUNBUFFERED~ are present but commented out.
3. *Working Directory*: Sets ~/opt/swarms/~ as the working directory.
4. *System Updates*: Installs essential packages like ~git~, ~python3-virtualenv~, ~expect~, and
others using ~apt~.
5. *User Management*: Creates a user ~swarms~ and sets up permissions.
6. *Python Virtual Environment*: Sets up a Python virtual environment for the user.
7. *Pip Installations*:
- Installs multiple libraries including FastAPI, SQLAlchemy, PyTorch, Pillow, etc.
- Some packages are commented out and not installed, such as ~triton~ and ~torch~.
8. *Git Configuration*: Configures Git to allow operations in the ~/opt/swarms/~ directory.
9. *Copy Files*: Copies application files into the container.
10. *Command*: Runs Uvicorn with specific configurations.
*** Suggestions:
1. *Minimize Package Installation*:
- Consider removing or commenting out unused packages.
- Install only necessary versions of packages.
2. *Environment Variables*:
   - Uncomment ~PYTHONDONTWRITEBYTECODE~ and ~PYTHONUNBUFFERED~ to stop Python from writing ~.pyc~ files and to keep container log output unbuffered.
3. *User Management*:
- Ensure that the user ~swarms~ has all necessary permissions to avoid potential issues.
4. *Virtual Environment*:
   - The Dockerfile already uses the built-in ~venv~ module (~python3 -m venv~), so installing ~python3-virtualenv~ via apt may be redundant; inside a single-purpose container a virtual environment is optional altogether.
5. *Caching*:
- Use ~.dockerignore~ to exclude unnecessary files from the build context.
- Utilize Docker's caching mechanism by keeping similar install commands together.
6. *Security*:
- Ensure that all installed packages are up-to-date and do not contain known vulnerabilities.
- Consider using a security scanner for Docker images.
7. *Command Execution*:
   - If ~unbuffer~ is used to avoid output buffering, ensure it is installed (it is typically provided by the ~expect~ package) or drop it in favor of ~PYTHONUNBUFFERED=1~.
*** Example of Refactored Dockerfile:
#+BEGIN_SRC dockerfile
# Use an official Python runtime as a parent image
FROM python:3.11-slim
# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
WORKDIR /opt/swarms/
# Install required system packages and clean the apt cache in the same layer
RUN apt-get update && apt-get install -y --no-install-recommends git python3-virtualenv expect jq netcat-traditional && rm -rf /var/lib/apt/lists/*
# Create the agent workspace and a non-root user
RUN mkdir -p /var/swarms/agent_workspace/
RUN adduser --disabled-password --gecos "" swarms --home "/home/swarms"
RUN chown -R swarms:swarms /var/swarms/agent_workspace
USER swarms
RUN python3 -m venv /var/swarms/agent_workspace/.venv/
# Upgrade pip and install essential packages
RUN /var/swarms/agent_workspace/.venv/bin/python -m pip install --upgrade pip
RUN /var/swarms/agent_workspace/.venv/bin/python -m pip install aiofiles aiohappyeyeballs aiosignal frozenlist aiohttp attrs annotated-types anyio sniffio typing_extensions asyncio multidict propcache yarl idna certifi chardet charset-normalizer click dataclasses-json marshmallow typing-inspect distro docstring_parser filelock fastapi starlette pydantic pydantic_core GPUtil Jinja2 MarkupSafe PyYAML Pygments SQLAlchemy fsspec greenlet h11 httpcore httpx huggingface-hub importlib_metadata iniconfig jiter jsonpatch jsonpointer jsonschema-specifications jsonschema langchain-community langchain-core langsmith numpy orjson requests-toolbelt tenacity loguru lxml markdown-it-py mdurl mpmath msgpack multiprocess mypy-protobuf networkx ollama openai pathos pathspec platformdirs pluggy pox ppft protobuf psutil pytesseract pytest python-dateutil python-docx python-dotenv python-magic pytz ratelimit referencing regex reportlab requests rich rpds-py safetensors sentry-sdk six sympy termcolor tiktoken tokenizers toml tqdm types-chardet types-protobuf types-pytz types-toml tzdata urllib3 uvicorn zipp
# Set up Git configuration
RUN git config --global --add safe.directory "/opt/swarms"
# Copy application files and install dependencies
COPY swarms /opt/swarms/swarms
COPY pyproject.toml /opt/swarms/
COPY README.md /opt/swarms/
RUN /var/swarms/agent_workspace/.venv/bin/python -m pip install -e /opt/swarms/
# Add main.py file
COPY api/main.py /opt/swarms/api/main.py
WORKDIR /opt/swarms/api/
CMD ["unbuffer", "/var/swarms/agent_workspace/.venv/bin/uvicorn", "--proxy-headers", "--forwarded-allow-ips='*'", "--workers=4", "--port=8000", "--reload-delay=30", "main:create_app"]
#+END_SRC
*** Additional Tips:
- *Layer Optimization*: Ensure that frequently changing files (like ~api/main.py~) are copied in later, separate layers so that the earlier, more stable layers stay cached.
- *Security Scans*: Run security scans on the final Docker image using tools like Trivy or Clair.
By following these suggestions, you can optimize your Dockerfile for better performance and
maintainability.

@ -0,0 +1,768 @@
{
"logStreams": [
{
"logStreamName": "0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735052065253,
"firstEventTimestamp": 1735052065225,
"lastEventTimestamp": 1735052065225,
"lastIngestionTime": 1735052065276,
"uploadSequenceToken": "49039859613988222776763637742290862475585593660493682344",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735052065255,
"firstEventTimestamp": 1735052065225,
"lastEventTimestamp": 1735052065225,
"lastIngestionTime": 1735052065276,
"uploadSequenceToken": "49039859613988222776763637742290863632254093157312911099",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stderr",
"creationTime": 1734899238367,
"firstEventTimestamp": 1734899238330,
"lastEventTimestamp": 1734899297327,
"lastIngestionTime": 1734899297329,
"uploadSequenceToken": "49039859613785159344752651490819636558466064069737469684",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stdout",
"creationTime": 1734899238369,
"firstEventTimestamp": 1734899238339,
"lastEventTimestamp": 1734899298326,
"lastIngestionTime": 1734899298328,
"uploadSequenceToken": "49039859613785160672651419279950594505581625482055407181",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735005102663,
"firstEventTimestamp": 1735005102632,
"lastEventTimestamp": 1735005102632,
"lastIngestionTime": 1735005102684,
"uploadSequenceToken": "49039859613925798784722613018397360500005004744706643512",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735005102670,
"firstEventTimestamp": 1735005102632,
"lastEventTimestamp": 1735005102632,
"lastIngestionTime": 1735005102690,
"uploadSequenceToken": "49039859613925798792697980993106856627627363850950432327",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735055856519,
"firstEventTimestamp": 1735055856488,
"lastEventTimestamp": 1735055858489,
"lastIngestionTime": 1735055858492,
"uploadSequenceToken": "49039859613993264825664897017738619986883194897110554355",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735055856522,
"firstEventTimestamp": 1735055856488,
"lastEventTimestamp": 1735055856488,
"lastIngestionTime": 1735055856544,
"uploadSequenceToken": "49039859613993262236328761228722500242420278656791107156",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735057193938,
"firstEventTimestamp": 1735057193900,
"lastEventTimestamp": 1735057195901,
"lastIngestionTime": 1735057195902,
"uploadSequenceToken": "49039859613995042548478739722076201475673363666687645245",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735057193935,
"firstEventTimestamp": 1735057193900,
"lastEventTimestamp": 1735057193900,
"lastIngestionTime": 1735057193957,
"uploadSequenceToken": "49039859613995039963130287920414829151619369814831552100",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735054554607,
"firstEventTimestamp": 1735054554576,
"lastEventTimestamp": 1735054554576,
"lastIngestionTime": 1735054554630,
"uploadSequenceToken": "49039859613991531695791856905758746050334104337153676827",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735054554605,
"firstEventTimestamp": 1735054554576,
"lastEventTimestamp": 1735054554576,
"lastIngestionTime": 1735054554627,
"uploadSequenceToken": "49039859613991531691804172918403999064288358809319921154",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stderr",
"creationTime": 1734870795997,
"firstEventTimestamp": 1734870795949,
"lastEventTimestamp": 1734870854947,
"lastIngestionTime": 1734870854950,
"uploadSequenceToken": "49039859613747352938319126511079397131401121383887820324",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stdout",
"creationTime": 1734870795998,
"firstEventTimestamp": 1734870795954,
"lastEventTimestamp": 1734870855948,
"lastIngestionTime": 1734870855951,
"uploadSequenceToken": "49039859613747354268876350291780186335989982121728824915",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735057433187,
"firstEventTimestamp": 1735057433156,
"lastEventTimestamp": 1735057435156,
"lastIngestionTime": 1735057435157,
"uploadSequenceToken": "49039859613995360572922871242123376126157469071980779228",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735057433185,
"firstEventTimestamp": 1735057433156,
"lastEventTimestamp": 1735057433156,
"lastIngestionTime": 1735057433206,
"uploadSequenceToken": "49039859613995357979599051465752508652951185554904340048",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stderr",
"creationTime": 1734904353404,
"firstEventTimestamp": 1734904353362,
"lastEventTimestamp": 1734904431373,
"lastIngestionTime": 1734904431379,
"uploadSequenceToken": "49039859613791983667744411038156926901475617546398750277",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stdout",
"creationTime": 1734904353398,
"firstEventTimestamp": 1734904353360,
"lastEventTimestamp": 1734904433362,
"lastIngestionTime": 1734904433365,
"uploadSequenceToken": "49039859613791986307591210666999851103808292347067651736",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stderr",
"creationTime": 1734858621142,
"firstEventTimestamp": 1734858621107,
"lastEventTimestamp": 1734858678106,
"lastIngestionTime": 1734858678110,
"uploadSequenceToken": "49039859613731167141690932916081590350341220235971810915",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stdout",
"creationTime": 1734858621142,
"firstEventTimestamp": 1734858621106,
"lastEventTimestamp": 1734858679105,
"lastIngestionTime": 1734858679109,
"uploadSequenceToken": "49039859613731168469589700705212547784983172850546267812",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735058328627,
"firstEventTimestamp": 1735058328582,
"lastEventTimestamp": 1735058329582,
"lastIngestionTime": 1735058329584,
"uploadSequenceToken": "49039859613996549470331457157072832959656785139474519737",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735058328626,
"firstEventTimestamp": 1735058328582,
"lastEventTimestamp": 1735058329582,
"lastIngestionTime": 1735058329584,
"uploadSequenceToken": "49039859613996549470331457157072833567318475661320803988",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735058290048,
"firstEventTimestamp": 1735058290011,
"lastEventTimestamp": 1735058291010,
"lastIngestionTime": 1735058291012,
"uploadSequenceToken": "49039859613996498199349203741297784502672927255682766343",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735058290047,
"firstEventTimestamp": 1735058290009,
"lastEventTimestamp": 1735058291008,
"lastIngestionTime": 1735058291010,
"uploadSequenceToken": "49039859613996498196690747749727953601812660823865054910",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735044217329,
"firstEventTimestamp": 1735044217309,
"lastEventTimestamp": 1735044217309,
"lastIngestionTime": 1735044217370,
"uploadSequenceToken": "49039859613977791120400149326302420330339315638580696710",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735044217344,
"firstEventTimestamp": 1735044217309,
"lastEventTimestamp": 1735044217309,
"lastIngestionTime": 1735044217366,
"uploadSequenceToken": "49039859613977791115083237343162757449412196868898576123",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stderr",
"creationTime": 1734856658896,
"firstEventTimestamp": 1734856658859,
"lastEventTimestamp": 1734856724859,
"lastIngestionTime": 1734856724862,
"uploadSequenceToken": "49039859613728570829766622020722677450385238021455632064",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stdout",
"creationTime": 1734856658897,
"firstEventTimestamp": 1734856658859,
"lastEventTimestamp": 1734856725859,
"lastIngestionTime": 1734856725862,
"uploadSequenceToken": "49039859613728572158994617805638550864288006496292067018",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735005234228,
"firstEventTimestamp": 1735005234199,
"lastEventTimestamp": 1735005234199,
"lastIngestionTime": 1735005234249,
"uploadSequenceToken": "49039859613925973664603878460854192727649656492823303866",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735005234229,
"firstEventTimestamp": 1735005234199,
"lastEventTimestamp": 1735005234199,
"lastIngestionTime": 1735005234250,
"uploadSequenceToken": "49039859613925973665933106456639109128303282286887851775",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735055317414,
"firstEventTimestamp": 1735055317383,
"lastEventTimestamp": 1735055317383,
"lastIngestionTime": 1735055317439,
"uploadSequenceToken": "49039859613992545642870093601650851001566812901843293740",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735055317416,
"firstEventTimestamp": 1735055317383,
"lastEventTimestamp": 1735055317383,
"lastIngestionTime": 1735055317438,
"uploadSequenceToken": "49039859613992545641540865605865935623338388911938229871",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stderr",
"creationTime": 1734818540131,
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stdout",
"creationTime": 1734818540128,
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735058057641,
"firstEventTimestamp": 1735058057609,
"lastEventTimestamp": 1735058060610,
"lastIngestionTime": 1735058060612,
"uploadSequenceToken": "49039859613996191945218974896680674139788828833329600156",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735058057643,
"firstEventTimestamp": 1735058057609,
"lastEventTimestamp": 1735058057609,
"lastIngestionTime": 1735058057667,
"uploadSequenceToken": "49039859613996188030642527310103428934121513664810735175",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735057233804,
"firstEventTimestamp": 1735057233769,
"lastEventTimestamp": 1735057235769,
"lastIngestionTime": 1735057235771,
"uploadSequenceToken": "49039859613995095543469703670887153246153117076553618958",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735057233797,
"firstEventTimestamp": 1735057233769,
"lastEventTimestamp": 1735057233769,
"lastIngestionTime": 1735057233815,
"uploadSequenceToken": "49039859613995092943499743915591706324624816391965267548",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stderr",
"creationTime": 1734830413282,
"firstEventTimestamp": 1734830413242,
"lastEventTimestamp": 1734830433243,
"lastIngestionTime": 1734830433276,
"uploadSequenceToken": "49039859613693623317601835267547467856431474809478469252",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stdout",
"creationTime": 1734830413288,
"firstEventTimestamp": 1734830413242,
"lastEventTimestamp": 1734830433246,
"lastIngestionTime": 1734830433252,
"uploadSequenceToken": "49039859613693623285700363368709487386499566467274397437",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735057357453,
"firstEventTimestamp": 1735057357422,
"lastEventTimestamp": 1735057359422,
"lastIngestionTime": 1735057359426,
"uploadSequenceToken": "49039859613995259909157522454659419100657152802661750380",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735057357450,
"firstEventTimestamp": 1735057357422,
"lastEventTimestamp": 1735057357422,
"lastIngestionTime": 1735057357476,
"uploadSequenceToken": "49039859613995257317162930674073467432332307574061684451",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stderr",
"creationTime": 1734904330835,
"firstEventTimestamp": 1734904330793,
"lastEventTimestamp": 1734904412792,
"lastIngestionTime": 1734904412798,
"uploadSequenceToken": "49039859613791958969359021358635106202736237108550063861",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stdout",
"creationTime": 1734904330833,
"firstEventTimestamp": 1734904330794,
"lastEventTimestamp": 1734904414792,
"lastIngestionTime": 1734904414794,
"uploadSequenceToken": "49039859613791961622498100945327188974427354123974104746",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735055525532,
"firstEventTimestamp": 1735055525476,
"lastEventTimestamp": 1735055529477,
"lastIngestionTime": 1735055529478,
"uploadSequenceToken": "49039859613992827491045091839427632436711007109045442148",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735055525531,
"firstEventTimestamp": 1735055525492,
"lastEventTimestamp": 1735055527477,
"lastIngestionTime": 1735055527492,
"uploadSequenceToken": "49039859613992824851198292210584709371014820662651803280",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735054456369,
"firstEventTimestamp": 1735054456335,
"lastEventTimestamp": 1735054456335,
"lastIngestionTime": 1735054456396,
"uploadSequenceToken": "49039859613991401120408918970332906052237331985426437749",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735054456371,
"firstEventTimestamp": 1735054456335,
"lastEventTimestamp": 1735054456335,
"lastIngestionTime": 1735054456395,
"uploadSequenceToken": "49039859613991401119079690974547990635818383155419567797",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735057898295,
"firstEventTimestamp": 1735057898263,
"lastEventTimestamp": 1735057900263,
"lastIngestionTime": 1735057900265,
"uploadSequenceToken": "49039859613995978807497534772775208356660176145935652414",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735057898299,
"firstEventTimestamp": 1735057898263,
"lastEventTimestamp": 1735057898263,
"lastIngestionTime": 1735057898322,
"uploadSequenceToken": "49039859613995976224807538962683667811200001701897459307",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stderr",
"creationTime": 1734830394075,
"firstEventTimestamp": 1734830394042,
"lastEventTimestamp": 1734830472041,
"lastIngestionTime": 1734830472043,
"uploadSequenceToken": "49039859613693674847783547861384144735279739289454982245",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stdout",
"creationTime": 1734830394075,
"firstEventTimestamp": 1734830394042,
"lastEventTimestamp": 1734830473042,
"lastIngestionTime": 1734830473045,
"uploadSequenceToken": "49039859613693676179669999637869850504453252257646589025",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "dupont-2gt45qizfn4ezip8dpkrilhaa4",
"creationTime": 1734982366536,
"firstEventTimestamp": 1734982366502,
"lastEventTimestamp": 1734983612502,
"lastIngestionTime": 1734983612523,
"uploadSequenceToken": "49039859613897233461087487854920254489498942830027682928",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-2gt45qizfn4ezip8dpkrilhaa4",
"storedBytes": 0
},
{
"logStreamName": "dupont-2qj8xj6luppbvlilgdivc5sug8",
"creationTime": 1734866718407,
"firstEventTimestamp": 1734866718373,
"lastEventTimestamp": 1734866718373,
"lastIngestionTime": 1734866718430,
"uploadSequenceToken": "49039859613741854560130002290875861318885575337716691992",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-2qj8xj6luppbvlilgdivc5sug8",
"storedBytes": 0
},
{
"logStreamName": "dupont-6kq4qtpy72q753vkvy6if872q4",
"creationTime": 1734820331291,
"firstEventTimestamp": 1734820935254,
"lastEventTimestamp": 1734820935254,
"lastIngestionTime": 1734820935319,
"uploadSequenceToken": "49039859613680998367254673955341045032917736173318386920",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-6kq4qtpy72q753vkvy6if872q4",
"storedBytes": 0
},
{
"logStreamName": "dupont-9d9hresjnrxhaoqz8a5pesy9uy",
"creationTime": 1734869641914,
"firstEventTimestamp": 1734869641884,
"lastEventTimestamp": 1734869993883,
"lastIngestionTime": 1734869993885,
"uploadSequenceToken": "49039859613746208386614935972496344653675690982984332418",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-9d9hresjnrxhaoqz8a5pesy9uy",
"storedBytes": 0
},
{
"logStreamName": "dupont-acc5uyp4sfig2tz6zxf5fei6ji",
"creationTime": 1734866903573,
"firstEventTimestamp": 1734866903548,
"lastEventTimestamp": 1734867977549,
"lastIngestionTime": 1734867977576,
"uploadSequenceToken": "49039859613743528252243982884557566430997070622784959605",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-acc5uyp4sfig2tz6zxf5fei6ji",
"storedBytes": 0
},
{
"logStreamName": "dupont-ale49vtarq3lv2qh5g4bbgbo2u",
"creationTime": 1734823328568,
"firstEventTimestamp": 1734823328544,
"lastEventTimestamp": 1734823328544,
"lastIngestionTime": 1734823328587,
"uploadSequenceToken": "49039859613684179566081690129382359560174988019272201245",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-ale49vtarq3lv2qh5g4bbgbo2u",
"storedBytes": 0
},
{
"logStreamName": "dupont-cfi2lbnsn34debav6jbb726ooa",
"creationTime": 1734821129540,
"firstEventTimestamp": 1734821129509,
"lastEventTimestamp": 1734821129509,
"lastIngestionTime": 1734821129570,
"uploadSequenceToken": "49039859613681256571122083171034274844280438270068470784",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-cfi2lbnsn34debav6jbb726ooa",
"storedBytes": 0
},
{
"logStreamName": "dupont-ecrtlu7kr5rch6g5ndble8kqha",
"creationTime": 1734856273279,
"firstEventTimestamp": 1734856273247,
"lastEventTimestamp": 1734856273247,
"lastIngestionTime": 1734856273300,
"uploadSequenceToken": "49039859613727970600914389392544321167059767430010844324",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-ecrtlu7kr5rch6g5ndble8kqha",
"storedBytes": 0
},
{
"logStreamName": "dupont-ehhpaubgebqzgzc6qos2jc9qz8",
"creationTime": 1734869593887,
"firstEventTimestamp": 1734869593858,
"lastEventTimestamp": 1734869655858,
"lastIngestionTime": 1734869655863,
"uploadSequenceToken": "49039859613745759078309344763663156946262011664604542059",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-ehhpaubgebqzgzc6qos2jc9qz8",
"storedBytes": 0
},
{
"logStreamName": "dupont-gag5kejahuzr9873ietdbkpoa8",
"creationTime": 1734823566407,
"firstEventTimestamp": 1734823566372,
"lastEventTimestamp": 1734825788372,
"lastIngestionTime": 1734825788392,
"uploadSequenceToken": "49039859613687449207751861844371110110764337414286292026",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-gag5kejahuzr9873ietdbkpoa8",
"storedBytes": 0
},
{
"logStreamName": "dupont-haaiif5dpxck5ehtqbqxbhflu8",
"creationTime": 1734871696978,
"firstEventTimestamp": 1734871696953,
"lastEventTimestamp": 1734872507953,
"lastIngestionTime": 1734872507972,
"uploadSequenceToken": "49039859613749550181439174884288509989399115071987856439",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-haaiif5dpxck5ehtqbqxbhflu8",
"storedBytes": 0
},
{
"logStreamName": "dupont-j5tad5uqe6rg579d76npjrbyta",
"creationTime": 1734856254710,
"firstEventTimestamp": 1734856254693,
"lastEventTimestamp": 1734856254693,
"lastIngestionTime": 1734856254734,
"uploadSequenceToken": "49039859613727945922467419649796227269299585648452908096",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-j5tad5uqe6rg579d76npjrbyta",
"storedBytes": 0
},
{
"logStreamName": "dupont-nbs3222zo2bdt8n5lqz6i5e2h4",
"creationTime": 1734856866781,
"firstEventTimestamp": 1734856866754,
"lastEventTimestamp": 1734858481754,
"lastIngestionTime": 1734858481781,
"uploadSequenceToken": "49039859613730906175687748459336230657047697575836857590",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-nbs3222zo2bdt8n5lqz6i5e2h4",
"storedBytes": 0
},
{
"logStreamName": "dupont-psfjp3f7bnkatx76s852aocut4",
"creationTime": 1734823506148,
"firstEventTimestamp": 1734823506113,
"lastEventTimestamp": 1734823506113,
"lastIngestionTime": 1734823506173,
"uploadSequenceToken": "49039859613684415618364549589452569865211766620082091138",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-psfjp3f7bnkatx76s852aocut4",
"storedBytes": 0
},
{
"logStreamName": "dupont-taejkp2vrs9ptlzfk3hi7sc8tu",
"creationTime": 1735057761372,
"firstEventTimestamp": 1735057761331,
"lastEventTimestamp": 1735059010330,
"lastIngestionTime": 1735059010356,
"uploadSequenceToken": "49039859613997454371532603645824513397886961371254043702",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-taejkp2vrs9ptlzfk3hi7sc8tu",
"storedBytes": 0
},
{
"logStreamName": "dupont-tx6jfbkpuisogketgtnii3loiq",
"creationTime": 1734821136150,
"firstEventTimestamp": 1734821136115,
"lastEventTimestamp": 1734821136115,
"lastIngestionTime": 1734821136170,
"uploadSequenceToken": "49039859613681265344026855351479041441540820263578055706",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-tx6jfbkpuisogketgtnii3loiq",
"storedBytes": 0
},
{
"logStreamName": "dupont-u28lll5bhz7293b56aqzlo6i9a",
"creationTime": 1734868152559,
"firstEventTimestamp": 1734868152530,
"lastEventTimestamp": 1734869653530,
"lastIngestionTime": 1734869653598,
"uploadSequenceToken": "49039859613745756067607934310828709627686385468631102662",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-u28lll5bhz7293b56aqzlo6i9a",
"storedBytes": 0
},
{
"logStreamName": "dupont-zq8btstj4tr9ajp9xps278hkqq",
"creationTime": 1734860241487,
"firstEventTimestamp": 1734860241462,
"lastEventTimestamp": 1734861464462,
"lastIngestionTime": 1734861464487,
"uploadSequenceToken": "49039859613734870872006144102619839037825530972265460909",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-zq8btstj4tr9ajp9xps278hkqq",
"storedBytes": 0
},
{
"logStreamName": "ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stderr",
"creationTime": 1734899352252,
"firstEventTimestamp": 1734899352209,
"lastEventTimestamp": 1734899414209,
"lastIngestionTime": 1734899414213,
"uploadSequenceToken": "49039859613785314710237710814929589941728888154770584703",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stdout",
"creationTime": 1734899352254,
"firstEventTimestamp": 1734899352213,
"lastEventTimestamp": 1734899415209,
"lastIngestionTime": 1734899415211,
"uploadSequenceToken": "49039859613785316036807250608275631703066569194125079656",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stderr",
"creationTime": 1734817872118,
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stderr",
"storedBytes": 0
},
{
"logStreamName": "f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stdout",
"creationTime": 1734817872118,
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stdout",
"storedBytes": 0
},
{
"logStreamName": "f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735056783402,
"firstEventTimestamp": 1735056783370,
"lastEventTimestamp": 1735056785369,
"lastIngestionTime": 1735056785372,
"uploadSequenceToken": "49039859613994496860509630140565960835917365413277283529",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735056783407,
"firstEventTimestamp": 1735056783370,
"lastEventTimestamp": 1735056783370,
"lastIngestionTime": 1735056783431,
"uploadSequenceToken": "49039859613994494280478090322044252140748316444795286615",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
},
{
"logStreamName": "fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"creationTime": 1735005652683,
"firstEventTimestamp": 1735005652644,
"lastEventTimestamp": 1735005652644,
"lastIngestionTime": 1735005652707,
"uploadSequenceToken": "49039859613926529890692538625183589108539545514173740282",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
"storedBytes": 0
},
{
"logStreamName": "fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"creationTime": 1735005652676,
"firstEventTimestamp": 1735005652644,
"lastEventTimestamp": 1735005652644,
"lastIngestionTime": 1735005652699,
"uploadSequenceToken": "49039859613926529880058714658904262714740208262181499974",
"arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
"storedBytes": 0
}
]
}

@ -105,7 +105,7 @@ class AgentConfig(BaseModel):
..., description="System prompt for the agent"
)
model_name: str = Field(
default="gpt-4", description="Model name to use"
default="gpt-4o-mini", description="Model name to use"
)
temperature: float = Field(
default=0.1,
@ -145,6 +145,10 @@ class AgentConfig(BaseModel):
default=False,
description="Auto-generate prompt based on agent details such as name, description, etc.",
)
max_tokens: int = Field(
default_factory=int,
description="The number of max output tokens",
)
class AgentUpdate(BaseModel):
@ -286,6 +290,7 @@ class AgentStore:
streaming_on=config.streaming_on,
stopping_token=config.stopping_token,
auto_generate_prompt=config.auto_generate_prompt,
max_tokens=config.max_tokens,
)
agent_id = uuid4()
@ -552,8 +557,10 @@ class AgentStore:
response=response,
metadata={
"agent_name": agent.agent_name,
# "model_name": agent.llm.model_name,
# "temperature": 0.5,
"model_name": agent.llm.model_name,
"temperature": agent.llm.temperature,
"max_loops": agent.max_loops,
"context_window": agent.context_length,
},
timestamp=datetime.utcnow(),
processing_time=processing_time,

@ -3,6 +3,7 @@ import json
from time import sleep
BASE_URL = "http://0.0.0.0:8000/v1"
#BASE_URL = "http://api.swarms.ai:8000"
def make_request(method, endpoint, data=None):

@ -0,0 +1,919 @@
consider a graph partitioning algorithm
based on the quasi-metadata extracted from Python, git, documents, chats,
ASTs, traces, debug logs, audit logs and other traces.
consider the model gpt-4o-2024-08-06
Input tokens: 128,000 context window!
Output tokens: 16,384 window
Now let's devise a strategy to break our knowledge graph up into these window sizes.
here are the files, consider the names and how we can group them
find swarms/ -name \*.py -exec wc -c {} \; -print
Take these byte sizes and names and construct a model of the code in your mind as
a PlantUML diagram as you go.
10599 swarms/structs/agent_registry.py
swarms/structs/agent_registry.py
369 swarms/structs/omni_agent_types.py
swarms/structs/omni_agent_types.py
8215 swarms/structs/auto_swarm.py
swarms/structs/auto_swarm.py
5404 swarms/structs/workspace_manager.py
swarms/structs/workspace_manager.py
15901 swarms/structs/base_structure.py
swarms/structs/base_structure.py
13037 swarms/structs/agent_router.py
swarms/structs/agent_router.py
16250 swarms/structs/groupchat.py
swarms/structs/groupchat.py
484 swarms/structs/stopping_conditions.py
swarms/structs/stopping_conditions.py
9933 swarms/structs/spreadsheet_swarm.py
swarms/structs/spreadsheet_swarm.py
9069 swarms/structs/pulsar_swarm.py
swarms/structs/pulsar_swarm.py
3935 swarms/structs/__init__.py
swarms/structs/__init__.py
13502 swarms/structs/agent_memory_manager.py
swarms/structs/agent_memory_manager.py
19483 swarms/structs/hiearchical_swarm.py
swarms/structs/hiearchical_swarm.py
25654 swarms/structs/rearrange.py
swarms/structs/rearrange.py
188 swarms/structs/output_types.py
swarms/structs/output_types.py
7573 swarms/structs/round_robin.py
swarms/structs/round_robin.py
5370 swarms/structs/company.py
swarms/structs/company.py
8117 swarms/structs/mixture_of_agents.py
swarms/structs/mixture_of_agents.py
7973 swarms/structs/multi_process_workflow.py
swarms/structs/multi_process_workflow.py
22308 swarms/structs/concurrent_workflow.py
swarms/structs/concurrent_workflow.py
13382 swarms/structs/swarming_architectures.py
swarms/structs/swarming_architectures.py
93913 swarms/structs/agent.py
swarms/structs/agent.py
2804 swarms/structs/agents_available.py
swarms/structs/agents_available.py
7124 swarms/structs/safe_loading.py
swarms/structs/safe_loading.py
13289 swarms/structs/base_workflow.py
swarms/structs/base_workflow.py
11374 swarms/structs/swarm_load_balancer.py
swarms/structs/swarm_load_balancer.py
6770 swarms/structs/queue_swarm.py
swarms/structs/queue_swarm.py
14314 swarms/structs/conversation.py
swarms/structs/conversation.py
11168 swarms/structs/swarm_builder.py
swarms/structs/swarm_builder.py
3680 swarms/structs/utils.py
swarms/structs/utils.py
7829 swarms/structs/groupchat_new.py
swarms/structs/groupchat_new.py
25845 swarms/structs/swarm_router.py
swarms/structs/swarm_router.py
20920 swarms/structs/graph_swarm.py
swarms/structs/graph_swarm.py
8236 swarms/structs/sequential_workflow.py
swarms/structs/sequential_workflow.py
11224 swarms/structs/auto_swarm_builder.py
swarms/structs/auto_swarm_builder.py
12751 swarms/structs/task.py
swarms/structs/task.py
732 swarms/structs/concat.py
swarms/structs/concat.py
23537 swarms/structs/swarm_matcher.py
swarms/structs/swarm_matcher.py
7730 swarms/structs/multi_agent_collab.py
swarms/structs/multi_agent_collab.py
13790 swarms/structs/multi_agent_exec.py
swarms/structs/multi_agent_exec.py
8629 swarms/structs/graph_workflow.py
swarms/structs/graph_workflow.py
24453 swarms/structs/base_swarm.py
swarms/structs/base_swarm.py
5578 swarms/structs/swarm_registry.py
swarms/structs/swarm_registry.py
15301 swarms/structs/swarm_arange.py
swarms/structs/swarm_arange.py
7009 swarms/structs/majority_voting.py
swarms/structs/majority_voting.py
68 swarms/structs/swarm_id_generator.py
swarms/structs/swarm_id_generator.py
12499 swarms/structs/tree_swarm.py
swarms/structs/tree_swarm.py
26214 swarms/structs/async_workflow.py
swarms/structs/async_workflow.py
1854 swarms/telemetry/bootup.py
swarms/telemetry/bootup.py
1879 swarms/telemetry/user_utils.py
swarms/telemetry/user_utils.py
3801 swarms/telemetry/sys_info.py
swarms/telemetry/sys_info.py
791 swarms/telemetry/__init__.py
swarms/telemetry/__init__.py
1257 swarms/telemetry/sentry_active.py
swarms/telemetry/sentry_active.py
3325 swarms/telemetry/capture_sys_data.py
swarms/telemetry/capture_sys_data.py
1309 swarms/telemetry/auto_upgrade_swarms.py
swarms/telemetry/auto_upgrade_swarms.py
512 swarms/__init__.py
swarms/__init__.py
185 swarms/schemas/__init__.py
swarms/schemas/__init__.py
6345 swarms/schemas/agent_input_schema.py
swarms/schemas/agent_input_schema.py
2573 swarms/schemas/agent_step_schemas.py
swarms/schemas/agent_step_schemas.py
3267 swarms/schemas/base_schemas.py
swarms/schemas/base_schemas.py
4665 swarms/utils/add_docs_to_agents.py
swarms/utils/add_docs_to_agents.py
7695 swarms/utils/lazy_loader.py
swarms/utils/lazy_loader.py
3427 swarms/utils/data_to_text.py
swarms/utils/data_to_text.py
3417 swarms/utils/litellm_wrapper.py
swarms/utils/litellm_wrapper.py
685 swarms/utils/agent_ops_check.py
swarms/utils/agent_ops_check.py
1106 swarms/utils/loguru_logger.py
swarms/utils/loguru_logger.py
1067 swarms/utils/__init__.py
swarms/utils/__init__.py
1660 swarms/utils/update_agent_system_prompts.py
swarms/utils/update_agent_system_prompts.py
3917 swarms/utils/try_except_wrapper.py
swarms/utils/try_except_wrapper.py
2562 swarms/utils/swarm_reliability_checks.py
swarms/utils/swarm_reliability_checks.py
3156 swarms/utils/async_file_creation.py
swarms/utils/async_file_creation.py
2376 swarms/utils/pandas_utils.py
swarms/utils/pandas_utils.py
3242 swarms/utils/disable_logging.py
swarms/utils/disable_logging.py
4495 swarms/utils/auto_download_check_packages.py
swarms/utils/auto_download_check_packages.py
2936 swarms/utils/any_to_str.py
swarms/utils/any_to_str.py
5007 swarms/utils/calculate_func_metrics.py
swarms/utils/calculate_func_metrics.py
652 swarms/utils/markdown_message.py
swarms/utils/markdown_message.py
4194 swarms/utils/formatter.py
swarms/utils/formatter.py
1008 swarms/utils/class_args_wrapper.py
swarms/utils/class_args_wrapper.py
1987 swarms/utils/parse_code.py
swarms/utils/parse_code.py
3739 swarms/utils/wrapper_clusterop.py
swarms/utils/wrapper_clusterop.py
4865 swarms/utils/file_processing.py
swarms/utils/file_processing.py
1276 swarms/utils/pdf_to_text.py
swarms/utils/pdf_to_text.py
83 swarms/artifacts/__init__.py
swarms/artifacts/__init__.py
11076 swarms/artifacts/main_artifact.py
swarms/artifacts/main_artifact.py
5233 swarms/prompts/ai_research_team.py
swarms/prompts/ai_research_team.py
2960 swarms/prompts/react.py
swarms/prompts/react.py
5126 swarms/prompts/sales.py
swarms/prompts/sales.py
9156 swarms/prompts/aga.py
swarms/prompts/aga.py
2235 swarms/prompts/code_interpreter.py
swarms/prompts/code_interpreter.py
13215 swarms/prompts/python.py
swarms/prompts/python.py
2705 swarms/prompts/agent_prompt.py
swarms/prompts/agent_prompt.py
747 swarms/prompts/__init__.py
swarms/prompts/__init__.py
11320 swarms/prompts/accountant_swarm_prompts.py
swarms/prompts/accountant_swarm_prompts.py
4280 swarms/prompts/swarm_manager_agent.py
swarms/prompts/swarm_manager_agent.py
7157 swarms/prompts/documentation.py
swarms/prompts/documentation.py
3511 swarms/prompts/multi_modal_prompts.py
swarms/prompts/multi_modal_prompts.py
4106 swarms/prompts/code_spawner.py
swarms/prompts/code_spawner.py
880 swarms/prompts/idea2img.py
swarms/prompts/idea2img.py
5604 swarms/prompts/autoswarm.py
swarms/prompts/autoswarm.py
3454 swarms/prompts/operations_agent_prompt.py
swarms/prompts/operations_agent_prompt.py
3225 swarms/prompts/multi_modal_visual_prompts.py
swarms/prompts/multi_modal_visual_prompts.py
4271 swarms/prompts/tests.py
swarms/prompts/tests.py
3801 swarms/prompts/chat_prompt.py
swarms/prompts/chat_prompt.py
4070 swarms/prompts/sop_generator_agent_prompt.py
swarms/prompts/sop_generator_agent_prompt.py
6886 swarms/prompts/agent_prompts.py
3346 swarms/prompts/legal_agent_prompt.py
3984 swarms/prompts/support_agent_prompt.py
8333 swarms/prompts/product_agent_prompt.py
13981 swarms/prompts/autobloggen.py
6132 swarms/prompts/finance_agent_sys_prompt.py
9589 swarms/prompts/prompt.py
3823 swarms/prompts/prompt_generator_optimizer.py
3374 swarms/prompts/meta_system_prompt.py
2349 swarms/prompts/xray_swarm_prompt.py
1566 swarms/prompts/debate.py
1148 swarms/prompts/aot_prompt.py
728 swarms/prompts/task_assignment_prompt.py
2658 swarms/prompts/ag_prompt.py
2679 swarms/prompts/security_team.py
10662 swarms/prompts/multi_modal_autonomous_instruction_prompt.py
1767 swarms/prompts/education.py
4117 swarms/prompts/growth_agent_prompt.py
2149 swarms/prompts/personal_stylist.py
3702 swarms/prompts/tools.py
3675 swarms/prompts/visual_cot.py
2398 swarms/prompts/urban_planning.py
10144 swarms/prompts/programming.py
3264 swarms/prompts/self_operating_prompt.py
4785 swarms/prompts/logistics.py
4412 swarms/prompts/prompt_generator.py
0 swarms/prompts/refiner_agent_prompt.py
4640 swarms/prompts/summaries_prompts.py
5013 swarms/prompts/sales_prompts.py
7113 swarms/prompts/agent_system_prompts.py
2128 swarms/prompts/project_manager.py
5554 swarms/prompts/worker_prompt.py
4180 swarms/prompts/finance_agent_prompt.py
628 swarms/agents/__init__.py
8474 swarms/agents/auto_generate_swarm_config.py
10408 swarms/agents/openai_assistant.py
1563 swarms/agents/ape_agent.py
9389 swarms/agents/create_agents_from_yaml.py
5169 swarms/agents/tool_agent.py
1129 swarms/tools/func_to_str.py
7184 swarms/tools/prebuilt/code_interpreter.py
167 swarms/tools/prebuilt/__init__.py
1461 swarms/tools/prebuilt/math_eval.py
4297 swarms/tools/prebuilt/code_executor.py
2304 swarms/tools/prebuilt/bing_api.py
978 swarms/tools/openai_func_calling_schema_pydantic.py
8880 swarms/tools/func_calling_executor.py
1639 swarms/tools/__init__.py
15776 swarms/tools/base_tool.py
14504 swarms/tools/json_former.py
600 swarms/tools/cohere_func_call_schema.py
4186 swarms/tools/tool_parse_exec.py
15710 swarms/tools/py_func_to_openai_func_str.py
3850 swarms/tools/pydantic_to_json.py
773 swarms/tools/function_util.py
7992 swarms/tools/tool_registry.py
2816 swarms/tools/tool_utils.py
1316 swarms/tools/json_utils.py
2965 swarms/tools/logits_processor.py
3542 swarms/tools/func_calling_utils.py
2578 swarms/tools/openai_tool_creator_decorator.py
10332 swarms/cli/main.py
1128 swarms/cli/create_agent.py
6952 swarms/cli/onboarding_process.py
0 swarms/cli/__init__.py
To devise a strategy for breaking the knowledge graph into manageable window sizes based on the provided file data, we can follow a structured approach:
### 1. **Classification of Files**
Group files by their directories and functionalities. Here's a categorized breakdown:
- **Structs**
- Core agent structures (e.g., `agent.py`, `base_structure.py`, etc.)
- Workflow management (e.g., `auto_swarm.py`, `multi_process_workflow.py`)
- **Telemetry**
- System information and logging (`bootup.py`, `user_utils.py`, etc.)
- **Schemas**
- Input and output schemas (`agent_input_schema.py`, `base_schemas.py`)
- **Prompts**
- Various prompt templates and agents (`ai_research_team.py`, `sales.py` etc.)
- **Agents**
- Agent creation and management logic (`openai_assistant.py`, `auto_generate_swarm_config.py`)
- **Tools**
- Utility functions and pre-built tools (`func_calling_executor.py`, `json_former.py`)
- **CLI**
- Command line interface files (`main.py`, `create_agent.py`)
### 2. **Calculating Byte Sizes**
From the provided data, calculate the cumulative size of the files in each classified category and check it against the output token window (roughly 16,384 tokens).
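As a rough illustration, the per-category totals can be computed directly from the `wc -c` output. The snippet below is a minimal sketch that assumes a `file_sizes` list of `(path, size_in_bytes)` tuples taken from the listing above and uses the second path component as the category:

```python
from collections import defaultdict

def sizes_by_category(file_sizes):
    """Sum byte sizes per top-level package (prompts, tools, structs, ...)."""
    totals = defaultdict(int)
    for path, size in file_sizes:
        parts = path.split("/")
        category = parts[1] if len(parts) > 1 else "misc"
        totals[category] += size
    return dict(totals)

file_sizes = [
    ("swarms/prompts/agent_prompts.py", 6886),
    ("swarms/tools/base_tool.py", 15776),
    # ... remaining (path, size) pairs from the listing above
]
print(sizes_by_category(file_sizes))
```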
### 3. **Constructing Knowledge Partitions**
Create partitions ensuring that each partition stays within the token limit. Here are some example partitions:
- **Partition 1: Core Structures (Total Size ~118,000 bytes)**
- `agent.py` (93,913)
- `base_structure.py` (15,901)
- `auto_swarm.py` (8,215)
- **Partition 2: Workflow Management (Total Size ~44,000 bytes)**
- `multi_process_workflow.py` (7,973)
- `concurrent_workflow.py` (22,308)
- `swarming_architectures.py` (13,382)
- **Partition 3: Prompts (Total Size ~24,000 bytes)**
- `ai_research_team.py` (5,233)
- `sales.py` (5,126)
- `python.py` (13,215)
- **Partition 4: Telemetry (Total Size ~7,000 bytes)**
- `bootup.py` (1,854)
- `user_utils.py` (1,879)
- `capture_sys_data.py` (3,325)
- **Partition 5: Agents (Total Size ~60,000 bytes)**
- `openai_assistant.py` (10,408)
- `create_agents_from_yaml.py` (9,389)
- Other utility agents...
### 4. **Visual Representation**
Use PlantUML to visualize the knowledge graph with each partition. Here's a simplified representation to get started:
```plantuml
@startuml
package "Knowledge Graph" {
package "Core Structures" {
[Agent] --> [Base Structure]
[Agent] --> [Auto Swarm]
}
package "Workflow Management" {
[Multi-Process Workflow] --> [Concurrent Workflow]
[Swarming Architectures] --> [Auto Swarm]
}
package "Prompts" {
[AI Research Team] --> [Sales]
[Python] --> [Agent Prompts]
}
package "Telemetry" {
[Bootup] --> [User Utils]
[Capture Sys Data] --> [Sys Info]
}
package "Agents" {
[OpenAI Assistant] --> [Agent Creator]
[Agent Config] --> [Agent Loader]
}
package "Tools" {
[Function Executor] --> [JSON Former]
[Tool Registry] --> [Tool Parser]
}
}
@enduml
```
### 5. **Handling Output Size Constraints**
For any partition exceeding token limits, consider:
- **Further subdivision** into smaller units (see the sketch after this list).
- **Prioritizing critical files** for knowledge transfer based on usage frequency or dependency.
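For instance, an oversized partition can be split recursively until every piece fits the budget. A rough sketch, using byte sizes as a stand-in for tokens:

```python
def split_partition(files, limit=16_384):
    """Recursively split a list of (path, size) pairs until each sub-partition fits `limit`."""
    total = sum(size for _, size in files)
    if total <= limit or len(files) == 1:
        return [files]
    mid = len(files) // 2
    return split_partition(files[:mid], limit) + split_partition(files[mid:], limit)
```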
### 6. **Implementation**
Adapt and iterate through the classification and partitioning process to accommodate any additional files or changes in structure, ensuring optimal usability of your knowledge graph.
Here is a simplified PlantUML diagram representing the code structure you provided. It categorizes files into their respective folders based on the given paths.
Consider each of these packages, tag them, and consider what we would expect of them.
```plantuml
@startuml
package "swarms" {
package "structs" {
[agent.py] <<file>>
[agent_registry.py] <<file>>
[agent_router.py] <<file>>
[agent_memory_manager.py] <<file>>
[auto_swarm.py] <<file>>
[base_structure.py] <<file>>
[groupchat.py] <<file>>
[stopping_conditions.py] <<file>>
[spreadsheets_swarm.py] <<file>>
[pulsar_swarm.py] <<file>>
[hiearchical_swarm.py] <<file>>
[rearrange.py] <<file>>
[output_types.py] <<file>>
[round_robin.py] <<file>>
[company.py] <<file>>
[mixture_of_agents.py] <<file>>
[multi_process_workflow.py] <<file>>
[concurrent_workflow.py] <<file>>
[swarming_architectures.py] <<file>>
[agents_available.py] <<file>>
[safe_loading.py] <<file>>
[base_workflow.py] <<file>>
[swarm_load_balancer.py] <<file>>
[queue_swarm.py] <<file>>
[conversation.py] <<file>>
[swarm_builder.py] <<file>>
[utils.py] <<file>>
[groupchat_new.py] <<file>>
[swarm_router.py] <<file>>
[graph_swarm.py] <<file>>
[sequential_workflow.py] <<file>>
[task.py] <<file>>
[concat.py] <<file>>
[swarm_matcher.py] <<file>>
[multi_agent_collab.py] <<file>>
[multi_agent_exec.py] <<file>>
[graph_workflow.py] <<file>>
[base_swarm.py] <<file>>
[swarm_registry.py] <<file>>
[swarm_arange.py] <<file>>
[majority_voting.py] <<file>>
[swarm_id_generator.py] <<file>>
[tree_swarm.py] <<file>>
[async_workflow.py] <<file>>
}
To consider each of these packages, we can categorize them based on their functionality and expected outcomes within a swarming architecture. Here's a brief overview of expected functionalities for some groups:
### Agent Management
- **agent.py**: Manages individual agent behaviors and properties.
- **agent_registry.py**: Tracks and manages the lifecycle of agents in the environment.
- **agent_memory_manager.py**: Handles memory allocation and data storage for agents.
### Workflow Coordination
- **base_workflow.py**: Defines the foundational structure for a swarm's workflow.
- **multi_process_workflow.py**: Manages workflows that involve multiple processes running concurrently.
- **async_workflow.py**: Implements asynchronous processing in workflows.
### Communication and Collaboration
- **groupchat.py**: Facilitates communication between agents in a chat-like manner.
- **multi_agent_collab.py**: Coordinates collaboration activities between multiple agents.
### Task Management
- **task.py**: Represents a unit of work for the agents.
- **task_queue.py**: Manages task distribution among agents.
### Swarm Strategies
- **base_swarm.py**: Establishes the base structure and rules for swarm operations.
- **round_robin.py**: Implements a round-robin approach for task assignments.
- **majority_voting.py**: Facilitates decision-making through majority voting among agents.
### Utilization and Tracking
- **utils.py**: Contains utility functions that aid in various operations across agents and workflows.
- **swarm_registry.py**: Keeps a registry of active swarms and their states.
### Data Structure and Graphs
- **graph_swarm.py**: Implements swarming strategies based on graph structures.
- **tree_swarm.py**: Utilizes tree structures for organizing and guiding agent actions.
Each package should interact seamlessly, following the principles of swarming (e.g., decentralized control, self-organization) while fulfilling its expected role.
package "telemetry" {
[bootup.py] <<file>>
[user_utils.py] <<file>>
[sys_info.py] <<file>>
[sentry_active.py] <<file>>
[capture_sys_data.py] <<file>>
[auto_upgrade_swarms.py] <<file>>
}
package "schemas" {
[agent_input_schema.py] <<file>>
[agent_step_schemas.py] <<file>>
[base_schemas.py] <<file>>
}
package "utils" {
[add_docs_to_agents.py] <<file>>
[lazy_loader.py] <<file>>
[data_to_text.py] <<file>>
[litellm_wrapper.py] <<file>>
[agent_ops_check.py] <<file>>
[loguru_logger.py] <<file>>
[update_agent_system_prompts.py] <<file>>
[try_except_wrapper.py] <<file>>
[swarm_reliability_checks.py] <<file>>
[async_file_creation.py] <<file>>
[pandas_utils.py] <<file>>
}
package "artifacts" {
[main_artifact.py] <<file>>
}
package "prompts" {
[ai_research_team.py] <<file>>
[react.py] <<file>>
[sales.py] <<file>>
}
package "agents" {
[openai_assistant.py] <<file>>
[auto_generate_swarm_config.py] <<file>>
}
package "tools" {
[base_tool.py] <<file>>
[tool_registry.py] <<file>>
[func_calling_executor.py] <<file>>
}
package "cli" {
[main.py] <<file>>
[create_agent.py] <<file>>
}
Here's a concise tagging and expectations summary for each package in the code structure:
### 1. Swarms
- **Tag:** Core Functionality
- **Expectations:** Implements swarming algorithms, agent management, workflows, and communication protocols.
### 2. Telemetry
- **Tag:** Monitoring & Logging
- **Expectations:** Tracks system data, manages boot-up processes, and captures system metrics for performance analysis.
### 3. Schemas
- **Tag:** Data Structures
- **Expectations:** Defines input/output schemas for agents and ensures data validation and integrity during processing.
### 4. Utils
- **Tag:** Utility Functions
- **Expectations:** Provides helper functions for logging, data conversion, error handling, and asynchronous file operations.
### 5. Artifacts
- **Tag:** Data Storage
- **Expectations:** Manages persistent data artifacts or models generated by the system, facilitating retrieval and usage.
### 6. Prompts
- **Tag:** Interaction Templates
- **Expectations:** Contains predefined interaction prompts for different contexts or agents, aiding in task-specific communication.
### 7. Agents
- **Tag:** AI Agents
- **Expectations:** Implements specific agent functionalities, including interaction with external APIs like OpenAI.
### 8. Tools
- **Tag:** Functional Extensions
- **Expectations:** Provides tools for executing functions, managing tool resources, and enhancing agent capabilities.
### 9. CLI
- **Tag:** Command Line Interface
- **Expectations:** Responsible for user interaction via command line, including agent creation and overall system management.
}
@enduml
```
This representation captures the hierarchical structure of the code with the respective files in packages (folders). Each file is tagged as a `<<file>>` for clarity. You can visualize it using a PlantUML renderer. Adjust and add additional files or packages as necessary to fit your requirements!
Given the files and their sizes, we can devise a strategy to group them into context windows of 16,384 output tokens. Here's a concise plan:
1. **Calculate Total File Sizes**: First, sum the sizes of all the files to determine how many chunks will be needed.
2. **File Size Distribution**: Sort the files by size in descending order. This will help prioritize larger files, ensuring we utilize the token limit effectively.
3. **Chunking Strategy**:
- Start from the largest file and keep adding files to the current chunk while the total size stays within the 16,384-token limit.
- If adding a file exceeds the limit, finalize the current chunk and start a new chunk with the current file.
- Repeat until all files are organized into chunks.
4. **Adjustments for Small Files**: If you encounter many small files, consider batching them together in a single chunk if they collectively do not exceed the token limit. This avoids many small chunks.
5. **Consider Dependencies**: If certain files depend on others (e.g., modules), try to keep those files within the same chunk to avoid issues when reading the code.
6. **Testing**: Run a test with a few chunks to ensure that they load correctly within the context window size.
Here's a pseudocode implementation:
```python
file_sizes = [
    ("swarms/structs/agent_registry.py", 10599),
    ("swarms/structs/omni_agent_types.py", 369),
    # Add all other files with their sizes
]

max_size = 16384      # token budget per chunk
chunks = []           # finalized chunks
current_chunk = []
current_size = 0

# Greedily pack files, largest first, without exceeding the budget
for file, size in sorted(file_sizes, key=lambda x: x[1], reverse=True):
    if current_size + size <= max_size:
        current_chunk.append(file)
        current_size += size
    else:
        # Finalize the current chunk and start a new one with this file
        chunks.append(current_chunk)
        current_chunk = [file]
        current_size = size

# Don't forget to save the last chunk
if current_chunk:
    chunks.append(current_chunk)
```
By following this strategy, you can ensure that your code is managed within the constraints of your output token limit effectively.
To effectively manage the source code within the context window sizes of the GPT-4o model, you can implement a strategy to break down your Python files into chunks that fit within the token limits. Here's a straightforward approach:
### 2. Group Files by Size
Analyze the output to determine how to combine smaller files. Keep files that are individually below the model's limits together while ensuring combined sizes do not exceed:
- Input Context: 128,000 tokens
- Output Context: 16,384 tokens
To manage your source code within the context window sizes of the GPT-4o model, follow this strategy:
### 1. Calculate Token Size
Estimate the token size for each file. Typically, 1 token corresponds to roughly 4 characters of code, so 1,000 characters correspond to about 250 tokens.
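A minimal helper for this estimate, assuming the 4-characters-per-token heuristic rather than a real tokenizer:

```python
def approx_tokens(path: str, chars_per_token: float = 4.0) -> int:
    """Rough token estimate from file size; a real tokenizer (e.g. tiktoken) would be more accurate."""
    with open(path, "rb") as f:
        n_bytes = len(f.read())
    return int(n_bytes / chars_per_token)
```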
### 2. Gather File Size Information
From your `wc -c` output, note the byte sizes of each file:
| Filename | Size (Bytes) | Approx Tokens |
|-----------------------------------------------|--------------|---------------|
| **Total** | **508,058** | **~127,000** |
| ... | ... | ... |
| (List all files as before) | | |
### 3. Group Smaller Files
You can group files to ensure the combined size remains well below limits:
- **Single Files**: Files under approximately 12,000 bytes (roughly 3,000 tokens) are safe to process individually.
- **Batching**: For larger files, consider batching to stay within a buffer of 128,000 input tokens and 16,384 output tokens.
### 4. List Groupings
Group files maintaining the total size for each group within limits. Example grouping could look like this:
- **Group 1**:
- `swarms/structs/agent_registry.py` (~10,599 bytes)
- `swarms/structs/omni_agent_types.py` (~369 bytes)
- (Continue grouping similar-sized files while keeping each group's total well under the 128,000-token input limit)
### 5. Example Grouping
A practical example could be:
- **Group 1**: `agent_registry.py`, `omni_agent_types.py`, `auto_swarm.py`, (`~20,000 bytes`)
- **Group 2**: `base_structure.py`, `agent_router.py` (`~30,000 bytes`)
- **Group 3**: `pulsar_swarm.py`, `workspace_manager.py` (`~18,000 bytes`)
### 6. Implementation
- Use a script to automate grouping. Loop through the files, calculate running total sizes while grouping files into lists that meet the criteria.
### 7. Processing
Once grouped, you can feed each list of files into the GPT-4o model one at a time until all groups have been processed.
### Conclusion
This general structure should help you split the source code effectively, allowing you to work within the limits of the GPT-4o model while maintaining logical file groupings.
### 3. Chunking Strategy
Given that:
- Individual larger files (e.g., `swarms/structs/agent.py`) should remain uncombined.
- Smaller files can be combined as follows:
#### Example Grouping:
- Group files that total under 16,384 tokens, for example from each subdirectory. Group smaller files together, ensuring they remain under the threshold.
```plaintext
- Group 1: (combine smaller files)
- swarms/telemetry/*.py
- swarms/utils/*.py
- Group 2: (larger files)
- swarms/artifacts/*.py
- swarms/prompts/*.py
```
### 4. Implementation
Create a script that iterates through the directories, groups files until the predefined token limits are reached, and saves each group in a way that retains the original directory structure.
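A sketch of such a script, assuming byte counts as a stand-in for tokens and a 16,384-unit budget per group:

```python
import os

MAX_GROUP_SIZE = 16_384  # budget per group (bytes here, as a proxy for tokens)

def group_files(root: str):
    """Walk `root`, greedily packing .py files into groups under MAX_GROUP_SIZE."""
    groups, current, current_size = [], [], 0
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in sorted(filenames):
            if not name.endswith(".py"):
                continue
            path = os.path.join(dirpath, name)
            size = os.path.getsize(path)
            if current and current_size + size > MAX_GROUP_SIZE:
                groups.append(current)
                current, current_size = [], 0
            current.append(path)  # paths retain the original directory structure
            current_size += size
    if current:
        groups.append(current)
    return groups

for i, group in enumerate(group_files("swarms"), start=1):
    print(f"Group {i}: {len(group)} files")
```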
### 5. Testing and Validation
After chunking, run a test to ensure that:
1. Each chunk remains within token limits.
2. No critical functionality is lost when files are combined or separated.
3. The script handles edge cases (e.g., very small files or files with comments).
This strategy maintains modularity while improving processing efficiency within the given constraints of the model.
Now consider recombination patterns to recombine the results in a quasi-meta-merge sort.
We can see our work as a Knuthian searching and sorting algorithm, using the LLM as our processor for partial ordering.
To implement a quasi-meta-merge sort using a language model (LLM) for recombination patterns, you can follow these algorithmic steps:
### 1. **Chunk Generation**
After chunking the Python files as described previously, treat each chunk as an item for processing. Each chunk will serve as an input for the LLM.
### 2. **Define Quasi-Merge Criteria**
Establish criteria for how chunks will be combined and ordered:
- **Semantic Cohesion**: Chunks that deal with similar functionalities or modules (e.g., all agent-related chunks).
- **File Size**: Combine smaller chunks first before larger ones for efficient processing.
- **Dependency Order**: Ensure that files relying on others are processed in the correct order, treating the import relationships as a DAG (see the sketch after this list).
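A minimal dependency-ordering sketch using Python's standard-library `graphlib`; the chunk names and edges here are illustrative, not taken from the real import graph:

```python
from graphlib import TopologicalSorter

# Hypothetical "chunk depends on chunks" edges derived from imports
deps = {
    "structs_chunk": {"utils_chunk", "schemas_chunk"},
    "agents_chunk": {"structs_chunk", "prompts_chunk"},
    "cli_chunk": {"agents_chunk"},
}

# Chunks come out in an order where every dependency precedes its dependents
print(list(TopologicalSorter(deps).static_order()))
```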
### 3. **Processing with LLM**
Use the LLM to analyze chunks and suggest combinations based on the defined merge criteria:
- **Input**: Provide the LLM with pairs of chunks.
- **Output**: Ask it to rate or assess the coherence and compatibility of merging these chunks based on their content.
### 4. **Recombination Patterns**
Implement a quasi-meta-merge pattern as follows:
1. **Pairing**: Start by pairing chunks for evaluation. Use a method like binary tree pairing or bucket sorting based on size or functionality.
2. **Evaluation**: For each pair, use the LLM to produce a compatibility score or a combined summary.
3. **Sorting**: Sort the results based on the LLM's feedback and scores, leading to a preliminary ordered list of chunks.
4. **Merge Process**: Recombine the pairs, generating merged outputs that represent cohesive functionalities.
### 5. **Iterative Refinement**
After the initial combination:
- **Re-evaluate**: Feed merged outputs back into the LLM for assessment.
- **Adjust Pairings**: Depending on the feedback, adjust combinations, possibly splitting or merging further based on newly identified relationships.
### 6. **Final Collection**
Once pairs have been assessed and recombined:
- Collect final outputs.
- Reorganize them based on the original directory structure.
- Conduct a final review or testing phase ensuring that combined outputs maintain expected functionality.
### Example Pseudocode
Here's a brief pseudo-implementation for reference:
```python
def quasi_meta_merge_sort(chunks):
pairs = generate_pairs(chunks)
evaluated_pairs = [(chunk1, chunk2, llm.evaluate(chunk1, chunk2)) for chunk1, chunk2 in pairs]
sorted_pairs = sort_by_evaluation_score(evaluated_pairs)
merged_chunks = []
for chunk1, chunk2, score in sorted_pairs:
merged_chunk = llm.merge(chunk1, chunk2)
merged_chunks.append(merged_chunk)
# Re-evaluate and adjust iteratively
while needs_refinement(merged_chunks):
merged_chunks = re_evaluate(merged_chunks)
return final_output(merged_chunks)
```
### Conclusion
This algorithm leverages the LLM's processing capabilities to facilitate an advanced sorting and merging strategy. The result is a more organized collection of source code chunks, preserving functionality while enhancing modularity and clarity. This approach aligns well with Knuth's methodology, focusing on efficient sorting and processing through intelligent structuring.
Here's how you can implement a quasi-meta-merge sort using a language model (LLM) to facilitate a Knuthian searching and sorting algorithm for recombining the Python file chunks based on their functionalities and dependencies.
### Steps for Implementation
1. **Chunk Generation**:
- Start with previously created chunks of Python files, considering their token size and relevance.
2. **Define Quasi-Merge Criteria**:
- **Semantic Cohesion**: Identify functionally similar chunks (e.g., all related to agents).
- **File Size**: Prefer smaller chunks for initial combinations.
- **Dependency Management**: Maintain execution order based on dependencies.
3. **Processing with the LLM**:
- Use the LLM to analyze the compatibility of merging chunks. The input can be pairs of chunks, and the output will provide a compatibility score or suggestions.
4. **Recombination Patterns**:
- **Pairing**: Form an initial set of pairs from the chunks using methods like binary pairing or random sampling.
- **Evaluation via LLM**: For each pair, check how well they can be merged based on content and functionality:
```python
def evaluate_pairs(pair_list):
results = []
for chunk1, chunk2 in pair_list:
score, reasons = llm.evaluate_merge(chunk1, chunk2)
results.append((chunk1, chunk2, score, reasons))
return sorted(results, key=lambda x: x[2], reverse=True) # Sort by score
```
5. **Sort and Merge**:
- Use the evaluation results to sort and merge chunks, prioritizing those with higher compatibility scores first.
- Merge chunks based on evaluation.
```python
def merge_chunks(sorted_pairs):
merged_results = []
for chunk1, chunk2, score, reasons in sorted_pairs:
merged_chunk = llm.merge(chunk1, chunk2)
merged_results.append(merged_chunk)
return merged_results
```
6. **Iterative Refinement**:
- After the initial combination, submit the merged output to the LLM for further assessment. If required, adjust pairings based on new insights:
```python
def refine_merges(merged_chunks):
while requires_refinement(merged_chunks):
updated_chunks = []
for chunk in merged_chunks:
feedback = llm.refine(chunk)
updated_chunks.append(feedback)
merged_chunks = updated_chunks
return merged_chunks
```
7. **Final Collection**:
- Organize the final output, maintaining directory structure and ensuring functionality:
```python
def collect_final_output(merged_chunks):
output_path = organize_by_structure(merged_chunks)
save_to_directory(output_path)
```
### Pseudocode Example
Here's a brief pseudocode example putting it all together:
```python
def quasi_meta_merge_sort(chunks):
pairs = generate_pairs(chunks)
evaluated_pairs = evaluate_pairs(pairs)
sorted_pairs = sort_by_evaluation_score(evaluated_pairs)
merged_chunks = merge_chunks(sorted_pairs)
# Iteratively refine until no further improvements can be made
merged_chunks = refine_merges(merged_chunks)
return collect_final_output(merged_chunks)
```
### Considerations
- **Performance**: Monitor the efficiency, especially if the number of chunks is large.
- **LLM Costs**: Be mindful of usage costs or performance issues while querying the LLM.
- **Testing**: Ensure that the recombined code retains functionality; consider including unit tests for major components (a smoke-test sketch follows below).
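As a minimal example of such a smoke test, assuming pytest and a handful of module names taken from the module listing elsewhere in this document:

```python
import importlib

import pytest

# Sample of recombined modules to re-import; extend with the rest of the listing
MODULES = [
    "swarms.structs.agent",
    "swarms.tools.base_tool",
    "swarms.telemetry.bootup",
]

@pytest.mark.parametrize("name", MODULES)
def test_recombined_module_imports(name):
    # Each recombined module should still import without errors
    importlib.import_module(name)
```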
### Conclusion
The outlined approach leverages an intelligent merging algorithm supported by semantic analysis from an LLM, creating an organized, high-quality set of Python files. By implementing these patterns, you can achieve a robust, modular codebase that aligns with the principles of efficient searching and sorting, similar to Knuth's methodologies.
@ -0,0 +1,59 @@
Your script appears to be a set of commands intended to be run by the "swarms" user. It sets up an environment, installs dependencies, and prepares to launch a service. Here are some suggestions to improve and enhance the script:
1. **Add Error Handling:**
- Implement error handling mechanisms with appropriate exits and messages to handle failures gracefully.
2. **Use Absolute Paths:**
- Prefer using absolute paths instead of relative paths to avoid any ambiguity.
3. **Add Logging:**
- Incorporate logging statements to track the progress and potential issues during script execution.
4. **Documentation:**
- Include comments to explain each step and document the purpose of the script and individual commands.
5. **Finalize Service Launch:**
- Ensure to start the required service once the dependencies are installed and configurations are completed.
Here's an enhanced version of your script incorporating these suggestions:
```bash
#!/bin/bash
set -e
set -x
# Set environment variables
export ROOT="/mnt/data1/swarms"
export HOME="${ROOT}/home/swarms"
unset CONDA_EXE
unset CONDA_PYTHON_EXE
export PATH="${ROOT}/var/swarms/agent_workspace/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
# Activate virtual environment
if [ ! -f "${ROOT}/var/swarms/agent_workspace/.venv/bin/activate" ]; then
virtualenv "${ROOT}/var/swarms/agent_workspace/.venv/"
fi
source "${ROOT}/var/swarms/agent_workspace/.venv/bin/activate"
# Install dependencies
pip install fastapi uvicorn termcolor
pip install -e "${ROOT}/opt/swarms/"
cd "${ROOT}/var/swarms/"
pip install -e "${ROOT}/opt/swarms-memory"
pip install "fastapi[standard]" "loguru" pydantic==2.8.2
# Verify installation
#pip freeze
# Launch your service (Uncomment and add your starting command)
#python /opt/swarms/api/main.py
# Start your service as a systemd service
# You can add the relevant service configuration and enable it here
```
Make sure to uncomment and add the necessary command for starting your service. Additionally, continue the script with the configuration and setup required to launch the service, such as creating a systemd service unit file and enabling the service.
Please adapt this script according to your specific service requirements, and ensure you have the necessary permissions and environment configurations to run the script successfully.
@ -0,0 +1,64 @@
░░
░░ A start job for unit swarms-uvicorn.service has finished successfully.
░░
░░ The job identifier is 155034.
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: Traceback (most recent call last):
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/uvicorn", line 8, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: sys.exit(main())
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return self.main(*args, **kwargs)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 1078, in main
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: rv = self.invoke(ctx)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return ctx.invoke(self.callback, **ctx.params)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 783, in invoke
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return __callback(*args, **kwargs)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/main.py", line 412, in main
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: run(
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/main.py", line 579, in run
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: server.run()
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/server.py", line 65, in run
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return asyncio.run(self.serve(sockets=sockets))
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/usr/lib/python3.10/asyncio/runners.py", line 44, in run
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return loop.run_until_complete(main)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "uvloop/loop.pyx", line 1518, in uvloop.loop.Loop.run_until_complete
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/server.py", line 69, in serve
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: await self._serve(sockets)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/server.py", line 76, in _serve
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: config.load()
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/config.py", line 434, in load
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: self.loaded_app = import_from_string(self.app)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/importer.py", line 19, in import_from_string
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: module = importlib.import_module(module_str)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return _bootstrap._gcd_import(name[level:], package, level)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap_external>", line 883, in exec_module
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/api/main.py", line 18, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms import Agent
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/__init__.py", line 13, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.telemetry.bootup import bootup # noqa: E402, F403
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/telemetry/bootup.py", line 6, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.telemetry.auto_upgrade_swarms import auto_update
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/telemetry/auto_upgrade_swarms.py", line 4, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.loguru_logger import initialize_logger
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/__init__.py", line 2, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.data_to_text import (
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/data_to_text.py", line 5, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.pdf_to_text import pdf_to_text
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/pdf_to_text.py", line 1, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.try_except_wrapper import try_except_wrapper
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/try_except_wrapper.py", line 7, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: logger = initialize_logger("try_except_wrapper")
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/loguru_logger.py", line 28, in initialize_logger
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: logger.add(
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/loguru/_logger.py", line 802, in add
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: wrapped_sink = FileSink(path, **kwargs)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/loguru/_file_sink.py", line 192, in __init__
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: self._create_file(path)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/loguru/_file_sink.py", line 226, in _create_file
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: self._file = open(path, **self._kwargs)
@ -0,0 +1,79 @@
It looks like you are setting up a project environment and configuring services using bash scripts. Your script contains steps to clone repositories, switch branches, copy files, set permissions, create directories, install dependencies, and start services.
To improve readability and maintainability, you can consider the following suggestions:
1. **Modularize your Script:**
- Separate different sets of tasks into functions for better organization.
- Using functions can help with code reuse and make each part of the script easier to understand.
2. **Error Handling:**
- Implement proper error handling and informative messages for critical steps.
- Consider using `trap` to catch and handle errors gracefully.
3. **Logging:**
- Add logging statements to track the script execution flow and display informative messages.
4. **Consistent Variable Naming:**
- Ensure consistency in variable names and use descriptive names for clarity.
5. **Comments and Documentation:**
- Add comments to explain complex logic or processes in the script.
- Consider adding a header comment to describe the purpose of the script, its inputs, and expected outputs.
6. **Configuration Management:**
- Consider using configuration files to store variables and settings that may need to change without modifying the script itself.
Here is an example structure to illustrate these suggestions:
```bash
#!/bin/bash
set -e
set -x
# Global variables
ROOT="/mnt/data1/swarms"
WORKSOURCE="${ROOT}/opt/swarms/api"
# Function to clone or update a git repository
git_clone_or_update() {
local repo_url=$1
local target_dir=$2
if [ ! -d "$target_dir" ]; then
git clone "$repo_url" "$target_dir"
else
pushd "$target_dir" || exit 1
git pull
popd || exit 2
fi
}
# Ensure swarms repository is cloned
git_clone_or_update "https://github.com/jmikedupont2/swarms" "${ROOT}/opt/swarms"
# Switch to a specific branch
pushd "${ROOT}/opt/swarms/" || exit 1
git checkout feature/ec2
git pull local feature/ec2
popd || exit 2
# Ensure swarms-memory repository is cloned
git_clone_or_update "https://github.com/The-Swarm-Corporation/swarms-memory" "${ROOT}/opt/swarms-memory"
# Other setup steps go here...
# Additional set up for uvicorn and nginx
mkdir -p "${ROOT}/var/run/uvicorn/env/"
if [ ! -f "${ROOT}/var/run/uvicorn/env/bin/activate" ]; then
virtualenv "${ROOT}/var/run/uvicorn/env/"
fi
. "${ROOT}/var/run/uvicorn/env/bin/activate"
pip install uvicorn
# Start services
systemctl daemon-reload
systemctl start swarms-uvicorn
systemctl enable swarms-uvicorn
service nginx restart
```
@ -0,0 +1,210 @@
MOD sentry_sdk
MOD sentry_sdk._compat
MOD sentry_sdk._init_implementation
MOD sentry_sdk._lru_cache
MOD sentry_sdk._werkzeug
MOD sentry_sdk.ai
MOD sentry_sdk.ai.monitoring
MOD sentry_sdk.ai.utils
MOD sentry_sdk.api
MOD sentry_sdk.attachments
MOD sentry_sdk.client
MOD sentry_sdk.consts
MOD sentry_sdk.crons
MOD sentry_sdk.crons.api
MOD sentry_sdk.crons.consts
MOD sentry_sdk.crons.decorator
MOD sentry_sdk.debug
MOD sentry_sdk.envelope
MOD sentry_sdk.flag_utils
MOD sentry_sdk.hub
MOD sentry_sdk.integrations
MOD sentry_sdk.integrations._asgi_common
MOD sentry_sdk.integrations._wsgi_common
MOD sentry_sdk.integrations.aiohttp
MOD sentry_sdk.integrations.anthropic
MOD sentry_sdk.integrations.argv
MOD sentry_sdk.integrations.ariadne
MOD sentry_sdk.integrations.arq
MOD sentry_sdk.integrations.asgi
MOD sentry_sdk.integrations.asyncpg
MOD sentry_sdk.integrations.atexit
MOD sentry_sdk.integrations.aws_lambda
MOD sentry_sdk.integrations.boto3
MOD sentry_sdk.integrations.bottle
MOD sentry_sdk.integrations.celery
MOD sentry_sdk.integrations.celery.beat
MOD sentry_sdk.integrations.celery.utils
MOD sentry_sdk.integrations.chalice
MOD sentry_sdk.integrations.clickhouse_driver
MOD sentry_sdk.integrations.cohere
MOD sentry_sdk.integrations.dedupe
MOD sentry_sdk.integrations.django
MOD sentry_sdk.integrations.excepthook
MOD sentry_sdk.integrations.falcon
MOD sentry_sdk.integrations.fastapi
MOD sentry_sdk.integrations.flask
MOD sentry_sdk.integrations.gql
MOD sentry_sdk.integrations.graphene
MOD sentry_sdk.integrations.httpx
MOD sentry_sdk.integrations.huey
MOD sentry_sdk.integrations.huggingface_hub
MOD sentry_sdk.integrations.langchain
MOD sentry_sdk.integrations.litestar
MOD sentry_sdk.integrations.logging
MOD sentry_sdk.integrations.loguru
MOD sentry_sdk.integrations.modules
MOD sentry_sdk.integrations.openai
MOD sentry_sdk.integrations.pymongo
MOD sentry_sdk.integrations.pyramid
MOD sentry_sdk.integrations.quart
MOD sentry_sdk.integrations.redis
MOD sentry_sdk.integrations.redis._sync_common
MOD sentry_sdk.integrations.redis.consts
MOD sentry_sdk.integrations.redis.modules
MOD sentry_sdk.integrations.redis.modules.caches
MOD sentry_sdk.integrations.redis.modules.queries
MOD sentry_sdk.integrations.redis.rb
MOD sentry_sdk.integrations.redis.redis
MOD sentry_sdk.integrations.redis.redis_cluster
MOD sentry_sdk.integrations.redis.redis_py_cluster_legacy
MOD sentry_sdk.integrations.redis.utils
MOD sentry_sdk.integrations.rq
MOD sentry_sdk.integrations.sanic
MOD sentry_sdk.integrations.sqlalchemy
MOD sentry_sdk.integrations.starlette
MOD sentry_sdk.integrations.starlite
MOD sentry_sdk.integrations.stdlib
MOD sentry_sdk.integrations.strawberry
MOD sentry_sdk.integrations.threading
MOD sentry_sdk.integrations.tornado
MOD sentry_sdk.integrations.wsgi
MOD sentry_sdk.metrics
MOD sentry_sdk.monitor
MOD sentry_sdk.profiler
MOD sentry_sdk.profiler.continuous_profiler
MOD sentry_sdk.profiler.transaction_profiler
MOD sentry_sdk.profiler.utils
MOD sentry_sdk.scope
MOD sentry_sdk.scrubber
MOD sentry_sdk.serializer
MOD sentry_sdk.session
MOD sentry_sdk.sessions
MOD sentry_sdk.spotlight
MOD sentry_sdk.tracing
MOD sentry_sdk.tracing_utils
MOD sentry_sdk.transport
MOD sentry_sdk.utils
MOD swarm_models
MOD swarm_models.base_llm
MOD swarm_models.base_multimodal_model
MOD swarms.agents
MOD swarms.agents.ape_agent
MOD swarms.agents.create_agents_from_yaml
MOD swarms.agents.stopping_conditions
MOD swarms.agents.tool_agent
MOD swarms.artifacts
MOD swarms.artifacts.main_artifact
MOD swarms.prompts
MOD swarms.prompts.ag_prompt
MOD swarms.prompts.agent_system_prompts
MOD swarms.prompts.code_interpreter
MOD swarms.prompts.documentation
MOD swarms.prompts.finance_agent_prompt
MOD swarms.prompts.growth_agent_prompt
MOD swarms.prompts.legal_agent_prompt
MOD swarms.prompts.multi_modal_autonomous_instruction_prompt
MOD swarms.prompts.operations_agent_prompt
MOD swarms.prompts.product_agent_prompt
MOD swarms.prompts.prompt
MOD swarms.prompts.prompt_generator
MOD swarms.prompts.prompt_generator_optimizer
MOD swarms.prompts.tools
MOD swarms.schemas
MOD swarms.schemas.agent_input_schema
MOD swarms.schemas.agent_step_schemas
MOD swarms.schemas.base_schemas
MOD swarms.structs
MOD swarms.structs.agent
MOD swarms.structs.agents_available
MOD swarms.structs.async_workflow
MOD swarms.structs.auto_swarm
MOD swarms.structs.base_structure
MOD swarms.structs.base_swarm
MOD swarms.structs.base_workflow
MOD swarms.structs.concat
MOD swarms.structs.concurrent_workflow
MOD swarms.structs.conversation
MOD swarms.structs.graph_workflow
MOD swarms.structs.groupchat
MOD swarms.structs.majority_voting
MOD swarms.structs.message
MOD swarms.structs.mixture_of_agents
MOD swarms.structs.multi_agent_collab
MOD swarms.structs.multi_agent_exec
MOD swarms.structs.omni_agent_types
MOD swarms.structs.queue_swarm
MOD swarms.structs.rearrange
MOD swarms.structs.round_robin
MOD swarms.structs.sequential_workflow
MOD swarms.structs.spreadsheet_swarm
MOD swarms.structs.swarm_arange
MOD swarms.structs.swarm_matcher
MOD swarms.structs.swarm_net
MOD swarms.structs.swarm_router
MOD swarms.structs.swarming_architectures
MOD swarms.structs.task
MOD swarms.structs.utils
MOD swarms.telemetry
MOD swarms.telemetry.auto_upgrade_swarms
MOD swarms.telemetry.bootup
MOD swarms.telemetry.capture_sys_data
MOD swarms.telemetry.check_update
MOD swarms.telemetry.sentry_active
MOD swarms.telemetry.sys_info
MOD swarms.telemetry.user_utils
MOD swarms.tools
MOD swarms.tools.base_tool
MOD swarms.tools.cohere_func_call_schema
MOD swarms.tools.func_calling_executor
MOD swarms.tools.func_calling_utils
MOD swarms.tools.func_to_str
MOD swarms.tools.function_util
MOD swarms.tools.json_former
MOD swarms.tools.logits_processor
MOD swarms.tools.openai_func_calling_schema_pydantic
MOD swarms.tools.openai_tool_creator_decorator
MOD swarms.tools.prebuilt
MOD swarms.tools.prebuilt.code_executor
MOD swarms.tools.prebuilt.math_eval
MOD swarms.tools.py_func_to_openai_func_str
MOD swarms.tools.pydantic_to_json
MOD swarms.tools.tool_parse_exec
MOD swarms.tools.tool_registry
MOD swarms.tools.tool_utils
MOD swarms.utils
MOD swarms.utils.add_docs_to_agents
MOD swarms.utils.any_to_str
MOD swarms.utils.calculate_func_metrics
MOD swarms.utils.class_args_wrapper
MOD swarms.utils.data_to_text
MOD swarms.utils.disable_logging
MOD swarms.utils.file_processing
MOD swarms.utils.formatter
MOD swarms.utils.litellm
MOD swarms.utils.loguru_logger
MOD swarms.utils.markdown_message
MOD swarms.utils.pandas_utils
MOD swarms.utils.parse_code
MOD swarms.utils.pdf_to_text
MOD swarms.utils.try_except_wrapper
MOD swarms.utils.wrapper_clusterop
MOD swarms_memory
MOD swarms_memory.vector_dbs
MOD swarms_memory.vector_dbs.chroma_db_wrapper
@ -0,0 +1,27 @@
# from https://github.com/encode/uvicorn/issues/678
[Unit]
Description=swarms
After=network.target
[Service]
Type=simple
User=swarms
Group=swarms
DynamicUser=true
WorkingDirectory=ROOT/var/run/swarms/
PrivateTmp=true
EnvironmentFile=ROOT/var/run/swarms/secrets/env
ExecStart=ROOT/var/run/uvicorn/env/bin/uvicorn \
--proxy-headers \
--forwarded-allow-ips='*' \
--workers=4 \
--port=54748 \
--no-access-log \
--uds ROOT/run/uvicorn/uvicorn-swarms-api.sock \
_.asgi:application
ExecReload=/bin/kill -HUP ${MAINPID}
RestartSec=1
Restart=always
[Install]
WantedBy=multi-user.target
@ -0,0 +1,3 @@
we can add the introspector to any project like this:
1. git submodule from git/introspector/projects/projectname/ -> project dir/introspector
1. api link from git/projectname/[issues,tasks,prs] -> project dir/introspector/[tasks,issues, prs]
@ -5,7 +5,8 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "swarms"
version = "6.7.9"
version = "6.7.5"
description = "Swarms - TGSC"
license = "MIT"
authors = ["Kye Gomez <kye@apac.ai>"]
@ -62,9 +63,12 @@ python = ">=3.10,<4.0"
asyncio = ">=3.4.3,<4.0"
toml = "*"
pypdf = "5.1.0"
swarm-models = "*"
loguru = "*"
<<<<<<< HEAD
pydantic = "*"
=======
pydantic = ">=2.8.2<3.0"
>>>>>>> 66cb7a58 ([6.5.7])
tenacity = "*"
psutil = "*"
sentry-sdk = "*"
@ -74,13 +78,21 @@ docstring_parser = "0.16" # TODO:
tiktoken = "*"
networkx = "*"
aiofiles = "*"
<<<<<<< HEAD
=======
clusterops = "*"
>>>>>>> 66cb7a58 ([6.5.7])
# chromadb = "*"
reportlab = "*"
doc-master = "*"
rich = "*"
# sentence-transformers = "*"
swarm-models = "*"
<<<<<<< HEAD
termcolor = "*"
clusterops = { git = "https://github.com/patrickbdevaney/clusterops.git", branch = "main" }
=======
>>>>>>> 66cb7a58 ([6.5.7])
# [tool.poetry.extras]
@ -0,0 +1,618 @@
import torch
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from loguru import logger
from dataclasses import dataclass
from typing import Optional, Tuple, Dict
import math
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
@dataclass
class TransformerConfig:
"""Configuration class for MoE Transformer model parameters."""
vocab_size: int = 50257
hidden_size: int = 768
num_attention_heads: int = 12
num_expert_layers: int = 4
num_experts: int = 8
expert_capacity: int = 32
max_position_embeddings: int = 1024
dropout_prob: float = 0.1
layer_norm_epsilon: float = 1e-5
initializer_range: float = 0.02
num_query_groups: int = 4 # For multi-query attention
class ExpertLayer(nn.Module):
"""Individual expert neural network."""
def __init__(self, config: TransformerConfig):
super().__init__()
self.fc1 = nn.Linear(
config.hidden_size, 4 * config.hidden_size
)
self.fc2 = nn.Linear(
4 * config.hidden_size, config.hidden_size
)
self.activation = nn.GELU()
self.dropout = nn.Dropout(config.dropout_prob)
def forward(self, x: Tensor) -> Tensor:
x = self.fc1(x)
x = self.activation(x)
x = self.dropout(x)
x = self.fc2(x)
return x
class MixtureOfExperts(nn.Module):
"""Mixture of Experts layer with dynamic routing."""
def __init__(self, config: TransformerConfig):
super().__init__()
self.num_experts = config.num_experts
self.expert_capacity = config.expert_capacity
# Create expert networks
self.experts = nn.ModuleList(
[ExpertLayer(config) for _ in range(config.num_experts)]
)
# Router network
self.router = nn.Linear(
config.hidden_size, config.num_experts
)
def forward(self, x: Tensor) -> Tuple[Tensor, Dict]:
"""Route inputs to experts and combine outputs."""
batch_size, seq_len, hidden_size = x.shape
# Calculate routing probabilities
router_logits = self.router(x)
routing_weights = F.softmax(router_logits, dim=-1)
# Select top-k experts
top_k = 2
gates, indices = torch.topk(routing_weights, top_k, dim=-1)
gates = F.softmax(gates, dim=-1)
# Process inputs through selected experts
final_output = torch.zeros_like(x)
router_load = torch.zeros(self.num_experts, device=x.device)
for i in range(top_k):
expert_index = indices[..., i]
gate = gates[..., i : i + 1]
# Count expert assignments
for j in range(self.num_experts):
router_load[j] += (expert_index == j).float().sum()
# Process through selected experts
for j in range(self.num_experts):
mask = expert_index == j
if not mask.any():
continue
expert_input = x[mask]
expert_output = self.experts[j](expert_input)
final_output[mask] += gate[mask] * expert_output
aux_loss = router_load.float().var() / (
router_load.float().mean() ** 2
)
return final_output, {"load_balancing_loss": aux_loss}
class MultiQueryAttention(nn.Module):
"""Multi-Query Attention mechanism with proper multi-query group handling."""
def __init__(self, config: TransformerConfig):
super().__init__()
self.num_attention_heads = config.num_attention_heads
self.num_query_groups = config.num_query_groups
self.hidden_size = config.hidden_size
self.head_dim = (
config.hidden_size // config.num_attention_heads
)
# Query projection maintains full head dimension
self.q_proj = nn.Linear(
config.hidden_size, config.hidden_size
)
# Key and value projections use reduced number of heads (query groups)
self.k_proj = nn.Linear(
config.hidden_size,
self.head_dim * config.num_query_groups,
)
self.v_proj = nn.Linear(
config.hidden_size,
self.head_dim * config.num_query_groups,
)
self.dropout = nn.Dropout(config.dropout_prob)
# Calculate heads per group for proper reshaping
self.heads_per_group = (
self.num_attention_heads // self.num_query_groups
)
def forward(
self,
hidden_states: Tensor,
attention_mask: Optional[Tensor] = None,
cache: Optional[Dict[str, Tensor]] = None,
) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
batch_size, seq_length, _ = hidden_states.shape
# Project queries, keys, and values
queries = self.q_proj(hidden_states)
keys = self.k_proj(hidden_states)
values = self.v_proj(hidden_states)
# Reshape queries to full number of heads
queries = queries.view(
batch_size,
seq_length,
self.num_attention_heads,
self.head_dim,
)
# Reshape keys and values to number of query groups
keys = keys.view(
batch_size,
seq_length,
self.num_query_groups,
self.head_dim,
)
values = values.view(
batch_size,
seq_length,
self.num_query_groups,
self.head_dim,
)
# Transpose for batch matrix multiplication
queries = queries.transpose(
1, 2
) # (batch, n_heads, seq_len, head_dim)
keys = keys.transpose(
1, 2
) # (batch, n_groups, seq_len, head_dim)
values = values.transpose(
1, 2
) # (batch, n_groups, seq_len, head_dim)
# Repeat keys and values for each head in the group
keys = keys.repeat_interleave(self.heads_per_group, dim=1)
values = values.repeat_interleave(self.heads_per_group, dim=1)
# Compute attention scores
scale = 1.0 / math.sqrt(self.head_dim)
scores = torch.matmul(queries, keys.transpose(-2, -1)) * scale
if attention_mask is not None:
# Expand attention mask to match scores dimensions
expanded_mask = attention_mask.unsqueeze(1).unsqueeze(2)
expanded_mask = expanded_mask.expand(
batch_size,
self.num_attention_heads,
seq_length,
seq_length,
)
mask_value = torch.finfo(scores.dtype).min
attention_mask = expanded_mask.eq(0).float() * mask_value
scores = scores + attention_mask
attention_weights = F.softmax(scores, dim=-1)
attention_weights = self.dropout(attention_weights)
# Compute attention output
attention_output = torch.matmul(attention_weights, values)
attention_output = attention_output.transpose(1, 2)
attention_output = attention_output.reshape(
batch_size, seq_length, -1
)
return attention_output, None
class MoETransformer(nn.Module):
"""
Production-grade Transformer model with Mixture of Experts and Multi-Query Attention.
Features:
- Multi-Query Attention mechanism for efficient inference
- Mixture of Experts for dynamic routing and specialization
- Real-time weight updates based on input similarity
- Built-in logging and monitoring
- Type annotations for better code maintainability
"""
def __init__(self, config: TransformerConfig):
super().__init__()
self.config = config
# Initialize components
self.embedding = nn.Embedding(
config.vocab_size, config.hidden_size
)
self.position_embedding = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
# Multi-Query Attention layers
self.attention_layers = nn.ModuleList(
[
MultiQueryAttention(config)
for _ in range(config.num_expert_layers)
]
)
# Mixture of Experts layers
self.moe_layers = nn.ModuleList(
[
MixtureOfExperts(config)
for _ in range(config.num_expert_layers)
]
)
# Layer normalization and dropout
self.layer_norm = nn.LayerNorm(
config.hidden_size, eps=config.layer_norm_epsilon
)
self.dropout = nn.Dropout(config.dropout_prob)
# Output projection
self.output_projection = nn.Linear(
config.hidden_size, config.vocab_size
)
# Initialize weights
self.apply(self._init_weights)
logger.info("Initialized MoETransformer model")
def _init_weights(self, module: nn.Module):
"""Initialize model weights."""
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(
mean=0.0, std=self.config.initializer_range
)
if (
isinstance(module, nn.Linear)
and module.bias is not None
):
module.bias.data.zero_()
def get_position_embeddings(self, position_ids: Tensor) -> Tensor:
"""Generate position embeddings."""
return self.position_embedding(position_ids)
def forward(
self,
input_ids: Tensor,
attention_mask: Optional[Tensor] = None,
position_ids: Optional[Tensor] = None,
cache: Optional[Dict[str, Tensor]] = None,
) -> Tuple[Tensor, Dict]:
"""
Forward pass through the model.
Args:
input_ids: Input token IDs
attention_mask: Attention mask for padding
position_ids: Position IDs for positioning encoding
cache: Cache for key/value states in generation
Returns:
tuple: (logits, auxiliary_outputs)
"""
batch_size, seq_length = input_ids.shape
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(
input_ids
)
# Get embeddings
inputs_embeds = self.embedding(input_ids)
position_embeds = self.get_position_embeddings(position_ids)
hidden_states = inputs_embeds + position_embeds
# Initialize auxiliary outputs
aux_outputs = {"moe_losses": []}
# Process through transformer layers
for attention_layer, moe_layer in zip(
self.attention_layers, self.moe_layers
):
# Multi-Query Attention
attention_output, _ = attention_layer(
hidden_states, attention_mask, cache
)
hidden_states = self.layer_norm(
hidden_states + attention_output
)
# Mixture of Experts
moe_output, moe_aux = moe_layer(hidden_states)
hidden_states = self.layer_norm(
hidden_states + moe_output
)
aux_outputs["moe_losses"].append(
moe_aux["load_balancing_loss"]
)
# Final output projection
logits = self.output_projection(hidden_states)
return logits, aux_outputs
def fetch_loss(
self,
logits: Tensor,
labels: Tensor,
aux_outputs: Dict,
reduction: str = "mean",
) -> Tensor:
"""
Calculate the total loss including MoE balancing losses.
Args:
logits: Model output logits
labels: Ground truth labels
aux_outputs: Auxiliary outputs from forward pass
reduction: Loss reduction method
Returns:
Tensor: Total loss
"""
# Calculate cross entropy loss
ce_loss = F.cross_entropy(
logits.view(-1, self.config.vocab_size),
labels.view(-1),
reduction=reduction,
)
# Calculate MoE loss
moe_loss = torch.stack(aux_outputs["moe_losses"]).mean()
# Combine losses
total_loss = ce_loss + 0.01 * moe_loss
logger.debug(
f"CE Loss: {ce_loss.item():.4f}, "
f"MoE Loss: {moe_loss.item():.4f}"
)
return total_loss
@torch.no_grad()
def generate(
self,
input_ids: Tensor,
max_length: int = 100,
temperature: float = 1.0,
top_k: int = 50,
top_p: float = 0.9,
) -> Tensor:
"""
Generate text using the model.
Args:
input_ids: Initial input tokens
max_length: Maximum sequence length to generate
temperature: Sampling temperature
top_k: Number of highest probability tokens to keep
top_p: Cumulative probability for nucleus sampling
Returns:
Tensor: Generated token IDs
"""
batch_size = input_ids.shape[0]
device = input_ids.device
# Initialize sequence with input_ids
generated = input_ids
# Cache for key-value pairs
cache = {}
for _ in range(max_length):
# Get position IDs for current sequence
position_ids = torch.arange(
generated.shape[1], dtype=torch.long, device=device
)
position_ids = position_ids.unsqueeze(0).expand(
batch_size, -1
)
# Forward pass
logits, _ = self.forward(
generated, position_ids=position_ids, cache=cache
)
# Get next token logits
next_token_logits = logits[:, -1, :] / temperature
# Apply top-k filtering
if top_k > 0:
indices_to_remove = (
next_token_logits
< torch.topk(next_token_logits, top_k)[0][
..., -1, None
]
)
next_token_logits[indices_to_remove] = float("-inf")
# Apply top-p (nucleus) filtering
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(
next_token_logits, descending=True
)
cumulative_probs = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
sorted_indices_to_remove[..., 1:] = (
sorted_indices_to_remove[..., :-1].clone()
)
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[
sorted_indices_to_remove
]
next_token_logits[indices_to_remove] = float("-inf")
# Sample next token
probs = F.softmax(next_token_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)
# Append next token to sequence
generated = torch.cat((generated, next_token), dim=1)
# Check for end of sequence token
if (next_token == self.config.vocab_size - 1).all():
break
return generated
# Initialize model configuration
config = TransformerConfig(
vocab_size=50257,
hidden_size=768,
num_attention_heads=12,
num_expert_layers=4,
num_experts=8,
expert_capacity=32,
max_position_embeddings=1024,
num_query_groups=4,
)
def prepare_sample_data(
batch_size: int = 8,
seq_length: int = 512,
vocab_size: int = 50257,
) -> DataLoader:
"""Create sample data for demonstration."""
# Create random input sequences
input_ids = torch.randint(
0, vocab_size, (100, seq_length) # 100 samples
)
# Create target sequences (shifted by 1)
labels = torch.randint(0, vocab_size, (100, seq_length))
# Create attention masks (1 for real tokens, 0 for padding)
attention_mask = torch.ones_like(input_ids)
# Create dataset and dataloader
dataset = TensorDataset(input_ids, attention_mask, labels)
dataloader = DataLoader(
dataset, batch_size=batch_size, shuffle=True
)
return dataloader
def train_step(
model: MoETransformer,
batch: tuple,
optimizer: torch.optim.Optimizer,
device: str = "cuda" if torch.cuda.is_available() else "cpu",
) -> float:
"""Execute single training step."""
model.train()
optimizer.zero_grad()
# Unpack batch
input_ids, attention_mask, labels = [b.to(device) for b in batch]
# Forward pass
logits, aux_outputs = model(
input_ids=input_ids, attention_mask=attention_mask
)
# Calculate loss
loss = model.fetch_loss(logits, labels, aux_outputs)
# Backward pass
loss.backward()
optimizer.step()
return loss.item()
def main():
# Set device
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using device: {device}")
# Initialize model
model = MoETransformer(config).to(device)
logger.info("Model initialized")
# Setup optimizer
optimizer = torch.optim.AdamW(
model.parameters(), lr=1e-4, weight_decay=0.01
)
# Prepare data
dataloader = prepare_sample_data()
logger.info("Data prepared")
# Training loop
num_epochs = 3
for epoch in range(num_epochs):
epoch_losses = []
for batch_idx, batch in enumerate(dataloader):
loss = train_step(model, batch, optimizer, device)
epoch_losses.append(loss)
if batch_idx % 10 == 0:
logger.info(
f"Epoch {epoch+1}/{num_epochs} "
f"Batch {batch_idx}/{len(dataloader)} "
f"Loss: {loss:.4f}"
)
avg_loss = np.mean(epoch_losses)
logger.info(f"Epoch {epoch+1} average loss: {avg_loss:.4f}")
# Generation example
model.eval()
with torch.no_grad():
# Prepare input prompt
prompt = torch.randint(0, config.vocab_size, (1, 10)).to(
device
)
# Generate sequence
generated = model.generate(
input_ids=prompt,
max_length=50,
temperature=0.7,
top_k=50,
top_p=0.9,
)
logger.info(f"Generated sequence shape: {generated.shape}")
if __name__ == "__main__":
main()

@ -0,0 +1,433 @@
import asyncio
import json
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import Dict, List, Optional, Set
import aiohttp
import matplotlib.pyplot as plt
import networkx as nx
import websockets
from loguru import logger
from swarms import Agent
TREND_AGENT_PROMPT = """You are a specialized blockchain trend analysis agent. Your role:
1. Analyze transaction patterns in Solana blockchain data
2. Identify volume trends, price movements, and temporal patterns
3. Focus on whale movements and their market impact
4. Format findings in clear, structured JSON
5. Include confidence scores for each insight
6. Flag unusual patterns or anomalies
7. Provide historical context for significant movements
Output format:
{
"trends": [
{"pattern": str, "confidence": float, "impact": str}
],
"whale_activity": {...},
"temporal_analysis": {...}
}"""
RISK_AGENT_PROMPT = """You are a blockchain risk assessment specialist. Your tasks:
1. Identify suspicious transaction patterns
2. Monitor for known exploit signatures
3. Assess wallet clustering and relationship patterns
4. Evaluate transaction velocity and size anomalies
5. Check for bridge-related risks
6. Monitor smart contract interactions
7. Flag potential wash trading
Output format:
{
"risk_score": float,
"flags": [...],
"recommendations": [...]
}"""
SUMMARY_AGENT_PROMPT = """You are a blockchain data synthesis expert. Your responsibilities:
1. Combine insights from trend and risk analyses
2. Prioritize actionable intelligence
3. Highlight critical patterns
4. Generate executive summaries
5. Provide market context
6. Make predictions with confidence intervals
7. Suggest trading strategies based on data
Output format:
{
"key_insights": [...],
"market_impact": str,
"recommendations": {...}
}"""
@dataclass
class Transaction:
signature: str
timestamp: datetime
amount: float
from_address: str
to_address: str
class SolanaRPC:
def __init__(
self, endpoint="https://api.mainnet-beta.solana.com"
):
self.endpoint = endpoint
self.session = None
async def get_signatures(self, address: str) -> List[Dict]:
if not self.session:
self.session = aiohttp.ClientSession()
payload = {
"jsonrpc": "2.0",
"id": 1,
"method": "getSignaturesForAddress",
"params": [address, {"limit": 100}],
}
async with self.session.post(
self.endpoint, json=payload
) as response:
result = await response.json()
return result.get("result", [])
async def get_transaction(self, signature: str) -> Dict:
payload = {
"jsonrpc": "2.0",
"id": 1,
"method": "getTransaction",
"params": [
signature,
{
"encoding": "json",
"maxSupportedTransactionVersion": 0,
},
],
}
async with self.session.post(
self.endpoint, json=payload
) as response:
result = await response.json()
return result.get("result", {})
class AlertSystem:
def __init__(self, email: str, threshold: float = 1000.0):
self.email = email
self.threshold = threshold
self.smtp_server = "smtp.gmail.com"
self.smtp_port = 587
async def check_and_alert(
self, transaction: Transaction, risk_score: float
):
if transaction.amount > self.threshold or risk_score > 0.8:
await self.send_alert(transaction, risk_score)
async def send_alert(
self, transaction: Transaction, risk_score: float
):
# msg = MIMEText(
# f"High-risk transaction detected:\n"
# f"Amount: {transaction.amount} SOL\n"
# f"Risk Score: {risk_score}\n"
# f"Signature: {transaction.signature}"
# )
logger.info(
f"Alert sent for transaction {transaction.signature}"
)
class WalletClusterAnalyzer:
def __init__(self):
self.graph = nx.Graph()
self.known_wallets: Set[str] = set()
def update_graph(self, transaction: Transaction):
self.graph.add_edge(
transaction.from_address,
transaction.to_address,
weight=transaction.amount,
)
self.known_wallets.add(transaction.from_address)
self.known_wallets.add(transaction.to_address)
def identify_clusters(self) -> Dict:
communities = nx.community.greedy_modularity_communities(
self.graph
)
return {
"clusters": [list(c) for c in communities],
"central_wallets": [
wallet
for wallet in self.known_wallets
if self.graph.degree[wallet] > 5
],
}
class TransactionVisualizer:
def __init__(self):
self.transaction_history = []
def add_transaction(self, transaction: Transaction):
self.transaction_history.append(asdict(transaction))
def generate_volume_chart(self) -> str:
volumes = [tx["amount"] for tx in self.transaction_history]
plt.figure(figsize=(12, 6))
plt.plot(volumes)
plt.title("Transaction Volume Over Time")
plt.savefig("volume_chart.png")
return "volume_chart.png"
def generate_network_graph(
self, wallet_analyzer: WalletClusterAnalyzer
) -> str:
plt.figure(figsize=(15, 15))
pos = nx.spring_layout(wallet_analyzer.graph)
nx.draw(
wallet_analyzer.graph,
pos,
node_size=1000,
node_color="lightblue",
with_labels=True,
)
plt.savefig("network_graph.png")
return "network_graph.png"
class SolanaMultiAgentAnalyzer:
def __init__(
self,
min_amount: float = 50.0,
websocket_url: str = "wss://api.mainnet-beta.solana.com",
alert_email: str = None,
):
self.rpc = SolanaRPC()
self.websocket_url = websocket_url
self.min_amount = min_amount
self.transactions = []
self.wallet_analyzer = WalletClusterAnalyzer()
self.visualizer = TransactionVisualizer()
self.alert_system = (
AlertSystem(alert_email) if alert_email else None
)
self.trend_agent = Agent(
agent_name="trend-analyzer",
system_prompt=TREND_AGENT_PROMPT,
model_name="gpt-4o-mini",
max_loops=1,
streaming_on=True,
)
self.risk_agent = Agent(
agent_name="risk-analyzer",
system_prompt=RISK_AGENT_PROMPT,
model_name="gpt-4o-mini",
max_loops=1,
streaming_on=True,
)
self.summary_agent = Agent(
agent_name="summary-agent",
system_prompt=SUMMARY_AGENT_PROMPT,
model_name="gpt-4o-mini",
max_loops=1,
streaming_on=True,
)
logger.add(
"solana_analysis.log", rotation="500 MB", level="INFO"
)
async def start_websocket_stream(self):
async with websockets.connect(
self.websocket_url
) as websocket:
subscribe_message = {
"jsonrpc": "2.0",
"id": 1,
"method": "programSubscribe",
"params": [
"11111111111111111111111111111111",
{"encoding": "json", "commitment": "confirmed"},
],
}
await websocket.send(json.dumps(subscribe_message))
while True:
try:
msg = await websocket.recv()
transaction = await self.parse_websocket_message(
msg
)
if (
transaction
and transaction.amount >= self.min_amount
):
await self.process_transaction(transaction)
except Exception as e:
logger.error(f"Websocket error: {e}")
await asyncio.sleep(5)
async def parse_websocket_message(
self, msg: str
) -> Optional[Transaction]:
try:
data = json.loads(msg)
if "params" in data and "result" in data["params"]:
tx_data = data["params"]["result"]
return Transaction(
signature=tx_data["signature"],
timestamp=datetime.fromtimestamp(
tx_data["blockTime"]
),
amount=float(
tx_data["meta"]["postBalances"][0]
- tx_data["meta"]["preBalances"][0]
)
/ 1e9,
from_address=tx_data["transaction"]["message"][
"accountKeys"
][0],
to_address=tx_data["transaction"]["message"][
"accountKeys"
][1],
)
except Exception as e:
logger.error(f"Error parsing websocket message: {e}")
return None
async def process_transaction(self, transaction: Transaction):
self.wallet_analyzer.update_graph(transaction)
self.visualizer.add_transaction(transaction)
risk_analysis = await self.risk_agent.run(
f"Analyze risk for transaction: {json.dumps(asdict(transaction))}"
)
if self.alert_system:
await self.alert_system.check_and_alert(
transaction, risk_analysis.get("risk_score", 0)
)
async def fetch_transactions(self) -> List[Transaction]:
try:
signatures = await self.rpc.get_signatures(
"11111111111111111111111111111111"
)
transactions = []
for sig_info in signatures:
tx_data = await self.rpc.get_transaction(
sig_info["signature"]
)
if not tx_data or "meta" not in tx_data:
continue
pre_balances = tx_data["meta"]["preBalances"]
post_balances = tx_data["meta"]["postBalances"]
amount = abs(pre_balances[0] - post_balances[0]) / 1e9
if amount >= self.min_amount:
tx = Transaction(
signature=sig_info["signature"],
timestamp=datetime.fromtimestamp(
tx_data["blockTime"]
),
amount=amount,
from_address=tx_data["transaction"][
"message"
]["accountKeys"][0],
to_address=tx_data["transaction"]["message"][
"accountKeys"
][1],
)
transactions.append(tx)
return transactions
except Exception as e:
logger.error(f"Error fetching transactions: {e}")
return []
async def analyze_transactions(
self, transactions: List[Transaction]
) -> Dict:
tx_data = [asdict(tx) for tx in transactions]
cluster_data = self.wallet_analyzer.identify_clusters()
trend_analysis = await self.trend_agent.run(
f"Analyze trends in: {json.dumps(tx_data)}"
)
print(trend_analysis)
risk_analysis = await self.risk_agent.run(
f"Analyze risks in: {json.dumps({'transactions': tx_data, 'clusters': cluster_data})}"
)
print(risk_analysis)
summary = await self.summary_agent.run(
f"Synthesize insights from: {trend_analysis}, {risk_analysis}"
)
print(summary)
volume_chart = self.visualizer.generate_volume_chart()
network_graph = self.visualizer.generate_network_graph(
self.wallet_analyzer
)
return {
"transactions": tx_data,
"trend_analysis": trend_analysis,
"risk_analysis": risk_analysis,
"cluster_analysis": cluster_data,
"summary": summary,
"visualizations": {
"volume_chart": volume_chart,
"network_graph": network_graph,
},
}
async def run_continuous_analysis(self):
logger.info("Starting continuous analysis")
asyncio.create_task(self.start_websocket_stream())
while True:
try:
transactions = await self.fetch_transactions()
if transactions:
analysis = await self.analyze_transactions(
transactions
)
timestamp = datetime.now().strftime(
"%Y%m%d_%H%M%S"
)
with open(f"analysis_{timestamp}.json", "w") as f:
json.dump(analysis, f, indent=2, default=str)
logger.info(
f"Analysis completed: analysis_{timestamp}.json"
)
await asyncio.sleep(60)
except Exception as e:
logger.error(f"Error in analysis loop: {e}")
await asyncio.sleep(60)
# Add to __main__:
if __name__ == "__main__":
logger.info("Starting Solana analyzer...")
analyzer = SolanaMultiAgentAnalyzer(alert_email="your@email.com")
try:
asyncio.run(analyzer.run_continuous_analysis())
except Exception as e:
logger.error(f"Critical error: {e}")

@ -27,13 +27,25 @@ def run_agent_by_name(
Returns:
The output of the task run by the agent.
"""
try:
agent = Agent(
agent_name=name,
system_prompt=system_prompt,
model_name=model_name,
max_loops=max_loops,
)
# Initialize the agent
agent = Agent(
agent_name=name,
system_prompt=system_prompt,
model_name=model_name,
max_loops=max_loops,
autosave=True,
dashboard=False,
verbose=True,
dynamic_temperature_enabled=True,
saved_state_path=f"{name}.json",
user_name="swarms_corp",
retry_attempts=1,
context_length=200000,
# return_step_meta=True,
# disable_print_every_step=True,
# output_type="json",
interactive=True,
)
output = agent.run(task=task, img=img, *args, **kwargs)

@ -17,6 +17,7 @@ from swarms.utils.loguru_logger import initialize_logger
from swarms.utils.wrapper_clusterop import (
exec_callable_with_clusterops,
)
from swarms.structs.output_types import OutputType
logger = initialize_logger(log_folder="rearrange")

@ -0,0 +1,42 @@
from swarms.structs.tree_swarm import ForestSwarm, Tree, TreeAgent
agents_tree1 = [
TreeAgent(
system_prompt="Stock Analysis Agent",
agent_name="Stock Analysis Agent",
),
TreeAgent(
system_prompt="Financial Planning Agent",
agent_name="Financial Planning Agent",
),
TreeAgent(
agent_name="Retirement Strategy Agent",
system_prompt="Retirement Strategy Agent",
),
]
agents_tree2 = [
TreeAgent(
system_prompt="Tax Filing Agent",
agent_name="Tax Filing Agent",
),
TreeAgent(
system_prompt="Investment Strategy Agent",
agent_name="Investment Strategy Agent",
),
TreeAgent(
system_prompt="ROTH IRA Agent", agent_name="ROTH IRA Agent"
),
]
# Create trees
tree1 = Tree(tree_name="Financial Tree", agents=agents_tree1)
tree2 = Tree(tree_name="Investment Tree", agents=agents_tree2)
# Create the ForestSwarm
multi_agent_structure = ForestSwarm(trees=[tree1, tree2])
# Run a task
task = "Our company is incorporated in delaware, how do we do our taxes for free?"
multi_agent_structure.run(task)

zpk.py

@ -0,0 +1,206 @@
from swarms import Agent
from loguru import logger
import random
import re
# Configure loguru
logger.add("zkp_log.log", rotation="500 KB", retention="10 days", level="INFO")
class ProverAgent:
"""
Prover Agent for Zero Knowledge Proof.
Responsibilities:
- Generate commitments based on a secret.
- Respond to challenges from the Verifier.
Attributes:
agent (Agent): Swarms agent instance.
p (int): The prime modulus.
g (int): The generator.
x (int): The Prover's secret.
"""
def __init__(self, p: int, g: int, secret: int):
self.p = p
self.g = g
self.x = secret # Prover's secret
self.agent = Agent(
agent_name="ProverAgent",
model_name="gpt-4o-mini",
            max_loops=1,
interactive=False,
streaming_on=True,
system_prompt=(
"You are the Prover in a Zero Knowledge Proof (ZKP) system. "
"Your responsibilities are to generate commitments based on a secret value and "
"respond to challenges from the Verifier without revealing the secret. "
"Follow mathematical rules of modular arithmetic when performing computations."
),
)
logger.info("Initialized ProverAgent with p={}, g={}, secret={}", p, g, secret)
def generate_commitment(self) -> tuple[int, int]:
"""
Generates a random commitment for the proof.
Returns:
tuple[int, int]: The random value (r) and the commitment (t).
"""
r = random.randint(1, self.p - 2)
task = (
f"Compute the commitment t = g^r % p for g={self.g}, r={r}, p={self.p}. "
"Return only the numerical value of t as an integer."
)
t = self.agent.run(task=task)
t_value = self._extract_integer(t, "commitment")
logger.info("Prover generated commitment: r={}, t={}", r, t_value)
return r, t_value
def _extract_integer(self, response: str, label: str) -> int:
"""
Extracts an integer from the LLM response.
Args:
response (str): The response from the agent.
label (str): A label for logging purposes.
Returns:
int: The extracted integer value.
"""
try:
# Use regex to find the first integer in the response
match = re.search(r"\b\d+\b", response)
if match:
value = int(match.group(0))
return value
else:
raise ValueError(f"No integer found in {label} response: {response}")
except Exception as e:
logger.error("Failed to extract integer from {label} response: {response}")
raise ValueError(f"Invalid {label} response: {response}") from e
def respond_to_challenge(self, r: int, c: int) -> int:
"""
Computes the response to a challenge.
Args:
r (int): The random value used in the commitment.
c (int): The challenge issued by the Verifier.
Returns:
int: The response (z).
"""
task = f"Compute the response z = (r + c * x) % (p-1) for r={r}, c={c}, x={self.x}, p={self.p}."
z = self.agent.run(task=task)
logger.info("Prover responded to challenge: z={}", z)
return int(z)
class VerifierAgent:
"""
Verifier Agent for Zero Knowledge Proof.
Responsibilities:
- Issue challenges to the Prover.
- Verify the Prover's response.
Attributes:
agent (Agent): Swarms agent instance.
p (int): The prime modulus.
g (int): The generator.
y (int): The public value from the Prover.
"""
def __init__(self, p: int, g: int, y: int):
self.p = p
self.g = g
self.y = y # Public value
self.agent = Agent(
agent_name="VerifierAgent",
model_name="gpt-4o-mini",
            max_loops=1,
interactive=False,
streaming_on=True,
system_prompt=(
"You are the Verifier in a Zero Knowledge Proof (ZKP) system. "
"Your responsibilities are to issue random challenges and verify the Prover's response. "
"Use modular arithmetic to check if the proof satisfies g^z % p == (t * y^c) % p."
),
)
logger.info("Initialized VerifierAgent with p={}, g={}, y={}", p, g, y)
def issue_challenge(self) -> int:
"""
Issues a random challenge to the Prover.
Returns:
int: The challenge value (c).
"""
c = random.randint(1, 10)
logger.info("Verifier issued challenge: c={}", c)
return c
def verify_proof(self, t: int, z: int, c: int) -> bool:
"""
Verifies the Prover's response.
Args:
t (int): The commitment from the Prover.
z (int): The response from the Prover.
c (int): The challenge issued to the Prover.
Returns:
bool: True if the proof is valid, False otherwise.
"""
task = f"Verify if g^z % p == (t * y^c) % p for g={self.g}, z={z}, p={self.p}, t={t}, y={self.y}, c={c}."
verification_result = self.agent.run(task=task)
is_valid = verification_result.strip().lower() == "true"
logger.info("Verifier checked proof: t={}, z={}, c={}, valid={}", t, z, c, is_valid)
return is_valid
class CoordinatorAgent:
"""
Coordinator for orchestrating the Zero Knowledge Proof protocol.
Responsibilities:
- Initialize parameters.
- Facilitate interaction between Prover and Verifier agents.
"""
def __init__(self, p: int, g: int, secret: int):
self.p = p
self.g = g
self.prover = ProverAgent(p, g, secret)
y = pow(g, secret, p) # Public value
self.verifier = VerifierAgent(p, g, y)
logger.info("Coordinator initialized with p={}, g={}, secret={}", p, g, secret)
def orchestrate(self) -> bool:
"""
Orchestrates the Zero Knowledge Proof protocol.
Returns:
bool: True if the proof is valid, False otherwise.
"""
logger.info("Starting ZKP protocol orchestration.")
r, t = self.prover.generate_commitment()
c = self.verifier.issue_challenge()
z = self.prover.respond_to_challenge(r, c)
is_valid = self.verifier.verify_proof(t, z, c)
logger.info("ZKP protocol completed. Valid proof: {}", is_valid)
return is_valid
if __name__ == "__main__":
# Example parameters
p = 23 # Prime number
g = 5 # Generator
secret = 7 # Prover's secret
# Initialize the Coordinator and run the protocol
coordinator = CoordinatorAgent(p, g, secret)
result = coordinator.orchestrate()
print(f"Zero Knowledge Proof Verification Result: {'Valid' if result else 'Invalid'}")