parent 7f377bdb65
commit 8623a09e41
@ -0,0 +1,96 @@
This Dockerfile sets up a Python environment with numerous libraries for various applications,
including web frameworks, machine learning tools, and data manipulation. Here are some observations
and suggestions:

*** Key Points:
1. *Base Image*: Uses an official Python 3.11 slim image.
2. *Environment Variables*: The ~PYTHONDONTWRITEBYTECODE~ and ~PYTHONUNBUFFERED~ variables are
   commented out.
3. *Working Directory*: Sets ~/opt/swarms/~ as the working directory.
4. *System Updates*: Installs essential packages like ~git~, ~python3-virtualenv~, ~expect~, and
   others using ~apt~.
5. *User Management*: Creates a user ~swarms~ and sets up permissions.
6. *Python Virtual Environment*: Sets up a Python virtual environment for that user.
7. *Pip Installations*:
   - Installs a long list of libraries (FastAPI, SQLAlchemy, Pillow, etc.).
   - Some packages, such as ~triton~ and ~torch~, are commented out and not installed.
8. *Git Configuration*: Configures Git to allow operations in the ~/opt/swarms/~ directory.
9. *Copy Files*: Copies application files into the container.
10. *Command*: Runs Uvicorn with specific configurations.

*** Suggestions:

1. *Minimize Package Installation*:
   - Consider removing or commenting out unused packages.
   - Pin and install only the package versions you actually need.

2. *Environment Variables*:
   - Uncomment ~PYTHONDONTWRITEBYTECODE~ and ~PYTHONUNBUFFERED~ to skip writing ~.pyc~ files and
     to flush output immediately, which is usually what you want in a container.

3. *User Management*:
   - Ensure that the user ~swarms~ has all necessary permissions to avoid potential issues.

4. *Virtual Environment*:
   - The standard library's ~venv~ module (used below) is already the lightweight choice; heavier
     tools such as ~virtualenv~ or ~conda~ can be dropped unless they are specifically needed.

5. *Caching*:
   - Use ~.dockerignore~ to exclude unnecessary files from the build context (a sample is sketched
     after this list).
   - Take advantage of Docker's layer cache by keeping related install commands together.

6. *Security*:
   - Keep installed packages up to date and free of known vulnerabilities.
   - Consider using a security scanner for Docker images.

7. *Command Execution*:
   - If ~unbuffer~ is used to avoid output buffering, make sure the ~expect~ package that provides
     it is installed, or remove it if it is not necessary.

*** Example of Refactored Dockerfile:
#+BEGIN_SRC dockerfile
# Use an official Python runtime as a parent image
FROM python:3.11-slim

# Set environment variables
ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1

WORKDIR /opt/swarms/

# Update system packages and clean the apt cache to keep the layer small
RUN apt-get update && apt-get install -y --no-install-recommends \
    git python3-virtualenv expect jq netcat-traditional \
    && rm -rf /var/lib/apt/lists/*

# Create the unprivileged user and its workspace
RUN mkdir -p /var/swarms/agent_workspace/
RUN adduser --disabled-password --gecos "" swarms --home "/home/swarms"
RUN chown -R swarms:swarms /var/swarms/agent_workspace
USER swarms
RUN python3 -m venv /var/swarms/agent_workspace/.venv/

# Upgrade pip and install Python dependencies
RUN /var/swarms/agent_workspace/.venv/bin/python -m pip install --upgrade pip
RUN /var/swarms/agent_workspace/.venv/bin/python -m pip install \
    aiofiles aiohappyeyeballs aiosignal frozenlist aiohttp attrs annotated-types anyio sniffio \
    typing_extensions asyncio multidict propcache yarl idna certifi chardet charset-normalizer \
    click dataclasses-json marshmallow typing-inspect distro docstring_parser filelock fastapi \
    starlette pydantic pydantic_core GPUtil Jinja2 MarkupSafe PyYAML Pygments SQLAlchemy fsspec \
    greenlet h11 httpcore httpx huggingface-hub importlib_metadata iniconfig jiter jsonpatch \
    jsonpointer jsonschema-specifications jsonschema langchain-community langchain-core langsmith \
    numpy orjson requests-toolbelt tenacity loguru lxml markdown-it-py mdurl mpmath msgpack \
    multiprocess mypy-protobuf networkx ollama openai pathos pathspec platformdirs pluggy pox \
    ppft protobuf psutil pytesseract pytest python-dateutil python-docx python-dotenv python-magic \
    pytz ratelimit referencing regex reportlab requests rich rpds-py safetensors sentry-sdk six \
    sympy termcolor tiktoken tokenizers toml tqdm types-chardet types-protobuf types-pytz \
    types-toml tzdata urllib3 uvicorn zipp

# Set up Git configuration
RUN git config --global --add safe.directory "/opt/swarms"

# Copy application files and install the package
COPY swarms /opt/swarms/swarms
COPY pyproject.toml /opt/swarms/
COPY README.md /opt/swarms/
RUN /var/swarms/agent_workspace/.venv/bin/python -m pip install -e /opt/swarms/

# Add main.py file
COPY api/main.py /opt/swarms/api/main.py

WORKDIR /opt/swarms/api/
# Exec-form CMD passes arguments verbatim, so the wildcard is written without the
# shell quotes ('*') that would otherwise reach uvicorn literally
CMD ["unbuffer", "/var/swarms/agent_workspace/.venv/bin/uvicorn", "--proxy-headers", "--forwarded-allow-ips=*", "--workers=4", "--port=8000", "--reload-delay=30", "main:create_app"]
#+END_SRC

*** Additional Tips:
- *Layer Optimization*: Put frequently changing files (like ~api/main.py~) in later layers so the
  earlier, more stable layers stay cached across builds.
- *Security Scans*: Run security scans on the final Docker image using tools like Trivy or Clair
  (see the sketch below).

By following these suggestions, you can optimize your Dockerfile for better performance and
maintainability.
@ -0,0 +1,768 @@
{
    "logStreams": [
        {
            "logStreamName": "0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735052065253,
            "firstEventTimestamp": 1735052065225,
            "lastEventTimestamp": 1735052065225,
            "lastIngestionTime": 1735052065276,
            "uploadSequenceToken": "49039859613988222776763637742290862475585593660493682344",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735052065255,
            "firstEventTimestamp": 1735052065225,
            "lastEventTimestamp": 1735052065225,
            "lastIngestionTime": 1735052065276,
            "uploadSequenceToken": "49039859613988222776763637742290863632254093157312911099",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0654eb29-8948-4c3b-bf5c-ea057bdda8a2/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stderr",
            "creationTime": 1734899238367,
            "firstEventTimestamp": 1734899238330,
            "lastEventTimestamp": 1734899297327,
            "lastIngestionTime": 1734899297329,
            "uploadSequenceToken": "49039859613785159344752651490819636558466064069737469684",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stdout",
            "creationTime": 1734899238369,
            "firstEventTimestamp": 1734899238339,
            "lastEventTimestamp": 1734899298326,
            "lastIngestionTime": 1734899298328,
            "uploadSequenceToken": "49039859613785160672651419279950594505581625482055407181",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:065e7a0d-7c43-4eab-b653-b9ebabc7b37f/i-0fc541c128fc194ec/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735005102663,
            "firstEventTimestamp": 1735005102632,
            "lastEventTimestamp": 1735005102632,
            "lastIngestionTime": 1735005102684,
            "uploadSequenceToken": "49039859613925798784722613018397360500005004744706643512",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735005102670,
            "firstEventTimestamp": 1735005102632,
            "lastEventTimestamp": 1735005102632,
            "lastIngestionTime": 1735005102690,
            "uploadSequenceToken": "49039859613925798792697980993106856627627363850950432327",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:0e5ccc67-1f79-4d01-84f6-2dd97a656b58/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735055856519,
            "firstEventTimestamp": 1735055856488,
            "lastEventTimestamp": 1735055858489,
            "lastIngestionTime": 1735055858492,
            "uploadSequenceToken": "49039859613993264825664897017738619986883194897110554355",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735055856522,
            "firstEventTimestamp": 1735055856488,
            "lastEventTimestamp": 1735055856488,
            "lastIngestionTime": 1735055856544,
            "uploadSequenceToken": "49039859613993262236328761228722500242420278656791107156",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:1172b81b-62ca-4a15-99eb-f30d9a346ca0/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735057193938,
            "firstEventTimestamp": 1735057193900,
            "lastEventTimestamp": 1735057195901,
            "lastIngestionTime": 1735057195902,
            "uploadSequenceToken": "49039859613995042548478739722076201475673363666687645245",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735057193935,
            "firstEventTimestamp": 1735057193900,
            "lastEventTimestamp": 1735057193900,
            "lastIngestionTime": 1735057193957,
            "uploadSequenceToken": "49039859613995039963130287920414829151619369814831552100",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2a7102b2-dc75-4a54-892b-59a3f6fd06c1/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735054554607,
            "firstEventTimestamp": 1735054554576,
            "lastEventTimestamp": 1735054554576,
            "lastIngestionTime": 1735054554630,
            "uploadSequenceToken": "49039859613991531695791856905758746050334104337153676827",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735054554605,
            "firstEventTimestamp": 1735054554576,
            "lastEventTimestamp": 1735054554576,
            "lastIngestionTime": 1735054554627,
            "uploadSequenceToken": "49039859613991531691804172918403999064288358809319921154",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:2cea03be-962a-4284-860c-6f6a22316c4d/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stderr",
            "creationTime": 1734870795997,
            "firstEventTimestamp": 1734870795949,
            "lastEventTimestamp": 1734870854947,
            "lastIngestionTime": 1734870854950,
            "uploadSequenceToken": "49039859613747352938319126511079397131401121383887820324",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stdout",
            "creationTime": 1734870795998,
            "firstEventTimestamp": 1734870795954,
            "lastEventTimestamp": 1734870855948,
            "lastIngestionTime": 1734870855951,
            "uploadSequenceToken": "49039859613747354268876350291780186335989982121728824915",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:313e88da-c7e7-4bad-a818-edb4360b2b1f/i-0c6fd324f8eea1aab/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735057433187,
            "firstEventTimestamp": 1735057433156,
            "lastEventTimestamp": 1735057435156,
            "lastIngestionTime": 1735057435157,
            "uploadSequenceToken": "49039859613995360572922871242123376126157469071980779228",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735057433185,
            "firstEventTimestamp": 1735057433156,
            "lastEventTimestamp": 1735057433156,
            "lastIngestionTime": 1735057433206,
            "uploadSequenceToken": "49039859613995357979599051465752508652951185554904340048",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:557f68da-0078-4fa9-a978-538b9c2c4e5e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stderr",
            "creationTime": 1734904353404,
            "firstEventTimestamp": 1734904353362,
            "lastEventTimestamp": 1734904431373,
            "lastIngestionTime": 1734904431379,
            "uploadSequenceToken": "49039859613791983667744411038156926901475617546398750277",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stdout",
            "creationTime": 1734904353398,
            "firstEventTimestamp": 1734904353360,
            "lastEventTimestamp": 1734904433362,
            "lastIngestionTime": 1734904433365,
            "uploadSequenceToken": "49039859613791986307591210666999851103808292347067651736",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:617c0b4f-317a-40e2-9484-5a47157cb6c3/i-0fc541c128fc194ec/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stderr",
            "creationTime": 1734858621142,
            "firstEventTimestamp": 1734858621107,
            "lastEventTimestamp": 1734858678106,
            "lastIngestionTime": 1734858678110,
            "uploadSequenceToken": "49039859613731167141690932916081590350341220235971810915",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stdout",
            "creationTime": 1734858621142,
            "firstEventTimestamp": 1734858621106,
            "lastEventTimestamp": 1734858679105,
            "lastIngestionTime": 1734858679109,
            "uploadSequenceToken": "49039859613731168469589700705212547784983172850546267812",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:694d4ac6-12a7-4e96-a6c9-06f010415df9/i-0102c629cfe30ab41/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735058328627,
            "firstEventTimestamp": 1735058328582,
            "lastEventTimestamp": 1735058329582,
            "lastIngestionTime": 1735058329584,
            "uploadSequenceToken": "49039859613996549470331457157072832959656785139474519737",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735058328626,
            "firstEventTimestamp": 1735058328582,
            "lastEventTimestamp": 1735058329582,
            "lastIngestionTime": 1735058329584,
            "uploadSequenceToken": "49039859613996549470331457157072833567318475661320803988",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6a6fe26e-841f-42a4-aca4-5a2656821b29/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735058290048,
            "firstEventTimestamp": 1735058290011,
            "lastEventTimestamp": 1735058291010,
            "lastIngestionTime": 1735058291012,
            "uploadSequenceToken": "49039859613996498199349203741297784502672927255682766343",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735058290047,
            "firstEventTimestamp": 1735058290009,
            "lastEventTimestamp": 1735058291008,
            "lastIngestionTime": 1735058291010,
            "uploadSequenceToken": "49039859613996498196690747749727953601812660823865054910",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:6c96c523-2970-4071-9d5d-f95a041703ef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735044217329,
            "firstEventTimestamp": 1735044217309,
            "lastEventTimestamp": 1735044217309,
            "lastIngestionTime": 1735044217370,
            "uploadSequenceToken": "49039859613977791120400149326302420330339315638580696710",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735044217344,
            "firstEventTimestamp": 1735044217309,
            "lastEventTimestamp": 1735044217309,
            "lastIngestionTime": 1735044217366,
            "uploadSequenceToken": "49039859613977791115083237343162757449412196868898576123",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:73e818e3-25b4-4b2d-9efe-07b74226b91e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stderr",
            "creationTime": 1734856658896,
            "firstEventTimestamp": 1734856658859,
            "lastEventTimestamp": 1734856724859,
            "lastIngestionTime": 1734856724862,
            "uploadSequenceToken": "49039859613728570829766622020722677450385238021455632064",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stdout",
            "creationTime": 1734856658897,
            "firstEventTimestamp": 1734856658859,
            "lastEventTimestamp": 1734856725859,
            "lastIngestionTime": 1734856725862,
            "uploadSequenceToken": "49039859613728572158994617805638550864288006496292067018",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:78aa5989-2ff2-4445-84b6-1a6c7f970381/i-0102c629cfe30ab41/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735005234228,
            "firstEventTimestamp": 1735005234199,
            "lastEventTimestamp": 1735005234199,
            "lastIngestionTime": 1735005234249,
            "uploadSequenceToken": "49039859613925973664603878460854192727649656492823303866",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735005234229,
            "firstEventTimestamp": 1735005234199,
            "lastEventTimestamp": 1735005234199,
            "lastIngestionTime": 1735005234250,
            "uploadSequenceToken": "49039859613925973665933106456639109128303282286887851775",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:82d43144-a4f4-4b6d-a507-23ad5179e0b4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735055317414,
            "firstEventTimestamp": 1735055317383,
            "lastEventTimestamp": 1735055317383,
            "lastIngestionTime": 1735055317439,
            "uploadSequenceToken": "49039859613992545642870093601650851001566812901843293740",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735055317416,
            "firstEventTimestamp": 1735055317383,
            "lastEventTimestamp": 1735055317383,
            "lastIngestionTime": 1735055317438,
            "uploadSequenceToken": "49039859613992545641540865605865935623338388911938229871",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93cea26c-bb71-4b98-8923-b3ed3b3f4c4a/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stderr",
            "creationTime": 1734818540131,
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stdout",
            "creationTime": 1734818540128,
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:93e43e30-ae7f-4a8c-83cf-a2745b46dbd9/i-046ddceb61d38baa4/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735058057641,
            "firstEventTimestamp": 1735058057609,
            "lastEventTimestamp": 1735058060610,
            "lastIngestionTime": 1735058060612,
            "uploadSequenceToken": "49039859613996191945218974896680674139788828833329600156",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735058057643,
            "firstEventTimestamp": 1735058057609,
            "lastEventTimestamp": 1735058057609,
            "lastIngestionTime": 1735058057667,
            "uploadSequenceToken": "49039859613996188030642527310103428934121513664810735175",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9af6222c-21c3-425c-b965-a5ce3426c0c4/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735057233804,
            "firstEventTimestamp": 1735057233769,
            "lastEventTimestamp": 1735057235769,
            "lastIngestionTime": 1735057235771,
            "uploadSequenceToken": "49039859613995095543469703670887153246153117076553618958",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735057233797,
            "firstEventTimestamp": 1735057233769,
            "lastEventTimestamp": 1735057233769,
            "lastIngestionTime": 1735057233815,
            "uploadSequenceToken": "49039859613995092943499743915591706324624816391965267548",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:9b400186-1c4a-4405-b99f-c5062ee8f6fe/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stderr",
            "creationTime": 1734830413282,
            "firstEventTimestamp": 1734830413242,
            "lastEventTimestamp": 1734830433243,
            "lastIngestionTime": 1734830433276,
            "uploadSequenceToken": "49039859613693623317601835267547467856431474809478469252",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stdout",
            "creationTime": 1734830413288,
            "firstEventTimestamp": 1734830413242,
            "lastEventTimestamp": 1734830433246,
            "lastIngestionTime": 1734830433252,
            "uploadSequenceToken": "49039859613693623285700363368709487386499566467274397437",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:a2d8a6c6-f797-4ae2-a3c7-4420e92a0343/i-03f02fde2519d21d2/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735057357453,
            "firstEventTimestamp": 1735057357422,
            "lastEventTimestamp": 1735057359422,
            "lastIngestionTime": 1735057359426,
            "uploadSequenceToken": "49039859613995259909157522454659419100657152802661750380",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735057357450,
            "firstEventTimestamp": 1735057357422,
            "lastEventTimestamp": 1735057357422,
            "lastIngestionTime": 1735057357476,
            "uploadSequenceToken": "49039859613995257317162930674073467432332307574061684451",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ab133779-b3ec-44b2-b198-194e6795567e/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stderr",
            "creationTime": 1734904330835,
            "firstEventTimestamp": 1734904330793,
            "lastEventTimestamp": 1734904412792,
            "lastIngestionTime": 1734904412798,
            "uploadSequenceToken": "49039859613791958969359021358635106202736237108550063861",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stdout",
            "creationTime": 1734904330833,
            "firstEventTimestamp": 1734904330794,
            "lastEventTimestamp": 1734904414792,
            "lastIngestionTime": 1734904414794,
            "uploadSequenceToken": "49039859613791961622498100945327188974427354123974104746",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:affbd717-8495-4b3c-b9a9-fe1cb4a38049/i-0fc541c128fc194ec/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735055525532,
            "firstEventTimestamp": 1735055525476,
            "lastEventTimestamp": 1735055529477,
            "lastIngestionTime": 1735055529478,
            "uploadSequenceToken": "49039859613992827491045091839427632436711007109045442148",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735055525531,
            "firstEventTimestamp": 1735055525492,
            "lastEventTimestamp": 1735055527477,
            "lastIngestionTime": 1735055527492,
            "uploadSequenceToken": "49039859613992824851198292210584709371014820662651803280",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:b8fe6970-5ccc-4d74-8e8a-e5887649ca79/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735054456369,
            "firstEventTimestamp": 1735054456335,
            "lastEventTimestamp": 1735054456335,
            "lastIngestionTime": 1735054456396,
            "uploadSequenceToken": "49039859613991401120408918970332906052237331985426437749",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735054456371,
            "firstEventTimestamp": 1735054456335,
            "lastEventTimestamp": 1735054456335,
            "lastIngestionTime": 1735054456395,
            "uploadSequenceToken": "49039859613991401119079690974547990635818383155419567797",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c1f49ab1-170a-4a69-b73d-5f35205febdc/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735057898295,
            "firstEventTimestamp": 1735057898263,
            "lastEventTimestamp": 1735057900263,
            "lastIngestionTime": 1735057900265,
            "uploadSequenceToken": "49039859613995978807497534772775208356660176145935652414",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735057898299,
            "firstEventTimestamp": 1735057898263,
            "lastEventTimestamp": 1735057898263,
            "lastIngestionTime": 1735057898322,
            "uploadSequenceToken": "49039859613995976224807538962683667811200001701897459307",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c2947dc0-ba37-4043-bb3f-dcbcec1b1048/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stderr",
            "creationTime": 1734830394075,
            "firstEventTimestamp": 1734830394042,
            "lastEventTimestamp": 1734830472041,
            "lastIngestionTime": 1734830472043,
            "uploadSequenceToken": "49039859613693674847783547861384144735279739289454982245",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stdout",
            "creationTime": 1734830394075,
            "firstEventTimestamp": 1734830394042,
            "lastEventTimestamp": 1734830473042,
            "lastIngestionTime": 1734830473045,
            "uploadSequenceToken": "49039859613693676179669999637869850504453252257646589025",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:c916fcee-bc52-4857-aaa0-2374308031ac/i-03f02fde2519d21d2/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-2gt45qizfn4ezip8dpkrilhaa4",
            "creationTime": 1734982366536,
            "firstEventTimestamp": 1734982366502,
            "lastEventTimestamp": 1734983612502,
            "lastIngestionTime": 1734983612523,
            "uploadSequenceToken": "49039859613897233461087487854920254489498942830027682928",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-2gt45qizfn4ezip8dpkrilhaa4",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-2qj8xj6luppbvlilgdivc5sug8",
            "creationTime": 1734866718407,
            "firstEventTimestamp": 1734866718373,
            "lastEventTimestamp": 1734866718373,
            "lastIngestionTime": 1734866718430,
            "uploadSequenceToken": "49039859613741854560130002290875861318885575337716691992",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-2qj8xj6luppbvlilgdivc5sug8",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-6kq4qtpy72q753vkvy6if872q4",
            "creationTime": 1734820331291,
            "firstEventTimestamp": 1734820935254,
            "lastEventTimestamp": 1734820935254,
            "lastIngestionTime": 1734820935319,
            "uploadSequenceToken": "49039859613680998367254673955341045032917736173318386920",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-6kq4qtpy72q753vkvy6if872q4",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-9d9hresjnrxhaoqz8a5pesy9uy",
            "creationTime": 1734869641914,
            "firstEventTimestamp": 1734869641884,
            "lastEventTimestamp": 1734869993883,
            "lastIngestionTime": 1734869993885,
            "uploadSequenceToken": "49039859613746208386614935972496344653675690982984332418",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-9d9hresjnrxhaoqz8a5pesy9uy",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-acc5uyp4sfig2tz6zxf5fei6ji",
            "creationTime": 1734866903573,
            "firstEventTimestamp": 1734866903548,
            "lastEventTimestamp": 1734867977549,
            "lastIngestionTime": 1734867977576,
            "uploadSequenceToken": "49039859613743528252243982884557566430997070622784959605",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-acc5uyp4sfig2tz6zxf5fei6ji",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-ale49vtarq3lv2qh5g4bbgbo2u",
            "creationTime": 1734823328568,
            "firstEventTimestamp": 1734823328544,
            "lastEventTimestamp": 1734823328544,
            "lastIngestionTime": 1734823328587,
            "uploadSequenceToken": "49039859613684179566081690129382359560174988019272201245",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-ale49vtarq3lv2qh5g4bbgbo2u",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-cfi2lbnsn34debav6jbb726ooa",
            "creationTime": 1734821129540,
            "firstEventTimestamp": 1734821129509,
            "lastEventTimestamp": 1734821129509,
            "lastIngestionTime": 1734821129570,
            "uploadSequenceToken": "49039859613681256571122083171034274844280438270068470784",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-cfi2lbnsn34debav6jbb726ooa",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-ecrtlu7kr5rch6g5ndble8kqha",
            "creationTime": 1734856273279,
            "firstEventTimestamp": 1734856273247,
            "lastEventTimestamp": 1734856273247,
            "lastIngestionTime": 1734856273300,
            "uploadSequenceToken": "49039859613727970600914389392544321167059767430010844324",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-ecrtlu7kr5rch6g5ndble8kqha",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-ehhpaubgebqzgzc6qos2jc9qz8",
            "creationTime": 1734869593887,
            "firstEventTimestamp": 1734869593858,
            "lastEventTimestamp": 1734869655858,
            "lastIngestionTime": 1734869655863,
            "uploadSequenceToken": "49039859613745759078309344763663156946262011664604542059",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-ehhpaubgebqzgzc6qos2jc9qz8",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-gag5kejahuzr9873ietdbkpoa8",
            "creationTime": 1734823566407,
            "firstEventTimestamp": 1734823566372,
            "lastEventTimestamp": 1734825788372,
            "lastIngestionTime": 1734825788392,
            "uploadSequenceToken": "49039859613687449207751861844371110110764337414286292026",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-gag5kejahuzr9873ietdbkpoa8",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-haaiif5dpxck5ehtqbqxbhflu8",
            "creationTime": 1734871696978,
            "firstEventTimestamp": 1734871696953,
            "lastEventTimestamp": 1734872507953,
            "lastIngestionTime": 1734872507972,
            "uploadSequenceToken": "49039859613749550181439174884288509989399115071987856439",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-haaiif5dpxck5ehtqbqxbhflu8",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-j5tad5uqe6rg579d76npjrbyta",
            "creationTime": 1734856254710,
            "firstEventTimestamp": 1734856254693,
            "lastEventTimestamp": 1734856254693,
            "lastIngestionTime": 1734856254734,
            "uploadSequenceToken": "49039859613727945922467419649796227269299585648452908096",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-j5tad5uqe6rg579d76npjrbyta",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-nbs3222zo2bdt8n5lqz6i5e2h4",
            "creationTime": 1734856866781,
            "firstEventTimestamp": 1734856866754,
            "lastEventTimestamp": 1734858481754,
            "lastIngestionTime": 1734858481781,
            "uploadSequenceToken": "49039859613730906175687748459336230657047697575836857590",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-nbs3222zo2bdt8n5lqz6i5e2h4",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-psfjp3f7bnkatx76s852aocut4",
            "creationTime": 1734823506148,
            "firstEventTimestamp": 1734823506113,
            "lastEventTimestamp": 1734823506113,
            "lastIngestionTime": 1734823506173,
            "uploadSequenceToken": "49039859613684415618364549589452569865211766620082091138",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-psfjp3f7bnkatx76s852aocut4",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-taejkp2vrs9ptlzfk3hi7sc8tu",
            "creationTime": 1735057761372,
            "firstEventTimestamp": 1735057761331,
            "lastEventTimestamp": 1735059010330,
            "lastIngestionTime": 1735059010356,
            "uploadSequenceToken": "49039859613997454371532603645824513397886961371254043702",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-taejkp2vrs9ptlzfk3hi7sc8tu",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-tx6jfbkpuisogketgtnii3loiq",
            "creationTime": 1734821136150,
            "firstEventTimestamp": 1734821136115,
            "lastEventTimestamp": 1734821136115,
            "lastIngestionTime": 1734821136170,
            "uploadSequenceToken": "49039859613681265344026855351479041441540820263578055706",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-tx6jfbkpuisogketgtnii3loiq",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-u28lll5bhz7293b56aqzlo6i9a",
            "creationTime": 1734868152559,
            "firstEventTimestamp": 1734868152530,
            "lastEventTimestamp": 1734869653530,
            "lastIngestionTime": 1734869653598,
            "uploadSequenceToken": "49039859613745756067607934310828709627686385468631102662",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-u28lll5bhz7293b56aqzlo6i9a",
            "storedBytes": 0
        },
        {
            "logStreamName": "dupont-zq8btstj4tr9ajp9xps278hkqq",
            "creationTime": 1734860241487,
            "firstEventTimestamp": 1734860241462,
            "lastEventTimestamp": 1734861464462,
            "lastIngestionTime": 1734861464487,
            "uploadSequenceToken": "49039859613734870872006144102619839037825530972265460909",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:dupont-zq8btstj4tr9ajp9xps278hkqq",
            "storedBytes": 0
        },
        {
            "logStreamName": "ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stderr",
            "creationTime": 1734899352252,
            "firstEventTimestamp": 1734899352209,
            "lastEventTimestamp": 1734899414209,
            "lastIngestionTime": 1734899414213,
            "uploadSequenceToken": "49039859613785314710237710814929589941728888154770584703",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stdout",
            "creationTime": 1734899352254,
            "firstEventTimestamp": 1734899352213,
            "lastEventTimestamp": 1734899415209,
            "lastIngestionTime": 1734899415211,
            "uploadSequenceToken": "49039859613785316036807250608275631703066569194125079656",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:ec25d72c-83ea-4df2-be57-bc64df26ef7c/i-0fc541c128fc194ec/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stderr",
            "creationTime": 1734817872118,
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stdout",
            "creationTime": 1734817872118,
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f1c32370-313b-4eb8-9013-65f8f28b9830/i-046ddceb61d38baa4/Deploy/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735056783402,
            "firstEventTimestamp": 1735056783370,
            "lastEventTimestamp": 1735056785369,
            "lastIngestionTime": 1735056785372,
            "uploadSequenceToken": "49039859613994496860509630140565960835917365413277283529",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735056783407,
            "firstEventTimestamp": 1735056783370,
            "lastEventTimestamp": 1735056783370,
            "lastIngestionTime": 1735056783431,
            "uploadSequenceToken": "49039859613994494280478090322044252140748316444795286615",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:f985b14a-9914-413c-b1a9-2cd3cc533c92/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        },
        {
            "logStreamName": "fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "creationTime": 1735005652683,
            "firstEventTimestamp": 1735005652644,
            "lastEventTimestamp": 1735005652644,
            "lastIngestionTime": 1735005652707,
            "uploadSequenceToken": "49039859613926529890692538625183589108539545514173740282",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stderr",
            "storedBytes": 0
        },
        {
            "logStreamName": "fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "creationTime": 1735005652676,
            "firstEventTimestamp": 1735005652644,
            "lastEventTimestamp": 1735005652644,
            "lastIngestionTime": 1735005652699,
            "uploadSequenceToken": "49039859613926529880058714658904262714740208262181499974",
            "arn": "arn:aws:logs:us-east-2:916723593639:log-group:/ssm/session-logs-20241221151803393300000006:log-stream:fdf9e780-ad22-4acb-a9f7-5fe6fa97ecef/i-0a3dae164f8f3c09a/DeployDocker/stdout",
            "storedBytes": 0
        }
    ]
}
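
A listing like the JSON above can be regenerated with the AWS CLI; the log-group name is taken
from the ARNs in the output, and the region or credentials flags may need adjusting for your
environment:

#+BEGIN_SRC bash
aws logs describe-log-streams \
    --region us-east-2 \
    --log-group-name /ssm/session-logs-20241221151803393300000006
#+END_SRC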
@ -0,0 +1,59 @@
Your script appears to be a set of commands intended to be run by the "swarms" user. It sets up an environment, installs dependencies, and prepares to launch a service. Here are some suggestions to improve and enhance the script:

1. **Add Error Handling:**
   - Implement error handling with appropriate exits and messages so failures are handled gracefully (see the sketch after this list).

2. **Use Absolute Paths:**
   - Prefer absolute paths over relative paths to avoid any ambiguity.

3. **Add Logging:**
   - Incorporate logging statements to track progress and surface issues during script execution.

4. **Documentation:**
   - Include comments that explain each step and document the purpose of the script and individual commands.

5. **Finalize Service Launch:**
   - Make sure the required service is actually started once the dependencies are installed and configuration is complete.

Here's an enhanced version of your script incorporating these suggestions:

```bash
#!/bin/bash
# Fail fast and trace each command for easier debugging
set -e
set -x

# Set environment variables
export ROOT="/mnt/data1/swarms"
export HOME="${ROOT}/home/swarms"
unset CONDA_EXE
unset CONDA_PYTHON_EXE
export PATH="${ROOT}/var/swarms/agent_workspace/.venv/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"

# Create the virtual environment if it does not exist yet
# (note: -d tests for a directory; the original -f test would never match)
if [ ! -d "${ROOT}/var/swarms/agent_workspace/.venv" ]; then
    virtualenv "${ROOT}/var/swarms/agent_workspace/.venv/"
fi

source "${ROOT}/var/swarms/agent_workspace/.venv/bin/activate"

# Install dependencies
pip install fastapi uvicorn termcolor
pip install -e "${ROOT}/opt/swarms/"
cd "${ROOT}/var/swarms/"
pip install -e "${ROOT}/opt/swarms-memory"
pip install "fastapi[standard]" "loguru" pydantic==2.8.2

# Verify installation
#pip freeze

# Launch your service (Uncomment and add your starting command)
#python /opt/swarms/api/main.py

# Start your service as a systemd service
# You can add the relevant service configuration and enable it here
```

Make sure to uncomment and add the necessary start command for your service. Then continue the script with the remaining setup required to launch it, such as creating a systemd service unit file and enabling the service.

Please adapt this script to your specific service requirements, and make sure you have the necessary permissions and environment configuration to run it successfully.
@ -0,0 +1,64 @@
░░
░░ A start job for unit swarms-uvicorn.service has finished successfully.
░░
░░ The job identifier is 155034.
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: Traceback (most recent call last):
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/bin/uvicorn", line 8, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: sys.exit(main())
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 1157, in __call__
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return self.main(*args, **kwargs)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 1078, in main
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: rv = self.invoke(ctx)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 1434, in invoke
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return ctx.invoke(self.callback, **ctx.params)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/click/core.py", line 783, in invoke
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return __callback(*args, **kwargs)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/main.py", line 412, in main
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: run(
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/main.py", line 579, in run
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: server.run()
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/server.py", line 65, in run
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return asyncio.run(self.serve(sockets=sockets))
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/usr/lib/python3.10/asyncio/runners.py", line 44, in run
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return loop.run_until_complete(main)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "uvloop/loop.pyx", line 1518, in uvloop.loop.Loop.run_until_complete
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/server.py", line 69, in serve
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: await self._serve(sockets)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/server.py", line 76, in _serve
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: config.load()
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/config.py", line 434, in load
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: self.loaded_app = import_from_string(self.app)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/uvicorn/importer.py", line 19, in import_from_string
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: module = importlib.import_module(module_str)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/usr/lib/python3.10/importlib/__init__.py", line 126, in import_module
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: return _bootstrap._gcd_import(name[level:], package, level)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 1050, in _gcd_import
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 1027, in _find_and_load
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 1006, in _find_and_load_unlocked
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 688, in _load_unlocked
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap_external>", line 883, in exec_module
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/api/main.py", line 18, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms import Agent
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/__init__.py", line 13, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.telemetry.bootup import bootup  # noqa: E402, F403
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/telemetry/bootup.py", line 6, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.telemetry.auto_upgrade_swarms import auto_update
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/telemetry/auto_upgrade_swarms.py", line 4, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.loguru_logger import initialize_logger
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/__init__.py", line 2, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.data_to_text import (
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/data_to_text.py", line 5, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.pdf_to_text import pdf_to_text
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/pdf_to_text.py", line 1, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: from swarms.utils.try_except_wrapper import try_except_wrapper
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/try_except_wrapper.py", line 7, in <module>
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: logger = initialize_logger("try_except_wrapper")
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/opt/swarms/swarms/utils/loguru_logger.py", line 28, in initialize_logger
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: logger.add(
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/loguru/_logger.py", line 802, in add
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: wrapped_sink = FileSink(path, **kwargs)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/loguru/_file_sink.py", line 192, in __init__
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: self._create_file(path)
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: File "/mnt/data1/swarms/var/swarms/agent_workspace/.venv/lib/python3.10/site-packages/loguru/_file_sink.py", line 226, in _create_file
Dec 12 10:55:50 mdupont-G470 unbuffer[3921723]: self._file = open(path, **self._kwargs)
|
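The journal snippet cuts off before the exception message itself, but the stack ends inside loguru's `FileSink._create_file` as it calls `open(path, ...)`, which usually points at a log path the service user cannot create (missing directory or wrong ownership). A minimal first check, assuming the logger writes under the agent workspace (the actual path is set in `swarms/utils/loguru_logger.py`; the `logs/` subdirectory below is a guess):

```bash
# Hypothetical sanity check: make sure the workspace log directory exists
# and is writable by the user the service runs as.
sudo mkdir -p /mnt/data1/swarms/var/swarms/agent_workspace/logs
sudo chown -R swarms:swarms /mnt/data1/swarms/var/swarms/agent_workspace
sudo -u swarms touch /mnt/data1/swarms/var/swarms/agent_workspace/logs/probe.log
```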
@ -0,0 +1,79 @@
It looks like you are setting up a project environment and configuring services using bash scripts. The script clones repositories, switches branches, copies files, sets permissions, creates directories, installs dependencies, and starts services.

To improve readability and maintainability, consider the following suggestions:

1. **Modularize your Script:**
   - Separate different sets of tasks into functions for better organization.
   - Functions encourage code reuse and make each part of the script easier to understand.

2. **Error Handling:**
   - Implement proper error handling and informative messages for critical steps.
   - Consider using `trap` to catch and handle errors gracefully (see the sketch after the example script).

3. **Logging:**
   - Add logging statements to track the script's execution flow and report progress.

4. **Consistent Variable Naming:**
   - Use consistent, descriptive variable names for clarity.

5. **Comments and Documentation:**
   - Add comments to explain complex logic or processes in the script.
   - Consider adding a header comment describing the script's purpose, inputs, and expected outputs.

6. **Configuration Management:**
   - Consider moving variables and settings that may change into a configuration file, so the script itself does not need to be edited.

Here is an example structure to illustrate these suggestions:

```bash
#!/bin/bash
set -e
set -x

# Global variables
ROOT="/mnt/data1/swarms"
WORKSOURCE="${ROOT}/opt/swarms/api"

# Function to clone or update a git repository
git_clone_or_update() {
    local repo_url=$1
    local target_dir=$2

    if [ ! -d "$target_dir" ]; then
        git clone "$repo_url" "$target_dir"
    else
        pushd "$target_dir" || exit 1
        git pull
        popd || exit 2
    fi
}

# Ensure swarms repository is cloned
git_clone_or_update "https://github.com/jmikedupont2/swarms" "${ROOT}/opt/swarms"

# Switch to a specific branch
pushd "${ROOT}/opt/swarms/" || exit 1
git checkout feature/ec2
git pull local feature/ec2
popd || exit 2

# Ensure swarms-memory repository is cloned
git_clone_or_update "https://github.com/The-Swarm-Corporation/swarms-memory" "${ROOT}/opt/swarms-memory"

# Other setup steps go here...

# Additional setup for uvicorn and nginx
mkdir -p "${ROOT}/var/run/uvicorn/env/"

# Create the virtualenv only if it does not exist yet
# (checking for the activate script; the original tested the directory
# with -f, which is always false for a directory)
if [ ! -f "${ROOT}/var/run/uvicorn/env/bin/activate" ]; then
    virtualenv "${ROOT}/var/run/uvicorn/env/"
fi

. "${ROOT}/var/run/uvicorn/env/bin/activate"
pip install uvicorn

# Start services
systemctl daemon-reload
systemctl start swarms-uvicorn
systemctl enable swarms-uvicorn
service nginx restart
```
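The suggestions above mention `trap`, which the example script does not yet use. A minimal error-handling sketch in the same style (the function name and message format are just illustrative):

```bash
#!/bin/bash
set -Eeuo pipefail

# Report the failing command and line, then exit with the same status.
on_error() {
    local exit_code=$?
    echo "ERROR: '${BASH_COMMAND}' failed at line ${BASH_LINENO[0]} (exit ${exit_code})" >&2
    exit "${exit_code}"
}
trap on_error ERR
```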
@ -0,0 +1,210 @@
MOD sentry_sdk
MOD sentry_sdk._compat
MOD sentry_sdk._init_implementation
MOD sentry_sdk._lru_cache
MOD sentry_sdk._werkzeug
MOD sentry_sdk.ai
MOD sentry_sdk.ai.monitoring
MOD sentry_sdk.ai.utils
MOD sentry_sdk.api
MOD sentry_sdk.attachments
MOD sentry_sdk.client
MOD sentry_sdk.consts
MOD sentry_sdk.crons
MOD sentry_sdk.crons.api
MOD sentry_sdk.crons.consts
MOD sentry_sdk.crons.decorator
MOD sentry_sdk.debug
MOD sentry_sdk.envelope
MOD sentry_sdk.flag_utils
MOD sentry_sdk.hub
MOD sentry_sdk.integrations
MOD sentry_sdk.integrations._asgi_common
MOD sentry_sdk.integrations._wsgi_common
MOD sentry_sdk.integrations.aiohttp
MOD sentry_sdk.integrations.anthropic
MOD sentry_sdk.integrations.argv
MOD sentry_sdk.integrations.ariadne
MOD sentry_sdk.integrations.arq
MOD sentry_sdk.integrations.asgi
MOD sentry_sdk.integrations.asyncpg
MOD sentry_sdk.integrations.atexit
MOD sentry_sdk.integrations.aws_lambda
MOD sentry_sdk.integrations.boto3
MOD sentry_sdk.integrations.bottle
MOD sentry_sdk.integrations.celery
MOD sentry_sdk.integrations.celery.beat
MOD sentry_sdk.integrations.celery.utils
MOD sentry_sdk.integrations.chalice
MOD sentry_sdk.integrations.clickhouse_driver
MOD sentry_sdk.integrations.cohere
MOD sentry_sdk.integrations.dedupe
MOD sentry_sdk.integrations.django
MOD sentry_sdk.integrations.excepthook
MOD sentry_sdk.integrations.falcon
MOD sentry_sdk.integrations.fastapi
MOD sentry_sdk.integrations.flask
MOD sentry_sdk.integrations.gql
MOD sentry_sdk.integrations.graphene
MOD sentry_sdk.integrations.httpx
MOD sentry_sdk.integrations.huey
MOD sentry_sdk.integrations.huggingface_hub
MOD sentry_sdk.integrations.langchain
MOD sentry_sdk.integrations.litestar
MOD sentry_sdk.integrations.logging
MOD sentry_sdk.integrations.loguru
MOD sentry_sdk.integrations.modules
MOD sentry_sdk.integrations.openai
MOD sentry_sdk.integrations.pymongo
MOD sentry_sdk.integrations.pyramid
MOD sentry_sdk.integrations.quart
MOD sentry_sdk.integrations.redis
MOD sentry_sdk.integrations.redis._sync_common
MOD sentry_sdk.integrations.redis.consts
MOD sentry_sdk.integrations.redis.modules
MOD sentry_sdk.integrations.redis.modules.caches
MOD sentry_sdk.integrations.redis.modules.queries
MOD sentry_sdk.integrations.redis.rb
MOD sentry_sdk.integrations.redis.redis
MOD sentry_sdk.integrations.redis.redis_cluster
MOD sentry_sdk.integrations.redis.redis_py_cluster_legacy
MOD sentry_sdk.integrations.redis.utils
MOD sentry_sdk.integrations.rq
MOD sentry_sdk.integrations.sanic
MOD sentry_sdk.integrations.sqlalchemy
MOD sentry_sdk.integrations.starlette
MOD sentry_sdk.integrations.starlite
MOD sentry_sdk.integrations.stdlib
MOD sentry_sdk.integrations.strawberry
MOD sentry_sdk.integrations.threading
MOD sentry_sdk.integrations.tornado
MOD sentry_sdk.integrations.wsgi
MOD sentry_sdk.metrics
MOD sentry_sdk.monitor
MOD sentry_sdk.profiler
MOD sentry_sdk.profiler.continuous_profiler
MOD sentry_sdk.profiler.transaction_profiler
MOD sentry_sdk.profiler.utils
MOD sentry_sdk.scope
MOD sentry_sdk.scrubber
MOD sentry_sdk.serializer
MOD sentry_sdk.session
MOD sentry_sdk.sessions
MOD sentry_sdk.spotlight
MOD sentry_sdk.tracing
MOD sentry_sdk.tracing_utils
MOD sentry_sdk.transport
MOD sentry_sdk.utils

MOD swarm_models
MOD swarm_models.base_llm
MOD swarm_models.base_multimodal_model

MOD swarms.agents
MOD swarms.agents.ape_agent
MOD swarms.agents.create_agents_from_yaml
MOD swarms.agents.stopping_conditions
MOD swarms.agents.tool_agent
MOD swarms.artifacts
MOD swarms.artifacts.main_artifact
MOD swarms.prompts
MOD swarms.prompts.ag_prompt
MOD swarms.prompts.agent_system_prompts
MOD swarms.prompts.code_interpreter
MOD swarms.prompts.documentation
MOD swarms.prompts.finance_agent_prompt
MOD swarms.prompts.growth_agent_prompt
MOD swarms.prompts.legal_agent_prompt
MOD swarms.prompts.multi_modal_autonomous_instruction_prompt
MOD swarms.prompts.operations_agent_prompt
MOD swarms.prompts.product_agent_prompt
MOD swarms.prompts.prompt
MOD swarms.prompts.prompt_generator
MOD swarms.prompts.prompt_generator_optimizer
MOD swarms.prompts.tools

MOD swarms.schemas
MOD swarms.schemas.agent_input_schema
MOD swarms.schemas.agent_step_schemas
MOD swarms.schemas.base_schemas

MOD swarms.structs
MOD swarms.structs.agent
MOD swarms.structs.agents_available
MOD swarms.structs.async_workflow
MOD swarms.structs.auto_swarm
MOD swarms.structs.base_structure
MOD swarms.structs.base_swarm
MOD swarms.structs.base_workflow
MOD swarms.structs.concat
MOD swarms.structs.concurrent_workflow
MOD swarms.structs.conversation
MOD swarms.structs.graph_workflow
MOD swarms.structs.groupchat
MOD swarms.structs.majority_voting
MOD swarms.structs.message
MOD swarms.structs.mixture_of_agents
MOD swarms.structs.multi_agent_collab
MOD swarms.structs.multi_agent_exec
MOD swarms.structs.omni_agent_types
MOD swarms.structs.queue_swarm
MOD swarms.structs.rearrange
MOD swarms.structs.round_robin
MOD swarms.structs.sequential_workflow
MOD swarms.structs.spreadsheet_swarm
MOD swarms.structs.swarm_arange
MOD swarms.structs.swarm_matcher
MOD swarms.structs.swarm_net
MOD swarms.structs.swarm_router
MOD swarms.structs.swarming_architectures
MOD swarms.structs.task
MOD swarms.structs.utils

MOD swarms.telemetry
MOD swarms.telemetry.auto_upgrade_swarms
MOD swarms.telemetry.bootup
MOD swarms.telemetry.capture_sys_data
MOD swarms.telemetry.check_update
MOD swarms.telemetry.sentry_active
MOD swarms.telemetry.sys_info
MOD swarms.telemetry.user_utils

MOD swarms.tools
MOD swarms.tools.base_tool
MOD swarms.tools.cohere_func_call_schema
MOD swarms.tools.func_calling_executor
MOD swarms.tools.func_calling_utils
MOD swarms.tools.func_to_str
MOD swarms.tools.function_util
MOD swarms.tools.json_former
MOD swarms.tools.logits_processor
MOD swarms.tools.openai_func_calling_schema_pydantic
MOD swarms.tools.openai_tool_creator_decorator
MOD swarms.tools.prebuilt
MOD swarms.tools.prebuilt.code_executor
MOD swarms.tools.prebuilt.math_eval
MOD swarms.tools.py_func_to_openai_func_str
MOD swarms.tools.pydantic_to_json
MOD swarms.tools.tool_parse_exec
MOD swarms.tools.tool_registry
MOD swarms.tools.tool_utils
MOD swarms.utils
MOD swarms.utils.add_docs_to_agents
MOD swarms.utils.any_to_str
MOD swarms.utils.calculate_func_metrics
MOD swarms.utils.class_args_wrapper
MOD swarms.utils.data_to_text
MOD swarms.utils.disable_logging
MOD swarms.utils.file_processing
MOD swarms.utils.formatter
MOD swarms.utils.litellm
MOD swarms.utils.loguru_logger
MOD swarms.utils.markdown_message
MOD swarms.utils.pandas_utils
MOD swarms.utils.parse_code
MOD swarms.utils.pdf_to_text
MOD swarms.utils.try_except_wrapper
MOD swarms.utils.wrapper_clusterop
MOD swarms_memory
MOD swarms_memory.vector_dbs
MOD swarms_memory.vector_dbs.chroma_db_wrapper
@ -0,0 +1,27 @@
# from https://github.com/encode/uvicorn/issues/678
[Unit]
Description=swarms
After=network.target

[Service]
Type=simple
User=swarms
Group=swarms
DynamicUser=true
WorkingDirectory=ROOT/var/run/swarms/
PrivateTmp=true
EnvironmentFile=ROOT/var/run/swarms/secrets/env
ExecStart=ROOT/var/run/uvicorn/env/bin/uvicorn \
    --proxy-headers \
    --forwarded-allow-ips='*' \
    --workers=4 \
    --port=54748 \
    --no-access-log \
    --uds ROOT/run/uvicorn/uvicorn-swarms-api.sock \
    _.asgi:application
ExecReload=/bin/kill -HUP ${MAINPID}
RestartSec=1
Restart=always

[Install]
WantedBy=multi-user.target
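Once the `ROOT` placeholders and the `_.asgi:application` import string are substituted with real values, the unit can be checked and installed in the usual way (assuming it is saved as `swarms-uvicorn.service`, the name the setup script above already starts). Note that `DynamicUser=true` together with a statically created `swarms` user may conflict; normally one or the other is used.

```bash
# Validate, install, and start the unit, then follow its logs.
sudo cp swarms-uvicorn.service /etc/systemd/system/
systemd-analyze verify /etc/systemd/system/swarms-uvicorn.service
sudo systemctl daemon-reload
sudo systemctl enable --now swarms-uvicorn
journalctl -u swarms-uvicorn -f
```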
@ -0,0 +1,3 @@
We can add the introspector to any project like this:

1. git submodule from `git/introspector/projects/projectname/` -> `project dir/introspector`
2. API link from `git/projectname/[issues,tasks,prs]` -> `project dir/introspector/[tasks,issues,prs]`
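A minimal sketch of step 1, with a placeholder URL standing in for the real introspector repository:

```bash
# Add the introspector checkout as a submodule of the target project.
# <introspector-repo-url> is a placeholder; substitute the real repository.
cd /path/to/projectname
git submodule add <introspector-repo-url> introspector
git submodule update --init --recursive
git commit -m "add introspector submodule"
```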
@ -0,0 +1,618 @@
import torch
from torch.utils.data import DataLoader, TensorDataset
import numpy as np
from loguru import logger

from dataclasses import dataclass
from typing import Optional, Tuple, Dict
import math
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor


@dataclass
class TransformerConfig:
    """Configuration class for MoE Transformer model parameters."""

    vocab_size: int = 50257
    hidden_size: int = 768
    num_attention_heads: int = 12
    num_expert_layers: int = 4
    num_experts: int = 8
    expert_capacity: int = 32
    max_position_embeddings: int = 1024
    dropout_prob: float = 0.1
    layer_norm_epsilon: float = 1e-5
    initializer_range: float = 0.02
    num_query_groups: int = 4  # For multi-query attention


class ExpertLayer(nn.Module):
    """Individual expert neural network."""

    def __init__(self, config: TransformerConfig):
        super().__init__()
        self.fc1 = nn.Linear(
            config.hidden_size, 4 * config.hidden_size
        )
        self.fc2 = nn.Linear(
            4 * config.hidden_size, config.hidden_size
        )
        self.activation = nn.GELU()
        self.dropout = nn.Dropout(config.dropout_prob)

    def forward(self, x: Tensor) -> Tensor:
        x = self.fc1(x)
        x = self.activation(x)
        x = self.dropout(x)
        x = self.fc2(x)
        return x


class MixtureOfExperts(nn.Module):
    """Mixture of Experts layer with dynamic routing."""

    def __init__(self, config: TransformerConfig):
        super().__init__()
        self.num_experts = config.num_experts
        self.expert_capacity = config.expert_capacity

        # Create expert networks
        self.experts = nn.ModuleList(
            [ExpertLayer(config) for _ in range(config.num_experts)]
        )

        # Router network
        self.router = nn.Linear(
            config.hidden_size, config.num_experts
        )

    def forward(self, x: Tensor) -> Tuple[Tensor, Dict]:
        """Route inputs to experts and combine outputs."""
        batch_size, seq_len, hidden_size = x.shape

        # Calculate routing probabilities
        router_logits = self.router(x)
        routing_weights = F.softmax(router_logits, dim=-1)

        # Select top-k experts
        top_k = 2
        gates, indices = torch.topk(routing_weights, top_k, dim=-1)
        gates = F.softmax(gates, dim=-1)

        # Process inputs through selected experts
        final_output = torch.zeros_like(x)
        router_load = torch.zeros(self.num_experts, device=x.device)

        for i in range(top_k):
            expert_index = indices[..., i]
            gate = gates[..., i : i + 1]

            # Count expert assignments
            for j in range(self.num_experts):
                router_load[j] += (expert_index == j).float().sum()

            # Process through selected experts
            for j in range(self.num_experts):
                mask = expert_index == j
                if not mask.any():
                    continue

                expert_input = x[mask]
                expert_output = self.experts[j](expert_input)
                final_output[mask] += gate[mask] * expert_output

        aux_loss = router_load.float().var() / (
            router_load.float().mean() ** 2
        )

        return final_output, {"load_balancing_loss": aux_loss}


class MultiQueryAttention(nn.Module):
    """Multi-Query Attention mechanism with proper multi-query group handling."""

    def __init__(self, config: TransformerConfig):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.num_query_groups = config.num_query_groups
        self.hidden_size = config.hidden_size
        self.head_dim = (
            config.hidden_size // config.num_attention_heads
        )

        # Query projection maintains full head dimension
        self.q_proj = nn.Linear(
            config.hidden_size, config.hidden_size
        )

        # Key and value projections use reduced number of heads (query groups)
        self.k_proj = nn.Linear(
            config.hidden_size,
            self.head_dim * config.num_query_groups,
        )
        self.v_proj = nn.Linear(
            config.hidden_size,
            self.head_dim * config.num_query_groups,
        )

        self.dropout = nn.Dropout(config.dropout_prob)

        # Calculate heads per group for proper reshaping
        self.heads_per_group = (
            self.num_attention_heads // self.num_query_groups
        )

    def forward(
        self,
        hidden_states: Tensor,
        attention_mask: Optional[Tensor] = None,
        cache: Optional[Dict[str, Tensor]] = None,
    ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]:
        batch_size, seq_length, _ = hidden_states.shape

        # Project queries, keys, and values
        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        # Reshape queries to full number of heads
        queries = queries.view(
            batch_size,
            seq_length,
            self.num_attention_heads,
            self.head_dim,
        )

        # Reshape keys and values to number of query groups
        keys = keys.view(
            batch_size,
            seq_length,
            self.num_query_groups,
            self.head_dim,
        )
        values = values.view(
            batch_size,
            seq_length,
            self.num_query_groups,
            self.head_dim,
        )

        # Transpose for batch matrix multiplication
        queries = queries.transpose(
            1, 2
        )  # (batch, n_heads, seq_len, head_dim)
        keys = keys.transpose(
            1, 2
        )  # (batch, n_groups, seq_len, head_dim)
        values = values.transpose(
            1, 2
        )  # (batch, n_groups, seq_len, head_dim)

        # Repeat keys and values for each head in the group
        keys = keys.repeat_interleave(self.heads_per_group, dim=1)
        values = values.repeat_interleave(self.heads_per_group, dim=1)

        # Compute attention scores
        scale = 1.0 / math.sqrt(self.head_dim)
        scores = torch.matmul(queries, keys.transpose(-2, -1)) * scale

        if attention_mask is not None:
            # Expand attention mask to match scores dimensions
            expanded_mask = attention_mask.unsqueeze(1).unsqueeze(2)
            expanded_mask = expanded_mask.expand(
                batch_size,
                self.num_attention_heads,
                seq_length,
                seq_length,
            )
            mask_value = torch.finfo(scores.dtype).min
            attention_mask = expanded_mask.eq(0).float() * mask_value
            scores = scores + attention_mask

        attention_weights = F.softmax(scores, dim=-1)
        attention_weights = self.dropout(attention_weights)

        # Compute attention output
        attention_output = torch.matmul(attention_weights, values)
        attention_output = attention_output.transpose(1, 2)
        attention_output = attention_output.reshape(
            batch_size, seq_length, -1
        )

        return attention_output, None


class MoETransformer(nn.Module):
    """
    Production-grade Transformer model with Mixture of Experts and Multi-Query Attention.

    Features:
    - Multi-Query Attention mechanism for efficient inference
    - Mixture of Experts for dynamic routing and specialization
    - Real-time weight updates based on input similarity
    - Built-in logging and monitoring
    - Type annotations for better code maintainability
    """

    def __init__(self, config: TransformerConfig):
        super().__init__()
        self.config = config

        # Initialize components
        self.embedding = nn.Embedding(
            config.vocab_size, config.hidden_size
        )
        self.position_embedding = nn.Embedding(
            config.max_position_embeddings, config.hidden_size
        )

        # Multi-Query Attention layers
        self.attention_layers = nn.ModuleList(
            [
                MultiQueryAttention(config)
                for _ in range(config.num_expert_layers)
            ]
        )

        # Mixture of Experts layers
        self.moe_layers = nn.ModuleList(
            [
                MixtureOfExperts(config)
                for _ in range(config.num_expert_layers)
            ]
        )

        # Layer normalization and dropout
        self.layer_norm = nn.LayerNorm(
            config.hidden_size, eps=config.layer_norm_epsilon
        )
        self.dropout = nn.Dropout(config.dropout_prob)

        # Output projection
        self.output_projection = nn.Linear(
            config.hidden_size, config.vocab_size
        )

        # Initialize weights
        self.apply(self._init_weights)
        logger.info("Initialized MoETransformer model")

    def _init_weights(self, module: nn.Module):
        """Initialize model weights."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(
                mean=0.0, std=self.config.initializer_range
            )
            if (
                isinstance(module, nn.Linear)
                and module.bias is not None
            ):
                module.bias.data.zero_()

    def get_position_embeddings(self, position_ids: Tensor) -> Tensor:
        """Generate position embeddings."""
        return self.position_embedding(position_ids)

    def forward(
        self,
        input_ids: Tensor,
        attention_mask: Optional[Tensor] = None,
        position_ids: Optional[Tensor] = None,
        cache: Optional[Dict[str, Tensor]] = None,
    ) -> Tuple[Tensor, Dict]:
        """
        Forward pass through the model.

        Args:
            input_ids: Input token IDs
            attention_mask: Attention mask for padding
            position_ids: Position IDs for positional encoding
            cache: Cache for key/value states in generation

        Returns:
            tuple: (logits, auxiliary_outputs)
        """
        batch_size, seq_length = input_ids.shape

        if position_ids is None:
            position_ids = torch.arange(
                seq_length, dtype=torch.long, device=input_ids.device
            )
            position_ids = position_ids.unsqueeze(0).expand_as(
                input_ids
            )

        # Get embeddings
        inputs_embeds = self.embedding(input_ids)
        position_embeds = self.get_position_embeddings(position_ids)
        hidden_states = inputs_embeds + position_embeds

        # Initialize auxiliary outputs
        aux_outputs = {"moe_losses": []}

        # Process through transformer layers
        for attention_layer, moe_layer in zip(
            self.attention_layers, self.moe_layers
        ):
            # Multi-Query Attention
            attention_output, _ = attention_layer(
                hidden_states, attention_mask, cache
            )
            hidden_states = self.layer_norm(
                hidden_states + attention_output
            )

            # Mixture of Experts
            moe_output, moe_aux = moe_layer(hidden_states)
            hidden_states = self.layer_norm(
                hidden_states + moe_output
            )
            aux_outputs["moe_losses"].append(
                moe_aux["load_balancing_loss"]
            )

        # Final output projection
        logits = self.output_projection(hidden_states)

        return logits, aux_outputs

    def fetch_loss(
        self,
        logits: Tensor,
        labels: Tensor,
        aux_outputs: Dict,
        reduction: str = "mean",
    ) -> Tensor:
        """
        Calculate the total loss including MoE balancing losses.

        Args:
            logits: Model output logits
            labels: Ground truth labels
            aux_outputs: Auxiliary outputs from forward pass
            reduction: Loss reduction method

        Returns:
            Tensor: Total loss
        """
        # Calculate cross entropy loss
        ce_loss = F.cross_entropy(
            logits.view(-1, self.config.vocab_size),
            labels.view(-1),
            reduction=reduction,
        )

        # Calculate MoE loss
        moe_loss = torch.stack(aux_outputs["moe_losses"]).mean()

        # Combine losses
        total_loss = ce_loss + 0.01 * moe_loss

        logger.debug(
            f"CE Loss: {ce_loss.item():.4f}, "
            f"MoE Loss: {moe_loss.item():.4f}"
        )

        return total_loss

    @torch.no_grad()
    def generate(
        self,
        input_ids: Tensor,
        max_length: int = 100,
        temperature: float = 1.0,
        top_k: int = 50,
        top_p: float = 0.9,
    ) -> Tensor:
        """
        Generate text using the model.

        Args:
            input_ids: Initial input tokens
            max_length: Maximum sequence length to generate
            temperature: Sampling temperature
            top_k: Number of highest probability tokens to keep
            top_p: Cumulative probability for nucleus sampling

        Returns:
            Tensor: Generated token IDs
        """
        batch_size = input_ids.shape[0]
        device = input_ids.device

        # Initialize sequence with input_ids
        generated = input_ids

        # Cache for key-value pairs
        cache = {}

        for _ in range(max_length):
            # Get position IDs for current sequence
            position_ids = torch.arange(
                generated.shape[1], dtype=torch.long, device=device
            )
            position_ids = position_ids.unsqueeze(0).expand(
                batch_size, -1
            )

            # Forward pass
            logits, _ = self.forward(
                generated, position_ids=position_ids, cache=cache
            )

            # Get next token logits
            next_token_logits = logits[:, -1, :] / temperature

            # Apply top-k filtering
            if top_k > 0:
                indices_to_remove = (
                    next_token_logits
                    < torch.topk(next_token_logits, top_k)[0][
                        ..., -1, None
                    ]
                )
                next_token_logits[indices_to_remove] = float("-inf")

            # Apply top-p (nucleus) filtering
            if top_p < 1.0:
                sorted_logits, sorted_indices = torch.sort(
                    next_token_logits, descending=True
                )
                cumulative_probs = torch.cumsum(
                    F.softmax(sorted_logits, dim=-1), dim=-1
                )

                # Remove tokens with cumulative probability above the threshold
                sorted_indices_to_remove = cumulative_probs > top_p
                sorted_indices_to_remove[..., 1:] = (
                    sorted_indices_to_remove[..., :-1].clone()
                )
                sorted_indices_to_remove[..., 0] = 0

                indices_to_remove = sorted_indices[
                    sorted_indices_to_remove
                ]
                next_token_logits[indices_to_remove] = float("-inf")

            # Sample next token
            probs = F.softmax(next_token_logits, dim=-1)
            next_token = torch.multinomial(probs, num_samples=1)

            # Append next token to sequence
            generated = torch.cat((generated, next_token), dim=1)

            # Check for end of sequence token
            if (next_token == self.config.vocab_size - 1).all():
                break

        return generated


# Initialize model configuration
config = TransformerConfig(
    vocab_size=50257,
    hidden_size=768,
    num_attention_heads=12,
    num_expert_layers=4,
    num_experts=8,
    expert_capacity=32,
    max_position_embeddings=1024,
    num_query_groups=4,
)


def prepare_sample_data(
    batch_size: int = 8,
    seq_length: int = 512,
    vocab_size: int = 50257,
) -> DataLoader:
    """Create sample data for demonstration."""
    # Create random input sequences
    input_ids = torch.randint(
        0, vocab_size, (100, seq_length)  # 100 samples
    )

    # Create target sequences (shifted by 1)
    labels = torch.randint(0, vocab_size, (100, seq_length))

    # Create attention masks (1 for real tokens, 0 for padding)
    attention_mask = torch.ones_like(input_ids)

    # Create dataset and dataloader
    dataset = TensorDataset(input_ids, attention_mask, labels)
    dataloader = DataLoader(
        dataset, batch_size=batch_size, shuffle=True
    )

    return dataloader


def train_step(
    model: MoETransformer,
    batch: tuple,
    optimizer: torch.optim.Optimizer,
    device: str = "cuda" if torch.cuda.is_available() else "cpu",
) -> float:
    """Execute single training step."""
    model.train()
    optimizer.zero_grad()

    # Unpack batch
    input_ids, attention_mask, labels = [b.to(device) for b in batch]

    # Forward pass
    logits, aux_outputs = model(
        input_ids=input_ids, attention_mask=attention_mask
    )

    # Calculate loss
    loss = model.fetch_loss(logits, labels, aux_outputs)

    # Backward pass
    loss.backward()
    optimizer.step()

    return loss.item()


def main():
    # Set device
    device = "cuda" if torch.cuda.is_available() else "cpu"
    logger.info(f"Using device: {device}")

    # Initialize model
    model = MoETransformer(config).to(device)
    logger.info("Model initialized")

    # Setup optimizer
    optimizer = torch.optim.AdamW(
        model.parameters(), lr=1e-4, weight_decay=0.01
    )

    # Prepare data
    dataloader = prepare_sample_data()
    logger.info("Data prepared")

    # Training loop
    num_epochs = 3
    for epoch in range(num_epochs):
        epoch_losses = []

        for batch_idx, batch in enumerate(dataloader):
            loss = train_step(model, batch, optimizer, device)
            epoch_losses.append(loss)

            if batch_idx % 10 == 0:
                logger.info(
                    f"Epoch {epoch+1}/{num_epochs} "
                    f"Batch {batch_idx}/{len(dataloader)} "
                    f"Loss: {loss:.4f}"
                )

        avg_loss = np.mean(epoch_losses)
        logger.info(f"Epoch {epoch+1} average loss: {avg_loss:.4f}")

    # Generation example
    model.eval()
    with torch.no_grad():
        # Prepare input prompt
        prompt = torch.randint(0, config.vocab_size, (1, 10)).to(
            device
        )

        # Generate sequence
        generated = model.generate(
            input_ids=prompt,
            max_length=50,
            temperature=0.7,
            top_k=50,
            top_p=0.9,
        )

        logger.info(f"Generated sequence shape: {generated.shape}")


if __name__ == "__main__":
    main()
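One note on the auxiliary objective above: `router_load.float().var() / (router_load.float().mean() ** 2)` is the squared coefficient of variation of the per-expert assignment counts, so perfectly balanced routing (every expert handling the same number of tokens) yields a loss of 0, while routing that collapses onto a few experts inflates the variance and is penalized. The fixed `0.01` weight in `fetch_loss` keeps this balancing term from dominating the cross-entropy objective.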
@ -0,0 +1,433 @@
import asyncio
import json
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import Dict, List, Optional, Set

import aiohttp
import matplotlib.pyplot as plt
import networkx as nx
import websockets
from loguru import logger

from swarms import Agent

TREND_AGENT_PROMPT = """You are a specialized blockchain trend analysis agent. Your role:
1. Analyze transaction patterns in Solana blockchain data
2. Identify volume trends, price movements, and temporal patterns
3. Focus on whale movements and their market impact
4. Format findings in clear, structured JSON
5. Include confidence scores for each insight
6. Flag unusual patterns or anomalies
7. Provide historical context for significant movements

Output format:
{
    "trends": [
        {"pattern": str, "confidence": float, "impact": str}
    ],
    "whale_activity": {...},
    "temporal_analysis": {...}
}"""

RISK_AGENT_PROMPT = """You are a blockchain risk assessment specialist. Your tasks:
1. Identify suspicious transaction patterns
2. Monitor for known exploit signatures
3. Assess wallet clustering and relationship patterns
4. Evaluate transaction velocity and size anomalies
5. Check for bridge-related risks
6. Monitor smart contract interactions
7. Flag potential wash trading

Output format:
{
    "risk_score": float,
    "flags": [...],
    "recommendations": [...]
}"""

SUMMARY_AGENT_PROMPT = """You are a blockchain data synthesis expert. Your responsibilities:
1. Combine insights from trend and risk analyses
2. Prioritize actionable intelligence
3. Highlight critical patterns
4. Generate executive summaries
5. Provide market context
6. Make predictions with confidence intervals
7. Suggest trading strategies based on data

Output format:
{
    "key_insights": [...],
    "market_impact": str,
    "recommendations": {...}
}"""


@dataclass
class Transaction:
    signature: str
    timestamp: datetime
    amount: float
    from_address: str
    to_address: str


class SolanaRPC:
    def __init__(
        self, endpoint="https://api.mainnet-beta.solana.com"
    ):
        self.endpoint = endpoint
        self.session = None

    async def get_signatures(self, address: str) -> List[Dict]:
        if not self.session:
            self.session = aiohttp.ClientSession()

        payload = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "getSignaturesForAddress",
            "params": [address, {"limit": 100}],
        }

        async with self.session.post(
            self.endpoint, json=payload
        ) as response:
            result = await response.json()
            return result.get("result", [])

    async def get_transaction(self, signature: str) -> Dict:
        payload = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "getTransaction",
            "params": [
                signature,
                {
                    "encoding": "json",
                    "maxSupportedTransactionVersion": 0,
                },
            ],
        }

        async with self.session.post(
            self.endpoint, json=payload
        ) as response:
            result = await response.json()
            return result.get("result", {})


class AlertSystem:
    def __init__(self, email: str, threshold: float = 1000.0):
        self.email = email
        self.threshold = threshold
        self.smtp_server = "smtp.gmail.com"
        self.smtp_port = 587

    async def check_and_alert(
        self, transaction: Transaction, risk_score: float
    ):
        if transaction.amount > self.threshold or risk_score > 0.8:
            await self.send_alert(transaction, risk_score)

    async def send_alert(
        self, transaction: Transaction, risk_score: float
    ):
        # msg = MIMEText(
        #     f"High-risk transaction detected:\n"
        #     f"Amount: {transaction.amount} SOL\n"
        #     f"Risk Score: {risk_score}\n"
        #     f"Signature: {transaction.signature}"
        # )
        logger.info(
            f"Alert sent for transaction {transaction.signature}"
        )


class WalletClusterAnalyzer:
    def __init__(self):
        self.graph = nx.Graph()
        self.known_wallets: Set[str] = set()

    def update_graph(self, transaction: Transaction):
        self.graph.add_edge(
            transaction.from_address,
            transaction.to_address,
            weight=transaction.amount,
        )
        self.known_wallets.add(transaction.from_address)
        self.known_wallets.add(transaction.to_address)

    def identify_clusters(self) -> Dict:
        communities = nx.community.greedy_modularity_communities(
            self.graph
        )
        return {
            "clusters": [list(c) for c in communities],
            "central_wallets": [
                wallet
                for wallet in self.known_wallets
                if self.graph.degree[wallet] > 5
            ],
        }


class TransactionVisualizer:
    def __init__(self):
        self.transaction_history = []

    def add_transaction(self, transaction: Transaction):
        self.transaction_history.append(asdict(transaction))

    def generate_volume_chart(self) -> str:
        volumes = [tx["amount"] for tx in self.transaction_history]
        plt.figure(figsize=(12, 6))
        plt.plot(volumes)
        plt.title("Transaction Volume Over Time")
        plt.savefig("volume_chart.png")
        return "volume_chart.png"

    def generate_network_graph(
        self, wallet_analyzer: WalletClusterAnalyzer
    ) -> str:
        plt.figure(figsize=(15, 15))
        pos = nx.spring_layout(wallet_analyzer.graph)
        nx.draw(
            wallet_analyzer.graph,
            pos,
            node_size=1000,
            node_color="lightblue",
            with_labels=True,
        )
        plt.savefig("network_graph.png")
        return "network_graph.png"


class SolanaMultiAgentAnalyzer:
    def __init__(
        self,
        min_amount: float = 50.0,
        websocket_url: str = "wss://api.mainnet-beta.solana.com",
        alert_email: str = None,
    ):
        self.rpc = SolanaRPC()
        self.websocket_url = websocket_url
        self.min_amount = min_amount
        self.transactions = []

        self.wallet_analyzer = WalletClusterAnalyzer()
        self.visualizer = TransactionVisualizer()
        self.alert_system = (
            AlertSystem(alert_email) if alert_email else None
        )

        self.trend_agent = Agent(
            agent_name="trend-analyzer",
            system_prompt=TREND_AGENT_PROMPT,
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,
        )

        self.risk_agent = Agent(
            agent_name="risk-analyzer",
            system_prompt=RISK_AGENT_PROMPT,
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,
        )

        self.summary_agent = Agent(
            agent_name="summary-agent",
            system_prompt=SUMMARY_AGENT_PROMPT,
            model_name="gpt-4o-mini",
            max_loops=1,
            streaming_on=True,
        )

        logger.add(
            "solana_analysis.log", rotation="500 MB", level="INFO"
        )

    async def start_websocket_stream(self):
        async with websockets.connect(
            self.websocket_url
        ) as websocket:
            subscribe_message = {
                "jsonrpc": "2.0",
                "id": 1,
                "method": "programSubscribe",
                "params": [
                    "11111111111111111111111111111111",
                    {"encoding": "json", "commitment": "confirmed"},
                ],
            }
            await websocket.send(json.dumps(subscribe_message))

            while True:
                try:
                    msg = await websocket.recv()
                    transaction = await self.parse_websocket_message(
                        msg
                    )
                    if (
                        transaction
                        and transaction.amount >= self.min_amount
                    ):
                        await self.process_transaction(transaction)
                except Exception as e:
                    logger.error(f"Websocket error: {e}")
                    await asyncio.sleep(5)

    async def parse_websocket_message(
        self, msg: str
    ) -> Optional[Transaction]:
        try:
            data = json.loads(msg)
            if "params" in data and "result" in data["params"]:
                tx_data = data["params"]["result"]
                return Transaction(
                    signature=tx_data["signature"],
                    timestamp=datetime.fromtimestamp(
                        tx_data["blockTime"]
                    ),
                    amount=float(
                        tx_data["meta"]["postBalances"][0]
                        - tx_data["meta"]["preBalances"][0]
                    )
                    / 1e9,
                    from_address=tx_data["transaction"]["message"][
                        "accountKeys"
                    ][0],
                    to_address=tx_data["transaction"]["message"][
                        "accountKeys"
                    ][1],
                )
        except Exception as e:
            logger.error(f"Error parsing websocket message: {e}")
        return None

    async def process_transaction(self, transaction: Transaction):
        self.wallet_analyzer.update_graph(transaction)
        self.visualizer.add_transaction(transaction)

        risk_analysis = await self.risk_agent.run(
            f"Analyze risk for transaction: {json.dumps(asdict(transaction))}"
        )

        if self.alert_system:
            await self.alert_system.check_and_alert(
                transaction, risk_analysis.get("risk_score", 0)
            )

    async def fetch_transactions(self) -> List[Transaction]:
        try:
            signatures = await self.rpc.get_signatures(
                "11111111111111111111111111111111"
            )
            transactions = []

            for sig_info in signatures:
                tx_data = await self.rpc.get_transaction(
                    sig_info["signature"]
                )
                if not tx_data or "meta" not in tx_data:
                    continue

                pre_balances = tx_data["meta"]["preBalances"]
                post_balances = tx_data["meta"]["postBalances"]
                amount = abs(pre_balances[0] - post_balances[0]) / 1e9

                if amount >= self.min_amount:
                    tx = Transaction(
                        signature=sig_info["signature"],
                        timestamp=datetime.fromtimestamp(
                            tx_data["blockTime"]
                        ),
                        amount=amount,
                        from_address=tx_data["transaction"][
                            "message"
                        ]["accountKeys"][0],
                        to_address=tx_data["transaction"]["message"][
                            "accountKeys"
                        ][1],
                    )
                    transactions.append(tx)

            return transactions
        except Exception as e:
            logger.error(f"Error fetching transactions: {e}")
            return []

    async def analyze_transactions(
        self, transactions: List[Transaction]
    ) -> Dict:
        tx_data = [asdict(tx) for tx in transactions]
        cluster_data = self.wallet_analyzer.identify_clusters()

        trend_analysis = await self.trend_agent.run(
            f"Analyze trends in: {json.dumps(tx_data)}"
        )
        print(trend_analysis)

        risk_analysis = await self.risk_agent.run(
            f"Analyze risks in: {json.dumps({'transactions': tx_data, 'clusters': cluster_data})}"
        )
        print(risk_analysis)

        summary = await self.summary_agent.run(
            f"Synthesize insights from: {trend_analysis}, {risk_analysis}"
        )
        print(summary)

        volume_chart = self.visualizer.generate_volume_chart()
        network_graph = self.visualizer.generate_network_graph(
            self.wallet_analyzer
        )

        return {
            "transactions": tx_data,
            "trend_analysis": trend_analysis,
            "risk_analysis": risk_analysis,
            "cluster_analysis": cluster_data,
            "summary": summary,
            "visualizations": {
                "volume_chart": volume_chart,
                "network_graph": network_graph,
            },
        }

    async def run_continuous_analysis(self):
        logger.info("Starting continuous analysis")
        asyncio.create_task(self.start_websocket_stream())

        while True:
            try:
                transactions = await self.fetch_transactions()
                if transactions:
                    analysis = await self.analyze_transactions(
                        transactions
                    )
                    timestamp = datetime.now().strftime(
                        "%Y%m%d_%H%M%S"
                    )
                    with open(f"analysis_{timestamp}.json", "w") as f:
                        json.dump(analysis, f, indent=2, default=str)
                    logger.info(
                        f"Analysis completed: analysis_{timestamp}.json"
                    )
                await asyncio.sleep(60)
            except Exception as e:
                logger.error(f"Error in analysis loop: {e}")
                await asyncio.sleep(60)


# Add to __main__:
if __name__ == "__main__":
    logger.info("Starting Solana analyzer...")
    analyzer = SolanaMultiAgentAnalyzer(alert_email="your@email.com")
    try:
        asyncio.run(analyzer.run_continuous_analysis())
    except Exception as e:
        logger.error(f"Critical error: {e}")
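One caveat in the listing above: `process_transaction` calls `risk_analysis.get("risk_score", 0)`, but the agents are prompted to return JSON as text, so the value coming back from `run()` would normally need a `json.loads(...)` step before `.get()` can work; as written, that line assumes the framework already hands back a parsed dict.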
@ -0,0 +1,42 @@
from swarms.structs.tree_swarm import ForestSwarm, Tree, TreeAgent


agents_tree1 = [
    TreeAgent(
        system_prompt="Stock Analysis Agent",
        agent_name="Stock Analysis Agent",
    ),
    TreeAgent(
        system_prompt="Financial Planning Agent",
        agent_name="Financial Planning Agent",
    ),
    TreeAgent(
        agent_name="Retirement Strategy Agent",
        system_prompt="Retirement Strategy Agent",
    ),
]

agents_tree2 = [
    TreeAgent(
        system_prompt="Tax Filing Agent",
        agent_name="Tax Filing Agent",
    ),
    TreeAgent(
        system_prompt="Investment Strategy Agent",
        agent_name="Investment Strategy Agent",
    ),
    TreeAgent(
        system_prompt="ROTH IRA Agent", agent_name="ROTH IRA Agent"
    ),
]

# Create trees
tree1 = Tree(tree_name="Financial Tree", agents=agents_tree1)
tree2 = Tree(tree_name="Investment Tree", agents=agents_tree2)

# Create the ForestSwarm
multi_agent_structure = ForestSwarm(trees=[tree1, tree2])

# Run a task
task = "Our company is incorporated in delaware, how do we do our taxes for free?"
multi_agent_structure.run(task)
@ -0,0 +1,206 @@
from swarms import Agent
from loguru import logger
import random
import re

# Configure loguru
logger.add("zkp_log.log", rotation="500 KB", retention="10 days", level="INFO")


class ProverAgent:
    """
    Prover Agent for Zero Knowledge Proof.

    Responsibilities:
    - Generate commitments based on a secret.
    - Respond to challenges from the Verifier.

    Attributes:
        agent (Agent): Swarms agent instance.
        p (int): The prime modulus.
        g (int): The generator.
        x (int): The Prover's secret.
    """

    def __init__(self, p: int, g: int, secret: int):
        self.p = p
        self.g = g
        self.x = secret  # Prover's secret
        self.agent = Agent(
            agent_name="ProverAgent",
            model_name="gpt-4o-mini",
            max_loops=1,
            interactive=False,
            streaming_on=True,
            system_prompt=(
                "You are the Prover in a Zero Knowledge Proof (ZKP) system. "
                "Your responsibilities are to generate commitments based on a secret value and "
                "respond to challenges from the Verifier without revealing the secret. "
                "Follow mathematical rules of modular arithmetic when performing computations."
            ),
        )
        logger.info("Initialized ProverAgent with p={}, g={}, secret={}", p, g, secret)

    def generate_commitment(self) -> tuple[int, int]:
        """
        Generates a random commitment for the proof.

        Returns:
            tuple[int, int]: The random value (r) and the commitment (t).
        """
        r = random.randint(1, self.p - 2)
        task = (
            f"Compute the commitment t = g^r % p for g={self.g}, r={r}, p={self.p}. "
            "Return only the numerical value of t as an integer."
        )
        t = self.agent.run(task=task)
        t_value = self._extract_integer(t, "commitment")
        logger.info("Prover generated commitment: r={}, t={}", r, t_value)
        return r, t_value

    def _extract_integer(self, response: str, label: str) -> int:
        """
        Extracts an integer from the LLM response.

        Args:
            response (str): The response from the agent.
            label (str): A label for logging purposes.

        Returns:
            int: The extracted integer value.
        """
        try:
            # Use regex to find the first integer in the response
            match = re.search(r"\b\d+\b", response)
            if match:
                value = int(match.group(0))
                return value
            else:
                raise ValueError(f"No integer found in {label} response: {response}")
        except Exception as e:
            logger.error("Failed to extract integer from {} response: {}", label, response)
            raise ValueError(f"Invalid {label} response: {response}") from e

    def respond_to_challenge(self, r: int, c: int) -> int:
        """
        Computes the response to a challenge.

        Args:
            r (int): The random value used in the commitment.
            c (int): The challenge issued by the Verifier.

        Returns:
            int: The response (z).
        """
        task = f"Compute the response z = (r + c * x) % (p-1) for r={r}, c={c}, x={self.x}, p={self.p}."
        z = self.agent.run(task=task)
        # Parse the agent's text output with the same helper used for commitments,
        # rather than calling int() on the raw response.
        z_value = self._extract_integer(z, "response")
        logger.info("Prover responded to challenge: z={}", z_value)
        return z_value


class VerifierAgent:
    """
    Verifier Agent for Zero Knowledge Proof.

    Responsibilities:
    - Issue challenges to the Prover.
    - Verify the Prover's response.

    Attributes:
        agent (Agent): Swarms agent instance.
        p (int): The prime modulus.
        g (int): The generator.
        y (int): The public value from the Prover.
    """

    def __init__(self, p: int, g: int, y: int):
        self.p = p
        self.g = g
        self.y = y  # Public value
        self.agent = Agent(
            agent_name="VerifierAgent",
            model_name="gpt-4o-mini",
            max_loops=1,
            interactive=False,
            streaming_on=True,
            system_prompt=(
                "You are the Verifier in a Zero Knowledge Proof (ZKP) system. "
                "Your responsibilities are to issue random challenges and verify the Prover's response. "
                "Use modular arithmetic to check if the proof satisfies g^z % p == (t * y^c) % p."
            ),
        )
        logger.info("Initialized VerifierAgent with p={}, g={}, y={}", p, g, y)

    def issue_challenge(self) -> int:
        """
        Issues a random challenge to the Prover.

        Returns:
            int: The challenge value (c).
        """
        c = random.randint(1, 10)
        logger.info("Verifier issued challenge: c={}", c)
        return c

    def verify_proof(self, t: int, z: int, c: int) -> bool:
        """
        Verifies the Prover's response.

        Args:
            t (int): The commitment from the Prover.
            z (int): The response from the Prover.
            c (int): The challenge issued to the Prover.

        Returns:
            bool: True if the proof is valid, False otherwise.
        """
        task = f"Verify if g^z % p == (t * y^c) % p for g={self.g}, z={z}, p={self.p}, t={t}, y={self.y}, c={c}."
        verification_result = self.agent.run(task=task)
        is_valid = verification_result.strip().lower() == "true"
        logger.info("Verifier checked proof: t={}, z={}, c={}, valid={}", t, z, c, is_valid)
        return is_valid


class CoordinatorAgent:
    """
    Coordinator for orchestrating the Zero Knowledge Proof protocol.

    Responsibilities:
    - Initialize parameters.
    - Facilitate interaction between Prover and Verifier agents.
    """

    def __init__(self, p: int, g: int, secret: int):
        self.p = p
        self.g = g
        self.prover = ProverAgent(p, g, secret)
        y = pow(g, secret, p)  # Public value
        self.verifier = VerifierAgent(p, g, y)
        logger.info("Coordinator initialized with p={}, g={}, secret={}", p, g, secret)

    def orchestrate(self) -> bool:
        """
        Orchestrates the Zero Knowledge Proof protocol.

        Returns:
            bool: True if the proof is valid, False otherwise.
        """
        logger.info("Starting ZKP protocol orchestration.")
        r, t = self.prover.generate_commitment()
        c = self.verifier.issue_challenge()
        z = self.prover.respond_to_challenge(r, c)
        is_valid = self.verifier.verify_proof(t, z, c)
        logger.info("ZKP protocol completed. Valid proof: {}", is_valid)
        return is_valid


if __name__ == "__main__":
    # Example parameters
    p = 23  # Prime number
    g = 5  # Generator
    secret = 7  # Prover's secret

    # Initialize the Coordinator and run the protocol
    coordinator = CoordinatorAgent(p, g, secret)
    result = coordinator.orchestrate()
    print(f"Zero Knowledge Proof Verification Result: {'Valid' if result else 'Invalid'}")
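With the example parameters above (p=23, g=5, secret x=7, so y = 5^7 mod 23 = 17), one honest round of the Schnorr-style arithmetic the agents are asked to perform can be checked directly; the values of r and c below are just an illustrative choice:

```bash
python3 - <<'EOF'
p, g, x = 23, 5, 7
y = pow(g, x, p)           # public value: 17
r, c = 3, 2                # commitment randomness and challenge
t = pow(g, r, p)           # commitment: 10
z = (r + c * x) % (p - 1)  # response: 17
# Verifier's check: g^z == t * y^c (mod p); both sides equal 15 here.
assert pow(g, z, p) == (t * pow(y, c, p)) % p
print("proof valid")
EOF
```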