pull/24/head
Kye 1 year ago
parent cf143bd564
commit c460464ec1

@@ -0,0 +1,23 @@
name: Quality

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

jobs:
  lint:
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
    steps:
      - name: Checkout actions
        uses: actions/checkout@v1
        with:
          fetch-depth: 0
      - name: Init environment
        uses: ./.github/actions/init-environment
      - name: Run linter
        run: |
          pylint `git diff --name-only --diff-filter=d origin/main HEAD | grep -E '\.py$' | tr '\n' ' '`
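This step lints only the Python files that changed relative to origin/main, so pre-existing warnings elsewhere in the repository do not fail the job; fetch-depth: 0 is what makes the comparison against origin/main possible on the runner. A rough local equivalent, sketched in Python (the script name and the base branch are assumptions, not part of the workflow):

# lint_changed.py: a rough local equivalent of the CI step above (hypothetical helper).
import subprocess
import sys


def changed_python_files(base: str = "origin/main") -> list[str]:
    # Same idea as `git diff --name-only --diff-filter=d`: list changed files,
    # excluding deletions, and keep only the .py ones.
    out = subprocess.run(
        ["git", "diff", "--name-only", "--diff-filter=d", base, "HEAD"],
        capture_output=True, text=True, check=True,
    ).stdout
    return [line for line in out.splitlines() if line.endswith(".py")]


if __name__ == "__main__":
    files = changed_python_files()
    if not files:
        # Unlike the bare backtick expansion in the workflow, exit cleanly when
        # nothing changed instead of invoking pylint with no arguments.
        print("No changed Python files; nothing to lint.")
        sys.exit(0)
    sys.exit(subprocess.call(["pylint", *files]))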

.gitignore

@@ -19,3 +19,4 @@ swarms/agents/.DS_Store
_build
.DS_STORE

@@ -1,48 +1,58 @@
# coding: utf-8
import argparse
import inspect
import math
import os
import random
import re
import uuid

import cv2
import gradio as gr
import matplotlib.pyplot as plt
import numpy as np
import torch
import wget
from controlnet_aux import HEDdetector, MLSDdetector, OpenposeDetector
from diffusers import (
    ControlNetModel,
    EulerAncestralDiscreteScheduler,
    StableDiffusionControlNetPipeline,
    StableDiffusionInpaintPipeline,
    StableDiffusionInstructPix2PixPipeline,
    StableDiffusionPipeline,
    UniPCMultistepScheduler,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
from PIL import Image, ImageDraw, ImageFont, ImageOps
from transformers import (
    BlipForConditionalGeneration,
    BlipForQuestionAnswering,
    BlipProcessor,
    pipeline,
)

# Grounding DINO
# import groundingdino.datasets.transforms as T
from swarms.workers.models import (
    Compose,
    Normalize,
    RandomResize,
    SLConfig,
    ToTensor,
    build_model,
    clean_state_dict,
    get_phrases_from_posmap,
)
from swarms.workers.models.segment_anything import (
    SamAutomaticMaskGenerator,
    SamPredictor,
    build_sam,
)

VISUAL_AGENT_PREFIX = """Worker Multi-Modal Agent is designed to be able to assist with a wide range of text and visual related tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. Worker Multi-Modal Agent is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.
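For orientation, the langchain pieces imported above (OpenAI, Tool, ConversationBufferMemory, initialize_agent) are usually combined along these lines. This is a minimal sketch under assumed tool names and agent settings, not the file's actual setup:

from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI

llm = OpenAI(temperature=0)
memory = ConversationBufferMemory(memory_key="chat_history", output_key="output")

# A single placeholder tool; the real agent registers the BLIP/diffusers-backed
# tools imported above. Name, func, and description here are illustrative only.
tools = [
    Tool(
        name="Get Photo Description",
        func=lambda image_path: "a description of the image",
        description="Useful when you want to know what is inside an image.",
    ),
]

agent = initialize_agent(
    tools,
    llm,
    agent="conversational-react-description",
    memory=memory,
    agent_kwargs={"prefix": VISUAL_AGENT_PREFIX},
)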

@@ -1,23 +1,28 @@
import logging
import os
from typing import Dict, List

from langchain.memory.chat_message_histories import FileChatMessageHistory

from swarms.agents.tools.agent_tools import *
from swarms.agents.tools.main import (
    BaseToolSet,
    CodeEditor,
    ExitConversation,
    RequestsGet,
    Terminal,
)
from swarms.utils.main import BaseHandler, CsvToDataframe, FileType

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class WorkerUltraNode:
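The imports above pull in the CPU-safe toolsets (Terminal, CodeEditor, RequestsGet, ExitConversation) and the file handlers (CsvToDataframe keyed by FileType). A small sketch of how they are typically assembled before building the node; the no-argument constructors and the handler mapping are assumptions:

from swarms.agents.tools.main import (
    BaseToolSet,
    CodeEditor,
    ExitConversation,
    RequestsGet,
    Terminal,
)
from swarms.utils.main import BaseHandler, CsvToDataframe, FileType

# Toolsets that do not need a GPU; any constructor arguments are omitted here.
toolsets: list[BaseToolSet] = [
    Terminal(),
    CodeEditor(),
    RequestsGet(),
    ExitConversation(),
]

# Map file types to the handler that turns them into something the agent can use.
handlers: dict[FileType, BaseHandler] = {FileType.DATAFRAME: CsvToDataframe()}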
@@ -105,8 +110,14 @@ class WorkerUltraNodeInitializer:
        if os.environ.get("USE_GPU", False):
            import torch

            from swarms.agents.tools.main import (
                ImageCaptioning,
                ImageEditing,
                InstructPix2Pix,
                Text2Image,
                VisualQuestionAnswering,
            )

            if torch.cuda.is_available():
                toolsets.extend(

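The hunk above keeps the heavy vision tools optional: they are imported and registered only when USE_GPU is set and a CUDA device is actually present, so CPU-only installs never pay the import cost. The same pattern in isolation, with assumed constructor arguments:

import os

toolsets = []  # CPU-safe toolsets would already be registered here

if os.environ.get("USE_GPU", False):
    import torch

    if torch.cuda.is_available():
        # Deferred import: model-heavy tool classes are only touched on GPU hosts.
        from swarms.agents.tools.main import ImageCaptioning, Text2Image

        toolsets.extend(
            [
                ImageCaptioning("cuda"),  # assumed constructor signature
                Text2Image("cuda"),       # assumed constructor signature
            ]
        )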