commit
aa88b11d3f
@@ -0,0 +1,7 @@
"""
Idea 2 img

task -> gpt4 text -> dalle3 img -> gpt4vision img + text analyze img -> dalle3 img -> loop

"""
from swarms.models.gpt4_vision_api import GPT4VisionAPI
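Below is a minimal, hypothetical sketch of the generate-and-analyze loop the docstring above describes; it is not part of this commit. The generate_text and generate_image helpers are placeholder stubs standing in for the GPT-4 and DALL·E 3 calls, and it assumes GPT4VisionAPI.run(task, img) accepts a text task plus an image path (building on the import above).

import os

def generate_text(task: str) -> str:
    # Placeholder stub for a GPT-4 call that turns the task into an image prompt.
    return f"Detailed image prompt for: {task}"

def generate_image(prompt: str) -> str:
    # Placeholder stub for a DALL-E 3 call; returns a path to the generated image.
    return "generated.png"

def idea_loop(task: str, iterations: int = 3) -> str:
    # task -> gpt4 text -> dalle3 img -> gpt4vision analysis -> dalle3 img -> loop
    vision = GPT4VisionAPI(openai_api_key=os.getenv("OPENAI_API_KEY"))
    prompt = generate_text(task)
    img = generate_image(prompt)
    for _ in range(iterations):
        analysis = vision.run(prompt, img)  # assumed signature: run(task, img)
        img = generate_image(analysis)
    return img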
After Width: | Height: | Size: 41 KiB |
@@ -0,0 +1,127 @@
"""
Swarm of multi-modal autonomous agents for manufacturing!
---------------------------------------------------------
Health Security agent: monitors the health of working conditions. Input: image of the factory. Output: health safety index from 0.0 to 1.0 (1.0 is best).
Quality Control agent: monitors the quality of the product. Input: image of the product. Output: quality index from 0.0 to 1.0 (1.0 is best).
Productivity agent: monitors the productivity of the factory. Input: image of the factory. Output: productivity index from 0.0 to 1.0 (1.0 is best).
Safety agent: monitors the safety of the factory. Input: image of the factory. Output: safety index from 0.0 to 1.0 (1.0 is best).
Security agent: monitors the security of the factory. Input: image of the factory. Output: security index from 0.0 to 1.0 (1.0 is best).
Sustainability agent: monitors the sustainability of the factory. Input: image of the factory. Output: sustainability index from 0.0 to 1.0 (1.0 is best).
Efficiency agent: monitors the efficiency of the factory. Input: image of the factory. Output: efficiency index from 0.0 to 1.0 (1.0 is best).

Flow:
health security agent -> quality control agent -> productivity agent -> safety agent -> security agent -> sustainability agent -> efficiency agent
"""
from swarms.structs import Flow
import os
from dotenv import load_dotenv
from swarms.models import GPT4VisionAPI

load_dotenv()
api_key = os.getenv("OPENAI_API_KEY")

llm = GPT4VisionAPI(openai_api_key=api_key)

assembly_line = "playground/demos/swarm_of_mma_manufacturing/assembly_line.jpg"
red_robots = "playground/demos/swarm_of_mma_manufacturing/red_robots.jpg"
robots = "playground/demos/swarm_of_mma_manufacturing/robots.jpg"
tesla_assembly_line = "playground/demos/swarm_of_mma_manufacturing/tesla_assembly.jpg"

# Define detailed prompts for each agent
tasks = {
    "health_safety": (
        "Analyze the factory's working environment for health safety. Focus on"
        " cleanliness, ventilation, spacing between workstations, and personal"
        " protective equipment availability."
    ),
    "productivity": (
        "Review the factory's workflow efficiency, machine utilization, and"
        " employee engagement. Identify operational delays or bottlenecks."
    ),
    "safety": (
        "Analyze the factory's safety measures, including fire exits, safety"
        " signage, and emergency response equipment."
    ),
    "security": (
        "Evaluate the factory's security systems, entry/exit controls, and"
        " potential vulnerabilities."
    ),
    "sustainability": (
        "Inspect the factory's sustainability practices, including waste"
        " management, energy usage, and eco-friendly processes."
    ),
    "efficiency": (
        "Assess the manufacturing process's efficiency, considering the layout,"
        " logistics, and automation level."
    ),
}

# Define prompts for each agent
health_safety_prompt = tasks["health_safety"]
productivity_prompt = tasks["productivity"]
safety_prompt = tasks["safety"]
security_prompt = tasks["security"]
sustainability_prompt = tasks["sustainability"]
efficiency_prompt = tasks["efficiency"]

# Health security agent
health_security_agent = Flow(
    llm=llm,
    sop=health_safety_prompt,
    max_loops=2,
    multi_modal=True,
)

# Productivity agent
productivity_check_agent = Flow(
    llm=llm,
    sop=productivity_prompt,
    max_loops=2,
    multi_modal=True,
)

# Security agent
security_check_agent = Flow(
    llm=llm,
    sop=security_prompt,
    max_loops=2,
    multi_modal=True,
)

# Efficiency agent
efficiency_check_agent = Flow(
    llm=llm,
    sop=efficiency_prompt,
    max_loops=2,
    multi_modal=True,
)

# First task: health/safety check on the factory image
health_check = health_security_agent.run(
    "Analyze the safety of this factory", robots
)

# Second task: pass the health check result to the productivity agent
productivity_check = productivity_check_agent.run(
    health_check, assembly_line
)

# Third task: pass the productivity result to the security agent
security_check = security_check_agent.run(
    productivity_check, red_robots
)

# Fourth task: pass the security result to the efficiency agent
efficiency_check = efficiency_check_agent.run(
    security_check, tesla_assembly_line
)

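The module docstring lists safety and sustainability agents that this script does not yet wire into the chain. A hypothetical continuation, reusing the same Flow pattern as above (not part of this commit), could look like:

safety_check_agent = Flow(llm=llm, sop=safety_prompt, max_loops=2, multi_modal=True)
sustainability_check_agent = Flow(
    llm=llm, sop=sustainability_prompt, max_loops=2, multi_modal=True
)

safety_check = safety_check_agent.run(efficiency_check, assembly_line)
sustainability_check = sustainability_check_agent.run(safety_check, tesla_assembly_line)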
After Width: | Height: | Size: 43 KiB |
After Width: | Height: | Size: 39 KiB |
After Width: | Height: | Size: 42 KiB |
@@ -0,0 +1,112 @@
import os
import base64
import requests
from dotenv import load_dotenv
from typing import List

load_dotenv()


class StableDiffusion:
    """
    A class to interact with the Stable Diffusion API for image generation.

    Attributes:
    -----------
    api_key : str
        The API key for accessing the Stable Diffusion API.
    api_host : str
        The host URL of the Stable Diffusion API.
    engine_id : str
        The ID of the Stable Diffusion engine.
    headers : dict
        The headers for the API request.
    output_dir : str
        Directory where generated images will be saved.

    Methods:
    --------
    generate_image(prompt: str, cfg_scale: int, height: int, width: int, samples: int, steps: int) -> List[str]:
        Generates images based on a text prompt and returns a list of file paths to the generated images.
    """

    def __init__(self, api_key: str, api_host: str = "https://api.stability.ai"):
        """
        Initializes the StableDiffusion class with the provided API key and host.

        Parameters:
        -----------
        api_key : str
            The API key for accessing the Stable Diffusion API.
        api_host : str
            The host URL of the Stable Diffusion API. Default is "https://api.stability.ai".
        """
        self.api_key = api_key
        self.api_host = api_host
        self.engine_id = "stable-diffusion-v1-6"
        self.headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        self.output_dir = "images"
        os.makedirs(self.output_dir, exist_ok=True)

    def generate_image(self, prompt: str, cfg_scale: int = 7, height: int = 1024, width: int = 1024, samples: int = 1, steps: int = 30) -> List[str]:
        """
        Generates images based on a text prompt.

        Parameters:
        -----------
        prompt : str
            The text prompt based on which the image will be generated.
        cfg_scale : int
            CFG scale parameter for image generation. Default is 7.
        height : int
            Height of the generated image. Default is 1024.
        width : int
            Width of the generated image. Default is 1024.
        samples : int
            Number of images to generate. Default is 1.
        steps : int
            Number of steps for the generation process. Default is 30.

        Returns:
        --------
        List[str]:
            A list of paths to the generated images.

        Raises:
        -------
        Exception:
            If the API response is not 200 (OK).
        """
        response = requests.post(
            f"{self.api_host}/v1/generation/{self.engine_id}/text-to-image",
            headers=self.headers,
            json={
                "text_prompts": [{"text": prompt}],
                "cfg_scale": cfg_scale,
                "height": height,
                "width": width,
                "samples": samples,
                "steps": steps,
            },
        )

        if response.status_code != 200:
            raise Exception(f"Non-200 response: {response.text}")

        data = response.json()
        image_paths = []
        for i, image in enumerate(data["artifacts"]):
            image_path = os.path.join(self.output_dir, f"v1_txt2img_{i}.png")
            with open(image_path, "wb") as f:
                f.write(base64.b64decode(image["base64"]))
            image_paths.append(image_path)

        return image_paths


# Usage example:
# sd = StableDiffusion("your-api-key")
# images = sd.generate_image("A scenic landscape with mountains")
# print(images)
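A slightly fuller usage sketch, kept commented out like the example above; it assumes the Stability API key is provided via a STABILITY_API_KEY environment variable, which this module does not itself read.

# sd = StableDiffusion(api_key=os.getenv("STABILITY_API_KEY", ""))
# try:
#     paths = sd.generate_image("A scenic landscape with mountains", samples=2, steps=40)
#     print(f"Saved {len(paths)} image(s): {paths}")
# except Exception as err:
#     print(f"Generation failed: {err}")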
@@ -0,0 +1,30 @@
import logging
import os
import warnings


def disable_logging():
    warnings.filterwarnings("ignore", category=UserWarning)

    # Disable TensorFlow warnings
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

    # Set the logging level for the entire module
    logging.basicConfig(level=logging.WARNING)

    try:
        log = logging.getLogger("pytorch")
        log.propagate = False
        log.setLevel(logging.ERROR)
    except Exception as error:
        print(f"Pytorch logging not disabled: {error}")

    for logger_name in [
        "tensorflow",
        "h5py",
        "numexpr",
        "git",
        "wandb.docker.auth",
    ]:
        logger = logging.getLogger(logger_name)
        logger.setLevel(logging.WARNING)  # Suppress DEBUG and INFO logs
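A hypothetical usage note (the import path below is assumed; this diff does not show where the module lives): call the helper once at start-up, before the noisy libraries are imported.

# from swarms.utils.disable_logging import disable_logging  # assumed path
# disable_logging()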