Former-commit-id: 604611d669
clean-history
parent
3c2b7c3f65
commit
401de5d9be
@ -0,0 +1,19 @@
|
||||
from swarms.models.bioclip import BioClip

# Zero-shot medical-image classifier built on BiomedCLIP.
clip = BioClip("hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224")

# Candidate labels the image is scored against.
labels = [
    "adenocarcinoma histopathology",
    "brain MRI",
    "covid line chart",
    "squamous cell carcinoma histopathology",
    "immunohistochemistry histopathology",
    "bone X-ray",
    "chest X-ray",
    "pie chart",
    "hematoxylin and eosin histopathology",
]

image_path = "swarms.jpeg"
result = clip(image_path, labels)

# Bug fix: the filename was previously derived from the unrelated literal
# "images/.jpg" (which yields ".jpg"); derive it from the image that was
# actually classified instead.
metadata = {"filename": image_path.split("/")[-1], "top_probs": result}
clip.plot_image_with_metadata(image_path, metadata)
|
@ -0,0 +1,7 @@
|
||||
from swarms.models.biogpt import BioGPTWrapper

# Instantiate the BioGPT biomedical language-model wrapper.
biogpt = BioGPTWrapper()

# Generate a completion for a clinical prompt and display it.
print(biogpt("The patient has a fever"))
|
@ -0,0 +1,10 @@
|
||||
import asyncio

from swarms.models.distilled_whisperx import DistilWhisperModel

# Distil-Whisper speech-to-text wrapper.
whisper = DistilWhisperModel()

# Synchronous transcription: download an mp3 of a voice recording and point
# the path below at it.
transcription = whisper("path/to/audio.mp3")

# Asynchronous variant of the same transcription call.
transcription = asyncio.run(whisper.async_transcribe("path/to/audio.mp3"))
|
@ -0,0 +1,5 @@
|
||||
from swarms.models.fastvit import FastViT

# FastViT image classifier.
vision_model = FastViT()

# Classify the image, keeping only predictions at or above 50% confidence.
result = vision_model(img="images/swarms.jpeg", confidence_threshold=0.5)
|
@ -0,0 +1,7 @@
|
||||
from swarms.models.fuyu import Fuyu

# Fuyu multimodal (image + text) model.
vqa = Fuyu()

# Ask a question about the default example image; swap in any image path.
answer = vqa("What is this image?", "images/swarms.jpeg")
print(answer)
|
@ -0,0 +1,8 @@
|
||||
from swarms.models import HuggingfaceLLM

# Any Hugging Face Hub model id works here.
inference = HuggingfaceLLM(model_id="NousResearch/Yarn-Mistral-7b-128k")

# Run a text-completion task and print the generation.
print(inference("Once upon a time"))
|
@ -0,0 +1,16 @@
|
||||
from swarms.models import idefics

# Multimodal chat model; prompts may embed image URLs.
chatbot = idefics()

# First turn: ask about an image referenced by URL.
first_prompt = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG"
print(chatbot.chat(first_prompt))

# Follow-up turn in the same conversation.
second_prompt = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052"
print(chatbot.chat(second_prompt))

# Reconfigure the model and reset the conversation state.
chatbot.set_checkpoint("new_checkpoint")
chatbot.set_device("cpu")
chatbot.set_max_length(200)
chatbot.clear_chat_history()
|
@ -0,0 +1,7 @@
|
||||
from swarms.models import JinaEmbeddings

# Jina text-embedding model.
embedder = JinaEmbeddings()

# Embed a piece of text and display the resulting vector.
print(embedder("Encode this text"))
|
@ -0,0 +1,10 @@
|
||||
from swarms.models.kosmos2 import Kosmos2, Detections
from PIL import Image

# Kosmos-2 grounded detector.
detector = Kosmos2.initialize()

# Load the example image and run detection on it.
# NOTE(review): other examples in this file use "images/swarms.jpeg" —
# confirm this ".jpg" path exists.
source_image = Image.open("images/swarms.jpg")
print(detector(source_image))
|
@ -0,0 +1,11 @@
|
||||
from swarms.models.kosmos_two import Kosmos

# Kosmos multimodal grounding model.
kosmos = Kosmos()

# Locate the described object within the image and print the grounding result.
grounding = kosmos.multimodal_grounding(
    "Find the red apple in the image.", "images/swarms.jpeg"
)
print(grounding)
|
@ -0,0 +1,8 @@
|
||||
from swarms.models import LayoutLMDocumentQA

# Document question-answering over scanned images (LayoutLM).
doc_qa = LayoutLMDocumentQA()

# Query a financial-document image and show the extracted answer.
answer = doc_qa("What is the total amount?", "images/swarmfest.png")
print(answer)
|
@ -0,0 +1,35 @@
|
||||
from swarms.models.llama_function_caller import LlamaFunctionCaller

llama_caller = LlamaFunctionCaller()


def get_weather(location: str, format: str) -> str:
    """Placeholder weather lookup used to demonstrate function calling."""
    return f"Weather at {location} in {format} format."


# Register the function with the caller, describing its arguments so the
# model knows how to invoke it.
llama_caller.add_func(
    name="get_weather",
    function=get_weather,
    description="Get the weather at a location",
    arguments=[
        {"name": "location", "type": "string", "description": "Location for the weather"},
        {"name": "format", "type": "string", "description": "Format of the weather data"},
    ],
)

# Invoke the registered function directly.
print(llama_caller.call_function("get_weather", location="Paris", format="Celsius"))

# Stream a free-form user prompt through the model.
llama_caller("Tell me about the tallest mountain in the world.")
|
@ -0,0 +1,7 @@
|
||||
from swarms.models.mpt import MPT

# MPT-7B StoryWriter paired with the GPT-NeoX tokenizer; generation is
# capped at 150 tokens.
storyteller = MPT(
    "mosaicml/mpt-7b-storywriter", "EleutherAI/gpt-neox-20b", max_tokens=150
)

# Continue the story prompt.
storyteller.generate("Once upon a time in a land far, far away...")
|
@ -0,0 +1,5 @@
|
||||
from swarms.models.nougat import Nougat

# Nougat OCR model for academic documents.
ocr = Nougat()

# Transcribe the document image.
out = ocr("path/to/image.png")
|
@ -0,0 +1,5 @@
|
||||
from swarms.models.palm import PALM

# Google PaLM wrapper.
language_model = PALM()

# NOTE(review): this example passes an image path as the prompt — confirm
# that PALM is actually meant to take a file path here.
out = language_model("path/to/image.png")
|
@ -0,0 +1,9 @@
|
||||
from swarms.models.speecht5 import SpeechT5Wrapper

# SpeechT5 text-to-speech wrapper.
tts = SpeechT5Wrapper()

# Synthesize speech for the prompt, then persist the audio to disk.
waveform = tts("Hello, how are you?")
tts.save_speech(waveform)
print("Speech saved successfully!")
|
||||
|
@ -0,0 +1,9 @@
|
||||
from swarms.models.ssd_1b import SSD1B

# SSD-1B text-to-image model.
generator = SSD1B()

# Generate an image from the prompt while steering away from the negative
# prompt, then print the URL of the result.
image_url = generator("A painting of a dog", "ugly, blurry, poor quality")
print(image_url)
|
@ -0,0 +1,5 @@
|
||||
from swarms.models.vilt import Vilt

# ViLT vision-and-language model for visual question answering.
vqa_model = Vilt()

# Ask a question about a remote image.
output = vqa_model(
    "What is this image", "http://images.cocodataset.org/val2017/000000039769.jpg"
)
|
@ -0,0 +1,5 @@
|
||||
from swarms.models.yi_200k import Yi200k

# Yi 200k-context model wrapper (single instance; the original name
# `models` was misleadingly plural).
yi = Yi200k()

# Run a simple prompt.
out = yi("What is the weather like today?")
|
@ -1,102 +0,0 @@
|
||||
"""
|
||||
|
||||
Battle royal swarm where agents compete to be the first to answer a question. or the best answer.
|
||||
Look to fornight game
|
||||
|
||||
teams of 1, 3 or 4 that equates to 100 total agents
|
||||
|
||||
|
||||
Communication is proximal and based on proximity
|
||||
Clashes with adversial agents not in team.
|
||||
|
||||
Teams of 3 agents would fight each other and then move on while other agents are clashing with eachother as well.
|
||||
|
||||
Agents can be in multiple teams
|
||||
Agents can be in multiple teams and be adversial to each other
|
||||
Agents can be in multiple teams and be adversial to each other and be in multiple teams
|
||||
"""
|
||||
import random
|
||||
from swarms.workers.worker import Worker
|
||||
|
||||
|
||||
class BattleRoyalSwarm:
    """
    Battle Royal Swarm

    Workers are grouped into small teams (of 1, 3, or 4), answer a broadcast
    question, and clash with nearby workers that do not share a team; a
    human evaluator scores each clash to pick the winner.

    Parameters:
    - `human_evaluator` (function): Callable taking two solutions and
      returning a `(score1, score2)` tuple. Required before any clash runs.
    - `num_workers` (int): Number of workers in the swarm.

    NOTE(review): assumes `Worker` exposes `id`, `run`, `is_within_proximity`
    and a mutable `teams` list — confirm against the Worker class.

    Example:

    # User evaluator function to evaluate and score two solutions
    def human_evaluator(solution1, solution2):
        # Placeholder; in a real-world application, the user would input scores here
        score1 = int(input(f"Score for solution 1 - '{solution1}': "))
        score2 = int(input(f"Score for solution 2 - '{solution2}': "))
        return score1, score2

    # Example usage
    swarm = BattleRoyalSwarm(human_evaluator)
    swarm.broadcast_question("What is the capital of France?")

    """

    def __init__(
        self,
        human_evaluator=None,
        num_workers: int = 100,
    ):
        self.workers = [Worker() for _ in range(num_workers)]
        self.teams = self.form_teams()
        self.human_evaluator = human_evaluator

    def form_teams(self):
        """Form teams of 1, 3 or 4 workers.

        Every worker is placed in exactly one team here; each worker's own
        `teams` list is updated so membership can be checked later.
        """
        teams = []
        unassigned_workers = self.workers.copy()
        while unassigned_workers:
            size = random.choice([1, 3, 4])
            team = [
                unassigned_workers.pop()
                for _ in range(min(size, len(unassigned_workers)))
            ]
            for worker in team:
                worker.teams.append(team)
            teams.append(team)
        return teams

    @staticmethod
    def _share_team(worker1, worker2) -> bool:
        """Return True when the two workers have at least one team in common.

        Teams are lists (unhashable), so membership is tested directly; the
        previous `set(worker1.teams) != set(worker2.teams)` check raised
        TypeError on the first comparison.
        """
        return any(team in worker2.teams for team in worker1.teams)

    def broadcast_question(self, question: str):
        """Broadcast a question to the swarm and resolve resulting clashes.

        Returns a dict mapping each worker's id to its response (previously
        the responses were collected but discarded).
        """
        responses = {}
        for worker in self.workers:
            responses[worker.id] = worker.run(question)

        # Consider each unordered pair exactly once (j > i); the previous
        # nested loops visited both (i, j) and (j, i), so every clash would
        # have been fought twice.
        for i, worker1 in enumerate(self.workers):
            for worker2 in self.workers[i + 1:]:
                if worker1.is_within_proximity(worker2) and not self._share_team(
                    worker1, worker2
                ):
                    winner, loser = self.clash(worker1, worker2, question)
                    print(f"Worker {winner.id} won over Worker {loser.id}")
        return responses

    def communicate(self, sender: Worker, reciever: Worker, message: str):
        """Communicate a message from one worker to another.

        Delivery is permitted only when the workers are in proximity or
        share a team; actual message passing is not implemented yet.
        """
        if sender.is_within_proximity(reciever) or any(
            team in sender.teams for team in reciever.teams
        ):
            pass

    def clash(self, worker1: Worker, worker2: Worker, question: str):
        """Clash two workers and return `(winner, loser)`.

        Both workers answer the question and `human_evaluator` scores the
        two solutions; ties favor `worker2`, matching the original behavior.
        """
        solution1 = worker1.run(question)
        solution2 = worker2.run(question)
        score1, score2 = self.human_evaluator(solution1, solution2)
        if score1 > score2:
            return worker1, worker2
        return worker2, worker1
|
Loading…
Reference in new issue