More linting

pull/443/head
Wyatt Stanke committed 9 months ago
parent f17bb15c70
commit 041d7cf39a

@@ -30,4 +30,4 @@ jobs:
- uses: actions/checkout@v4
- uses: actions/setup-python@v5
- run: pip install pylint
- run: pylint .
- run: pylint swarms --recursive=y

@@ -1,6 +1,7 @@
from swarms import Agent, Anthropic, tool
import subprocess
from swarms import Agent, Anthropic, tool
# Model
llm = Anthropic(
temperature=0.1,

@@ -1,6 +1,5 @@
from swarms import Agent, Anthropic
## Initialize the workflow
agent = Agent(
agent_name="Transcript Generator",

@@ -4,8 +4,8 @@ Building an Autonomous Agent in 5 minutes with:
- Tools: Search, Browser, ETC
- Long Term Mmeory: ChromaDB, Weaviate, Pinecone, ETC
"""
from swarms import Agent, OpenAIChat, tool
from playground.demos.agent_in_5.chroma_db import ChromaDB
from swarms import Agent, OpenAIChat, tool
# Initialize the memory
chroma = ChromaDB(

@@ -1,12 +1,13 @@
import concurrent
import csv
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
from swarms.memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import logger
from swarms.utils.parse_code import extract_code_from_markdown
# Load ENV
load_dotenv()
@@ -69,7 +70,7 @@ def extract_and_create_agents(
"""
try:
agents = []
with open(csv_file_path, mode="r", encoding="utf-8") as file:
with open(csv_file_path, encoding="utf-8") as file:
reader = csv.DictReader(file)
for row in reader:
project_name = row[target_columns[0]]

@@ -1,8 +1,10 @@
import os
from dotenv import load_dotenv
import swarms.prompts.autoswarm as sdsp
from swarms.models import OpenAIChat
from swarms.structs import Agent
import swarms.prompts.autoswarm as sdsp
# Load environment variables and initialize the OpenAI Chat model
load_dotenv()

@@ -1,5 +1,6 @@
# Import the necessary libraries.
import asyncio
import websockets
# Create a list of public group chats.

@@ -1,5 +1,5 @@
import discord
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
# Discord Bot Setup
client = discord.Client()

@@ -1,10 +1,10 @@
# OpenMind.bot streamlines social interactions between personalized bots, representing users, media, and influencers, ensuring meaningful exchanges. It eliminates misunderstandings by using context-aware conversations, followed by summaries or audio recaps of these interactions for efficient communication.
import json
import datetime
import pytz
import json
from flask import Flask, request, jsonify
import pytz
from flask import Flask, jsonify, request
app = Flask(__name__)
@@ -28,7 +28,7 @@ def create_conversation():
@app.route("/api/v1/conversations/<conversation_id>", methods=["GET"])
def get_conversation(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
with open("conversations.json") as f:
conversation = json.load(f)
# Return the conversation
@@ -49,7 +49,7 @@ def create_message(conversation_id):
}
# Get the conversation from the database
with open("conversations.json", "r") as f:
with open("conversations.json") as f:
conversation = json.load(f)
# Add the message to the conversation
@@ -68,7 +68,7 @@ def create_message(conversation_id):
)
def get_messages(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
with open("conversations.json") as f:
conversation = json.load(f)
# Return the messages
@@ -80,7 +80,7 @@ def get_messages(conversation_id):
)
def get_summary(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
with open("conversations.json") as f:
conversation = json.load(f)
# Create a summary of the conversation
@@ -98,7 +98,7 @@ def get_summary(conversation_id):
)
def get_audio_recap(conversation_id):
# Get the conversation from the database
with open("conversations.json", "r") as f:
with open("conversations.json") as f:
conversation = json.load(f)
# Create an audio recap of the conversation

@@ -1,12 +1,14 @@
import concurrent
import csv
import os
from swarms import Gemini, Agent
from swarms.memory import ChromaDB
from dotenv import load_dotenv
from swarms.utils.parse_code import extract_code_from_markdown
from swarms import Agent, Gemini
from swarms.memory import ChromaDB
from swarms.utils.file_processing import create_file
from swarms.utils.loguru_logger import logger
from swarms.utils.parse_code import extract_code_from_markdown
# Load ENV
load_dotenv()
@@ -71,7 +73,7 @@ def extract_and_create_agents(
- target_columns: A list of column names to extract values from.
"""
agents = []
with open(csv_file_path, mode="r", encoding="utf-8") as file:
with open(csv_file_path, encoding="utf-8") as file:
reader = csv.DictReader(file)
for row in reader:
project_name = row[target_columns[0]]

@@ -4,7 +4,7 @@ import sys
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms import OpenAIChat, Agent
from swarms import Agent, OpenAIChat
# Load the environment variables
load_dotenv()

@@ -1,6 +1,8 @@
import os
from dotenv import load_dotenv
from swarms import OpenAIChat, Task, ConcurrentWorkflow, Agent
from swarms import Agent, ConcurrentWorkflow, OpenAIChat, Task
# Load environment variables from .env file
load_dotenv()

@@ -1,6 +1,7 @@
from swarms.models import HuggingfaceLLM
import torch
from swarms.models import HuggingfaceLLM
try:
inference = HuggingfaceLLM(
model_id="gpt2",

@@ -1,16 +1,18 @@
from swarms.structs import Agent
import os
from dotenv import load_dotenv
from swarms.models import GPT4VisionAPI
from swarms.prompts.logistics import (
Efficiency_Agent_Prompt,
Health_Security_Agent_Prompt,
Quality_Control_Agent_Prompt,
Productivity_Agent_Prompt,
Quality_Control_Agent_Prompt,
Safety_Agent_Prompt,
Security_Agent_Prompt,
Sustainability_Agent_Prompt,
Efficiency_Agent_Prompt,
)
from swarms.structs import Agent
# Load ENV
load_dotenv()

@@ -1,6 +1,8 @@
import os
from dotenv import load_dotenv
from swarms import OpenAIChat, Task, RecursiveWorkflow, Agent
from swarms import Agent, OpenAIChat, RecursiveWorkflow, Task
# Load environment variables from .env file
load_dotenv()

@@ -1,7 +1,9 @@
import os
from swarms import OpenAIChat, Agent, SequentialWorkflow
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat, SequentialWorkflow
load_dotenv()
# Load the environment variables

@@ -3,8 +3,8 @@ import os
from dotenv import load_dotenv
from swarms import (
OpenAIChat,
Conversation,
OpenAIChat,
)
conv = Conversation(

@@ -3,7 +3,7 @@ import os
from dotenv import load_dotenv
# Import the OpenAIChat model and the Agent struct
from swarms import OpenAIChat, Agent, SwarmNetwork
from swarms import Agent, OpenAIChat, SwarmNetwork
# Load the environment variables
load_dotenv()

@@ -1,5 +1,6 @@
# Import necessary libraries
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
# Load the pre-trained model and tokenizer

@@ -1,7 +1,9 @@
# Importing necessary modules
import os
from dotenv import load_dotenv
from swarms import Worker, OpenAIChat, tool
from swarms import OpenAIChat, Worker, tool
# Loading environment variables from .env file
load_dotenv()

@@ -1,5 +1,7 @@
import os
from dotenv import load_dotenv
from swarms import AzureOpenAI
# Load the environment variables

@@ -1,4 +1,5 @@
from vllm import LLM
from swarms import AbstractLLM, Agent, ChromaDB

@@ -1,7 +1,8 @@
import os
from swarms import OpenAIChat, Agent
from dotenv import load_dotenv
from swarms import Agent, OpenAIChat
# Load environment variables
load_dotenv()

@@ -1,4 +1,4 @@
from swarms import Agent, MajorityVoting, ChromaDB, Anthropic
from swarms import Agent, Anthropic, ChromaDB, MajorityVoting
# Initialize the llm
llm = Anthropic()

@@ -1,7 +1,6 @@
from swarms.structs.message_pool import MessagePool
from swarms import Agent, OpenAIChat
from swarms.memory.chroma_db import ChromaDB
from swarms.structs.message_pool import MessagePool
# Agents
agent1 = Agent(

@@ -1,8 +1,10 @@
import os
from swarms import Gemini, Agent
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
from dotenv import load_dotenv
from swarms import Agent, Gemini
from swarms.structs.multi_process_workflow import MultiProcessWorkflow
# Load the environment variables
load_dotenv()

@@ -1,14 +1,16 @@
# Import the OpenAIChat model and the Agent struct
import os
from dotenv import load_dotenv
from swarms import (
Agent,
Anthropic,
OpenAIChat,
SwarmNetwork,
Anthropic,
TogetherLLM,
)
from swarms.memory import ChromaDB
from dotenv import load_dotenv
# load the environment variables
load_dotenv()

@@ -1,4 +1,5 @@
from transformers import AutoModelForCausalLM, AutoTokenizer
from swarms import ToolAgent
# Load the pre-trained model and tokenizer

@@ -1,21 +1,21 @@
import concurrent
import inspect
import os
import threading
from typing import Callable, List
from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP
from swarms import Agent, OpenAIChat
from swarms.utils.loguru_logger import logger
import concurrent
from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP
#########
from swarms.utils.file_processing import (
create_file_in_folder,
load_json,
sanitize_file_path,
zip_workspace,
create_file_in_folder,
zip_folders,
zip_workspace,
)
from swarms.utils.loguru_logger import logger
class PythonDocumentationSwarm:

@@ -3,7 +3,9 @@ Boss selects what agent to use
B -> W1, W2, W3
"""
from typing import List, Optional
from pydantic import BaseModel, Field
from swarms.utils.json_utils import str_to_json

@@ -71,10 +71,10 @@ pandas = "^2.2.2"
fastapi = "^0.110.1"
[tool.ruff]
line-length = 127
line-length = 1234
[tool.ruff.lint]
select = ["E4", "E7", "E9", "F", "W", "E501"]
select = ["E4", "E7", "E9", "F", "W", "E501", "I", "UP"]
ignore = []
fixable = ["ALL"]
unfixable = []
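
Note on the ruff configuration change above: the two rule codes added to select are what drive most of the hunks in this commit. "I" enables isort-style import sorting (standard-library imports first, then third-party, then first-party, alphabetized within each group), and "UP" enables pyupgrade-style modernization such as removing the redundant "r" mode from open() calls (UP015) and rewriting str.format() calls as f-strings (UP032). The sketch below is a hypothetical before/after, assuming ruff check --fix is run with this configuration; the file name and contents are illustrative and do not come from the repository.

# Before the fix pass, a file of this shape would be flagged:
#     from dotenv import load_dotenv
#     import json
#     load_dotenv()
#     version = 1
#     with open("config.json", "r") as f:                      # UP015: redundant "r" mode
#         config = json.load(f)
#     agent_name = "{}_{}".format(config["agent"], version)    # UP032: prefer an f-string
#
# After ruff check --fix with select = [..., "I", "UP"]:
import json
from pathlib import Path

from dotenv import load_dotenv

load_dotenv()

# Written here only so the sketch runs on its own; ruff does not add this.
Path("config.json").write_text('{"agent": "demo"}')

version = 1

with open("config.json") as f:  # "r" mode removed (UP015)
    config = json.load(f)

agent_name = f"{config['agent']}_{version}"  # f-string instead of .format() (UP032)
print(agent_name)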

@@ -1,5 +1,5 @@
from abc import abstractmethod
from typing import Dict, List, Union, Optional
from typing import Dict, List, Optional, Union
class AbstractAgent:

@@ -1,4 +1,4 @@
from typing import Any, Optional, Callable
from typing import Any, Callable, Optional
from swarms.structs.agent import Agent
from swarms.tools.format_tools import Jsonformer

@@ -7,9 +7,9 @@ import chromadb
import numpy as np
from dotenv import load_dotenv
from swarms.memory.base_vectordb import AbstractVectorDatabase
from swarms.utils.data_to_text import data_to_text
from swarms.utils.markdown_message import display_markdown_message
from swarms.memory.base_vectordb import AbstractVectorDatabase
# Load environment variables
load_dotenv()

@@ -6,8 +6,9 @@ from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from swarms.models.popular_llms import OpenAIChat
from swarms.memory.base_vectordb import AbstractVectorDatabase
from swarms.models.popular_llms import OpenAIChat
def synchronized_mem(method):

@@ -5,6 +5,7 @@ from sqlalchemy import JSON, Column, String, create_engine
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from swarms.memory.base_vectordb import AbstractVectorDatabase

@@ -1,6 +1,7 @@
from typing import List
from httpx import RequestError
from swarms.memory.base_vectordb import AbstractVectorDatabase
try:

@@ -23,6 +23,7 @@ from swarms.models.popular_llms import (
from swarms.models.popular_llms import (
CohereChat as Cohere,
)
from swarms.models.popular_llms import OctoAIChat
from swarms.models.popular_llms import (
OpenAIChatLLM as OpenAIChat,
)
@@ -32,9 +33,7 @@ from swarms.models.popular_llms import (
from swarms.models.popular_llms import (
ReplicateLLM as Replicate,
)
from swarms.models.popular_llms import OctoAIChat
from swarms.models.qwen import QwenVLMultiModal # noqa: E402
from swarms.models.sampling_params import SamplingParams, SamplingType
from swarms.models.together import TogetherLLM # noqa: E402
from swarms.models.types import ( # noqa: E402
@@ -46,7 +45,6 @@ from swarms.models.types import ( # noqa: E402
)
from swarms.models.vilt import Vilt # noqa: E402
__all__ = [
"AbstractLLM",
"Anthropic",

@@ -2,9 +2,10 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Callable
import numpy as np
from typing import Callable
from swarms.artifacts.text_artifact import TextArtifact
from swarms.utils.exponential_backoff import ExponentialBackoffMixin

@@ -5,7 +5,7 @@ import warnings
from typing import Any, Callable, Literal, Sequence
import numpy as np
from pydantic import model_validator, ConfigDict, BaseModel, Field
from pydantic import BaseModel, ConfigDict, Field, model_validator
from tenacity import (
AsyncRetrying,
before_sleep_log,

@@ -8,6 +8,7 @@ from langchain.llms import BaseLLM
from langchain.pydantic_v1 import BaseModel
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env
from pydantic import model_validator
from tenacity import (
before_sleep_log,
retry,
@@ -15,7 +16,6 @@ from tenacity import (
stop_after_attempt,
wait_exponential,
)
from pydantic import model_validator
logger = logging.getLogger(__name__)

@@ -1,4 +1,5 @@
import datetime
from pydantic import BaseModel, Field
time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

@@ -4,6 +4,7 @@ from swarms.structs.agent_process import (
AgentProcess,
AgentProcessQueue,
)
from swarms.structs.agent_rearrange import AgentRearrange
from swarms.structs.auto_swarm import AutoSwarm, AutoSwarmRouter
from swarms.structs.autoscaler import AutoScaler
from swarms.structs.base import BaseStructure
@@ -78,8 +79,6 @@ from swarms.structs.utils import (
find_token_in_text,
parse_tasks,
)
from swarms.structs.agent_rearrange import AgentRearrange
__all__ = [
"Agent",

@@ -19,14 +19,14 @@ from swarms.prompts.multi_modal_autonomous_instruction_prompt import (
)
from swarms.prompts.worker_prompt import worker_tools_sop_promp
from swarms.structs.conversation import Conversation
from swarms.tools.code_executor import CodeExecutor
from swarms.tools.exec_tool import execute_tool_by_name
from swarms.tools.function_util import process_tool_docs
from swarms.tools.tool import BaseTool
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.data_to_text import data_to_text
from swarms.utils.parse_code import extract_code_from_markdown
from swarms.utils.pdf_to_text import pdf_to_text
from swarms.tools.exec_tool import execute_tool_by_name
from swarms.tools.function_util import process_tool_docs
from swarms.tools.code_executor import CodeExecutor
# Utils

@@ -1,10 +1,10 @@
from datetime import datetime
from typing import Callable
from pydantic import BaseModel
from swarms.structs.omni_agent_types import agents
from swarms.utils.loguru_logger import logger
from typing import Callable
class AgentProcess(BaseModel):

@@ -1,6 +1,7 @@
import logging
from collections import defaultdict
from typing import Callable, Sequence
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm

@@ -15,8 +15,8 @@ import yaml
from swarms.structs.agent import Agent
from swarms.structs.conversation import Conversation
from swarms.utils.loguru_logger import logger
from swarms.structs.omni_agent_types import agent
from swarms.utils.loguru_logger import logger
class BaseSwarm(ABC):

@@ -1,12 +1,11 @@
import datetime
import json
from typing import Optional
from typing import Any, Optional
from termcolor import colored
from swarms.memory.base_db import AbstractDatabase
from swarms.structs.base import BaseStructure
from typing import Any
class Conversation(BaseStructure):

@@ -1,10 +1,11 @@
from swarms.structs.agent import Agent
from typing import Union
from swarms.models.popular_llms import OpenAIChat
from swarms.models.base_llm import AbstractLLM
from swarms.models.popular_llms import OpenAIChat
from swarms.prompts.meta_system_prompt import (
meta_system_prompt_generator,
)
from swarms.structs.agent import Agent
meta_prompter_llm = OpenAIChat(
system_prompt=str(meta_system_prompt_generator)

@@ -4,6 +4,7 @@ from typing import (
Sequence,
Union,
)
from swarms.models.base_llm import AbstractLLM
from swarms.models.base_multimodal_model import BaseMultiModalModel
from swarms.structs.agent import Agent

@@ -1,8 +1,9 @@
import logging
from collections import defaultdict
from swarms.utils.loguru_logger import logger
from typing import Callable, Sequence
from swarms.structs.agent import Agent
from typing import Sequence, Callable
from swarms.utils.loguru_logger import logger
class AgentRearrange:

@@ -1,4 +1,5 @@
from typing import Union, Sequence, List, Callable
from typing import Callable, List, Sequence, Union
from swarms.structs.agent import Agent
from swarms.structs.base_swarm import BaseSwarm

@@ -1,8 +1,9 @@
from typing import Dict, List, Sequence
from swarms.tools.tool import BaseTool
from pydantic import BaseModel
from swarms.tools.tool import BaseTool
class Step(BaseModel):
"""

@@ -5,7 +5,6 @@ import threading
from typing import List, Optional
# from fastapi import FastAPI
from swarms.structs.agent import Agent
from swarms.structs.base import BaseStructure
from swarms.utils.logger import logger # noqa: F401

@@ -1,7 +1,7 @@
import json
from typing import List, Optional
from pydantic import model_validator, BaseModel, Field, Json
from pydantic import BaseModel, Field, Json, model_validator
from swarms.structs.agent import Agent
from swarms.structs.task import Task

@@ -1,4 +1,5 @@
from swarms.telemetry.log_all import log_all_calls, log_calls
from swarms.telemetry.sentry_active import activate_sentry
from swarms.telemetry.sys_info import (
get_cpu_info,
get_os_version,
@@ -16,7 +17,6 @@ from swarms.telemetry.user_utils import (
get_system_info,
get_user_device_data,
)
from swarms.telemetry.sentry_active import activate_sentry
__all__ = [
"log_all_calls",

@@ -1,8 +1,9 @@
import subprocess
from swarms.telemetry.check_update import check_for_update
from termcolor import colored
from swarms.telemetry.check_update import check_for_update
def auto_update():
"""auto update swarms"""

@@ -1,5 +1,5 @@
import os
import logging
import os
import warnings
from swarms.telemetry.auto_upgrade_swarms import auto_update

@@ -1,6 +1,7 @@
import os
from dotenv import load_dotenv
import sentry_sdk
from dotenv import load_dotenv
load_dotenv()

@@ -1,4 +1,3 @@
from swarms.tools.tool import BaseTool, Tool, StructuredTool, tool
from swarms.tools.code_executor import CodeExecutor
from swarms.tools.exec_tool import (
AgentAction,
@@ -7,6 +6,7 @@ from swarms.tools.exec_tool import (
execute_tool_by_name,
preprocess_json_input,
)
from swarms.tools.tool import BaseTool, StructuredTool, Tool, tool
from swarms.tools.tool_utils import (
execute_tools,
extract_tool_commands,

@@ -1,5 +1,5 @@
import json
import concurrent.futures
import json
import re
from abc import abstractmethod
from typing import Dict, List, NamedTuple

@@ -1,15 +1,16 @@
import json
from typing import Any, Dict, List, Union
from pydantic import BaseModel
from termcolor import cprint
from transformers import PreTrainedModel, PreTrainedTokenizer
from pydantic import BaseModel
from swarms.models.base_llm import AbstractLLM
from swarms.tools.logits_processor import (
NumberStoppingCriteria,
OutputNumbersTokens,
StringStoppingCriteria,
)
from swarms.models.base_llm import AbstractLLM
GENERATION_MARKER = "|GENERATION|"

@@ -1,8 +1,8 @@
from langchain.tools import (
BaseTool,
Tool,
StructuredTool,
Tool,
tool,
) # noqa F401
)
__all__ = ["BaseTool", "Tool", "StructuredTool", "tool"]

@@ -1,13 +1,12 @@
import inspect
import json
import re
from typing import Any, List
from typing import Any, Callable, List
from termcolor import colored
from swarms.prompts.tools import SCENARIOS
from swarms.tools.tool import BaseTool
import inspect
from typing import Callable
from termcolor import colored
def scrape_tool_func_docs(fn: Callable) -> str:

@@ -1,5 +1,6 @@
from swarms.utils.class_args_wrapper import print_class_parameters
from swarms.utils.code_interpreter import SubprocessCodeInterpreter
from swarms.utils.concurrent_utils import execute_concurrently
from swarms.utils.csv_and_pandas import (
csv_to_dataframe,
dataframe_to_strings,
@@ -17,11 +18,11 @@ from swarms.utils.download_weights_from_url import (
)
from swarms.utils.exponential_backoff import ExponentialBackoffMixin
from swarms.utils.file_processing import (
create_file_in_folder,
load_json,
sanitize_file_path,
zip_workspace,
create_file_in_folder,
zip_folders,
zip_workspace,
)
from swarms.utils.find_img_path import find_image_path
from swarms.utils.json_output_parser import JsonOutputParser
@@ -44,8 +45,6 @@ from swarms.utils.save_logs import parse_log_file
# from swarms.utils.supervision_visualizer import MarkVisualizer
from swarms.utils.try_except_wrapper import try_except_wrapper
from swarms.utils.yaml_output_parser import YamlOutputParser
from swarms.utils.concurrent_utils import execute_concurrently
__all__ = [
"print_class_parameters",

@@ -1,7 +1,7 @@
import asyncio
import concurrent.futures
from typing import Any, Callable, Dict, List
from inspect import iscoroutinefunction
import asyncio
from typing import Any, Callable, Dict, List
# Helper function to run an asynchronous function in a synchronous way

@@ -50,22 +50,12 @@ def get_new_image_name(org_img_name, func_name="update"):
if len(name_split) == 1:
most_org_file_name = name_split[0]
recent_prev_file_name = name_split[0]
new_file_name = "{}_{}_{}_{}.png".format(
this_new_uuid,
func_name,
recent_prev_file_name,
most_org_file_name,
)
new_file_name = f"{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.png"
else:
assert len(name_split) == 4
most_org_file_name = name_split[3]
recent_prev_file_name = name_split[0]
new_file_name = "{}_{}_{}_{}.png".format(
this_new_uuid,
func_name,
recent_prev_file_name,
most_org_file_name,
)
new_file_name = f"{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.png"
return os.path.join(head, new_file_name)
@@ -78,22 +68,12 @@ def get_new_dataframe_name(org_img_name, func_name="update"):
if len(name_split) == 1:
most_org_file_name = name_split[0]
recent_prev_file_name = name_split[0]
new_file_name = "{}_{}_{}_{}.csv".format(
this_new_uuid,
func_name,
recent_prev_file_name,
most_org_file_name,
)
new_file_name = f"{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.csv"
else:
assert len(name_split) == 4
most_org_file_name = name_split[3]
recent_prev_file_name = name_split[0]
new_file_name = "{}_{}_{}_{}.csv".format(
this_new_uuid,
func_name,
recent_prev_file_name,
most_org_file_name,
)
new_file_name = f"{this_new_uuid}_{func_name}_{recent_prev_file_name}_{most_org_file_name}.csv"
return os.path.join(head, new_file_name)

@@ -1,7 +1,7 @@
from abc import ABC
from typing import Any, Dict, List, Literal, TypedDict, Union, cast
from pydantic import ConfigDict, BaseModel, PrivateAttr
from pydantic import BaseModel, ConfigDict, PrivateAttr
class BaseSerialized(TypedDict):
