From 06488efc531564729ada372a961d2b2d40d272ad Mon Sep 17 00:00:00 2001 From: Your Name Date: Mon, 16 Sep 2024 22:37:42 -0400 Subject: [PATCH] [CLEANUP] --- README.md | 9 +- agent_with_rag_and_tools.py | 3 +- company_swarm_example 2.py | 100 ---- company_swarm_example.py | 3 +- docs/applications/discord.md | 2 +- docs/swarms/models/anthropic.md | 2 +- docs/swarms/models/base_llm.md | 2 +- docs/swarms/models/base_multimodal_model.md | 10 +- docs/swarms/models/dalle3.md | 16 +- docs/swarms/models/distilled_whisperx.md | 2 +- docs/swarms/models/fuyu.md | 4 +- docs/swarms/models/gemini.md | 4 +- docs/swarms/models/gpt4v.md | 8 +- docs/swarms/models/huggingface.md | 6 +- docs/swarms/models/idefics.md | 6 +- docs/swarms/models/kosmos.md | 18 +- docs/swarms/models/layoutlm_document_qa.md | 2 +- docs/swarms/models/llama3.md | 2 +- .../models/models_available_overview.md | 6 +- docs/swarms/models/nougat.md | 2 +- docs/swarms/models/openai.md | 2 +- docs/swarms/models/openai_chat.md | 2 +- docs/swarms/models/openai_function_caller.md | 6 +- docs/swarms/models/openai_tts.md | 4 +- docs/swarms/models/vilt.md | 2 +- docs/swarms/structs/agent.md | 6 +- docs/swarms/structs/index.md | 8 +- docs/swarms/structs/spreadsheet_swarm.md | 9 +- docs/swarms/structs/task.md | 2 +- docs/swarms/tools/build_tool.md | 3 +- example.py | 3 +- examples/agents/agent_settings.py | 3 +- examples/agents/easy_example.py | 3 +- .../agent_with_longterm_memory.py | 3 +- examples/agents/o1_preview 2.py | 3 +- examples/agents/o1_preview.py | 3 +- .../settings/monitoring/agent_ops_tools.py | 3 +- .../various_models/custom_model_with_agent.py | 2 +- .../agent_spec_func_calling.py | 2 +- .../audience_generator_agent.py | 2 +- .../claude_artifacts_example.py | 2 +- .../function_calling/idea_generator_agent.py | 2 +- .../openai_function_caller_agent_rearrange.py | 2 +- .../openai_function_caller_example.py | 2 +- .../prompt_generator_agent.py | 2 +- .../tools/function_calling/react_agent.py | 2 +- 
.../sentiment_analysis_function_calling.py | 2 +- .../browser/multion/multion_example.ipynb | 2 +- .../ai_research_team/json_output_v.py | 2 +- .../ai_research_team/multi_agent_hf.py | 2 +- .../novel_pytorch_code_generator.py | 2 +- .../use_cases/code_gen/amazon_review_agent.py | 3 +- .../use_cases/code_gen/api_requester_agent.py | 3 +- .../code_gen/code_interpreter_agent.py | 2 +- .../agents/use_cases/code_gen/sql_agent.py | 3 +- .../finance/estate_planning_agent.py | 3 +- .../finance/financial_agent_gpt4o_mini.py | 3 +- examples/agents/use_cases/finance/main.py | 3 +- .../use_cases/finance/plaid_api_tool.py | 3 +- examples/agents/use_cases/kyle_hackathon.py | 3 +- .../agents/use_cases/law/alberto_agent 2.py | 2 +- .../agents/use_cases/law/alberto_agent.py | 2 +- .../multi_modal/multi_modal_rag_agent.py | 3 +- .../multi_modal/new_agent_tool_system.py | 3 +- .../research/new_perplexity_agent.py | 2 +- .../use_cases/research/perplexity_agent.py | 3 +- .../security/perimeter_defense_agent.py | 2 +- examples/collabs/swarms_example.ipynb | 4 +- .../accountant_team/account_team2_example.py | 2 +- examples/demos/ad_gen/ad_gen_example.py | 4 +- .../demos/ai_acceleerated_learning/main.py | 3 +- .../demos/ai_research_team/main_example.py | 2 +- examples/demos/assembly/assembly_example.py | 2 +- examples/demos/autoswarm/autoswarm.py | 2 +- examples/demos/autotemp/autotemp_example.py | 2 +- examples/demos/autotemp/blog_gen_example.py | 2 +- .../demos/developer_swarm/main_example.py | 2 +- examples/demos/education/education_example.py | 2 +- .../gemini_chat_example.py | 2 +- .../gemini_react_example.py | 2 +- .../gemini_vcot_example.py | 2 +- examples/demos/grupa/app_example.py | 2 +- .../jarvis_example.py | 2 +- .../llm_with_conversation/main_example.py | 2 +- examples/demos/logistics/logistics_example.py | 2 +- .../multi_modal_auto_agent_example.py | 2 +- .../vcot_example.py | 2 +- .../demos/multimodal_tot/idea2img_example.py | 6 +- 
examples/demos/multimodal_tot/main_example.py | 4 +- examples/demos/nutrition/nutrition_example.py | 2 +- examples/demos/octomology_swarm/api.py | 4 +- .../demos/optimize_llm_stack/vllm_example.py | 2 +- .../optimize_llm_stack/vortex_example.py | 2 +- .../demos/patient_question_assist/main.py | 3 +- .../personal_stylist_example.py | 2 +- .../agricultural_swarm.py | 2 +- .../using_concurrent_workflow.py | 2 +- .../positive_med/positive_med_example.py | 2 +- .../security_team/security_team_example.py | 2 +- .../agents.py | 3 +- .../society_of_agents/hallucination_swarm.py | 3 +- .../demos/society_of_agents/probate_agent.py | 3 +- .../swarm_of_complaince/compliance_swarm.py | 2 +- .../main_example.py | 2 +- .../urban_planning/urban_planning_example.py | 2 +- examples/demos/xray/xray_example.py | 2 +- examples/models/anthropic_example.py | 9 - examples/models/azure_openai.py | 10 - examples/models/azure_openai_example.py | 25 - examples/models/cohere_example.py | 5 - examples/models/dalle3.jpeg | Bin 228428 -> 0 bytes examples/models/dalle3_concurrent_example.py | 23 - examples/models/dalle3_example.py | 6 - examples/models/example_gpt4vison.py | 17 - examples/models/example_idefics.py | 33 -- examples/models/example_kosmos.py | 10 - examples/models/example_qwenvlmultimodal.py | 16 - examples/models/fire_works.py | 13 - examples/models/fuyu_example.py | 7 - examples/models/gemini_example.py | 20 - examples/models/gpt4_v_example.py | 35 -- examples/models/gpt_4o_mini.py | 16 - examples/models/groq_model_exampole 2.py | 18 - examples/models/groq_model_exampole.py | 18 - examples/models/hf/llama3 2 | 71 --- examples/models/hf/llama3.py | 71 --- examples/models/huggingface_example.py | 8 - examples/models/idefics_example.py | 33 -- examples/models/kosmos_example.py | 10 - examples/models/layout_documentxlm_example.py | 8 - examples/models/llama_3_hosted.py | 7 - .../models/llama_function_caller_example.py | 37 -- examples/models/llava_example.py | 16 - 
examples/models/nougat_example.py | 5 - examples/models/openai_model_example.py | 10 - examples/models/palm_example.py | 5 - examples/models/ssd_example.py | 9 - examples/models/swarms_cloud_api_example.py | 31 - examples/models/together_example.py | 12 - examples/models/tts_speech_example.py | 16 - examples/models/vilt_example.py | 8 - .../swarms/agent_registry/agent_registry.py | 2 +- examples/structs/swarms/automate_docs.py | 3 +- .../a_star_swarm_example.py | 3 +- .../different_architectures/circular_swarm.py | 3 +- .../different_architectures/star_swarm.py | 3 +- examples/structs/swarms/example_logistics.py | 2 +- .../hiearchical_swarm/agent_creator 2.py | 2 +- .../swarms/hiearchical_swarm/agent_creator.py | 2 +- .../mixture_of_agents/mixture_of_agents.py | 3 +- .../mixture_of_agents_nividia_10k 2.py | 3 +- .../mixture_of_agents_nividia_10k.py | 3 +- .../mixture_of_agents/moa_from_scratch 2.py | 3 +- .../mixture_of_agents/moa_from_scratch.py | 3 +- examples/structs/swarms/movers_swarm.py | 3 +- .../agent_delegation.py | 3 +- .../company_example.py | 3 +- .../mixture_of_agents/moa_with_scp.py | 3 +- .../round_robin_swarm_example.py | 3 +- .../swarms/queue_swarm/queue_swarm_2 2.py | 3 +- .../swarms/queue_swarm/queue_swarm_2 .py | 3 +- .../swarms/queue_swarm/queue_swarm_example.py | 3 +- .../swarms/round_of_robin_swarm/rob_swarm_1 2 | 3 +- .../round_of_robin_swarm/rob_swarm_1.py | 3 +- .../swarms/search_arena/search_agents.py | 3 +- .../swarms/spreadsheet_swarm/dfs_example.py | 3 +- .../spreadsheet_swarm/real_estate_swarm 2.py | 3 +- .../spreadsheet_swarm/real_estate_swarm.py | 3 +- ...cial_media_marketing_spreesheet_swarm 2.py | 3 +- ...social_media_marketing_spreesheet_swarm.py | 3 +- .../spreadsheet_swarm_examples/csvs/README.md | 9 +- .../qr_code_generative_spreedsheet_swarm 2.py | 3 +- .../qr_code_generative_spreedsheet_swarm.py | 3 +- .../spread_sheet_example 2.py | 3 +- .../spread_sheet_example.py | 3 +- examples/swarms/rearrange/example.py | 2 +- 
examples/tasks/task_example 2.py | 3 +- examples/tasks/task_example.py | 3 +- examples/utils/agent_table 2.py | 3 +- examples/utils/agent_table.py | 3 +- .../workshops/aug_10/book_generator_swarm.py | 2 +- .../groq_api_spreadsheet_marketing_swarm 2.py | 3 +- .../groq_api_spreadsheet_marketing_swarm.py | 3 +- .../hierarchical_img_gen_swarm 2.py | 2 +- .../hierarchical_img_gen_swarm.py | 2 +- .../swarms_promotion_spreadsheet_swarm 2.py | 3 +- .../swarms_promotion_spreadsheet_swarm.py | 3 +- pyproject.toml | 1 + requirements.txt | 3 +- sample_rearrange 2.py | 112 ---- swarms/__init__.py | 2 +- swarms/cli/create_agent 2.py | 2 +- swarms/cli/create_agent.py | 2 +- swarms/cli/parse_yaml.py | 2 +- swarms/models/__init__.py | 80 --- swarms/models/base_embedding_model.py | 71 --- swarms/models/base_llm.py | 415 -------------- swarms/models/base_multimodal_model.py | 329 ----------- swarms/models/base_tts.py | 89 --- swarms/models/base_ttv.py | 117 ---- swarms/models/cog_vlm.py | 528 ------------------ swarms/models/dalle3.py | 367 ------------ swarms/models/embeddings_base.py | 26 - swarms/models/fuyu.py | 107 ---- swarms/models/gemini.py | 277 --------- swarms/models/gpt4_vision_api.py | 377 ------------- swarms/models/huggingface.py | 420 -------------- swarms/models/huggingface_pipeline.py | 72 --- swarms/models/idefics.py | 189 ------- swarms/models/kosmos_two.py | 126 ----- swarms/models/layoutlm_document_qa.py | 51 -- swarms/models/llama3_hosted.py | 82 --- swarms/models/llama_function_caller.py | 230 -------- swarms/models/llava.py | 84 --- swarms/models/model_router.py | 359 ------------ swarms/models/model_types.py | 29 - swarms/models/moondream_mm.py | 63 --- swarms/models/nougat.py | 106 ---- swarms/models/ollama_model 2.py | 135 ----- swarms/models/ollama_model.py | 135 ----- swarms/models/open_dalle.py | 67 --- swarms/models/open_router.py | 75 --- swarms/models/openai_embeddings.py | 5 - swarms/models/openai_function_caller.py | 179 ------ 
swarms/models/openai_tts.py | 124 ---- swarms/models/palm.py | 5 - swarms/models/popular_llms.py | 92 --- swarms/models/qwen.py | 144 ----- swarms/models/sam.py | 108 ---- swarms/models/sampling_params.py | 300 ---------- swarms/models/ssd_1b.py | 280 ---------- swarms/models/tiktoken_wrapper.py | 101 ---- swarms/models/together.py | 137 ----- swarms/models/vilt.py | 57 -- swarms/models/vip_llava.py | 94 ---- swarms/models/zeroscope.py | 101 ---- swarms/prompts/autoswarm.py | 2 +- swarms/structs/agent.py | 21 +- swarms/structs/base_workflow.py | 10 +- swarms/structs/dfs_search_swarm.py | 6 +- swarms/structs/graph_workflow.py | 3 +- swarms/structs/hiearchical_swarm.py | 2 +- swarms/structs/monte_carlo_swarm.py | 3 +- swarms/structs/multi_agent_collab.py | 2 +- swarms/structs/omni_agent_types.py | 4 +- swarms/structs/pulsar_swarm 2.py | 3 +- swarms/structs/pulsar_swarm.py | 3 +- swarms/structs/recursive_workflow.py | 2 +- ...n_agents_in_parallel_async_multiprocess.py | 3 +- swarms/structs/task.py | 4 +- swarms/tools/json_former.py | 2 +- swarms/tools/prebuilt/code_executor.py | 2 +- tests/models/test_anthropic.py | 2 +- tests/models/test_fuyu.py | 2 +- tests/models/test_gemini.py | 2 +- tests/models/test_gpt4_vision_api.py | 2 +- tests/models/test_hf.py | 2 +- tests/models/test_hf_pipeline.py | 2 +- tests/models/test_idefics.py | 2 +- tests/models/test_imports.py | 2 +- tests/models/test_kosmos.py | 2 +- tests/models/test_nougat.py | 2 +- tests/models/test_open_dalle.py | 2 +- tests/models/test_openaitts.py | 2 +- tests/models/test_qwen.py | 2 +- tests/models/test_ssd_1b.py | 2 +- tests/models/test_timm_model.py | 2 +- tests/models/test_togther.py | 2 +- tests/models/test_vilt.py | 2 +- tests/models/test_zeroscope.py | 2 +- "tests/profiling_agent 2.py\\" | 3 +- tests/profiling_agent.py | 3 +- tests/structs/test_agent.py | 2 +- tests/structs/test_base_workflow.py | 2 +- tests/structs/test_groupchat.py | 4 +- tests/structs/test_multi_agent_collab.py | 3 +- 
tests/structs/test_recursive_workflow.py | 2 +- tests/structs/test_sequential_workflow.py | 2 +- tests/structs/test_task.py | 2 +- tests/structs/test_team.py | 2 +- 280 files changed, 345 insertions(+), 7857 deletions(-) delete mode 100644 company_swarm_example 2.py delete mode 100644 examples/models/anthropic_example.py delete mode 100644 examples/models/azure_openai.py delete mode 100644 examples/models/azure_openai_example.py delete mode 100644 examples/models/cohere_example.py delete mode 100644 examples/models/dalle3.jpeg delete mode 100644 examples/models/dalle3_concurrent_example.py delete mode 100644 examples/models/dalle3_example.py delete mode 100644 examples/models/example_gpt4vison.py delete mode 100644 examples/models/example_idefics.py delete mode 100644 examples/models/example_kosmos.py delete mode 100644 examples/models/example_qwenvlmultimodal.py delete mode 100644 examples/models/fire_works.py delete mode 100644 examples/models/fuyu_example.py delete mode 100644 examples/models/gemini_example.py delete mode 100644 examples/models/gpt4_v_example.py delete mode 100644 examples/models/gpt_4o_mini.py delete mode 100644 examples/models/groq_model_exampole 2.py delete mode 100644 examples/models/groq_model_exampole.py delete mode 100644 examples/models/hf/llama3 2 delete mode 100644 examples/models/hf/llama3.py delete mode 100644 examples/models/huggingface_example.py delete mode 100644 examples/models/idefics_example.py delete mode 100644 examples/models/kosmos_example.py delete mode 100644 examples/models/layout_documentxlm_example.py delete mode 100644 examples/models/llama_3_hosted.py delete mode 100644 examples/models/llama_function_caller_example.py delete mode 100644 examples/models/llava_example.py delete mode 100644 examples/models/nougat_example.py delete mode 100644 examples/models/openai_model_example.py delete mode 100644 examples/models/palm_example.py delete mode 100644 examples/models/ssd_example.py delete mode 100644 
examples/models/swarms_cloud_api_example.py delete mode 100644 examples/models/together_example.py delete mode 100644 examples/models/tts_speech_example.py delete mode 100644 examples/models/vilt_example.py delete mode 100644 sample_rearrange 2.py delete mode 100644 swarms/models/__init__.py delete mode 100644 swarms/models/base_embedding_model.py delete mode 100644 swarms/models/base_llm.py delete mode 100644 swarms/models/base_multimodal_model.py delete mode 100644 swarms/models/base_tts.py delete mode 100644 swarms/models/base_ttv.py delete mode 100644 swarms/models/cog_vlm.py delete mode 100644 swarms/models/dalle3.py delete mode 100644 swarms/models/embeddings_base.py delete mode 100644 swarms/models/fuyu.py delete mode 100644 swarms/models/gemini.py delete mode 100644 swarms/models/gpt4_vision_api.py delete mode 100644 swarms/models/huggingface.py delete mode 100644 swarms/models/huggingface_pipeline.py delete mode 100644 swarms/models/idefics.py delete mode 100644 swarms/models/kosmos_two.py delete mode 100644 swarms/models/layoutlm_document_qa.py delete mode 100644 swarms/models/llama3_hosted.py delete mode 100644 swarms/models/llama_function_caller.py delete mode 100644 swarms/models/llava.py delete mode 100644 swarms/models/model_router.py delete mode 100644 swarms/models/model_types.py delete mode 100644 swarms/models/moondream_mm.py delete mode 100644 swarms/models/nougat.py delete mode 100644 swarms/models/ollama_model 2.py delete mode 100644 swarms/models/ollama_model.py delete mode 100644 swarms/models/open_dalle.py delete mode 100644 swarms/models/open_router.py delete mode 100644 swarms/models/openai_embeddings.py delete mode 100644 swarms/models/openai_function_caller.py delete mode 100644 swarms/models/openai_tts.py delete mode 100644 swarms/models/palm.py delete mode 100644 swarms/models/popular_llms.py delete mode 100644 swarms/models/qwen.py delete mode 100644 swarms/models/sam.py delete mode 100644 swarms/models/sampling_params.py delete mode 
100644 swarms/models/ssd_1b.py delete mode 100644 swarms/models/tiktoken_wrapper.py delete mode 100644 swarms/models/together.py delete mode 100644 swarms/models/vilt.py delete mode 100644 swarms/models/vip_llava.py delete mode 100644 swarms/models/zeroscope.py diff --git a/README.md b/README.md index 51d389bb..4a1a229a 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,8 @@ Features: ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) @@ -179,7 +180,8 @@ agent.run( An LLM equipped with long term memory and tools, a full stack agent capable of automating all and any digital tasks given a good prompt. ```python -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB import subprocess import os @@ -848,7 +850,8 @@ An all-new swarm architecture that makes it easy to manage and oversee the outpu ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/agent_with_rag_and_tools.py b/agent_with_rag_and_tools.py index 0200b19e..f278c173 100644 --- a/agent_with_rag_and_tools.py +++ b/agent_with_rag_and_tools.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB import subprocess import os diff --git a/company_swarm_example 2.py b/company_swarm_example 2.py deleted file mode 100644 index 213d39c4..00000000 --- a/company_swarm_example 2.py +++ /dev/null @@ -1,100 +0,0 @@ -import os - -from swarms import Agent, OpenAIChat -from swarms.structs.company import Company - -# Get the OpenAI API key from the environment variable -api_key = 
os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - - -# Initialize the boss agent (Director) -boss_agent = Agent( - agent_name="BossAgent", - system_prompt=""" - You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses. - Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently. - After receiving a report on the company's expenses, you will break down the work into smaller tasks, - assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures, - and identifying unnecessary transactions. Ensure the results are communicated back in a structured way - so the finance team can take actionable steps to cut off unproductive spending. You also monitor and - dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings - into a coherent report. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="boss_agent.json", -) - -# Initialize worker 1: Expense Analyzer -worker1 = Agent( - agent_name="ExpenseAnalyzer", - system_prompt=""" - Your task is to carefully analyze the company's expense data provided to you. - You will focus on identifying high-cost recurring transactions, categorizing expenditures - (e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending. - You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting. - Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="worker1.json", -) - -# Initialize worker 2: Summary Generator -worker2 = Agent( - agent_name="SummaryGenerator", - system_prompt=""" - After receiving the detailed breakdown from the ExpenseAnalyzer, - your task is to create a concise summary of the findings. You will focus on the most actionable insights, - such as highlighting the specific transactions that can be immediately cut off and summarizing the areas - where the company is overspending. Your summary will be used by the BossAgent to generate the final report. - Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="worker2.json", -) - -# Swarm-Level Prompt (Collaboration Prompt) -swarm_prompt = """ - As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off. - You will work collaboratively to break down the entire process of expense analysis into manageable steps. - The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first - focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them, - and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then - consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses. - Together, your collaboration is essential to streamlining and improving the company’s financial health. 
-""" - -# Create a list of agents -agents = [boss_agent, worker1, worker2] - - -# Create an organization chart -org_chart = [[boss_agent], [worker1, worker2]] - -# Create a company -company = Company(org_chart=org_chart) - -# Run the company -company.run() diff --git a/company_swarm_example.py b/company_swarm_example.py index 213d39c4..dd8ac1c6 100644 --- a/company_swarm_example.py +++ b/company_swarm_example.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.company import Company # Get the OpenAI API key from the environment variable diff --git a/docs/applications/discord.md b/docs/applications/discord.md index dd7de16c..e2d0be5b 100644 --- a/docs/applications/discord.md +++ b/docs/applications/discord.md @@ -63,7 +63,7 @@ Starts the bot using the `DISCORD_TOKEN` from the `.env` file. Initialize the `llm` (Language Learning Model) with your OpenAI API key: ```python -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat llm = OpenAIChat( openai_api_key="Your_OpenAI_API_Key", diff --git a/docs/swarms/models/anthropic.md b/docs/swarms/models/anthropic.md index 438adfbe..f8fa037e 100644 --- a/docs/swarms/models/anthropic.md +++ b/docs/swarms/models/anthropic.md @@ -69,7 +69,7 @@ class Anthropic: ```python # Import necessary modules and classes -from swarms.models import Anthropic +from swarm_models import Anthropic # Initialize an instance of the Anthropic class model = Anthropic(anthropic_api_key="") diff --git a/docs/swarms/models/base_llm.md b/docs/swarms/models/base_llm.md index 0c678165..c3ec89ce 100644 --- a/docs/swarms/models/base_llm.md +++ b/docs/swarms/models/base_llm.md @@ -164,7 +164,7 @@ To demonstrate how to use the `BaseLLM` interface, let's create an example using ```python # Import the BaseLLM class -from swarms.models import BaseLLM +from swarm_models import BaseLLM # Create an instance of the language model language_model = 
BaseLLM( diff --git a/docs/swarms/models/base_multimodal_model.md b/docs/swarms/models/base_multimodal_model.md index c1a8373d..fb0f45ae 100644 --- a/docs/swarms/models/base_multimodal_model.md +++ b/docs/swarms/models/base_multimodal_model.md @@ -31,7 +31,7 @@ pip install swarms To get started with Swarms, you'll need to import the library and create an instance of the `BaseMultiModalModel` class. This class serves as the foundation for running multimodal models. ```python -from swarms.models import BaseMultiModalModel +from swarm_models import BaseMultiModalModel model = BaseMultiModalModel( model_name="your_model_name", @@ -138,7 +138,7 @@ Let's explore some usage examples of the MultiModalAI library: ```python # Import the library -from swarms.models import BaseMultiModalModel +from swarm_models import BaseMultiModalModel # Create an instance of the model model = BaseMultiModalModel( @@ -159,7 +159,7 @@ print(response) ```python # Import the library -from swarms.models import BaseMultiModalModel +from swarm_models import BaseMultiModalModel # Create an instance of the model model = BaseMultiModalModel( @@ -184,7 +184,7 @@ for response in responses: ```python # Import the library -from swarms.models import BaseMultiModalModel +from swarm_models import BaseMultiModalModel # Create an instance of the model model = BaseMultiModalModel( @@ -209,7 +209,7 @@ for response in responses: ### Example 4: Inheriting `BaseMultiModalModel` for it's prebuilt classes ```python -from swarms.models import BaseMultiModalModel +from swarm_models import BaseMultiModalModel class CustomMultiModalModel(BaseMultiModalModel): diff --git a/docs/swarms/models/dalle3.md b/docs/swarms/models/dalle3.md index 346489c7..e847ef04 100644 --- a/docs/swarms/models/dalle3.md +++ b/docs/swarms/models/dalle3.md @@ -36,7 +36,7 @@ pip install swarms Let's get started with a quick example of using the Dalle3 library to generate an image from a text prompt: ```python -from swarms.models.dalle3 import 
Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class dalle = Dalle3() @@ -97,7 +97,7 @@ Returns: ### Example 1: Basic Image Generation ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class dalle3 = Dalle3() @@ -115,7 +115,7 @@ print(image_url) ### Example 2: Creating Image Variations ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class dalle3 = Dalle3() @@ -137,7 +137,7 @@ Certainly! Here are additional examples that cover various edge cases and method You can customize the size of the generated image by specifying the `size` parameter when creating an instance of the `Dalle3` class. Here's how to generate a smaller image: ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class with a custom image size dalle3 = Dalle3(size="512x512") @@ -157,7 +157,7 @@ print(image_url) You can adjust the maximum number of API request retries using the `max_retries` parameter. Here's how to increase the retry limit: ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class with a higher retry limit dalle3 = Dalle3(max_retries=5) @@ -177,7 +177,7 @@ print(image_url) To create variations of an existing image, you can use the `create_variations` method. Here's an example: ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class dalle3 = Dalle3() @@ -197,7 +197,7 @@ print(variations_url) The Dalle3 library provides error handling for API-related issues. 
Here's how to handle and display API errors: ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class dalle3 = Dalle3() @@ -218,7 +218,7 @@ except Exception as e: You can customize the quality of the generated image by specifying the `quality` parameter. Here's how to generate a high-quality image: ```python -from swarms.models.dalle3 import Dalle3 +from swarm_models.dalle3 import Dalle3 # Create an instance of the Dalle3 class with high quality dalle3 = Dalle3(quality="high") diff --git a/docs/swarms/models/distilled_whisperx.md b/docs/swarms/models/distilled_whisperx.md index 79c8c2ea..2718eb71 100644 --- a/docs/swarms/models/distilled_whisperx.md +++ b/docs/swarms/models/distilled_whisperx.md @@ -23,7 +23,7 @@ The `DistilWhisperModel` class is initialized with the following parameters: Example of initialization: ```python -from swarms.models import DistilWhisperModel +from swarm_models import DistilWhisperModel # Initialize with default model model_wrapper = DistilWhisperModel() diff --git a/docs/swarms/models/fuyu.md b/docs/swarms/models/fuyu.md index e54a4a22..fd90f79a 100644 --- a/docs/swarms/models/fuyu.md +++ b/docs/swarms/models/fuyu.md @@ -37,7 +37,7 @@ To use Fuyu, follow these steps: 1. 
Initialize the Fuyu instance: ```python -from swarms.models.fuyu import Fuyu +from swarm_models.fuyu import Fuyu fuyu = Fuyu() ``` @@ -54,7 +54,7 @@ output_text = fuyu(text, img_path) ### Example 2 - Text Generation ```python -from swarms.models.fuyu import Fuyu +from swarm_models.fuyu import Fuyu fuyu = Fuyu() diff --git a/docs/swarms/models/gemini.md b/docs/swarms/models/gemini.md index d5b1b44a..012bc7dc 100644 --- a/docs/swarms/models/gemini.md +++ b/docs/swarms/models/gemini.md @@ -78,7 +78,7 @@ class Gemini(BaseMultiModalModel): **Examples**: ```python - from swarms.models import Gemini + from swarm_models import Gemini # Initialize the Gemini model gemini = Gemini() @@ -128,7 +128,7 @@ class Gemini(BaseMultiModalModel): **Examples**: ```python - from swarms.models.gemini import Gemini + from swarm_models.gemini import Gemini # Initialize the Gemini model gemini = Gemini() diff --git a/docs/swarms/models/gpt4v.md b/docs/swarms/models/gpt4v.md index 5ad80cd9..4240fe3b 100644 --- a/docs/swarms/models/gpt4v.md +++ b/docs/swarms/models/gpt4v.md @@ -53,7 +53,7 @@ When initializing the `GPT4VisionAPI` class, you have the option to provide the Here's how you can initialize the `GPT4VisionAPI` class: ```python -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI # Initialize with default API key and max_tokens api = GPT4VisionAPI() @@ -129,7 +129,7 @@ Let's explore some usage examples of the `GPT4VisionAPI` module to better unders In this example, we'll use the module with the default API key and maximum tokens to analyze an image. ```python -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI # Initialize with default API key and max_tokens api = GPT4VisionAPI() @@ -150,7 +150,7 @@ print(response) If you have a custom API key, you can initialize the module with it as shown in this example. 
```python -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI # Initialize with custom API key and max_tokens custom_api_key = "your_custom_api_key" @@ -172,7 +172,7 @@ print(response) You can also customize the maximum token limit when initializing the module. In this example, we set it to 1000 tokens. ```python -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI # Initialize with default API key and custom max_tokens api = GPT4VisionAPI(max_tokens=1000) diff --git a/docs/swarms/models/huggingface.md b/docs/swarms/models/huggingface.md index 50aaa2a1..45c9b535 100644 --- a/docs/swarms/models/huggingface.md +++ b/docs/swarms/models/huggingface.md @@ -93,7 +93,7 @@ Here are three ways to use the `HuggingfaceLLM` class: #### Example 1: Basic Usage ```python -from swarms.models import HuggingfaceLLM +from swarm_models import HuggingfaceLLM # Initialize the HuggingfaceLLM instance with a model ID model_id = "NousResearch/Nous-Hermes-2-Vision-Alpha" @@ -108,7 +108,7 @@ print(generated_text) #### Example 2: Custom Configuration ```python -from swarms.models import HuggingfaceLLM +from swarm_models import HuggingfaceLLM # Initialize with custom configuration custom_config = { @@ -129,7 +129,7 @@ print(generated_text) #### Example 3: Distributed Processing ```python -from swarms.models import HuggingfaceLLM +from swarm_models import HuggingfaceLLM # Initialize for distributed processing inference = HuggingfaceLLM(model_id="gpt2-medium", distributed=True) diff --git a/docs/swarms/models/idefics.md b/docs/swarms/models/idefics.md index 57125038..30ad0b2e 100644 --- a/docs/swarms/models/idefics.md +++ b/docs/swarms/models/idefics.md @@ -28,7 +28,7 @@ To use Idefics, follow these steps: 1. 
Initialize the Idefics instance: ```python -from swarms.models import Idefics +from swarm_models import Idefics model = Idefics() ``` @@ -46,7 +46,7 @@ print(response) ### Example 1 - Image Questioning ```python -from swarms.models import Idefics +from swarm_models import Idefics model = Idefics() prompts = [ @@ -59,7 +59,7 @@ print(response) ### Example 2 - Bidirectional Conversation ```python -from swarms.models import Idefics +from swarm_models import Idefics model = Idefics() user_input = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" diff --git a/docs/swarms/models/kosmos.md b/docs/swarms/models/kosmos.md index a19ea791..6631e94e 100644 --- a/docs/swarms/models/kosmos.md +++ b/docs/swarms/models/kosmos.md @@ -22,7 +22,7 @@ To use Kosmos, follow these steps: 1. Initialize the Kosmos instance: ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() ``` @@ -38,7 +38,7 @@ kosmos.multimodal_grounding( ### Example 1 - Multimodal Grounding ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -58,7 +58,7 @@ kosmos.referring_expression_comprehension( ### Example 2 - Referring Expression Comprehension ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -78,7 +78,7 @@ kosmos.referring_expression_generation( ### Example 3 - Referring Expression Generation ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -96,7 +96,7 @@ kosmos.grounded_vqa("What is the color of the car?", "https://example.com/car.jp ### Example 4 - Grounded Visual Question Answering ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -112,7 +112,7 @@ kosmos.grounded_image_captioning("https://example.com/beach.jpg") ### Example 
5 - Grounded Image Captioning ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -128,7 +128,7 @@ kosmos.grounded_image_captioning_detailed("https://example.com/beach.jpg") ### Example 6 - Detailed Grounded Image Captioning ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -149,7 +149,7 @@ kosmos.draw_entity_boxes_on_image(image, entities, show=True) ### Example 7 - Drawing Entity Boxes on Image ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() @@ -176,7 +176,7 @@ image = kosmos.generate_boxes( ### Example 8 - Generating Boxes for Entities ```python -from swarms.models.kosmos_two import Kosmos +from swarm_models.kosmos_two import Kosmos kosmos = Kosmos() entities = [ diff --git a/docs/swarms/models/layoutlm_document_qa.md b/docs/swarms/models/layoutlm_document_qa.md index 4c6169d0..89c66644 100644 --- a/docs/swarms/models/layoutlm_document_qa.md +++ b/docs/swarms/models/layoutlm_document_qa.md @@ -39,7 +39,7 @@ To use LayoutLMDocumentQA, follow these steps: 1. 
Initialize the LayoutLMDocumentQA instance: ```python -from swarms.models import LayoutLMDocumentQA +from swarm_models import LayoutLMDocumentQA layout_lm_doc_qa = LayoutLMDocumentQA() ``` diff --git a/docs/swarms/models/llama3.md b/docs/swarms/models/llama3.md index 4ae0f1ef..da1df781 100644 --- a/docs/swarms/models/llama3.md +++ b/docs/swarms/models/llama3.md @@ -4,7 +4,7 @@ ```python from transformers import AutoTokenizer, AutoModelForCausalLM import torch -from swarms.models.base_llm import BaseLLM +from swarm_models.base_llm import BaseLLM class Llama3(BaseLLM): diff --git a/docs/swarms/models/models_available_overview.md b/docs/swarms/models/models_available_overview.md index db2c9bdd..21ce54a7 100644 --- a/docs/swarms/models/models_available_overview.md +++ b/docs/swarms/models/models_available_overview.md @@ -50,7 +50,7 @@ The Anthropic model is one of the many models supported by Swarms. Here's how yo ```python import os -from swarms.models import Anthropic +from swarm_models import Anthropic # Load the environment variables anthropic_api_key = os.getenv("ANTHROPIC_API_KEY") @@ -73,7 +73,7 @@ print(response) HuggingfaceLLM allows you to use models from Hugging Face's vast repository. Here's an example: ```python -from swarms.models import HuggingfaceLLM +from swarm_models import HuggingfaceLLM # Define the model ID model_id = "NousResearch/Yarn-Mistral-7b-128k" @@ -97,7 +97,7 @@ The OpenAIChat model is designed for conversational tasks. Here's how to use it: ```python import os -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat # Load the environment variables openai_api_key = os.getenv("OPENAI_API_KEY") diff --git a/docs/swarms/models/nougat.md b/docs/swarms/models/nougat.md index 217990a1..6749ce74 100644 --- a/docs/swarms/models/nougat.md +++ b/docs/swarms/models/nougat.md @@ -43,7 +43,7 @@ To use Nougat, follow these steps: 1. 
Initialize the Nougat instance: ```python -from swarms.models import Nougat +from swarm_models import Nougat nougat = Nougat() ``` diff --git a/docs/swarms/models/openai.md b/docs/swarms/models/openai.md index ae547631..39980b4d 100644 --- a/docs/swarms/models/openai.md +++ b/docs/swarms/models/openai.md @@ -151,7 +151,7 @@ Here are the key attributes and their descriptions for the `BaseOpenAI` and `Ope ```python # Import the OpenAI class -from swarms.models import OpenAI +from swarm_models import OpenAI # Set your OpenAI API key api_key = "YOUR_API_KEY" diff --git a/docs/swarms/models/openai_chat.md b/docs/swarms/models/openai_chat.md index d7d9b2eb..6cdde532 100644 --- a/docs/swarms/models/openai_chat.md +++ b/docs/swarms/models/openai_chat.md @@ -125,7 +125,7 @@ Here are the key attributes and their descriptions for the `OpenAIChat` class: ### Example 1: Initializing `OpenAIChat` ```python -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat # Initialize OpenAIChat with model name and API key openai_chat = OpenAIChat(model_name="gpt-3.5-turbo", openai_api_key="YOUR_API_KEY") diff --git a/docs/swarms/models/openai_function_caller.md b/docs/swarms/models/openai_function_caller.md index bb952ff1..16fb6f5b 100644 --- a/docs/swarms/models/openai_function_caller.md +++ b/docs/swarms/models/openai_function_caller.md @@ -89,7 +89,7 @@ Here are three examples demonstrating different ways to use the `OpenAIFunctionC ```python import openai -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from swarms.artifacts.main_artifact import Artifact @@ -120,7 +120,7 @@ print(out) ### Example 2: Prompt Generator ```python -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from typing import Sequence @@ -181,7 +181,7 @@ print(out) ### Example 3: 
Sentiment Analysis ```python -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field diff --git a/docs/swarms/models/openai_tts.md b/docs/swarms/models/openai_tts.md index b2996312..1f797a69 100644 --- a/docs/swarms/models/openai_tts.md +++ b/docs/swarms/models/openai_tts.md @@ -38,7 +38,7 @@ pip install swarms requests wave To use the `OpenAITTS` module, you need to initialize an instance of the `OpenAITTS` class. Here's how you can do it: ```python -from swarms.models.openai_tts import OpenAITTS +from swarm_models.openai_tts import OpenAITTS # Initialize the OpenAITTS instance tts = OpenAITTS( @@ -95,7 +95,7 @@ speech_data = tts.run_and_save("Hello, world!") Here's a basic example of how to use the `OpenAITTS` module to generate speech from text: ```python -from swarms.models.openai_tts import OpenAITTS +from swarm_models.openai_tts import OpenAITTS # Initialize the OpenAITTS instance tts = OpenAITTS( diff --git a/docs/swarms/models/vilt.md b/docs/swarms/models/vilt.md index 2cb56b22..8436ea42 100644 --- a/docs/swarms/models/vilt.md +++ b/docs/swarms/models/vilt.md @@ -25,7 +25,7 @@ To use the Vilt model, follow these steps: 1. 
Initialize the Vilt model: ```python -from swarms.models import Vilt +from swarm_models import Vilt model = Vilt() ``` diff --git a/docs/swarms/structs/agent.md b/docs/swarms/structs/agent.md index 212df914..c268c8b2 100644 --- a/docs/swarms/structs/agent.md +++ b/docs/swarms/structs/agent.md @@ -135,7 +135,8 @@ And, then now you can get started with the following: ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) @@ -189,7 +190,8 @@ To integrate tools with the Swarm Agent, you can pass a list of callable functio - with doc strings ```python -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB import subprocess import os diff --git a/docs/swarms/structs/index.md b/docs/swarms/structs/index.md index c95b5411..af362fcf 100644 --- a/docs/swarms/structs/index.md +++ b/docs/swarms/structs/index.md @@ -45,7 +45,8 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Load the environment variables load_dotenv() @@ -71,7 +72,8 @@ agent.run("Generate a 10,000 word blog on health and wellness.") `Agent` equipped with quasi-infinite long term memory. Great for long document understanding, analysis, and retrieval. ```python -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB # Copy and paste the code and put it in your own local directory. 
# Making an instance of the ChromaDB class @@ -327,7 +329,7 @@ import os from dotenv import load_dotenv -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.structs import Agent # Load the environment variables diff --git a/docs/swarms/structs/spreadsheet_swarm.md b/docs/swarms/structs/spreadsheet_swarm.md index 9cdb919c..06101128 100644 --- a/docs/swarms/structs/spreadsheet_swarm.md +++ b/docs/swarms/structs/spreadsheet_swarm.md @@ -189,7 +189,8 @@ swarm._save_to_csv() ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) @@ -241,7 +242,8 @@ swarm.run( ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for QR code generation @@ -308,7 +310,8 @@ swarm.run( ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/docs/swarms/structs/task.md b/docs/swarms/structs/task.md index 3bdb461c..157ac95e 100644 --- a/docs/swarms/structs/task.md +++ b/docs/swarms/structs/task.md @@ -46,7 +46,7 @@ Executes the task by calling the agent or model with the specified arguments and ```python >>> from swarms.structs import Task, Agent ->>> from swarms.models import OpenAIChat +>>> from swarm_models import OpenAIChat >>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False) >>> task = Task(description="What's the weather in Miami?", agent=agent) >>> task.run() diff --git a/docs/swarms/tools/build_tool.md b/docs/swarms/tools/build_tool.md index fb680de6..d9aa97b8 100644 --- 
a/docs/swarms/tools/build_tool.md +++ b/docs/swarms/tools/build_tool.md @@ -470,7 +470,8 @@ import os import requests -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Get the OpenAI API key from the environment variable api_key = os.getenv("OPENAI_API_KEY") diff --git a/example.py b/example.py index d2d1b3d2..aaf45b83 100644 --- a/example.py +++ b/example.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/agents/agent_settings.py b/examples/agents/agent_settings.py index e21d820b..8b963026 100644 --- a/examples/agents/agent_settings.py +++ b/examples/agents/agent_settings.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/agents/easy_example.py b/examples/agents/easy_example.py index bebdb11a..c78cb6be 100644 --- a/examples/agents/easy_example.py +++ b/examples/agents/easy_example.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat ## Initialize the workflow agent = Agent( diff --git a/examples/agents/memory/agents_and_memory/agent_with_longterm_memory.py b/examples/agents/memory/agents_and_memory/agent_with_longterm_memory.py index 36e32081..00a63bf5 100644 --- a/examples/agents/memory/agents_and_memory/agent_with_longterm_memory.py +++ b/examples/agents/memory/agents_and_memory/agent_with_longterm_memory.py @@ -3,7 +3,8 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB # Load the environment variables 
diff --git a/examples/agents/o1_preview 2.py b/examples/agents/o1_preview 2.py index c62c9f0b..67d84d35 100644 --- a/examples/agents/o1_preview 2.py +++ b/examples/agents/o1_preview 2.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/agents/o1_preview.py b/examples/agents/o1_preview.py index c62c9f0b..67d84d35 100644 --- a/examples/agents/o1_preview.py +++ b/examples/agents/o1_preview.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/agents/settings/monitoring/agent_ops_tools.py b/examples/agents/settings/monitoring/agent_ops_tools.py index 9256071d..1799459a 100644 --- a/examples/agents/settings/monitoring/agent_ops_tools.py +++ b/examples/agents/settings/monitoring/agent_ops_tools.py @@ -24,7 +24,8 @@ sys.path.insert(0, os.getcwd()) ################ Adding project root to PYTHONPATH ################################ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from agentops import record_function diff --git a/examples/agents/settings/various_models/custom_model_with_agent.py b/examples/agents/settings/various_models/custom_model_with_agent.py index c0511bec..dd46076d 100644 --- a/examples/agents/settings/various_models/custom_model_with_agent.py +++ b/examples/agents/settings/various_models/custom_model_with_agent.py @@ -1,5 +1,5 @@ from swarms import Agent -from swarms.models.base_llm import BaseLLM +from swarm_models.base_llm import BaseLLM # Define a custom LLM class diff --git a/examples/agents/tools/function_calling/agent_spec_func_calling.py b/examples/agents/tools/function_calling/agent_spec_func_calling.py index e59fd29a..ccc19b36 
100644 --- a/examples/agents/tools/function_calling/agent_spec_func_calling.py +++ b/examples/agents/tools/function_calling/agent_spec_func_calling.py @@ -1,5 +1,5 @@ import json -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from typing import List from swarms import Agent diff --git a/examples/agents/tools/function_calling/audience_generator_agent.py b/examples/agents/tools/function_calling/audience_generator_agent.py index 73244577..aaf91e2a 100644 --- a/examples/agents/tools/function_calling/audience_generator_agent.py +++ b/examples/agents/tools/function_calling/audience_generator_agent.py @@ -1,5 +1,5 @@ import os -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from typing import List diff --git a/examples/agents/tools/function_calling/claude_artifacts_example.py b/examples/agents/tools/function_calling/claude_artifacts_example.py index 12a809ce..52832c36 100644 --- a/examples/agents/tools/function_calling/claude_artifacts_example.py +++ b/examples/agents/tools/function_calling/claude_artifacts_example.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field diff --git a/examples/agents/tools/function_calling/idea_generator_agent.py b/examples/agents/tools/function_calling/idea_generator_agent.py index da19822c..3283edeb 100644 --- a/examples/agents/tools/function_calling/idea_generator_agent.py +++ b/examples/agents/tools/function_calling/idea_generator_agent.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from 
typing import List import json diff --git a/examples/agents/tools/function_calling/openai_function_caller_agent_rearrange.py b/examples/agents/tools/function_calling/openai_function_caller_agent_rearrange.py index 165d831e..c08bdb26 100644 --- a/examples/agents/tools/function_calling/openai_function_caller_agent_rearrange.py +++ b/examples/agents/tools/function_calling/openai_function_caller_agent_rearrange.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel diff --git a/examples/agents/tools/function_calling/openai_function_caller_example.py b/examples/agents/tools/function_calling/openai_function_caller_example.py index c0a8f0a7..22e1169e 100644 --- a/examples/agents/tools/function_calling/openai_function_caller_example.py +++ b/examples/agents/tools/function_calling/openai_function_caller_example.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel diff --git a/examples/agents/tools/function_calling/prompt_generator_agent.py b/examples/agents/tools/function_calling/prompt_generator_agent.py index cc5c2e0e..3ff9ebe5 100644 --- a/examples/agents/tools/function_calling/prompt_generator_agent.py +++ b/examples/agents/tools/function_calling/prompt_generator_agent.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from typing import Sequence diff --git a/examples/agents/tools/function_calling/react_agent.py b/examples/agents/tools/function_calling/react_agent.py index a810c15e..885e6f7c 100644 --- a/examples/agents/tools/function_calling/react_agent.py +++ b/examples/agents/tools/function_calling/react_agent.py @@ -1,4 +1,4 @@ -from 
swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from typing import List diff --git a/examples/agents/tools/function_calling/sentiment_analysis_function_calling.py b/examples/agents/tools/function_calling/sentiment_analysis_function_calling.py index fcc8a311..3e66605f 100644 --- a/examples/agents/tools/function_calling/sentiment_analysis_function_calling.py +++ b/examples/agents/tools/function_calling/sentiment_analysis_function_calling.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field diff --git a/examples/agents/use_cases/browser/multion/multion_example.ipynb b/examples/agents/use_cases/browser/multion/multion_example.ipynb index a2941aa6..66da08a5 100644 --- a/examples/agents/use_cases/browser/multion/multion_example.ipynb +++ b/examples/agents/use_cases/browser/multion/multion_example.ipynb @@ -11,7 +11,7 @@ "from multion.client import MultiOn\n", "from swarms import Agent\n", "import os\n", - "from swarms.models.base_llm import BaseLLM\n", + "from swarm_models.base_llm import BaseLLM\n", "\n", "def check_multion_api_key():\n", " \"\"\"\n", diff --git a/examples/agents/use_cases/code_gen/ai_research_team/json_output_v.py b/examples/agents/use_cases/code_gen/ai_research_team/json_output_v.py index 6c90c32b..e7f76867 100644 --- a/examples/agents/use_cases/code_gen/ai_research_team/json_output_v.py +++ b/examples/agents/use_cases/code_gen/ai_research_team/json_output_v.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from swarms import create_file_in_folder from swarms.utils.loguru_logger import logger diff --git 
a/examples/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py b/examples/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py index 771b51d3..65bbd02f 100644 --- a/examples/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py +++ b/examples/agents/use_cases/code_gen/ai_research_team/multi_agent_hf.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from swarms.utils.loguru_logger import logger import threading diff --git a/examples/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py b/examples/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py index de87dbd5..d22b9064 100644 --- a/examples/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py +++ b/examples/agents/use_cases/code_gen/ai_research_team/novel_pytorch_code_generator.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from swarms import create_file_in_folder from swarms.tools.prebuilt.code_executor import CodeExecutor diff --git a/examples/agents/use_cases/code_gen/amazon_review_agent.py b/examples/agents/use_cases/code_gen/amazon_review_agent.py index 3fb3bc40..f0c1a695 100644 --- a/examples/agents/use_cases/code_gen/amazon_review_agent.py +++ b/examples/agents/use_cases/code_gen/amazon_review_agent.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat ## Initialize the workflow agent = Agent( diff --git a/examples/agents/use_cases/code_gen/api_requester_agent.py b/examples/agents/use_cases/code_gen/api_requester_agent.py index ae7bd5f9..502e6285 100644 --- a/examples/agents/use_cases/code_gen/api_requester_agent.py +++ 
b/examples/agents/use_cases/code_gen/api_requester_agent.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat agent = Agent( agent_name="API Requester", diff --git a/examples/agents/use_cases/code_gen/code_interpreter_agent.py b/examples/agents/use_cases/code_gen/code_interpreter_agent.py index b49c4099..7b46bc78 100644 --- a/examples/agents/use_cases/code_gen/code_interpreter_agent.py +++ b/examples/agents/use_cases/code_gen/code_interpreter_agent.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from swarms.tools.prebuilt.code_executor import CodeExecutor from swarms.structs.concat import concat_strings diff --git a/examples/agents/use_cases/code_gen/sql_agent.py b/examples/agents/use_cases/code_gen/sql_agent.py index bdfd9966..f8dc06ce 100644 --- a/examples/agents/use_cases/code_gen/sql_agent.py +++ b/examples/agents/use_cases/code_gen/sql_agent.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Get the OpenAI API key from the environment variable api_key = os.getenv("OPENAI_API_KEY") diff --git a/examples/agents/use_cases/finance/estate_planning_agent.py b/examples/agents/use_cases/finance/estate_planning_agent.py index a9f0c8eb..16c89d35 100644 --- a/examples/agents/use_cases/finance/estate_planning_agent.py +++ b/examples/agents/use_cases/finance/estate_planning_agent.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Get the OpenAI API key from the environment variable api_key = os.getenv("OPENAI_API_KEY") diff --git a/examples/agents/use_cases/finance/financial_agent_gpt4o_mini.py b/examples/agents/use_cases/finance/financial_agent_gpt4o_mini.py index 52d8329c..cd4bed51 100644 --- 
a/examples/agents/use_cases/finance/financial_agent_gpt4o_mini.py +++ b/examples/agents/use_cases/finance/financial_agent_gpt4o_mini.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/agents/use_cases/finance/main.py b/examples/agents/use_cases/finance/main.py index 3e5c1445..fed00f5b 100644 --- a/examples/agents/use_cases/finance/main.py +++ b/examples/agents/use_cases/finance/main.py @@ -2,7 +2,8 @@ import os import requests -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Get the OpenAI API key from the environment variable api_key = os.getenv("OPENAI_API_KEY") diff --git a/examples/agents/use_cases/finance/plaid_api_tool.py b/examples/agents/use_cases/finance/plaid_api_tool.py index 84cd10b0..1b8b56f4 100644 --- a/examples/agents/use_cases/finance/plaid_api_tool.py +++ b/examples/agents/use_cases/finance/plaid_api_tool.py @@ -12,7 +12,8 @@ from plaid.model.transactions_get_response import ( TransactionsGetResponse, ) -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/agents/use_cases/kyle_hackathon.py b/examples/agents/use_cases/kyle_hackathon.py index 36fcfcd2..b1c5c493 100644 --- a/examples/agents/use_cases/kyle_hackathon.py +++ b/examples/agents/use_cases/kyle_hackathon.py @@ -2,7 +2,8 @@ import os from dotenv import load_dotenv -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.agents.multion_agent import MultiOnAgent from swarms_memory import ChromaDB from swarms import tool diff --git a/examples/agents/use_cases/law/alberto_agent 2.py b/examples/agents/use_cases/law/alberto_agent 2.py index 
77c8c028..74fc62ee 100644 --- a/examples/agents/use_cases/law/alberto_agent 2.py +++ b/examples/agents/use_cases/law/alberto_agent 2.py @@ -10,7 +10,7 @@ from typing import Optional from pydantic import BaseModel, Field -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller PROBABE_SYS_PROMPT = """ diff --git a/examples/agents/use_cases/law/alberto_agent.py b/examples/agents/use_cases/law/alberto_agent.py index 77c8c028..74fc62ee 100644 --- a/examples/agents/use_cases/law/alberto_agent.py +++ b/examples/agents/use_cases/law/alberto_agent.py @@ -10,7 +10,7 @@ from typing import Optional from pydantic import BaseModel, Field -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller PROBABE_SYS_PROMPT = """ diff --git a/examples/agents/use_cases/multi_modal/multi_modal_rag_agent.py b/examples/agents/use_cases/multi_modal/multi_modal_rag_agent.py index c309d60a..0d31924c 100644 --- a/examples/agents/use_cases/multi_modal/multi_modal_rag_agent.py +++ b/examples/agents/use_cases/multi_modal/multi_modal_rag_agent.py @@ -3,7 +3,8 @@ import os from dotenv import load_dotenv -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT from swarms import tool diff --git a/examples/agents/use_cases/multi_modal/new_agent_tool_system.py b/examples/agents/use_cases/multi_modal/new_agent_tool_system.py index 62f46678..18958770 100644 --- a/examples/agents/use_cases/multi_modal/new_agent_tool_system.py +++ b/examples/agents/use_cases/multi_modal/new_agent_tool_system.py @@ -13,7 +13,8 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # 
Load the environment variables load_dotenv() diff --git a/examples/agents/use_cases/research/new_perplexity_agent.py b/examples/agents/use_cases/research/new_perplexity_agent.py index 272041de..e51e340a 100644 --- a/examples/agents/use_cases/research/new_perplexity_agent.py +++ b/examples/agents/use_cases/research/new_perplexity_agent.py @@ -1,5 +1,5 @@ from swarms import Agent -from swarms.models.llama3_hosted import llama3Hosted +from swarm_models.llama3_hosted import llama3Hosted from swarms_memory import ChromaDB from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api diff --git a/examples/agents/use_cases/research/perplexity_agent.py b/examples/agents/use_cases/research/perplexity_agent.py index 0faab2cf..b26c1b30 100644 --- a/examples/agents/use_cases/research/perplexity_agent.py +++ b/examples/agents/use_cases/research/perplexity_agent.py @@ -9,7 +9,8 @@ $ pip install swarms - """ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api import os diff --git a/examples/agents/use_cases/security/perimeter_defense_agent.py b/examples/agents/use_cases/security/perimeter_defense_agent.py index d235fa22..f92cbdee 100644 --- a/examples/agents/use_cases/security/perimeter_defense_agent.py +++ b/examples/agents/use_cases/security/perimeter_defense_agent.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv import swarms.prompts.security_team as stsp -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI from swarms.structs import Agent # Load environment variables and initialize the Vision API diff --git a/examples/collabs/swarms_example.ipynb b/examples/collabs/swarms_example.ipynb index c0f52ed1..1e30e61e 100644 --- a/examples/collabs/swarms_example.ipynb +++ b/examples/collabs/swarms_example.ipynb @@ -1091,7 +1091,7 @@ "\n", "from dotenv import load_dotenv\n", "\n", - 
"from swarms.models import GPT4VisionAPI\n", + "from swarm_models import GPT4VisionAPI\n", "from swarms.prompts.logistics import (\n", " Efficiency_Agent_Prompt,\n", " Health_Security_Agent_Prompt,\n", @@ -1211,7 +1211,7 @@ "\n", "from dotenv import load_dotenv\n", "\n", - "from swarms.models.gpt4_vision_api import GPT4VisionAPI\n", + "from swarm_models.gpt4_vision_api import GPT4VisionAPI\n", "from swarms.structs import Agent\n", "\n", "# Load the environment variables\n", diff --git a/examples/demos/accountant_team/account_team2_example.py b/examples/demos/accountant_team/account_team2_example.py index 5a5aafd3..29c6d9b9 100644 --- a/examples/demos/accountant_team/account_team2_example.py +++ b/examples/demos/accountant_team/account_team2_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import Anthropic, OpenAIChat +from swarm_models import Anthropic, OpenAIChat from swarms.prompts.accountant_swarm_prompts import ( DECISION_MAKING_PROMPT, DOC_ANALYZER_AGENT_PROMPT, diff --git a/examples/demos/ad_gen/ad_gen_example.py b/examples/demos/ad_gen/ad_gen_example.py index 978ab502..8ee79706 100644 --- a/examples/demos/ad_gen/ad_gen_example.py +++ b/examples/demos/ad_gen/ad_gen_example.py @@ -3,8 +3,8 @@ import random from dotenv import load_dotenv -from swarms.models import OpenAIChat -from swarms.models.stable_diffusion import StableDiffusion +from swarm_models import OpenAIChat +from swarm_models import StableDiffusion from swarms.structs import Agent load_dotenv() diff --git a/examples/demos/ai_acceleerated_learning/main.py b/examples/demos/ai_acceleerated_learning/main.py index 69840da4..50e6674c 100644 --- a/examples/demos/ai_acceleerated_learning/main.py +++ b/examples/demos/ai_acceleerated_learning/main.py @@ -1,6 +1,7 @@ import concurrent import csv -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB from dotenv import load_dotenv from 
swarms.utils.parse_code import extract_code_from_markdown diff --git a/examples/demos/ai_research_team/main_example.py b/examples/demos/ai_research_team/main_example.py index 96f2e417..0dc804f9 100644 --- a/examples/demos/ai_research_team/main_example.py +++ b/examples/demos/ai_research_team/main_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import Anthropic, OpenAIChat +from swarm_models import Anthropic, OpenAIChat from swarms.prompts.ai_research_team import ( PAPER_IMPLEMENTOR_AGENT_PROMPT, PAPER_SUMMARY_ANALYZER, diff --git a/examples/demos/assembly/assembly_example.py b/examples/demos/assembly/assembly_example.py index 7ac97ab0..700a5143 100644 --- a/examples/demos/assembly/assembly_example.py +++ b/examples/demos/assembly/assembly_example.py @@ -1,4 +1,4 @@ -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.structs import Agent llm = GPT4VisionAPI() diff --git a/examples/demos/autoswarm/autoswarm.py b/examples/demos/autoswarm/autoswarm.py index 309c88ea..79346529 100644 --- a/examples/demos/autoswarm/autoswarm.py +++ b/examples/demos/autoswarm/autoswarm.py @@ -1,6 +1,6 @@ import os from dotenv import load_dotenv -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import Agent import swarms.prompts.autoswarm as sdsp diff --git a/examples/demos/autotemp/autotemp_example.py b/examples/demos/autotemp/autotemp_example.py index f086f112..2a744857 100644 --- a/examples/demos/autotemp/autotemp_example.py +++ b/examples/demos/autotemp/autotemp_example.py @@ -1,6 +1,6 @@ import re -from swarms.models.openai_models import OpenAIChat +from swarm_models.openai_models import OpenAIChat class AutoTemp: diff --git a/examples/demos/autotemp/blog_gen_example.py b/examples/demos/autotemp/blog_gen_example.py index fe2a2317..d9c7eae0 100644 --- a/examples/demos/autotemp/blog_gen_example.py +++ 
b/examples/demos/autotemp/blog_gen_example.py @@ -3,7 +3,7 @@ import os from autotemp import AutoTemp from termcolor import colored -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import SequentialWorkflow diff --git a/examples/demos/developer_swarm/main_example.py b/examples/demos/developer_swarm/main_example.py index 0a2e2a95..8f8c51fb 100644 --- a/examples/demos/developer_swarm/main_example.py +++ b/examples/demos/developer_swarm/main_example.py @@ -19,7 +19,7 @@ import os from dotenv import load_dotenv -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.prompts.programming import DOCUMENTATION_SOP, TEST_SOP from swarms.structs import Agent diff --git a/examples/demos/education/education_example.py b/examples/demos/education/education_example.py index 31c08f0d..32fe6761 100644 --- a/examples/demos/education/education_example.py +++ b/examples/demos/education/education_example.py @@ -4,7 +4,7 @@ from dotenv import load_dotenv import swarms.prompts.education as edu_prompts from swarms import Agent, SequentialWorkflow -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat # Load environment variables load_dotenv() diff --git a/examples/demos/gemini_benchmarking/gemini_chat_example.py b/examples/demos/gemini_benchmarking/gemini_chat_example.py index 2ea6a900..f19e6983 100644 --- a/examples/demos/gemini_benchmarking/gemini_chat_example.py +++ b/examples/demos/gemini_benchmarking/gemini_chat_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models.gemini import Gemini +from swarm_models.gemini import Gemini from swarms.prompts.react import react_prompt load_dotenv() diff --git a/examples/demos/gemini_benchmarking/gemini_react_example.py b/examples/demos/gemini_benchmarking/gemini_react_example.py index 37765baf..725b84a3 100644 --- a/examples/demos/gemini_benchmarking/gemini_react_example.py +++ 
b/examples/demos/gemini_benchmarking/gemini_react_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models.gemini import Gemini +from swarm_models.gemini import Gemini from swarms.prompts.react import react_prompt load_dotenv() diff --git a/examples/demos/gemini_benchmarking/gemini_vcot_example.py b/examples/demos/gemini_benchmarking/gemini_vcot_example.py index 0a5c4c99..4b10f905 100644 --- a/examples/demos/gemini_benchmarking/gemini_vcot_example.py +++ b/examples/demos/gemini_benchmarking/gemini_vcot_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import Gemini +from swarm_models import Gemini from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT # Load the environment variables diff --git a/examples/demos/grupa/app_example.py b/examples/demos/grupa/app_example.py index ff5fc27d..acf13635 100644 --- a/examples/demos/grupa/app_example.py +++ b/examples/demos/grupa/app_example.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv from termcolor import colored -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.prompts.code_interpreter import CODE_INTERPRETER from swarms.prompts.programming import DOCUMENTATION_SOP, TEST_SOP from swarms.structs import Agent diff --git a/examples/demos/jarvis_multi_modal_auto_agent/jarvis_example.py b/examples/demos/jarvis_multi_modal_auto_agent/jarvis_example.py index cce61fba..df36e7db 100644 --- a/examples/demos/jarvis_multi_modal_auto_agent/jarvis_example.py +++ b/examples/demos/jarvis_multi_modal_auto_agent/jarvis_example.py @@ -1,4 +1,4 @@ -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1, ) diff --git a/examples/demos/llm_with_conversation/main_example.py b/examples/demos/llm_with_conversation/main_example.py index a9e6c42a..70596b7e 
100644 --- a/examples/demos/llm_with_conversation/main_example.py +++ b/examples/demos/llm_with_conversation/main_example.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv # Import the OpenAIChat model and the Agent struct -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat # Load the environment variables load_dotenv() diff --git a/examples/demos/logistics/logistics_example.py b/examples/demos/logistics/logistics_example.py index 48d8b9ce..08fb9448 100644 --- a/examples/demos/logistics/logistics_example.py +++ b/examples/demos/logistics/logistics_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI from swarms.prompts.logistics import ( Efficiency_Agent_Prompt, Health_Security_Agent_Prompt, diff --git a/examples/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py b/examples/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py index 007776ac..fc7d7cb8 100644 --- a/examples/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py +++ b/examples/demos/multi_modal_autonomous_agents/multi_modal_auto_agent_example.py @@ -1,4 +1,4 @@ -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.structs import Agent llm = GPT4VisionAPI() diff --git a/examples/demos/multi_modal_chain_of_thought/vcot_example.py b/examples/demos/multi_modal_chain_of_thought/vcot_example.py index 24bf60aa..f92d8de2 100644 --- a/examples/demos/multi_modal_chain_of_thought/vcot_example.py +++ b/examples/demos/multi_modal_chain_of_thought/vcot_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.prompts.visual_cot import VISUAL_CHAIN_OF_THOUGHT from swarms.structs import Agent diff --git 
a/examples/demos/multimodal_tot/idea2img_example.py b/examples/demos/multimodal_tot/idea2img_example.py index 4a6c1da3..186d31fb 100644 --- a/examples/demos/multimodal_tot/idea2img_example.py +++ b/examples/demos/multimodal_tot/idea2img_example.py @@ -4,9 +4,9 @@ import os import streamlit as st from dotenv import load_dotenv -from swarms.models import OpenAIChat -from swarms.models.gpt4_vision_api import GPT4VisionAPI -from swarms.models.stable_diffusion import StableDiffusion +from swarm_models import OpenAIChat +from swarm_models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.stable_diffusion import StableDiffusion from swarms.structs import Agent # Load environment variables diff --git a/examples/demos/multimodal_tot/main_example.py b/examples/demos/multimodal_tot/main_example.py index 2a0494dc..815a326c 100644 --- a/examples/demos/multimodal_tot/main_example.py +++ b/examples/demos/multimodal_tot/main_example.py @@ -20,8 +20,8 @@ import os from dotenv import load_dotenv from termcolor import colored -from swarms.models.gpt4_vision_api import GPT4VisionAPI -from swarms.models.stable_diffusion import StableDiffusion +from swarm_models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.stable_diffusion import StableDiffusion # Load the environment variables load_dotenv() diff --git a/examples/demos/nutrition/nutrition_example.py b/examples/demos/nutrition/nutrition_example.py index b4331db6..66542b6e 100644 --- a/examples/demos/nutrition/nutrition_example.py +++ b/examples/demos/nutrition/nutrition_example.py @@ -4,7 +4,7 @@ import os import requests from dotenv import load_dotenv -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import Agent # Load environment variables diff --git a/examples/demos/octomology_swarm/api.py b/examples/demos/octomology_swarm/api.py index cccf4dfe..d33238a6 100644 --- a/examples/demos/octomology_swarm/api.py +++ b/examples/demos/octomology_swarm/api.py @@ -2,8 +2,8 @@ import 
os from dotenv import load_dotenv from swarms import Agent -from swarms.models import OpenAIChat -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models import OpenAIChat +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.structs.rearrange import AgentRearrange # Load the environment variables diff --git a/examples/demos/optimize_llm_stack/vllm_example.py b/examples/demos/optimize_llm_stack/vllm_example.py index b032709d..31452c03 100644 --- a/examples/demos/optimize_llm_stack/vllm_example.py +++ b/examples/demos/optimize_llm_stack/vllm_example.py @@ -1,4 +1,4 @@ -from swarms.models import vLLM +from swarm_models import vLLM # Initialize vLLM with custom model and parameters custom_vllm = vLLM( diff --git a/examples/demos/optimize_llm_stack/vortex_example.py b/examples/demos/optimize_llm_stack/vortex_example.py index 5badb2fd..a95e876e 100644 --- a/examples/demos/optimize_llm_stack/vortex_example.py +++ b/examples/demos/optimize_llm_stack/vortex_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import Agent # import modal diff --git a/examples/demos/patient_question_assist/main.py b/examples/demos/patient_question_assist/main.py index 45b31cb4..69224f12 100644 --- a/examples/demos/patient_question_assist/main.py +++ b/examples/demos/patient_question_assist/main.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from typing import List from swarms_memory import ChromaDB diff --git a/examples/demos/personal_stylist/personal_stylist_example.py b/examples/demos/personal_stylist/personal_stylist_example.py index dde64cb7..5c3f745c 100644 --- a/examples/demos/personal_stylist/personal_stylist_example.py +++ b/examples/demos/personal_stylist/personal_stylist_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import 
GPT4VisionAPI +from swarm_models import GPT4VisionAPI from swarms.prompts.personal_stylist import ( ACCESSORIES_STYLIST_AGENT_PROMPT, BEARD_STYLIST_AGENT_PROMPT, diff --git a/examples/demos/plant_biologist_swarm/agricultural_swarm.py b/examples/demos/plant_biologist_swarm/agricultural_swarm.py index 9664e6ed..e388d88f 100644 --- a/examples/demos/plant_biologist_swarm/agricultural_swarm.py +++ b/examples/demos/plant_biologist_swarm/agricultural_swarm.py @@ -18,7 +18,7 @@ from examples.demos.plant_biologist_swarm.prompts import ( treatment_recommender_agent, ) from swarms import Agent -from swarms.models.gpt_o import GPT4VisionAPI +from swarm_models.gpt_o import GPT4VisionAPI # Load the OpenAI API key from the .env file load_dotenv() diff --git a/examples/demos/plant_biologist_swarm/using_concurrent_workflow.py b/examples/demos/plant_biologist_swarm/using_concurrent_workflow.py index 84bfbc55..78bd0f0f 100644 --- a/examples/demos/plant_biologist_swarm/using_concurrent_workflow.py +++ b/examples/demos/plant_biologist_swarm/using_concurrent_workflow.py @@ -10,7 +10,7 @@ from examples.demos.plant_biologist_swarm.prompts import ( ) from swarms import Agent, ConcurrentWorkflow -from swarms.models.gpt_o import GPT4VisionAPI +from swarm_models.gpt_o import GPT4VisionAPI # Load the OpenAI API key from the .env file diff --git a/examples/demos/positive_med/positive_med_example.py b/examples/demos/positive_med/positive_med_example.py index 09cbb411..d10526da 100644 --- a/examples/demos/positive_med/positive_med_example.py +++ b/examples/demos/positive_med/positive_med_example.py @@ -25,7 +25,7 @@ import os from termcolor import colored -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.prompts.autobloggen import ( AUTOBLOG_REVIEW_PROMPT, DRAFT_AGENT_SYSTEM_PROMPT, diff --git a/examples/demos/security_team/security_team_example.py b/examples/demos/security_team/security_team_example.py index d391fe32..00c9b649 100644 --- 
a/examples/demos/security_team/security_team_example.py +++ b/examples/demos/security_team/security_team_example.py @@ -4,7 +4,7 @@ from dotenv import load_dotenv from termcolor import colored import swarms.prompts.security_team as stsp -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI from swarms.structs import Agent # Load environment variables and initialize the Vision API diff --git a/examples/demos/social_media_content_generators_swarm/agents.py b/examples/demos/social_media_content_generators_swarm/agents.py index 0ee20cff..958a51f7 100644 --- a/examples/demos/social_media_content_generators_swarm/agents.py +++ b/examples/demos/social_media_content_generators_swarm/agents.py @@ -10,7 +10,8 @@ Example: """ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat llm = OpenAIChat(max_tokens=4000) diff --git a/examples/demos/society_of_agents/hallucination_swarm.py b/examples/demos/society_of_agents/hallucination_swarm.py index 3f6764ba..64f0fe65 100644 --- a/examples/demos/society_of_agents/hallucination_swarm.py +++ b/examples/demos/society_of_agents/hallucination_swarm.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # # model = HuggingfaceLLM(model_id="openai-community/gpt2", max_length=1000) diff --git a/examples/demos/society_of_agents/probate_agent.py b/examples/demos/society_of_agents/probate_agent.py index 04660860..f85d2a79 100644 --- a/examples/demos/society_of_agents/probate_agent.py +++ b/examples/demos/society_of_agents/probate_agent.py @@ -6,7 +6,8 @@ extract forms from images """ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat model = OpenAIChat(max_tokens=4000) diff --git a/examples/demos/swarm_of_complaince/compliance_swarm.py b/examples/demos/swarm_of_complaince/compliance_swarm.py index 63cee018..4c7d3874 100644 --- 
a/examples/demos/swarm_of_complaince/compliance_swarm.py +++ b/examples/demos/swarm_of_complaince/compliance_swarm.py @@ -10,7 +10,7 @@ Todo [Improvements] """ from swarms import Agent -from swarms.models.llama3_hosted import llama3Hosted +from swarm_models.llama3_hosted import llama3Hosted from swarms_memory import ChromaDB diff --git a/examples/demos/swarm_of_mma_manufacturing/main_example.py b/examples/demos/swarm_of_mma_manufacturing/main_example.py index 0dce5781..ffb31e76 100644 --- a/examples/demos/swarm_of_mma_manufacturing/main_example.py +++ b/examples/demos/swarm_of_mma_manufacturing/main_example.py @@ -19,7 +19,7 @@ import os from dotenv import load_dotenv from termcolor import colored -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI from swarms.structs import Agent load_dotenv() diff --git a/examples/demos/urban_planning/urban_planning_example.py b/examples/demos/urban_planning/urban_planning_example.py index 2a52ced7..11e5efad 100644 --- a/examples/demos/urban_planning/urban_planning_example.py +++ b/examples/demos/urban_planning/urban_planning_example.py @@ -3,7 +3,7 @@ import os from dotenv import load_dotenv import swarms.prompts.urban_planning as upp -from swarms.models import GPT4VisionAPI, OpenAIChat +from swarm_models import GPT4VisionAPI, OpenAIChat from swarms.structs import Agent, SequentialWorkflow # Load environment variables diff --git a/examples/demos/xray/xray_example.py b/examples/demos/xray/xray_example.py index 4e69c0af..54406f2c 100644 --- a/examples/demos/xray/xray_example.py +++ b/examples/demos/xray/xray_example.py @@ -2,7 +2,7 @@ import os from dotenv import load_dotenv -from swarms.models import GPT4VisionAPI, OpenAIChat +from swarm_models import GPT4VisionAPI, OpenAIChat from swarms.prompts.xray_swarm_prompt import ( TREATMENT_PLAN_PROMPT, XRAY_ANALYSIS_PROMPT, diff --git a/examples/models/anthropic_example.py b/examples/models/anthropic_example.py deleted file mode 100644 index 
22dc6c00..00000000 --- a/examples/models/anthropic_example.py +++ /dev/null @@ -1,9 +0,0 @@ -import os - -from swarms.models import Anthropic - -model = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY")) - -task = "What is quantum field theory? What are 3 books on the field?" - -print(model(task)) diff --git a/examples/models/azure_openai.py b/examples/models/azure_openai.py deleted file mode 100644 index 2e216d96..00000000 --- a/examples/models/azure_openai.py +++ /dev/null @@ -1,10 +0,0 @@ -from swarms.models import AzureOpenAI - -# Initialize Azure OpenAI -model = AzureOpenAI() - -# Run the model -model( - "Create a youtube script for a video on how to use the swarms" - " framework" -) diff --git a/examples/models/azure_openai_example.py b/examples/models/azure_openai_example.py deleted file mode 100644 index 6bba72f9..00000000 --- a/examples/models/azure_openai_example.py +++ /dev/null @@ -1,25 +0,0 @@ -import os -from dotenv import load_dotenv -from swarms import AzureOpenAI - -# Load the environment variables -load_dotenv() - -# Create an instance of the AzureOpenAI class -model = AzureOpenAI( - azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), - deployment_name=os.getenv("AZURE_OPENAI_DEPLOYMENT"), - openai_api_version=os.getenv("OPENAI_API_VERSION"), - openai_api_key=os.getenv("AZURE_OPENAI_API_KEY"), - azure_ad_token=os.getenv("AZURE_OPENAI_AD_TOKEN"), -) - -# Define the prompt -prompt = ( - "Analyze this load document and assess it for any risks and" - " create a table in markdwon format." 
-) - -# Generate a response -response = model(prompt) -print(response) diff --git a/examples/models/cohere_example.py b/examples/models/cohere_example.py deleted file mode 100644 index de128a9f..00000000 --- a/examples/models/cohere_example.py +++ /dev/null @@ -1,5 +0,0 @@ -from swarms.models import Cohere - -cohere = Cohere(model="command-light", cohere_api_key="") - -out = cohere("Hello, how are you?") diff --git a/examples/models/dalle3.jpeg b/examples/models/dalle3.jpeg deleted file mode 100644 index 39753795f0cb5990d6d7d465d6cd5f66faf50356..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 228428 zcmce-1zc5I*YLaPF6r*Bz3Gr{>28o_)39l21r=$Ll1}Lk5kv_=x?50MT0lDP!lUPS zp7@^Uz4yEC<_CMl9CMB_*BtXd$5@k_>6=At*n*!E!p95Y_E;o}sv6%w}Qx3mQd@$vy}A)^1Rj`fe~EMWiCD3=8cX5--rb#;LO0thre1*Com zSh{-rE`WclC6~Pg%%02A)z{`$07PQN1GBFxT_N@YEXrf<=vtUY ze(Ywm6paW_R6;8^3v0<=a@I0D8d5(5r9gkbA*G=K{#j$eo9u*}P12?+{d4<|s%2>V z6pnI%Yio7|3yhnujqAw%Nf7c=Fb})O{K?X_ZDGW#yLI2*f0)|mrIuSPs7JE-{s`~9D@UE zF21W$8JnJq0Y_qKue_T+K*vFLsa)>qdvRsv5}$v_0mJ?EPyZ19m;Pa4^~;g|onL@_ zZ~Y=P-}aB9PCq=#%EQLO)5h8v=4$KdW8q;VWoP5!=^+huv7=FPwy?9Ik-074>S66; z58MEJpO+H?=HwC30fR+&`9%15j2x|PU{)Sbx7+4Dq?BkBY+P(SZtMJ`0>B@ge=5%K zLvbD}r{5j>$MnNxSF5}nrnSK>G-n#JISBjtl{D6 z4NOl!({|oA9x$LUZxwL(YlaNV`>j)~ygZzIP5&=g{#X5f^u%8i?9V|hSfuR9*u;Ti z3&)*XQlse*z`y!l+h?f_EAZU=ghF}#pS05bzpK^HVf|B^p^`k-X1Np_(7$R0S0z4c%JU%Z_m{#u%r2xNO9~p2AoPn{B(byssBTlg3;4qOu zU_A&17!4Ug7ZDjBURN7J1*W(?LB}Wj>)Fe=dbqg)V+nY^5N0sr?R^A%s=w}Q%gEBm z-1>rrizkdmOIwOYMhZ+tjK#wX76S7^gdqHU0!F|oADEXH^79nz`S*75!FadzqT-|L z1B?Sa*%N3C7mR&-5d|Mb3-ESV7wiA1-cR)r{*C(JFu2^`I{*#|gb)T73j(f3!iT|y zg3{)$QO2A#cr>&YubBPC72rqul7udp1r!@9DG{xU(eE@nK1l?9lVYsw_ex9AN=#QS zK#%sd!5L5`>{+(zj1hXLN1JMNxSxl%w$E$kIe@wN5MFhqbOx-@(4#;6xzS*we+Ir9P=!BkHANgGL1&|QrHnzfnrc+|87ay`l64XL1FJs z%3udx^ap4~M5j^n#9K&B9R8;@{B3tL>C~2#x>P7O>)k{r9bTf-!H+!N2VucrfVq5f8XLLKr+8NY#{{ zt<&;Fq?CgKyp4owRlXPWF39rQ@)H@prwt5Uvwgpmoar@P;dq)a&l!@KEZGCBVJ^rU 
z8gQ4PbJSz+Y&;yUJ}2sCySt(h-qgw8v&r4P!r|o^6*D*#pT|v8zA0jO7#z< ziD^W+5r}R3-NU|*9E(p6%VU2~g^6XUoT(5${lE{$)Hkd7I&A2TGSZE;4`JFCb0uF8 zhumXnN7qaF1ywZDmo?IonZ`x?uaF2^?#?2AqC&5zY8)mHFdp#auQK9$u<^84Fn$61 z{2G;z9&VCjVy%y0DXYf^EwFVCTUBO$P(WZwRH1P`T`&LPYPoS1wzetN!azL~ zTxx_IfS)L_5WABp<@*wSZO~>Tj=9!%g~_NO{|-wja~PUzoxZI2r0!+!q#^Poz=~0T z70th~0&!&Wi$6M@CC(34ApXO^0h|AQ&i+rV0E4-JVF<8-`AN{pDT;ixSa*KzXKJmFfB7j0q?F1PLlhZezRyr#|~phO&$p ztYY*M-+ggDez{!!vsFm-lsJ#dP4k$EX{ccJ`nxT48W)dD2BWUS#J6v3g-_K)%T0_f z3*6cVpijGp5Vu`MkE=`+g-vOcES|#at+iQb+(Sxc*C)y)?UQEZd}0&IulpVP3rjJ# z3x&V42QC7Pi;VUYO@wd&jo?7P(db{o=60z=NrQ+EMg=1yBLasY0Cm6WAb>~2q(TR2 zgSu3Lfk4DCt3ED_4^DZrW^FtY($fi1Gl zvrQw6|07`j5oSN!^p;+?j`}CdZk_c9(ttm=4*P>_e!oZtkj)wF2!{ST2gqjn9~=H> zvi;Xhd;adTzrY6VRNx>$6wA&kGXF&1HB&c`?^wj6>-y%->z#i0kSs01lQb{wdi8hJ z4jORwA{dwFtD#3Ag;4$80<5;U?E8~ZD8e$M_ozisJ=2vC)P{$h&?rE6Q}*-8$DU%o z!*`$Pevs(6fSE*iAHn7X4);X1Yb~w}P5)y*!V7YP{8*6&5w?N0E1^=M2y>)>N`@HM zR@2omZw|(keU;>#4C-@EF;bKva=j+wW9yTNs;F%QH3=D1TP@qWB2}3zogGUJLiU?$ zVSEpGP&MTCH!z2}{3N&6nFzhk$k)Xy7Vhcay;iuyKrWBbSs-6d<)bGl-Gt4}Dv6RG zk4!yJ5#Nx-V_y#YCMC>%CVSQ-^+ZKOg?XQGHQNENT6Oi{P;IMBwbw>bD0YA`Bad^K z*BHWKk@zjJzonnOI|KEElNi1-&BvUiH2=Z15MQg`s82=svq*`zoQYAPHA^8um;o7rIs05kJw#o=sV7@nEG8y4)NvW;*-e(BYTT;3|)7xYmeM!B?3>VhsGo8r$kVNsNM&^Sg z>-OOebGaLy^bKO-R^J`36mt>4)hPsD%HNAaud0+!5a)f$cHXqkV|7U1?J`Pm!DO&| zjr>UG{5`c`$=vw19$TAIYyTGf&Ih&2GSd1x{7}ND#HxArn@&`$X=sOo{Kx-|+Ds;Nvq%c}YP4r$@?b0SJJ(46bNrX-alSTIeu$mlGP9yY z4y`e-Qz)M=#p)GUY122kz=-6?8crVI4CM3N7+ohte|hXX=l1XwLpYSC-T4mIs^2l2 z;m~C1dgxkq^%G9iD2(c%DJOx-%rgYL87WwCsa%u$eFKtlA`YA85u}BLE)86~xIu;f zi^?|ocUee(;LQ$f^G|5|C-(dmXZ=g^{4)yw1?&Evf%p-QD0U0p+`PBfpL!-^D=)(2 z$h~pCV4oAK@J)>Gh5xKB^VwkAUNrx6c>9Ve=& z>G-a7CAf|AAhEdN$H*h9;m|hg0Q2QV^_3ou# zY}n4O3V$9Qwd^PwZy-rye#xY9&m(!F)|f5-%A7lB%`%VK7V~_Dv>%TW@(NB1bxj8| z)rx&{`q6kDD$(BCd&W-;j*yA6xBN}Nbjwb@;nvk-+|R^Z!MQW{F1S+9^utqZ_hP*} zE3eaESyR<3yhx}RcI$Z_7+_@Z4e8sxCMOS);tFd0R3=Z`@02);ru*xH#^%lYb$csT zC_&UbB+z0vql7cRbc6ArHV;#6d3Kk))ccd+QMNur-jz#TB*sgor~;F=g2XoaTthjl 
zEo;$OHg$U1rdd6eYA%{XPu?bMf-gAMy(FaXce<}9XTiF&dPka^=r=W3D6wVwzZa8B zZWX4iuHtkha2F(XPo3W-*`*}6K)8xnXOQA;ojz>1A8<$!hA?rza`A{JeID^!m46DwvkwN+k=b$BgSwjmLs`2%Esp;%tHRmJZA_2kHs))NysRae;;A zSb1AG84~k>!-&D)AFHY`V&*?{S19n=VZNL|{wNf2<%9wUhWvvJB8^16BZ*_SGHyAuh{OCub(76H+!5Jq zkvEe|LiV0pwrqvT@6 z1rY@c-`+sL=lOMmM#sYocAK+S;JXc__`t#fG}?+%z`G+F9jLR7wx@-&+igz#&up9^ zgb)1V0|ZvQd=M~%mxq^Mm>)op;(w_G4VdZ=C5Zo00%!`zKRdhKhH$(<$n<{>rSi1p z9#cMyF)kC=siprslr^ZfoECtPGcbX`7lI=2d9eQotG09Ft@jf}Ru0SL7V5kqJ5NzN3Pyw@Ka6bFp?O-g*K~oDB>X~d6U)LW|E?)f@Wd-iY10 zd|GR_t%=p_S*u#EuSmNk#|+;oTV7JsG@38&?|KJ9Qf(A;Gwv~Pm$4_m&04v702Alg z)#AGtf3r27&B6C_dBMe+6h#m&9*3e{bD%`U_TdwI%Qr1jFEs4Wurnt2w@C zR@RjhA`@&{qT-K1sRFE3GjJ9MN6ey1N;L>S*7mkcb&A@{<`sxaU%>)*KcV0F@!4vmY(n&u@36SjHTXKFEH}a95vr9 zZyWs^9+&m!K05Z^Z!Gwpm>>B+OfdSdr~&~MOmlmk29Xp@3`RvpwhP6C1JYa=zdQHu`MzeN z8E4gSZWya~8n;)J15IpZkLzy{T0z4e{`%g)uyxLaUa<`%Z((9t32b!R0yUyGSQD(C zt(vVIq4YD50aAIqT-JbRS^=K(O0^d(p{KFG}cf_BWz|go~rT_s`=tFR5 z2$r;qZ+@jSZ@7kwNGv4%ZtPE_L(j{z1(ZLrt4TZd#SzqqjVG4_M=O+22*0A6-_2qs# z`_7J-VoZ}`bAV}UyM>^=l26`NT*;j&=J3N9s)X5gNuey$o@_2gU&ahJB>zXODGmC5 zv2#p%>{W+Ft%8`-Dci!7L1bBd85apeFDAY zataKX91!FdW;E(GqAK-TRl2^&cd?9>ugiek7Wz{%NuMHYj>ik`scId5yiePyr?4}N zryRx8@np?lsbNn1%)Cl^GIp6bex&4`?s7z188e5}a2oHuR|4a&J(7Zw{e%?Fd)w#5 zcow}T1?-HTo*ox?i4ZMYqzz`1M(c`k)6qX*HwMB2Cru5suBoSDijigiaNul`xSFCHcmQ;Qj`o zchlPx-y3q8WH=VpR065&-QM}2kHdd9x&MacMrc*bXtIo$j$Z9(dyM!(!|?{ihuS?^ zR_bGu{FoC@fC0Cig3T&9tReNxkpo$NPl{@P-~993OnF?0Y-I?pR3t#L3V>osKPiSH zAlS&D6ELk%cEw`LW@h}46tnz0KnTJ3x12!7$NV)GZy^Q&Joff720rd>TK#u7g3$g* zYXEp7`BUN--hQMA75IK;2LWdN1u;P|FAvWzi2bjS^gr?KLj*svQtf*&4<$|AZbpR8 zgLuK>WX+t((T5|MD!IX|COVOIY-Ss}I1~3+9`s;NR_f_|fP=pOzMx;0X<6UKBkMY?18;>Bi-)A2%g#Acockruej~Vz*t9)ktnD^^ZY{yj0QQo?N>R)^_1^8wNzH`fwAMas+R3DJL z`+IWmUy|u}bZt*9hmE?MTB|H_*-5N7r&LhjsQ;arK>G34(O+aN(-r4}rA@+ej|DHB6V3Suflou}WB>m(ce^ zlqg@t@Aph&teO~4IX^ysM8n7ju^VW(ljPc|p<+pii5m8bsI^#ch!7SHN5?q$0xF^4 zbK&7hFm+R^9?9Wf8a^OnzH`5!L_OX%lOywUWZCLfREss!W2c1HjDTS`^nkrrEne!) 
zMkmF%HKt#0SahE5Pei(V+syXwd*sodzhe%Qtw?1iFJ+)&df&Lfwso9Y?Ux#wrx#I* zF86VI0+!=tjZ`~7nAB!~tRdPdAKKsmvT=5{^aRY&)a|)EgvU($Y!}H{vr4%v1V`tYnt2^0_BW zF*&XX!Ac9^)LSJ)ealtKFFId5*g-ev6@Dz`>o4>*{mUo%D@vBJ?z4?=DdrdK(?JLF z8D1Pz%p6Q}u7nS$NS_HbJs)2?AWk*xb=wzgO93s?5UDd?+_xjjv2FgO`%Z*4qADeu zedmQ#qsRnd3Z%{w4F|n7bO!&rEq~-tf2IZTJZyny5%K9hXu!1nY?wY6gt6ic&or}d z<1Jmt`WQ`teJz+QKOE;WCFxjKhn+g){M`XI`|9LHF}Ez?y>)zS$~i$HvK;o~oymYp zAng|o5X!4|6hs40H(Wz83I1}pTv@a!GP^O zz$PMDnjg^m8F}#l`ye2HK??!_(tf|7^}hkm|HQ1ewK$ml!kZP-dw0QfU?}f)dl4ho zM_RFi$Z5m&FRVpk2BqF<0~HU*A7OQZsSnBXg^0HFJ|sI+me%+k^x}=Km`@_z)xy!D z{*Xyn5cD>ySf9vH*Q3(@xm$ZpvsbDEj3;pfR~DCRG3*fK(VC~Iv-=V!CiyPS#77!R zD^||IZ8No==clw5FZs5RKA3GAleKJp92zL&lQggPx83#WQG8{qub-u35QE9ys9pq~ z(cA5PV7grXQQd5ne;v{{``m0kCN^ZYAwESi&acC_yP>+X-ho620XKSjTri&tDe%en zFmc>?w^H3ODW0i0L>QY(JX`>+%6Ll!If9p|ukDNoL^h3AvJG|0NGZR4tvK86Q$BM` zj=FF?o&oU*gNb~s^~rna6YmsnF+)>n@5fJ*q5g{dpL6e!+opN;E+|v2V_Q|MjqfES zh-8Y*gI44ipEjh)lClTAORO8UaNgpb=~`4q1oa0}yy%EMp{=RGYkU)YFcIa(A$7l` z+KNQ>1(~;H&~ZrB#fEhu@5ob>FRhJhGWjG;g+6d!a1t?TDQk3C8J{|>VrywCc)n#S zYmo9<@GmoumiX8*CWUv$cokP;3)0o=K}3hP7@*=MVYN+b-bthB=vrMG@l3saQqDYs z<}!z>>s@Sabr3pJ>h~2WcM2uv_nG1v%2o1T%n|{ZCGgMhoBqoA{YxJG4lKE?+{&Yb zzAy-<=Ch(A+KfZz9-Myy)<3>5`tPD2ncvtX_=_q3r|9RWGb+j6y5f)6%M&d32c5*h z0H3k}K1J~Ts_Ea->7T1{`mtB(A70@72BaUaqTt{_ZrN=zCc5iF=Gl3kl1GkWU20#3dJ!@Lz7MnP_=&_`2MW{{07Y1NfN_rAvPyRPF9bE%W}pesG+QINX>y(8VtohBVl z=Adbjnf->xNms@rCS@cYs#&l`xZ*7R5;K;i*NKoY1aIIyj0cc-1N;21&H(5eDtL)1RmsxE zF@0Hn2OE(4L=1xmMYA=CH%@8KkRD=TN8g}_44mX`S7B&;L8^C8k!L$UX*TKN)KM^e zbCf}|v9KO_&Oa+A9Xce^MPeP-gUXOpPvu;~w*Hj}O=CX6l<1@O3exi|`O{86Y@ru! 
zb1FgjCzD;cd8Z4HP6C%@U-kGEk3)05(5ENd@#aOHdO1z>or=)?e3XfWiDNDt!ly`M zF`3et9Cw+eV=t1aXkm{wStOr*!Rf?vP;@nq$#{UR~*qv4WF!^p42bXfDTo z(!-cFnWz2vre7G>N2~eb;^Z_aPE}g&u36k&8XlNTCRE&Dl{u^_7sHLiioJd^YA6)M zy}Ljq#10V{)Vmjvbg-pV`l0#ruDNxeOu~Rj)>6nq*+lOn89IHjSZ4tW6gMSvm8SfX zJ+Zl!h;-(5fZjboB=>^b9)68d}UdUyKw} zDJ9(hn7yaRh^F}QgJYz{RqBM+?&ei{{WbR?3)?OQLQ?KNrR-Bb`m%+&83f4|zcHMZ zRZKdHGr5CVM?yA!>zk7n2+|E3Uk4P#n`~g-h-xZ!XlLLjTzj2Gpe@4moi_R{nh`z; zS=Gl#75^kKaX44Okkz`7W8X8RJuFA@%6mRXPa!4sxw{KbZ4?^6^%kMzcaD2jmeY~@ zXuOcL8|rj=d29$5;?7sSdXdn(I4B~;O)3nPFmMsDxp=(zfopbNb4dsTjaa<4Cu)*< z{3PvdW7btY-erT+hetQ8H_`T~Jg-wu+vy$5@>sum?thpbA6!KbCZxS1ma ze-&MHqoQCg8+Z{@Yk=1RMGD|q@9wp{(Vy&H#5%)>8C}Tzg!*OB~9OF)gz5itZWj51L z;;7j@E@ZxEM|s%M1f%aUx<|m`hFaXQxj9i(Cq7I@`p7cuPMhYFt_kO=jdypQp&cT} zmBEgT*!B6!)>^u`9}>}JLYjOV^@AMSwM?HAJbpX!q&?-%W%x8KCj50N=q?3Bs84G) znw_vSRx2rHcDm#_4fA~wm!o|9OQA}iU^pBsU!#{+M78087Q)eUf+X!vSQrQ{1G$Zy zX|b@>OufBDn9SQuDK7N9PiooS*eTRw(8ov(t;ZF~eagW%pFz0yZ~zMK13#df zFCY;)U#Kkzq^b&H1GZS9f)F%uL2$qkPy__h!~-GT9s|E1%{zY`-=5#h0eeAEkdTm( z5K)kkkWo=kP|@))(9zJ)32|^S@kj{CNJ$7uh{>s#X~`)WDTztwxab&JSlKz)$!NHF zx!HJ_+1S}`D}h5nMMXzLC&0iUV51qB?X$KKJg@rlW)>6zItOUo;(YwH_dH@_Vm9(_MPIXyeSwF?fg^VjDO z!~Vf8Jisn^U>7PP%B@{+@IJsV0v;j~Ed=?FlqQOWJ3bvxC@O(;d`|UyG!~gk;Gv76i^BAz@P1V-F9x0G5J>NaS%XA8^9T~d z+I4Ex`7Z9BnW=NaGaWNK#Dv(@D@2-T5K&|9fZ}Lm)@Ws3OMO1K%ULd$Q_G{!Zj)Rk zS6hw>;!a$WAB1kE+%nw^IPF;>bDco6m zk!K^hYQQRl6@vw?+Ir*F(2|Cj#@gwkp^U~zjY6s=`FUKZ$nrUX35P&3K~o;^&BW3r zN?;kcO-qvB#l_MEUzJkKRnAnK-r+?qM+17{l8qOCPG-xh#L<!HcOk6e6QmU5J2Y_#vfPa|s(#f6b$>O$l;PmCld>j+S*ZGO%zaC;Ob5{3 zGD_2Z+L@$?@O_t}HS)aNYN5>TA=tru8IZGkT4XQElO_g1sX1SN11!ZZ)ir7X&F z3_@f;zVxkv1lyWF`(USFMs0%w=y2_7rAYV=`&<2A3>&!I_L7NqlYz^vMJc&ofR1B> zau!6Q7R00H&#){mynlb?z@b5ePhgo$P**{O=Z4eIBgQMoLMS%x{jE25GFS}L-mrnN zgOEHqOC~<^yYl^NV*<;f2$l@%LngczD%osmhwHG@8`hG=Ugd zQA3jZ#Yc3{38?(jWUA&hBfVL=h_N-B@&Q#e-GCkfn!FvAU7TDjK&RVpj2N3ZOvWwF z2d2#`#WQ7*9t_97u3m(12!IvZrwKl-*bd+=rc_s#EM$KeTL?j}7dBqEgu%52HCcen!wxi8u~CpF)5 
zZsUC#bpGr|Xp_RUO!mC*zSp~Tg62nhRStr9&=V{#rvyBL9jXqx^@zDS@fS#pqLH&t zscmb zM6xs)R9jfihA$a2R7nq|-#tK-3Q=dck`O76G|`FYP@x`-E5uD~Ux#^9EgJc#aO}!O zLejhl!n#U#7`L6aE37kTU2K#8}H zFNFN4po@!DdgV|OEARF)>gfK}LaJ4{l=$nShCJ&im4-gnsHX0d$88efn#oR6+$##z zrXLj#)8T!oj)or<3w`lVW1W|QM5<_iQ)+ne&Y55d>??ed--Uoaw!a(B{p9;xAlJeg z29%irjf>iW3+`@Kd#$^2!?TC}_DX*C$AV+Cy*)#S(c~m^9tqeyIPRJ?+0lwlubDlT zvv&r#y3mz`T;5^V&1*NsC1S=%6?Bp0Ly8#IqHg^Jbk9UbKIU4vQi+*zeplh$&Y3RORJwO(CQFLJ;q`SOhL}%P|)o}F#EjUwKk>d zjq>i`Ncu9R|1|*|HcOXc6X|?T6Mg3Ryf$1uPzsA&DIdXIPLyigYBmlSUchW;iF*8$ z;H~$1y2kXp_d4k^pJgN$f?_U!D#_bk-BEw?WKDsk2I;=Hg#5A4j-<&m0?W)B5O3{$ zE;Tn1bQKK*cX{C=!VkiiZ!{x8F02FObfr9TB-YeM5alrcqueji%b>}xB|OXhj4CK&tl z8ZhI+I7teL85(Pw-gqQcDeC(q5V0#LPVsXhLsv&#f(B1;#ai;|8aC!$ ztx$MwP1b%j;o`l0x&p&MY8CieE(Zw`3b!-~!1oPE*xpHf6xbLabuB|kB=%}9jf<0H z7NSN^kdlp<3pirvQvk{CX%I=YTQb(2YrtEkmp+%IO5BG?KEwNZa%ZmPTL*m?^`cvT z_<81p;d7J%?;z@L#|3$(+f6BJhj5N`FD2Wyczm}AU4srpz-%}thm{UW` z97l~MNCEz(0cX?O$hjfM&*m;svL9HL4!0(Bp6Yw1RTq5o19X%EU|LB;m!@?2*2VNS z2cHXd+938r-kg@7`f92GL{gG722Kn)7%mokp>vyqnx2#>z1buohYxXa0*r@3*M zbCM?M=-w=a@K%&Ra32Ev_uh5|U46`&B7*^=Wp%GkxCbN-C%{_EvTUH(Y^X@eLTtRo z$e0)Szc(8z&XXsUwUQOR59CF(oQsy86GZSuHB#6{Hvv_}KA zq1-Z8-4A}^6^5LeyJ&(ik_3;k=Ejz2eJtCXZ&j8NrJ`lo%C3Hjzid2#C01S^nRRyB z1d4qxRS`CI?A1CeQ5PWNh_*``n)RJQ*F${$ZgnDUODtg*N#%oQ#{nbS*WxWwfwX(8 z?;<5kCSp&u#IYNxTgSQcC3aNu&Nz+o7tQ1O= z84szUFE8237SS$gPbAE~%$Ph4;f@p-+*%Xh?X!DW6C zqEFDSWyb@}a!Ni7KTur3nS4$-CK#@%J`^Bz1PQ$y5WL3Ip?@}X=@$G==K=b&oM5AZ z`L%D#rg;0Wc6@F?aosl{LKRoc@GbAfN5a>t%F{#D=7vS{>(_}v_00joUw!kgHpo(M zK&Xe+XRc+3*CXu8GwM}Wt>SB(B;|QuO5?E@rs_#j>hjG)eS~9&2xFm8yQ{t(Iq-gZg|t+1^FD zs&Us$CV@~%r0n}UW^HkVYC)>1~KiWVG1sUZvSZ)E4SWM@%=Y;Cx3sDMsNo-+ts zh>b-Ky?{z)qaJJ4R2Ul$`Ow77vN8tPIXM_yOXql6Taw@^XxL-KCFT)aYFWl%-7YH& z;glkcSm1zl$tFbdHdX-EF9_~ZMz`JtzHRdMDtFSor1!Jfyucb8HVVj*6lq3U;pvCL z)OC*tLUdp8W4v?}eW_?Yhcvk;Eb#CRv}YwMCXfaHby4wYqrpTSITp2M3-8R=?D85y zg!@N5q<8%%(Fiv02_PouNr$TGFo*CSWn;5VW(0C27aN-!mykLa7;1>DDrv{Z&ufty z?4oPKoS7M69~1(#-Sxdp<-P~%EnOSrgzr*6P(ZZ6Q?J_5g{VO7nOK1!gP=@Y6a8EI 
zH4rap2o+M~?|G3!p$?q%cghN7JkS<&LIkV|k$K1NPM9U`9%*oC0;X?kX<=Xii?nge zkg6xyjs0$7P>8Y|TrU&S?ZkU0dpT;~o-*Qg{h!$0mAX`GDn6^oMY)^amW39JL4vZ{ z5lkc>^g^GYDL2ed9vC&ntc1Nlgd`G^2rQQqIXQ*ZCK0{1fK@9i`f|Bak7zSh-J>MN zl$FfF6yTb!)TaBXR;F0-Hz|&8+|GmG=KQhR2N*!i&wh^+MJo~E*j8$b9;p=V%W&Jb z@2`wIgPT)fMr_)Fm>EWn#m|%?HzGnU^HN<3CAPCAgGF9=&Y^Y-ShTLD#$G=(V!OlR z`-(HhYD-NPN{&}5d;_w!ZFW1x^->mk8(%Du`mso$+JV?UW8R-#Ktct*UYgyHv_<sWmK@6|UW)Pte$~I8$rC&xWZ6Gw?M&!VFC1`67JO86AO!QUDq$; z8^YVH<#Cx7Q;)3a3uhU)Qu9=IzXsoM= z7_4VN>%HjP`KTX>ylt)DsS9Qzx&bAf$bQI*U%A%(O2)i>k!ytX+@#5HWz(u5itYxq zsK9Xj;_IOUl0x5TjZ1ypqwB*ghoQ9m#pW^(oz4&J3(ilAP8Hgl(POu-$VLo=g)V-vSR+N}($55Xv#8S4FH*2?uzszl5OAr=mP%IH{u;Ef) zK>CSUe640}GT-S;s(KTdoyd4Jk1g`yM2xDP`YU$3fLQLDqlzAl$PL(oG5=bJWxd4x z!{uoK^{+bCf?_$ATP%p?M4|nrJk8&t>h}*^8|JNF>NZ0%r4v&KSG*9si)L|PA#IMEcTF2z1nYNq5RGJbWQ}3|w z$3CzqGJW_OQ}t@&+UV7k!ep)>)pwkv4Sn8FOdhHrCt+fISG;?d6HS3NwH z*5ic7N9D?5|&!TT1OyMfTc6 zN+9gYv6dRgQ;#%eZ7cJUV#{?R^trr|du6-R9M!=r=7q+3*D}c)kdZpO;`2Qt;~UUB zALaw5l8=SC!4><*In+&@*Mz#C$Ko#c7vBYrQ?Ey61(#gjfO;la#9s0ZzATXV6qNwYW8Er)Uy}=`X=lL~1 z7w#4^zerX;|D|B@tKN4Bu?DN!x>-u&^cTmxv2KrUKy0;FwX2PLJBB#TvHPVOzDfKf znEIQ>=VUQP+~v_?<+`J56!9$zCr`d!AH-jN4S6`c{?*jdH=g|3s59`H;OTh8+K0U> zukgCpyEvy0RV|i|;{t7!eba+jy1crTN7l@oGZ8jZyRSvm4_>|O5beImcXMuAYhe9)Z0vY9l1+CTfHUv`tZ?fdK8SA97Snt@w_uIFp-+g{ji=$K;Z z`fJU~Lrt52U|p5tPJ3`5W_;H1aO_vjFdS(t>oMb(><}PLXWvCT4J<88$XGDY%QPRd zvPt#O9P!nBWH{ujRUB1ZN%9fqQ1^H+W>0{T_@g*GP9jj=2uOpdXon38Z$hEr&-7+Z zKYeN$GoDlC(BS%NiW~QEJafXJ5F5wnYYYE!t3n4ES9!sV0%Fp6nLMj(aY74T(}T}x zYpJ#J`Rs4zeVbo6nT@R+WJ=NYV$Fa;552m@M05xW6T%XmJQvWvy*LTuj~K7(;j~T7 zwXcSe+Ppu<8zOw{=QfqBHN+)&Z*|V$vvvBusIGe#7i{uHf)6oQBYC6?kPS*>Pn67i z4XhY&{2NPdK(-ljG114N(0cEyBJ#r+fA0Zswdy#g^U-Y@@q8Reb&(Wwx02+(w}40? 
zNO0dX(Ug|zWnwePw$vl8VPz#V3|L64U?a>qR%5O7C0Y?^#{e0h$mVRvJg1u zBN`0d(|FsWeLNCrNg65LVheQCs6Jx4*H#xiZ5K%?5apXvANevo|D>^eSdQAl9k9&% zba~Ey_H>GcGNUb>ujhh-aFc=dJ7@L&5BWm z#a^sBJi4Y+5`tS33k%CT7%;CcY_z^Br(c7(Kcaqm*&0S|Bg}8%8O?)>fwn95ak}W+ zQVHW=InUZ`$4B)&!Cr|1*>>;e#72pV-KVrN^g&qw-RSFrD^s6mD6~XdJ7NS`)Hg<* z$FJd?9u@YLOP5A8x#q^@-GC^mCrjo|y*E!0kh5Ny-hdkEQT4x3n-|5T88zL4kSpt>GB&tSvtoBp&aRGQAJ-pmY|zcu?^d19n3DCOF+li>n11KM z<7fS0V-o!R{eINjF}!B4zwDtwgD4&nxkaSJX8H+xUp@$AC!2t3j!xN(6+Nje%O4f0 z#JS{$zF-=UcgopX$)>V6Pj@%Hiij_=AKo6@v3eL|f|L5TsJOI7NGiciY>VqqLl$KX zWwd2Rr`T|-dU#nGyXE69*Kvi^@EN zh7FdQY5SAui#&?KNxQ(GpEPM)zBZ`XJ5H-DY$kteX%k#|FHdY;V-KL2(QdRT$*Tsl zrdE`v)?C5Ot!z;zT?+dj#Bj`eYu8%uJ~~rqG#{*8$y#XCQ2ZK>e8N58kG8u$o{nC? zx!fq2=!GwO(N$%PvY9r%Nt6swTLP^W@ zcZ_=uCeZ}SIKr=tU|YIrM&X4gWj4C>24hY&V;AG9{Cdz5rk9I^k4nked3u%v{&=I3%f#vOZpt`0#p(z%ywz=ryp?8Mp4f)JN~WPLoBumVQPT_{g79ea`u1 zQM$xu(rcYD#`G5Or|lkvL&E+uzl&_@6U*QY`$P2a=n@N|UFbdH73Fy%XnYxT>rcxx z-&WHHv*k&IUP`#ZjKrAOepfJsXzAU=t^pZ;pFy66gX6rip2guM4e?+TU{5eLn)=S1qu|3r^Sl9TM8}i z?obHsPSMnWqQTvYySo>6cP$p&B}fu(-tXQq?jOR=+2Jrk*k|rFpEb|zt;Hjtz6s@B zG;Pql7yIl!5VWD!%)9>Cwq$ebrrb^AD^HJXNAs+MqfBm5I4V9B^y_+?ZpGTOfwn2) z8$-+XT)B^^?YgAQZD=~5Iq=XSU91~&Uu^XJ3ay4lX2Deis|?;^eTl>!Z=11>8%m_* zh$XV7BB+US#YRqR9JBfu&0 zAWD;UlvOx=>w(NdouI`PR(|G+mF@F+EoS;6#ei4WjC% zM*n3JNha0JvYp00)L@M7ru4L&tySQBP&*qM!PcpFIx9^Vu6hInJpyQp1;a&e?9l&g z+}3{5=AHkbH}t4q<(R>Aa_ooucYijpn zOa3c8%|3`R6LUOC3tgKkhp6c%+%Lu7J^kj>%V~N?IpfRBbE_Ay)+$c# z7D45*J{5Yr(^TC`+N4WAj(`W-ApY{{wTcz2gm=FRIdx!kxg%G3jvD6G9YE%RfmOpA<2kxWU&jkf=t|DQ|B zi=Pu_`5R`es#tRMXcI>_%6)%+?Wgjx^pX$pNRV1+z!Lvt)=7|AxMoP5;al~?3{6VQ z*f&@cL+BC=g$>rk|8+Atn+fCa<+)T;h3`G`-%964{8_atH48})`6}*h;d0TRSwNTO zELLo!-LE)J5JS3T3G9D=c#9Xm2PX$1Xftr9j-owVZw|ejU4~`1M3K+1H;h#_ar7#l ze+Zwqx9NM+w6>c`UR|O-{Mo#<$yF1J`bSm4yZa? 
ze-6oN^=e}!`uQ{IOwRE};_Cbr$0OGVM?Kfon)mNww3pvFy}y!TLzGC0Jvoo#>nbaz z4zm12$yqLOW{nLiIsWi$$}eF^tQa zWC+8X0lkZ>e8O?4Jx3)z9E;of*(>VQX|9IJFZ(IvCq=zWlRDE=>v}ZwiO9=GX=4CQ!P~(yj4<=c-)} z*Hr?_8FW5r;E8%Qi{@=-{?#e(TjA55^EJ@hP?on-XB=%)vl(xADV8_@;RUv3H@bBG zG3!e1kATauIZ;&I-p$&x$qnau0u=~{XJf1oLa?mR{@DGhx2>%%8>)!EqtD9opa{#b zz(Vl3jT|AEyj^};ZQMOao2|<-I|ery&u1lJyQ(!+imvsOj_C8w4*GnD%)g~x!5=H3 zQhx2+f^#Ls3PHDxeWgv&UHN=7F&g|=U%1k)p?Su9T+eXpf|R(*DPK>wPkh+Q&K7W3 zYgsf{G(0xB1Jwqn6wl+o2XS*#{ah`F4VNPY21R^0o|Ft1)ta~s ze)D&=6U8w!(?fp}s;g35mxxB3h2Zb-_Z8!nnGR%^k`sk8kV*GFyu&6WvmsOAT< z>JZ^;`)8Ni_;~ac_$Bxlo`cJx-jU%&8#m++XBd!B!f=4 z8RurpElRWpeH)lo#!J9-%VW~Gj(Gi}SMSm>!-S6Xv##(;h z#1_S?y)Vb7vkyTsgrCU!37Kiau`mIe0&iIe(^5iD&cW)KIv{@^^KrVZHdl@O?Q0f- zP*uaK5A8D}zlz;-z1}YCUbZ~~B-D(I{(Y&(&YokN{byJxctd43RUk*(7whI_4);n; zg_j$3G}ooNc>R64g^Vw*S&7YX7Q<3aaBM6e=$zxNXyS!SHB#bBXqu<4w56fcO;Aq4n0N)|!cN9iDVC-8kRJsfQU3wvQKc z+(lsP=_Zj;Rnb{Y^q0C}=7P^Qv8`*@n8UX-_<^NzPOybXIFpMYz+_F=C9XW|ji z3M%u&A@z91a`o~n;uF@{c4xU*Q)!sOfD_x4HCT?;F&tMP3;Vs1*ksAHeo0QJvx}$siRUF> zHq<}vuZN@H%HUCHSCk>Q|8mpY#hDbyvIY1jTWkAB+!5xlNdZJ{bO$kLM4i{U@($4{ zHVesP*J?+`v7J(wMbzJJ3T=Rn(n3E997SbbyG36^&ff%eX+M-vw)Z=5AYGdykeKDu z@`R@F56P*4CU@g!SPgS}6n{_v|-WCpZb9A};Vf!#h`4fJ=1Zo-p4Mm&Q zJ6$uMJ@mTb@7Xq`Fd!$@#Ux^X?@IMsL?>SHd5W2x`OkS8BUnQ1^jbTZS6F{O)X-$k zXsYCMBOf^ZH)4T*3PxSM%|@SJ=Yh`8pL$>a{e*<18-!`qn#N4YoWg@)Jcfr@fmOSP zo&I!+p&uE*N%=p(hY|S^&)vZj2fF&k`4j_`z=tJ3P}O?7a14mm)$$AoCI$D5Xn-$Unz{^XI%qa4jh_|H35GkRGZG6 zLd#zNE-$+#4L%!c8sA6Sl|c2k zNP6}=kHIH?-(r9rjw#V2;77+$zaozp)rYqi=z;h1DGz$A+J~eHB4jZ3RUtJDHQF(C zWXuf;kucdYj`uoikxe_TN${09(yGR?F#Jl}fP@&er4q$ zI*hO31uvn@>j=qJt2L6%d8qd5sy)7*b;bGE|1dG4i82fgg)7W#49+5iKS<0e`2EyV zviqaF3~p?i-r2AI8UjE{NgaIhg6}^90Efmy+06((L}c2!{!0Gh0w<{3?X~G$ndB06 zrpJi01H3K~bh}>)Jc^Keh7|vLN_!o0d9O*U+FA{{YS9UYZUTz(5B5)cT=E|QJ1Y?e znXihij^Ih4mB#Rb18x?6e}|h*6v9LpWmf)o)AJpEEv1h<&;29d`cQpcJ5(j9 zAvnkH&4C5UW+ehv}(A;swFF89aKD2AL_%i)}=&cH`O3A2JO$ zOUM$nZ2&lBOqYoxaAi(#!GRB))GL45M&Ox!^wdK7rCWx-oxBO&THz`na$n3e0DVEX 
zQhHj+zqt7|b#ex}YdDq1=qGy^DDQTwM_}lc^=i6Gnni>hjS-=_jgRzV`E?H(Zjm0~`@rNcl~*Nr=YE-^ z!4D=+!rb$I`zfl4k^e^WuQ9IS`ENg=f0XCC57$Sx_l?;ZAIB-x&A!1tF%}%>E+5M8 z2}P~^&(M?VPR9L$Nmbx{WP{dfqy<@S?{D*ZpCl5~T~wi~Y74o&7(;z%7pqE|5zV5Z znWd||~^3QX+goQz!DD_WK*SgWjZs>px=b_0^A>rZ;vYNwr?py%jxz*Lp3(~7aZ`NeU8A(UPg3B#_i<%i-B+ii z`wTUD{H3A8tbPJNW=-J0H$iMva~Ay@;(wJUvMVs?rtbu@wJJ>9S5-)siJvDtI0z30 z%Wi*j_Y`n;N{|7zzj&JL8jZwCIUwF)OuO{&qkSQg^2yQAKFu)IFl8fU&*e_fSD#bD z;cutBE^+#I4CY+gP zN?qcJd8!_3l|wDuo+*KoEzFhPl(uczl2}R?sGb36DBdr-!0$o{y7BuZ&?oX*(;F?VCb6kimedG zOFT<$L?9-qtH7{}B9>ZW91CpFx_JtQ+q>yxs$$I{cYX(VG& zb-dwR90N<(bf-MmgU@&IF&CtGAon8Vi^?4jPU$XV_0;osN;rEroBS67cQ)u^2d_#E zN>;Y5K1{g3FX^4Gmm-g;AtN{ESQ{2`#=i}|>{+Ei;J5i$LOq*jvr?5Z$YdYRqO>N^<^qgm;9xfB zk4u3O>rF*nV&PHu#Dk7Dkce4%96ZwZPs?MJiq7Y*p-J&B0qwbA_{~^Zy1ZkAEC=(c zNX*-N7;v7AinR`bys1B7fmGxdBJxv_npLa#AD1aEDm>ZbMTf!L#ctDytntYzL*LuC zqpTJ~$#GjB0T+W|zK*Y$w`08hlwf~R2-Eq?UM_HuMx22+WJTP7yyWGQ$eR&2{OkH{ zXueIhZrnd2B40u-eKufRYh4e}Bu&#-UnOz;hlv$wo>~0op+76g_|5+aD82-XCmCw!vA<0q=y({A zxhRUr>k|sJFu5_{Bk%}6Hn%ot*M;tDfTieeNa-<8KL!*o7t5YT3+kCU z`)w}wq15ZNf>HQdU%IceyIVcY&kyjrpIAguj|TyV`QDRML)UflA#FqsrLbzp96O|H z5#oTRx))C2R2hplJQWS2eRP3C`KHes1P**8*twykH{OqnT%J1D)0X9`bRV3{l+UJ>UYIHuFYfdnI9ZlygNWqOO#kgy;d(@#FTP#P0BU@wy2g&y zF`I!wn)k(pzj;<7p4mM?GehL=Pa(r1h4q;uk&qYKxUZjW0+RODz*xXf=nzI8GPN)n z<&?k@==!tpEBdPMS{${ww3_PkBvp#|=QQDSiu%7Au^bsJt}XPWL3m0k#3y)HBHlay zk}6rMoR9b|2%b({F}k zf!9Pa?sRz18ZJ>SIqi{7Sh>?CP?)!07mjvi!%asyEY(4y}6Qe8HRf|SDdJ)n1%0dk<`5@aBy_a!OOBRX@((Td z&<}oGYuryMwz2L7#McvO#&(WV1(Fuz@LDnUDnB3KWoF@eHT3O?_bHlEL$^{WgsL)O zv!GdO<(xM!<0H@_4h<*a$NdKoqq{QDu=@WvE^%$1HyA3t%%;lnCpQd9Wu`-cNmyiP z5F5=2FC`cLSD|D2IQo78YIBw2ph8Kg^e*y^4ds;<8DAN8aHwe=QxDlbq2CxJtISkc z<5n#~e`ep$zKf>(Z_b+X51DWYbZs{wmMZ7sz%D0rYHrYLdAY0!c(G(Jl9_d^WjT|B zr^q;srcuN!g%E-EMbaQ5A!-H>P%yAHbXlq{;?t3D_i8S31$>`U>+$@b2Kk65^bTy< z^7hFGIsQ@nrIBhiWM?2N;^Qa>SLW zbCKmszC<^_@GG5SonwfN79eWURpUMijLiRy;m zj(&-c06jZG7RmK9GEDp)fL;EJsq!6C34~K8F=h3#LC>@b&7C#(RO6k@gpOqQU#B9N z=7^oG~fk5yc?#IGLS)^#eSM;#Qyt1-enf 
z3vj*3M^_~+0q^lBGNgRC)j{9)gF(MQ%OQgkr&D=X2s5<_tC>yx1JesoU$J)ugI01X z^7kMpd0&|%m#UZcw@=fG^-K1g(Hpp(Nx@}w`%Sg2#aJ8#q zSB`x9xc(Q~*T@Hhit^une)SzX3KM7z_eL;vahAYo!+k1YWyoZ-hYU>yH&CaM@ZN4g z%WCmCki3cSR27odz}|SYa=<;nn@2#6J*Xpw(f`}9Rzf`XpxECJDDRJ4cUuS>a1eO3 zt++FD-BIPhW+9kgVp|5D6C~$?aP=#Z+kMT+4*n=y`rP2UXvC#v)3<`}5zs{L9gM)D zOJs4x8*qVcHpWpxI^W(;(+|VwGm`mp+@qHk6hvb<)#$b`FBsaoyVl5mb~jr`Xz9Z~ zzN*HUxyQHdx%J%$DJ_$(C^27K;W&xz*a`I)Au<{u-UM?|p2mpKE_Rw4zO5^2L15-- zw6+lZd131t^4Z$E*9|^Tzc5cF=*DAcOuJY@Q@8Wi)s6(VMfMk=q}!Yc*xszHU>Bk- zx}qwXDZu>eFc@WQ=Zgvgz#{5qne`F1hR?w0?t`1j`tP_)jJ+TS3Tuocbl;LS8iwV0(+EpP zdobU~r2c!JbV%QGiR&ZLg6&&ji%rAD0d*B2AMpP>dpst! zU)9(F?W{#l!DvGiF!ou!fk}lKXU#B}U^zCt890`Sms|DveTuFOoc=Bmy+0+l4$7Nw zt>$2$$vpnJK9x(9{A^Ox4gX|>6J5-EpDvNHNCCQ-!pxkCeGp&UN>#|$26nHXKa0SD z)$h33V7kstNkZBPR+vGR+p!M3>Og6u+yKox*77g*wJeM%1hyT1*KZiQEHTIhycjdB zmbjhT;SFDxmeiJo%ylzEY9kueq3izhdTs580Ui#Pnu1{750ia6i2W)3zs`U1f4%lm z!WqS*_f=Aa{rrG&Q-#8j2GKCLEWD=!ObRF#jDR@M7EG#LQX-tS!NpTY&}l6Dax1>I z34lL6)MNn?lMik&%{fXFN!a9{hFfH zwLlk>g5&7V_L`Q_M58-(haloUlzrmPW;C_P8x|20m#iXU5nPA>b!9k|@jHIEtod-8 zC9;LkLF;G3F2h=_5;sz*|wXCH+rQ$={$m0EKwd0?2dBJKg{=y^+YK!?%U&nX#O>9DMIN7rlYL8Y( zfRdp5S!rB(ksL}{BDj^AfC*cA>^a~H!EB!X?WG9+`pMXz`Af=Kcx9wJixnuT-_`7+sW**@Zsv$euga}v0;J7(O@?3PR{3q-!@6;1I*m8Y=w8K7s+Mg!4{f!^05Vv? 
zs*~`s8&S{}OmKXLd^cdW^;i5b_?8`;<;qltwwVBl>mLEiv4b5Ami<7^?f{SCrLtCy5*yb+Hw;V`<~!FvCxU_?wR^3SRbWpu zH|fcH)Ko37o7wvteubLu4zF)b2u-SD>iVuC(kB#V>+HKcj~F*2X42Bpui&`g-<!x=&~*|^_bZaw6rShdkA*zBn;T)Lp`!c!Q_iV$po4zK_k6{# zltoLgjG4xmntLvm&k-SlVZUft%ZA<;I0k_}PzHEJyA>EMa6V|_lZugs7u{0_HP~Or zzI#D;IsX9d#_w;l?)J7lP}BRj3QwenLTX+W#gl9CMFz?c{Laap=N8Py5kkeK+LgCV zusTX#0WH8&vH{l6jioB5T8ana%U1#JpvyU;MjbXq(XH}b4@wL2X5;aZVYCQ+!DTqA zd-R`9?IkVxuPX;lSpo#!nB@np=`ECU)rMzPz2J5^RNMR>=^i#D8R_*1AdH@#%0aXE zF8{9^K^-G0hnSXdm(|rJ(xx&0{Xg~1{2&~5dfhDP80o!YZY4*9uRav61zK^*_zKo| zx(`J9(u5g(e0E(i@Qk)_QiuGeI2k7*+1*=B?bVK8nuhL!c?tv6*UzpHA_M#Ep&9(u z{iw`;ioB(uJn(2SmRSP9YZX0nk+k{GHeDc+(hZN2xhe4jgwm5QRX<-BFZB3+dljMQ zYP(=o&zY8aXN`!}EQr=TZH}tR@Z6=FT3E96S;} zKe9)c>Zy+IoKAKba%zKB=NK;B!=&vq4DqmMu85cSLAXa{L&Iew^KKrT{au)+heq*j zwfs@P9oV*ha0-6jx-9=~dOvVk0*j7}n?++Azox{=K0i-1YP3naF#UCYTj!ae?~xGy zJhX3#LhFdYwXc9@`zS=QY_FC15fGuB@cC(om6hZ9a->R?SV=jT+9b7@{Q!l^Ir_@?@ATy5WMVBw`jtbHFV^VZwmb z2hi;Y%I`9qx0mt2Y-p0KM2z1rRo z)ojffzkjmY{i?YQ7F;GiZj59_jN1axod_&NQ`i3FcZvx*zky^BQ55{5jA^bwW5Y_! 
zo=u+lLZ76bZI&ln&KPc?_T!iP2qGSxWj|^!aq)di4wcQ}d0y~!g z3A^9?LE-=OwSn|iw_gF^3~W(e<3&G%zSxR1@|2 zwJH5br0^#q^UR{086Cy^XvA$GvF?pT8+1#ftrQG&LML8+5Agz>5y*+{L*WLk;g5jf zmxfLt?PD!+&#+%i*7#?A0t4&Xd^$(Gsq8g*#W2Q%RS`LHo?FIZp0$3i9rG5muk1Y> z|F9&NG1j(hK)vtScP}@y6LzgZpalH#m%z>!M3kn5cjl>%iqTlSM15?-KbEwKR*-kOt?IG_+0QXYYDwZGJh^^#Kt&dBI`gv3 z^t-AmrKqkJO`rSNQ{w>c4mxkz7LiI#Vm-IEM3|A9{1Jg1Z_f8ot93&Uvdz}dP3cgx zIxXe5OzW$$Y2<&KQVaLBD?Y5CJNUKfS5QH1!n{Y&q!P6;9SehDD5S6QBwvU&Gf%HM zvwS-CSQpNHeSB3R$yJrdXc+44Gv_{aU1S+k1B)%QkKXH}Wr!(MViUKqje~02M=k!R zmgP^i%zgUYs#R+r^W%Db9o zgmNu{AdN%C-0Z5RYRS!Tbv4qcTy9D$qp!D_~ zO0eMBm!_|9{L{~Nq0^!UrTbs#yg=69Jc9l?83u?#yP6e%#MUX_UwXYs`MitLm-Olh z$f@(v=-0@InYduFs4F^%pO4Ouv8Z`EB730u1zLEjO}KN*8p81)yQ~{PT{~A5{f8)j z_|OgR=%B2!uSUFvqm=lm@q_)3mYLXvOL}TpZ+o@@PRj^-2kGWlc1vD)zF=Y|k~HG# zNyv`>Zx#2;=Y!7Tfpreu6~EzI>W)AKs^w=d+njwchXi(~ig!he4!yCd2FotOFqo${lDA+&xXn3+4}5rs!;j+?)L+rPdc;56;b`XU^>@G3`CpF zN`@CO!f;e>#4qM-cjold?KQE*ueZ#E7xdl!a|Z?I$7)L1O7?!ET$ex-wn)wY0F|c$ z=3EZ^(`X-?U0nMSAU=azdmY9QtCX^;vS5BmOJy3h9q_EUUl7j>w)AO^NgDkkbn;nk zRuiLNlLThHaK-eDW&=s3Q6I^^)@1w9PsF)gc8*>}*eSR8XuZnHNnP9(V23&<4&%eU ze5zbC&Rcp4#KX+=(#n3*E8O0Sjy03>BC|&PXaBH9o_>LDnv~n|M-~n%oP-~S`Im&n zsh5o%IetP)S*_UgV&C%|X-wjeL>LU)RVnO4n}q9HGC=q$J6dye@rw=+H_n|hOG;n? 
zx?d%)9ClVb*Y{+YK5SEszUWAkL>j@|*)HWgRTMHVXZpdWOrSyU`!7bBM?eM-9{$7} z-&i6YVYH?G+k*+v`}U0MjH-qGumTM?V=Aq9iAJDxaBUGhct?NIBzFQo+3}xPZvQ1J zsL|S~HMxr6P^j(W9q@ofk|pl)XKAKclNcQoY1pB*Fv}crVyg4fL2bIOl?yT9eY3X` z85w-2)}B@xq*Txu`X$)t0Uv+Rgl zr)LfRfqyYmj4pWDHPf-4(vg<%!;ES!PQI30xC}U8FY{k4O!}UB)`mT4Alv0V3tSe& zjl_33R`dUT_e_Z|um{+S87b3H$oEuTdcdz7@Nf<~L*m<(WZCf83eAouDWEX|v8=HT z)P+`Ea=HNjay;+_Guq9+;)&<{xetBy zL3TYb#$tp!M5nQz_CR<}x?)3&LvL8&NxuN%D*PXck7k9g#XJcI)Z&J*(0NaAsFPqr zCh+V*9$d~pfPG<`h(%u%ZlN6u`&qTC)BbGE-J9Hj#>)eEIsPLc=TB+TNp{km_`B(3 zI!cP)I(GQQ^A+99@=YP#&>J6U;jtDU6@|)d4wJ`=ggsJcDHoNObL7-*l#8p(hp-0G z5QO+epVeeJ*0$XkQLssOnRb|3bAvXK&!V%Nn<21A_Skj>txb1mf2Nuj z$m*OvIO(u|i9k}mLv3Q7GW*~4^o973NM=6p*LIKFFmQ1WKRUL>Z5H^a%)tMXc_#m% zmDmiT%0*W%1TIviKQQKu*~whr*R5+;Gu_hn$=I5_w_S2FZ8_Q@Dq4>MD1) zng(6{gZZggB}NDF>8*~K$HqgU=CG~y1;%XlL-#?OlyUWJjwiJSHz(?eH*iZo+C)M$ z?B*Ld@zyf@*tSWdK+AB5r^htCL5BE;$ZP9f&xU)Cx{K51aG_(5CmS9A6xN!Sk%X`@ zkP!9~TMK$c^_NjI)Z)DSzOUM+QIa#6obc?YOGs%@;%ADe{&HfASbhDt}Mm*AAozp*gF1Z1ZZAZNwX%^!w6x2yK;a2^2@5ueyL zq)cs^pMrcoUCOR+=)t7FXumd9ca}tqzrPy4)%x|tps8Kcgsm&<*ogqC8=8Mi`yD3q zLwqQQ?4EJ=vOtpGQ8%7j{Cs?+xc)I zG`8;Q?_U(v-{Mj?NIJYaI0LnNXf@9ySxD>yrzbR8>Hp-1 z2eJs@!W z^1A#IUenz!E&BzN784UV*ZQnw{K;*&k}qC-`D(*ryo;k8ihW4s0#P#6ZAJlS@H;V0 zEG*DZ9HpA_j!k%YLAr%vT2FAwGYsupo0Mo=rGrk<3whn6Z$5x%{4N=|C)?GJ`f#Uh z-@v{xH#x+CsA~!+*f$Ow&r}+yadW3d&IH@ghS|AY0^zB3mI}_>NqsaXcU#y%__T(= zaYSpjX>jW=i;Ns~J=qX#!C<fU?jfw)YWVCZk^*W#~LjNqk?RacN zoFvcdgjWAT*8Q6Bpr$O!QJY&}>=7V!d^?A`9!};)9cEo$f?nNWBHQAqpJ(N+GTxRp zmeIMV_ce@9YlKkFOu{%om!!$7U>IY2@O2# zsm45pYm40M+V@R~!{O-ivZJ`9Z>)|7&ix$KN?IUw`k^i7!Pl=tE2Ft;&8d{W+QcjE zxkxPQb-5n~q7YEtm(|g> zOBR`zm&uW*_s%W7%Pm4{Y^xEZ-KT2CAf4hEkbaDIag3OiknR#!<05sj>@^)!%c$f> z?%2R*bEuX@ETy}6d)ysxI_-=(e7rFVM09A#6F=72V_xmJ)o@u@boE7B`cMMB0ETih z>Ha};Pzbf}Qa`rP3i|fV7d6urwAluF60T-yh|Z$==Yu{L>=Kt$@b7<s0CO$WGk9~S4n8u5wn42_a? 
zS8Osx$I@gnmW^1QN^jXgA!W`f;!TijBOH5+PL$wsyvv@HmyiER=4Rj}>+{N4RYy*u z?uViZ;~;K6ULPJwiDtTSc3V)}A3IeoAn+u-jMc(o8ejR3}t16|9|oQ@y*L=0#ld zLdhWrz^gQforAenR?08n`OT!$(SA9f0-wmovKdHvC{8hbr6fM}oSf6lq-KyjqPXBk zE*uUo_lZwK0~@}H%FMNf^>zi2PnlABukV0@{iB%I@cuIJstsw>%6_?7z*fV=?C<<% zPOSBP*j%#s@>7vgE!rZUh8%>9D3HskeoJrC>fcq$8TGqZdzTx#-s1^@X#}%PHdM`o zdBd-1`lR}-^ZuuH9hDG=W(jUJ$ExXH;zTFjAd_n2^aiy{imNrIg~MNQ^SnL;sSz>$ zu{sakC*zS6)1XyxB(q+u*>k7hbjNj9i5YzG0rP$&n=&odz!QtHIogg}Uu|7)Ub)t= zJZ_r{{uQplz&4)!JxF2cc3D%aenuGN(5yZux1Z;w?{^qa#Kcez^2;?fDyPkjN6I6Q zzaRZ)aUW-fbhR{mr&XcR6IdK^dlih*YnX4CD>hp8emFjP^Rhx3$l5)BczCF*{6UF) zDhR6ShU12{VjY_xZE~#29Nlqk>FlAf)i4fHo;zgY;6}L`i&Zsf#Gcl188ZK}|^l*C1i-`qzT9Ay8|3 zgus95xTIH$Yu@r$Pp}e?C-73x>zxYO+j*{1-Th}LQrR= zTo*;}p3gS_93nz8og4G2L&Y-qDOtN?#Gr-wZu0Rr!i>2zoadD(v@4#)`Wb=whn3OX zfZL53fD}K|WFGd{`t&A3j{7D^=WOMHIgV>K4s_K6M3)(1#rUBq+(=#>_OnKGUgu1? z=+6p8$ul?di|XzS@w#;Sowl=!>lP<=$4tBLA3q>>&_cJWw1UFhHL|@87kv|ypALLR z7mMGWoPEm0weO*4<>X18b-@VCl`hzjlFaMUhn(RGUYjyiL&-J&riYr|0ffJJP{O)u z0`ao&T@lK=j3o@$pj#mPmD6i635uMk08w(AFqK)9ou`rb)pZE;WaR<4aZkSKvg5g! 
zsqkWDQt4sxB+OH1r0Vp0>p)B7q0VNbjuZddr&qy^lmb;$ED13$+qjQPe^1B@7f}+R zhy&JDb)qmIilAV)eqDr~l)eP3m~dDML(rcB8+ZW+>GX*TqBg{h2&w76UWh2kY@aHx zy#ZQ7x1Cy;sRq_R8Hu&<^%2K1E(&Wf`9Mm%6tX5N|i?;vp9(3w|+Z{o?Qd`j@7oa<&q%J;6{^Rcm6Ek^y1 z%VoxW`Xr469Ye=R6?9xO=VJ($wfM|I&SXXe6QueIOegU%8r`leb!j+FwWUsNb20vt z{ngJV#kZ%|NKd-^dmGMS?0T8EP$Za=j(5%|@`&N>((u~CWdud{Qf6qmXfG~RBZzl| zGiU$MI63oEcL&aDg^ueWLRt&Yim+6!w$;!&V&+eF-s2xGA8L3*)lSdLA9@yqqza7eXDiJlFhWrQ`nX;eabj6_~#i5@4Ox72m zNu3J^jg{&yAPc>#cT7dmZ4N#vo>$Z}DW0h})iv@h4u6NxVZX!mE8tz%h-lpIRPve`L3EG}GFfV%K!P|4-m-Z!XY zm$s@_=!WqC%e*-C{$_UUtxn zN%^1?cw3=vcPuw{8pBg(HzNv$?DV?++eqC34z-$SK$4f&3b2VuFR4S}xr-wQZr8A; zJJK5M{5%HTIk|G5OIY-QdPGzgIyZ?oe#9LIO%_Q37`T+B88;@5EritZ-`!8CNfODG zPB5>*pnwssnWBy$-}PeR-|*v=eThl9r&jE#-{qaLw`v zm_uppO%x_2@7$>c{JSDPQ7amMIv&w$I{TtJbRneAMQrk&yX<_C`1+RA^=L-4Swl+a z(+2(w=sEQw3L<|Qp`hA4>y+Et9yQ1soGC^1 z?TR>(mSCWaYAhTNXWnn>R*k==Z0*qNi9th;4XXI*1`T!h{hZ@e|IT)bCsU^ml!^aj zyeLqxOSDQ`G8ZT?-oIrc=SO%}IDw!=7P*itw`t(V&YA(;1H>Q%#snK;SO}TTG7-6t z@+%`xla78SfTH-W5uCZNb2$rTw^R$WoB?~x%08#Y>QaR#<~USJ6WW%41XOY9Pv!fc z(fGwJCVR2N#9>wCuyUs3qMy!cCVXp81TB$s zT!ffa)caW>UA63~Le%LW1+~x{*9d0>RRQfo=a&GglF1(vg*^q%NZ%JD>G!iO!H1zE z>38M7yd>#uG75)g%`6XW9Mlgj;X&?8Dt+B#!NF1Xhd5OX1x?$3CZ{g3Y-td~`G9Yxf^ z#HsB6#mb^(^tUg>yxgsXrBz&VuXps6pYxLGZOSg&Xwp>-@r-w9C=q|=O_{l(0_vy` zbtS;*!Lxr%qVI7!kFQG@$Dfw|6*ujvBA9DEz%g$~rVk7ieytUEf$O7Hw>R&ud;L}O zWupt#5(fI3?R!pq^zU30y&uZh9fFb4OBzoBUm~85xO|{;u-fy@(VPf*iA?$QaN8t} zI?JY4>SK5WXgG_T+~_BLI(ml&W9vkLYQ{g-`jf5&YVEEgx6HS=6fOqD#18zmdjota z!g*PFD5yKQu?QeRI>e7MPvG;_3SAp3N{xVU;WMqcH<6eGkUkV3Xy*=%O{ z4G|9ct@FtqjW!aNu_z70APRR)zKQHyy?>@fC+q`o$y^9WZ+t)waTvz217^Td?-1E`&BB*`) z^gCQdd^2YLQwE{?c8bulNM`+~RzxC!^JXob?)6K4!YsFl>i4Mk`+7bx5KFuBk)N+u zoY$dfZogLIL2O7?{P4`?KK+_xDi}?t(((_sce~$f9g@9^a{TdW#m66=2|$-{%xGsr zn*8oAx}_R&UF<}ln_T+{5T*=yk36y$@j(}+ZOwI9JeGG1Q+Z+LDYmlN)-7^$96d<`uD@+>)UIx zZu~<%{j#X0=^2?-g-=Jrvyj&yc~}uKoe!zag~DJmxzT-p^W2$JLt+s2txB1$27h1x zcap!ToJmW7_$+n1;Msr&bX@?3{!dvC~Sj!B! 
zc%LJ=1y@fa*t5$MIq<#+yUXFX)3YyE3LJJV@F+#C#ZhwCh9?`x_OGhI1D#BCBV<&3 zJq1(nQ$CzV&W?FkJ6zUpT1O7XKFk;{rQU?@EF|5f3KT^Lsr|mQq()sWnqm13NJzGd zT1AOf?Q3nRE`;EpY~THJI?0rutb>TjkS4vNUha3fesfo@xG`KLPZ8@_u*2b~GEPLT zdjHdaWWH!S&f+rbU_j;_Z_y6etuYYpqpIp@9iYg0yGj8Z7)7QtjLzD6Ku+^_k5nHCZQsoHcx&~*z5w~P^ibNg;z43*&HE6xQnFw%Olx%sQ8-B zz%5DvNpUzvKtFbIOoor6Blj@HJ8v+Mry^tI)$1}fo#J7uWXpDnQDEB7?kWQh(mD9+ zTQr*;@-Y5T>~;?-Utc{Yv~pnvCdmC^XLJw^fUR|OHTqiTpd+t#@x}N7E*5|yfOvI| zxx7|Gr~*=ezVR*0-8=9}nGnV#oT1s)Qk*m`Zg%#5?EbNd@Dm|#e6@Er1jh>Av7whH zWyp;)h>6O%=EA~M;@u^IHN{0?Ev zvKzS#v7Ury<46f*u-=VYk-ud`)h-_ErWIrBi?2+WZR zir7I$n5vfWD2{enPEL*cm?Fg=kuM#yJH~x?a+{N4zK7KT&&>;bQ>m%7r&uZ(ImAZ& zz&gUx$72CZ+jUY^)N_^MZa?M%K5%=_#LJgh?xVN`m&Nl8!l?2)(?E-U8sO$m^g8&AqV(d7~c|0?E}pO+zbyzWNz zK^>S$jRH$iSb5m&Y(WanfsN`1E1AI-yLPy0krYC?Dt7NuHR_HdsA6#;&ZoL(K>?h+ z5ly~B(K9*K@D+JcFhErcH$PeF36}G#Iz89n;K%Q?fgeRb6d! zSov~;IK&KLJP&!Rs+g?JygNW5eq{5f5>D!t4)oPCc&(pKyg>zS4w_T2{U)Mo2NuEr zo|=lgFeG8gLkcMwz1(f$CRRO?5SC10Pe>+`dzU%qMyPqG$ClzzXxFF)>c&FmUg+3xGORqkvT3yEm> zO!I0t>6gU6eO)`4ZgsftuR6;5`Pyo#V|qe0xv5pjAXRc(msGlTo1Y-qXEIVDU5QY4 zf-B-n@!EA2p;DQuXwG`y*P@A7zte7-G6-P-)o;4cyk@{DQ{X6-7pJL=F zLkWi~-k)wR2qPOu$x|hGV|7E1&H1N*-^baK{i-|FgPX3?MP2?;TY%?R zJlQ3pW_wpMZP=`Ke4j2GQR{Fl2Hp&=w6pdpOmNwRm0*KHcec*uhp`kY#5&43a?|si z_X^cOutJW&XOmOvOU$u#_@E5#4_1B6;X`|3HfuP+yN9|Va2B7s%3mJx$cJ;pUlhoy z#;MIPxDD%4GTTR@QYy{70e!>BX&Hcc9uxiA^K|x^w6zU=xv%X$VV+uhvTAG5@KA6U z&F7M&-rqZNnlRdEvc$CH*?#%oP}UzibkGF{9rA)|ruLMW*v|oq`*F$sKB{0}Tfje* zamVsT&_q1&1nUw;>A;-gJR-BAb&Co}`+o>XYR@Vwx-R*8Aq`hYYRc#_PVs6kiPxUq zwQXwOC|!IIVjMe%P*2`#`6wAvDDbpc3@ux4ou<#Y4A0cIhWgNGCS2TU2$7a3EFo!N za?7mDa(fX9=e3}ezb|2V_7YfmJg&eP`!J*=su4^LMVbOKnj$BDk4Gg%*7Quo-4t;d z!$^JBf0XS)UVNY?1(5Zx*@*$-cVdLsS1zXiQY$bOn61D7e47bC%n>K3jW77WB~aUl zK>Zj5RHXpz)(S9PJjef`D8q!xRN%|Zgvuoi2q;kK|3AaO{*M$^B5j@IzS67AJW-}x z?h&RQAp@)=e^Ca1MjIu8s68otX*21xuw2^{dSI4{)duhBY!@nMDEeYJeKMkHL^3; zK1;ND`f)LS|I2U0f?YoO%aED-nUKoB5(B%vLdkz9tv0+_HX;tH5>&r^NoUnJHkae| z%H|BbjvWTzy!^~yJd9IvDzEO)r_Yh|sqjldMZ<21-P99@iS*-v5q(8XmV;|+`sP{* 
zC+&0bdd7QA%CH?f6xdd;^CfNcp#gfapm#$a@(o$$w|b;S!zngQ`16n#7t&0Fv36)5 zeq>Z6Y_MVu&3(G|hhQ~%H8sPL=hIBIw2HxgnT5mO4R9~LC7(7`eKjH|cR4r00{07_ z#(O0wnT;JJ6v7Q3B%hd(^zW@iysP~sge@}CT!=50og~;aRzdEP$PIxLqpuviRk$LO zRJ#{t&YzX;?XxT(+wuIXCPHP29gwj(%Am}B{gW^*@hZtRE}Dh|3Sxu&=0T43WofR5 zGQu<@+K1J`qA#tQnJfZ&dU+1*XOBgXuwWPDku&OlcESJj1s;&`E2`_#4Ts(eZ_2nm za8j&qCa6A?oHd4JB%TXCwIP;#=DtR2%I_tWn@Pji?mLmLE3hrlJ zT#D(US*mvONW>BH_HJG7xQ`b!($~epov(fgp2U}pO&fAWN*O=x6;0sseB8a2r^<8d z$)HeVbkchjd9Ek7OpnfA^4oCrD79+f-B!Y#ldm^v^|!e?{VVzBxI-G5_)?TBpO%=6 zZ#8&?{kHWu3Pjbdhi%e%&E)Ye|DiCHnwVDOWdCs=K2B)67$aU>oxCEr zjfUFa9$E=H+`cTCirSajn@W58F^TDe3Xa&Ju(vB+F_;`+gfybVK5n#T+h53zTTWv+|2DReii=cKe9re|!< zl+26Bty!f~0zw^Fu!0p+gOZIR2jZPkTdf{OT3KFs1J&x!}lb$bnc z>;PF+B}Gng;-B=|yWf$AICGo#nh_I`eFM+LKeDyF994kv_R+eogQBpmMmHBz9}lT| z2(1OMSOy&~#Q&kNuNQ$*)33}u^O>pWKN=;#$o_h{9j;XJy{f(;&L6*c^VEdHvV1&? zoh|(}M?_ZoM%1Ou>so~kNB47VmDZRSl@BF!ZLQ)Y_hhuKfKZtZE}-bGUXm2KvGtmlf zW??cxRxLORztXY{KY}nVdIblW55SX$;gjP;^f7&7^zFp<_2wu~8)C&m8>1_Smu2yt zr{r`^9b^>_&^+Tf42Ns*Y-)s0!*{sNJ*BTO4j)eER4`ng77anYq-7yQD0d%; zc^lv)xu+tYsjKR|qxTc9)hG?iYjdW0yMY>xeHI_pa8o`)PA?ppDKA5+KLbZ!Z=D9; zY>_-idiDzA!b&Eop=F%7S`Hl4`?~KqzD-!fXBq#v)70ocFF77_liEG7xxN#j3LGmK z8hZFCFns>hcdM#pXM%|*_CfnTUgwNs{~hhAz#Y=H^v%;9)v=Z+pIt_+`5+;Iq3M=( z;-2>A-{+}=PTEqqp4`3%PiMoqrf@#_&{MmAC@ZFqhgLjs5%wp&+EN^~anWU>7iIzw%x~w_*op)u;9}5uS2bh55fz=OBw4z^Q*plMWwOHh|Q|`r>SGu z+Kf0VD+`_|duzqn%)By($yFZX#V25`|7~eNo7L64kfCp$f5p5ECowZ_AMuJ34{5N| zy({nI&Zk3*1E1DR^7Xx z+f&F;{Jm`tz6p7L(*LOVWRtXLNUtp@-;-%9RboAzjR3)!SgRqo%lNUAOB6!6&&A4l6Zz>FRwE56zVo zK~rM6DViU9=q%tb1m|vLN{n;o0|LVlKeV-8!*>LGy)u5CKssy7^{d~AWU%7@LwV2h z4om&X*bq60x2%qpMs%*?o=lE#_Xs+@w#)zK)x@9z?~Pv*(0I^lN_idbtd`skm&zV2GT5jOtUn#kRPX+0e@2gwAJXr;Z`Mq&Q3C_OYVwc;3{`+#CA#A|n_Vy7{oOJPt!`W}#NmBut@b!qZr&SHRHeeVw&k2x9hrK%%IRb;|F6KazZuxIkie>fx( zxEj?&EF{TX9EbzsCsl&d6X>Jc<`@@@1EBscnSr?QXWibf$zIFLFr!ie`|$+H`Oo>) zf`Qs@BPQrs#tR1Kb~O@S377DALUJRoe39 zIg&Ecw^5IfGzqmrUc~ypa2D2tx)vYc&Ckmi%HtXih^f?i*(@yVr}_ws#qX>v?$jqh 
z2IWO&Er{aNPp(C6C-GsE*NPjESOv}4wb*E$r8X7Y3wZ`2>7PPG`%~_wDj9np^S`#W z#n}DC_{*;NqM{mZBx;}PkVbJ~xUqC6#D}>gw}F7-ZcI-CuKaGeV#fMy;-^~K7Ojbz zZFq#C;p?Gp%*iJBHx_o?eacTeeJ^S&1}jdM+hGnVi&t?HIv*jPj?b7* z;SG<@k43#dEsQ5i|A1?ET>DA}nQ~`qHK#ZTN=qOIQ#WM@o zwHh1ZH5$-((?8?>Eibk1&a`Pk!Cn5;eNQ1f%%+)n^wbRH zk5?RV7xG;Oas`Kput!NAqh-mQMe;iL=|^I5e0u&|?{%tl2;S4Sa={s2Y>7c>4f{6# zcC^#y)r;}bRmf24ZhTe4q6~NXge*^?0*9;3&!m&)P=`rA{v>;QSK_a@8&o4jc8C^- zeO2FpPQ^R%YpiZD73;O=HnrV0jR#@ z8vi9D8cGnwh;P%ZmJJcRb_V6nM~tdyWJTG5RziAHh9*~$V+39n?xQbn&(79&D}qwYx=dh?kKVphw9tNi%3pD*tCHR&oIrWX>UCGj-2CR~cu#rScAAyKdRU4gG8?DIuRV$IBW7#EZ^}(Xiu0nKSS>RX zTo7%#U9AmIi?1L4`WZSSTI5~1u)2oT<74A5OcyUoMD~Br-BAY=-ZdPFY@KC5FXq@- z)8j2y)Pt=4G)&d871Nj%{mtD9z%|M?S?C67!AZ&e2Nj=az4Bz z4d51SKWZA)RBC>Kti4Y55o2*n)lCMawS@Ia5x|z=^vU4N(Ut6>s4bth>rscZ($Db3 z?Y@AwTc)0`mb|8I(&UXD1JgOy=I&%Gk29c)DsNTNpe^|Y8OC}{15gv2kCedmnQ zBDT`~2u(nkS$PI|Ay{~Qd~dt=Y3M3r5NP%kvJQYB&W+`l#ZYcoiol2EQMf|ox1aPA z4(U<^C;bl%qg3k3Q6oBP^7srNU>!cbo!+D?l@kl!-|Hou>qI?w6C@`$4LsHNC@~NQ ztrSAh=d%}n87R`p2;s|!(9%0;yNv!r(Yu?n`92Zwlix&tZTGgaof7KFJ}pJ0geU`T zJzqlpW}Xm?7XLUOzxwC8Jg77*An8Q)U^1fALU$Ung%2LX%N(gLM?chFeQ1b4O4V_! zuNt%IyzU?&-(9JKV$bADoc;l33LcJ-Jijh>W9Ku{mz|ov*m)upxH+0t5X1zeR!91! 
z<`sI4UE1&uvfKFZ-ZZ&O*4~5d{*Xfpit*}u4Iz(kM&zcm?yFG9wV6TNC(w4ZO^wQU zhy>|g?Rby!KGbgKfwGu9^p7o)aN1gaXQ8CHk67rMuTLRwsu)`y(3$6rIV2toEqw8F zB_=r$V-_Pd?NML&UQBK$77-JD75l*q3C-mGnf~Rs+_uW6BO4j%#NG>k9m%bzjfN+6 zO(5GR?xmJQZ3jh#$@T_l!*a_&y_B6XHcW= zfs8YWpv%hlZr%6Y;?%t+{Z0X0tZWGB@&^}^>;Awb-K{v^B`5pa{50hMC z7-k3{Pi7OQ%)zgye*C?6mAuT&i!UTCd!gsrx1YeK0*L*Hn(H^-Z%53)%|f1o=^7-t zB27)y!Ngn6x3e!_PY$eFM!CM8ka7-EUR6I{Xnf_X`!Y95OoUQr)9=x;QuN+Wy#Pob;!Y2 z`?k;`Rzp81HuQDJ0fx#%nVu2*Ra%de78q?e5OGCO{0r+$TlQFd6ePovz*6ig%Aifk zPaX1_$J|x?{Y2|%p+aS#|2$#jHUDLKP54fD+<{L`CaYD#GmDJMw+l6f_8j!LSeneh z>dUO=;7tN{Geq^tX1?|1JbRnJ-xya~agxvdk6`4t*-*7~Sd-knKv=M@d;Y2OI7n6C zKBq^>DxM1ytbcr9a?PFJZ-G!B!|H8DDHNHO@|_XQ_4|i%%x5#wl^)Y}Of?&I73)>c zNOFNeEx9e$ztXZ&5qk@cQjiI8gzugybkEo+@{;yzomkEKb*2gi@J3oXLmhoz1Nn;a$M zxV3j_%hRTgYXSLkZF#INbO#|SR8iOGw*2xHP=o5~ zYnn$Q@&J4b0POrO)|M_j}fl$K#Wqch0>_OlLnKH}~|H)SgO!NM8+h|U%`-bR^ZZijgCc)pqn)2vy_Nn(@K3NFMj5IP%(c&(A|G%YS^qTt z#wR|W$7fQ1GDv@pCDwO(#WyeelDTQ{vxi6X4DZu`aO;!ATIA$$>7^ce-FTEh^y;d> zii1Jx%9X*^MI}Yi`nzKI7l=`*SfKpyq@D6u;OwDhX=z@gV@x;rX?%4lqq8NsM^cw@ zU@AS2QK+ekRnY~%q&fho(b%T0Ob=|lMmlK0?uF*R$Yn{R^yY96ztje-OsCm%oP@3l z0k_b*#|k(F;jzlm)5QjgbKSl_R@+)Zklz*1j1>5d^ANe}Jd5@B({nw&on=s~k zz}Y*Z5i;>uJ+VG+&Gamjd#V09dCRh`;R>lSzMzdjdnWP$@7<{aL>+J#Ji?agO*O1p z4mTddY-_f-N~}ejJR1FlocFaXlBEQ`dFI+l9~0**dJ^Ehs3AEbY5Q}kKG>bQ$0{XP!Uh5^TDxzCZ)h>LqnBo!l0Iesrdx1Yyr8c8QFE&+ zuaW{YbR$oogwnp(&u+z^vXCVYfz}gWAueH`mC%A0ly20b^s(Njo^31Cr7ElsM zLWB2Xn{w>_A#^%%s2z!2gyAs`;zz6K##tqT^eBW>VhVP|w#4(xt?nJaIr^!0)dPAi zS$K;+HN=f>sxbShR5|VaB%8*Eg1A-QAUDHKxSjFC=M|gB7?}_GX)g*v&&f0zV}!&z zYuAHmH9S9Jre{+IF5S;&5uX>+?wkpRs@VgaZwHd5bUQ3AQiu!WXVIHnxVsW7b`v=& z@P9c2Ga8<@59rBcR#`#Tdgj3=+1+vE$W&#$#zn|ekKiWD%Bkl5pM??vFdDAPbanWL zUS+%?qPr2VH90t29-4eSQO3h4&(nySFL2XIqZKKLuK?hc8 z8&m)CuuUZ5zaD-VHS;CH<%H>uM-94hzmlm;*7MhX|D4X0cd^OB6{p(MLlkx7ph;|? 
zIL=f5ve$-Nu`MBZZ@GW}*y#6;XKv*cL4uv>@p*I0I?aT4ik4fI_$Th*tpJIxSwiJa zl{oqExOH-D@*f$(nVq%t-&5Kl$uJBniXvLNrttg;o&X;G%QHfg8nFi+{7n^$X1jIT zuR=%al<8A5Ue-N~$-pWN7r~Djye0An054noeXhC4{cZdOP+o`^0I65+e}P4eGLwHy zG+HK4U0Vy#CJHdg=KE%#Kjfel*Gq6wmavfh7IJvSd!qSX5;OGGkGYwIBx(=|NnK!{ zcxxuIF^?{5>c{Ucv5UKVEjHHaom~~RO`>rHC6>hzQ$cj;k0pO_(>GUsvaRem&PRTW zz~)ko^UmDWq||P%Gg%B25Tp?EEX;6ha+h-~NBFT;35K7@uc|%>E_5B2#I1@bipYt3 z;t1{5pK6k{1&0VB@uFvI)!v1QTO2Xl``H3Nqz2TD5vc$vDh;gtRJsV7V6oC`%C0$KS;$DN;KGQnHFW>?(6RC4b${uO0)M zjgb%tKUQ&KtYk&WQzt9F>t{)i)jjWR3Q8UjSfS|A%ox`r5tL}(Y_Vf>j8tN0|0Mu- zDAMMfSMIM2-TUBsvCs@RO7w8hpoju<&!6k=Zh%Y;Ak2U`bq_in>6V1l zf@d>?p0$u^Ow?RXUi0;9y_cuf_dgQsM--525_q9ZuWqJ4fpn2dt#>js5`2OW|sK^dPRlIhNY{$ zwO!18F$J&ZtvSY23mc493qE6H*s|)Y6cB~R1@6Uuis9TgQJZMShb6$uY__-$*QJ)O z{Xi-zB@ORhC)QUnT?RSRq;*?SQGUS>CHrETzzMqF3AShfrVD_lyU`~99LWJcL?gn@ElhgN}stK)G4T~orJ=@XaQf2 z;gnf}&RIhlRxQIpm-?^jzgzIvH8nOPn2fQvnpuAztn91u>7O?kLaTw!W|uL&B^~=i z9z12*`K{*}#N~?cDz-|Tzt*~HJi2-+6#7$(c3fd7!2b-omd?DUO+{x59n4z3~ zECwTnzz58$A9zJ5kX{XVuXDp7+u2SM_rTxMo`HwJ=TqTllNcAVw&zbQFHPbI??QjS zGr^#NGt?{-O)ZLkb}WI!7Cl>PU){SPK>Sq@q>6melN}@@mpM`!6GA}(DJ*yR8`-#c ztJcn@+SAI(701^ZVp_lEs|3`-)Xoul`jq}ph;olflk=KIv;8`H;z!p0A@h@5Zz21t zc($k)p9Gfg16WKaiuEaRlrfy=sC3UbVaJy|9G>7l%>$mr*+Xr3hw6v57v#q&*X6U{ z@1i&L1j5z0wfQIeh3IiN+630MXE8lL{Df1^)aA+F_gzgY%k3m6LsSPjQ(w|j9a=n1 z$w|x`mTJ3W;ZJ?wc)xX#b%n@XC2GSVHzA zD04jm6+ z<(jmixtACAODRlb2LPgXiC$s%0DiM@HX;D_2n><4 zyDFCc2haKM0LOo7SZ%fsSYJQ@?(Dx$hzm9VBbxUAN_sosT;L`i1oe6UyBk2<5qS}I zCc^@xTK{?x0_4H}--CerxiN#K1G@e*wB=AHXtN2}39<`fePy0=dxPbn`|KVR-daNt zLvx|4>z3wPIJ~9v@j{$i14Ht__XSA0^z=3FuR?Cxuo<~NCl%EgNFIoKlpde!txMqm zSU2~(o`(XNn|jgGTgJz~f$_Jgi$R}E--OTbRYhFMdB!43XHqMmhS-kOCpZR0-TYtt zHlP&qAw0LKSNrQ(Uv5iexXWla6Mu&v$f?m#S%zKF?zlgCfnuectBC(1$~qDO8ck^~ z|G<(%ef+r4lQ*c`g*|~SF7MAVxe;&Z8So>dd823=3?DP|^3m+(drZ@5l*;6RuWK@9 z4n%yut&@4EJF7E;lnHBmH>*zxx0;D0wyzRcb%2Sne7`KrF}1hzirm|+bYpb-@&jyhV#7=q-FMW4No)4 z*_iK6m+Fs^eZa!4R3-XxDl-%xG*Zw}x?qQZ2Ar zA0M}>qWejdNMR8(PW>j=GzRyoDCyl=0Bzrq_SJvi_~+S@b6YsNj+m_EWtlECK}9A6 
z$JssB=|O$|?dxs4)(gsVZ8dqm2R!0M;sbf4<-G>9`og07XmWUgI9jFHtytuJZRY^Y zndNEGV}usa)75xCv4%S(+{|6=PX@Eo)B=Iuv50pI4Dkb@5{KGOyW_G~Fzh&kg88G# zzFPGQvvXfy0IiWwlc|>JQrdhe(MbT2Jf5k0Ke<*d?db@fq<#LEuvA#tFof`ockhth zd@j#8ALS2<6kfmRJ3th2toOD1*Ll&2Iq`zvvoK@t#1|SlDv~$AH>=RpC?bs~O?lTO z)6z1U=Y_?>`f0a#!%|l_AnaIcq|x|@>mF4#YADK1TYt~;85vMwA#NOj}W>x3E31&Ckma{Vm%h zQ^*2$&X(l3EDZLo$~%8+-8o?QUh`+&JeYhx^p}VVg|f%SPZUjYo%x;U-&|u+SLew{ z`Wz{C)#=4E@zCd8SJooab0d1+8H|{3=+eT_uyjg4E6_-*mE;mp{I%|{*Z3-fGV&7P zm46XPi~kYL5m!+6_6Q*ai;Sy_HQ=0jAa7!f!aug2Cr71$`ow5w@bCJ!C0!K5orBaW zn(XMX17ksn`yco>(@{_#RtgR^%2GLnW`~R^H~6XV`mg$S8o(YfQ@Zqf%6uveQjR{w zv`Wft8C(RwV2d`)_Pycb(m(A3(d}`T!2eZ3pwv4QyVLVl z{s&+UBZF0V<8R!W_$w!36i>qRS#DnvLDpRS*(B@d|xG^87&N&aM`!E8-C@Nqrh?|gVmfiwGi$q z_p?97ZZP}pY=p}_$jF+T0UI@88=Aak`Dh&${~j``J_sn7ltnJ{n|cSwbZjxM&`zSKmg9`31h`qr9a#UUk0oHq+6|Bq{u(t$iTn82vxj-BTHo|Z3p(t1*gVH#Pl@ZSNd4+b2b19`Dx<}VdXrjcEd4TlG; z<_xBk&+XXHcLm$8#fsilhHnrGv8bOi(ZZ_QR%JitK5sy&KV0;%HLO1AO z##TnU+i~i>Ee=?bgb~YX&ttORS`sF|2by+;m43NKnt_e|qt?E`FfyF=r&ns3svo$A zX_++_$RC&6zF1V$MtUCSnqs$_RmthJ8vd?A-4jN=on~gVdP7$tPbC@|qb$O?Yq-m+ z8LGs>3wwasTI&vrM%VmqRkvNzqmR~4zanlg{ZvR(w#trW<|H`hmt1D_AouB|xxo)~ zLRlFMq4%3*l7%N%PDk(kG3b-5+Aqm14~NagbwsLL1;Zv~ZHThixX}YXGLRop%r-&p zZ%Y+@CCNJ|in%*TQ5ey-5pIVgFYmRW43%KY9KLlY1|fq2Q6OwoYPS1b{4i$l7wa?) 
zJ)xITL0&jl2;Cr-{9k#|T9t?YP!?cluTnm(EAV8YYWSbLJk)=J-YM?#pI731a^l)f z-=z46Q1(*WTN`GW{}aQ?gdfdwO745)jV)69 z&agkFvT`_fwOW(zfImd~x88y4Ly0np!Y10)>qSaHb*W>gl=vZdUPgSM`2D>_aZlD$ zB1ixS2SH{ufF?M420oFMfJ(y_mLyq|gPioT*YHukKcNclmP#`QdIZ75qVzvovM6u0 zeP3meG;=V>-6_BpWQ!Yd2|#h2>^WG>5wJZ`~qb^yibScX=Vu+Ei^Ch-ejpfbU7SO zG=idOKt>?FbA<~s)(3$;z%t_%Y38w%Jcxb@CwY|hqD|l2wm=k$v?;{EQ_;8NeP=@m zeR}cz_fAvva9~Z-@?V~$brkEjMX&Zl1R2jxLA_MlIt2evDuscz_jT)%eejr9!4UH@ zN&P#@6_R9`5?DO4Ok-2Lx1|WL4vpeNcC+}RV||z`oqf|R%aUp84REbyo1~$#8Ith8 z>kec$d`_+eO81}B@c|m??>?nh#cUp;mNP|zJHnZCvQ90V&H=2wV%=Zs;SKsVZvFLF z9vu9Ze`yIv4$ASqC8_CJK2+=Xi}H$7yENr>*sJ0k;<_TA zRf3U-RwWg?I%>S+GC(CL5=k)b+AL$uXc|pdly7KrWpjvdKsg3v&EG9CrnE|>-1#D~ z#iYJ;fgM}0^bByj0(Id1$apa3?%O(vWvJkjw3Vod+*;UQ#t=OdiN*n^7v_V^=XPbQ zNKcj_vGH~whIY;;Uun>@t_h)II~9Es^AF|zpyo(?GGo>H^{w!9Q{j-K3g*K#AZv9o zPR(*bz2JB8i2HnBYDp`N9~hlTFL5!vDe?BBE;YLxb7vkYu^N-yd6|Jvv;b+7U$rV3yN7|rAi*}570)EzP7zSQpb|0l$X7+y z;B;{TB%c8T0H16E(32;Q^U*wV2mrovF0i!&f-4hHDSCfD^kfLy#Hg^&A^J0rvau&-R7Rfixi4ws|JM|=3l{ia)=OEZyc2_(YRMnaS z@#c)Bx_PhfS98n8`yjI}Ixo@GM)zbz*Z509ge?*|6$A|KiX>`J2fANDs_}scY=zCO zcuuhqnT}4tz`+zv^|O%DH;`PW1&YCVKlQ@W{BGWEc9Mj*3k|C&mt(Vui8v(5FPJ_ziUh(a0xa~+C4e|T0X(Hn1Gcug56 zX`e-{l(L38D6p;+v!k+V&%QuBI((Zk{Z=6i7uq0s$ap4DaY?1{qfC>JC6ftx5T!CT z)j(%QE@E3vjorjx2Lq6<)S`ZjEuZMyUsZxhal!&qv&6Gu=a%skyi{C3?2Aa*e!6bY z=}q*y{zWN)`(y{a!OMGYUA^HtS{lGXXJ3r6oS=RSt(Jen@cKy%=hqzCMS6b$->Fm3 z>ImpJG$ptiEUNkeKp!GA0H36yy66zuOmaZ{$t)?*0p>8RkWlIoO^jG|5#@CXpAyO%2T|1sv)l; zN5OeUmsCJ^x+NmhvPK;&M9kGS?w4P~zLluxk^kY7c{=*{kc}>>l?02b@9XwU-uBrp$3j9ud42`?l>P@=h(Xv`D!W#8%`Xs=SX>8XMrqG(qkj= zZZ&N1|cTaK`&WMdd{ed@lbLmKueIw{Fv+}sR#FwsdO)B2oudtCd z=h5d=(^{EqL|-U9`wm1v_)1AAdPlbkY%G`EnIc8ns&8+cZ{)3iz2a@#XrF`{vAJ1RT%Tk4Eja{K71=u-oSZxb~;g}!pvTc z3Le+1r@TrLK_T4fH;tner5f_mf(7K?4R5tf)Wj9Zm32Heqok#wc~c=~VEP8lgWG`> zh3x(NO7-T;$eJKqN)-wblcMIldQ4|l3ddKP>ivgG9t&b)YH z;n{hn@ev1OnGr^XZ1i&r;buk+MaxXOIM#T)Bj)dW_2OYLL$>jLHs@`eCRz-Sy#y0l zsFb5@WqyZflDZtFz!&C$#9dKS%3I!_XmB33vuF-iDrP3B0Swba%^ba?W<0U#o^$;1 
z3Jk#$+ETu~VDx;RDLTAnv%t?b|qMGLJ{tdH;5RJWAk2x+_Ir zVcUdxyl)W))>bh%5rL+zO9G19i|pgj0O5oSC+OoEq9QY(Wl5~tq2FZ1lIUgI#9ljN zeROsAz3b>Fk2Vzca=R*4)$b>{bLRoq*dK`D!YO+AQ%c_g$P2wWJ{m35i6cM$`Ci_zFFauKdr}=jU&nct(A8 zTBy_~OI85k{7Km7pIQ_Y;PL9?=!u43ziPZ;IX`;m37}{`TlSmrM_%4qdxF-P+R@yy zKf=1ygGY4jxSi;{%VX^}G)BK7>cpOkB4$EFphQ(K1D&wQHxCU=yDVL@bdu@xkPK!1+0LSahkby-z ziF0}1bUrIEM_IVI8r_cnqU(A?>8O*9z$eP{em7Rv%RLMlM;F`K8&usu+`%aL{4iGc zm17doxhze)%tS7^ap>i#{_;PRP`@tw5qeIx&>L?rPFEDujK>Fn4sU88Xada}+fT53 zZCp7oS#%dOG|rt}bpo<|1L+E{esQoQgNK;kir-de6bY@rDWmB)>4msez3!3J>Q#0G z`OE6gPdFHEedL&~D@qXYzXOj&gY^lk!a7>j%)|8ImEw7Mjj*^mwn|62eoghuk^p>h zVzIW0hx|t7(m0=AdI=D@B|Bq~S#3k;lPy}*mJ{nrXJ-jB)9`Xx!GH-Dfw}7Wp2bT` zWzb8EqUkW#_XRfQn8y?8cHd&FD(xIJ1bJuu3p-+B_gO5j%KAoBh3K)=ml`M*`ka(c z!Bc)3-<7F|SvF_gh&4%q0-?VcAI2pv0iY)L(+*Ia=KE(o9%l0`EN#L9f)YbEMle=Q zQ1|G*L=S<+2ineuRT{!o_O)RiUmtrm85_2!CQ9k{ZiP0522f;gXQfI~b~HK$7^d(8 z!9e%Jjx_H686b33ZXba>_6ul6Cz;!{m`4#q;X8Fh_?DQtOFkBlAVki83;B=}lnM39 z8WdC(xwB@N*ID7-tlPfTB8!I$ibaAN9O(S+A3uxX$=D2vX*8s;5pT2C(%;BK@@+Uy zRD_h;7S#+THfKSh_I{D!v&o;hAFHiHpX#SWFH4$wxsWFY3eF@CrOqg>lq15ME5Fic zU#Y$(kS7SRk!HNHarD_EMA$9=ARIQ38ls$5@3#^l&M}QA$oc)}Mw2{zzuY*H`HN!^ zJoPIW=XeH)RL%T!tpBtZ)_z{-5^UhpE!vXqe|0qnQMmj&Q8|gZ)YatMFV_7j==gi&l5clHi8iwc?d}4N#cPxONifFnQWouwH5I>)w1X1@juvrkv+IM99)q zm1(b=K{g_CGc|Z~iJ0^qN24<70`Ew3yDBe>t2xN54jMk4;L;RTl*vPhV>Tjd85a0w zu$%CX^Ztu9v3e0=5?S*I6yU&Qzb+F#5m^I#5>E)6bpMrz=D(sGz-qAZ0K%TSBs)|S zQMC;q9wi?*`@fu&|9zqw9`De?u<-1^X+OZ$02J;E|1}PFsK&|vtNnR_uHgTUl<)wu zBjCBuD=C@5DvOH1Lx*d!tR9#g?5?Y56SeErep0cHe{wpuNZo=sNh}xa?%z%QeESEJRKy&) zWyJM(h{62jKs5r7v{o+6bFKSmaHpuev>#6|>_{nedeli5wtpH}pqacLMUDY#yQ^TG zkWLvRZgV-)6EmUF+1Y)yS3S{B@;@;TOesxqyZ^FO{w^~g`hJ7^v?Ee^n}M;eS6F!D zsGy#4PZUuX`>j1>8}V3^6htMt`nS-YVdc+qKlG@y{gD9l(^!PxGC@WKK*bB}SRWC+ z>2#+v61dsgnm+ipP27S#rB|Hy3^M`@@kFZLfzGT{VXIEPCL(yjRKeDEsN44>G-`liMXrYBdp@kwTkm69J zxTHYw;sl4{?poZ6Yk=TZ+}*WkaCd?R*W%9e&gc97{>z#*nOQ5da?iQX+56gf-28U! 
zWluzkb>PvjOKQzNw?|(F10F2Wd&>B&r)^dMF_yrmH7{03+4qvqRk^HF8FwGHrw@;e z7SKGFqEZbu@eo5a}Yjn%~?-pKaco7CkVqB~kxXKQy@RbSXX&U;}Z{wE|Ah zj2b|S^K-+6pI+a&7P?@8wxXfMXj~soHblR9zwt*a-0V0wL98ctZH%%XBv!1nEMxyv z7k>Aw>j`T6V-V@~hvaJKlT-~zG^!)e?Iyya0CVryLtVRGDfTL;FP*bTjsB`}*@*YK z)w;G9Tdz872K>o*{B@vVhIbtfvPG+WD7#<$Is3!N!MPz8MXQP<;rAZBG-rpG1BfF%mJ!+C>nhd=K1d>>QR9b=~4zCCVJQxo=E zw+Fv9WC znK;t6o0FND=kqU+jabhditS|>v%j5|&nWy-CG!iLIwtD4wODR>{jaoX?FuBl8av94tChAD zddLqn7N8jj2b)OT?Vm6So=b@46)R7Qif(pcT0QS_79$A`@6%VDe(*oE!jIz4J#>=F za8I8S$dT$rija{pL3|Xa@(yT&cN3&(Y3^sJ!HhHx+MNza=Iz64XWXMl6}G*b-hk6< zDWho)F=eQfdyg0Y2+Iomh;=cA?7L%#c z<$5-!_rq(s$8qZG0Bsa&yCToPsN6~`Fkc8%ZESy8Y;4Tf%E2%pj%I%Z)R)r7F-rFkU_aS3uIZu&lQ|bnI!L|kNIB$+ zY_%4GD{h%Izw4x!?tCfFBysJ_Nww6v<&wWMri&@B;J`QQO#Pv0m88YYQfnpt?n?d4`h z&go7Vs*V(a<8Ug~m_Tw)yhDoxQXFwIc7rmQLsOxL*s@G z>j4MQ!M?+)fiGi}FLd~-ukTC>y)w4O`Ey=o^AaP5wVH(Bn(IbkAD;hYU86^(nfhIf zLKyCTjpTY>6w7RwEi8v}d>CLfB<=^BqInCPt6G;68ByUSh;7F7tOx71NeEqz$~!w` zSUg@?7g0sclPn4)XK$S5R4rk!u>Ub~kb8D;qwM@vf*{+Ov4~ng+_F~h$e-<;5g$;??C7()E!-BuhykpUNL>KYdD=y;68ih_>d>iWZY>IjXjL{3NV@d zDhV|!vWv_4#BcZV+q0)r>N%tuJ#Oy)1Y@60_nRG)Ep##pw*k;-v*jC!VLt`FlBRxf zUJM_(jxx_}?l)oGEEYDq7Z6wI>-HoBrDe%G$Cv21X{;2jKGI&bCG4is)980g%TW^b z?lU(FP&pM`R$+3ByuJYABB^GQh<84?d>=2^Tf2x7t5a(emXpsl@}G5I!{)=((4Rz9 z@j#DX!?%A<<#FoOC;4Qp{JD@gkeKIuWPH?mET&xO^SQg!)VgO3?kmf$WxtMP6@Ci7 zof~)jQupXNmb?{v16qFiks>Q3oXI{7X@-jXv)kq62nCfdF>pWl*OhfFhpqb{K!bU) z*21qo2BpPiJ@L2!FM#oQFn$@2kLts4j>-Njk#CoB5?hK%U{E(HNWmp#Cd!M(Ffc_% z3evn$${7*pDsqY}D89@)T)dI>ZXnMu+t;TZ`n1}F2TZ>sy@;}9t|GAkJ+S9XAL)Ot zp(;m!i&|WJuJ(pSpZm9!gYuy?n;6_ad11}!XqC{Q%lwkMEDmiw4d*j;m-JBN*MIcX-nq3mW{NHA!bdYlhPFfZ6T~mclp2QdF z3iJ&=s=bEDPc~FgsaIED{Jv#h+i{Y;mb~mN)%xvZM;y(=O6S2r-w6t-P~vD(iX7I8 z(IvyxloTzxG(Ncr8*5TH^8s4NkTp@vy5r}Fy5Qv?lfM_L`w(;uSA4iUlQXu+qspc? 
zJO`FZ52ph&Qsi1g6cva{l-6qSCqnEM;N|8mRlRSIDB`Ut;51WGVf~XmzjAwJ!hR@K zHc9>`DpCy7ACw4VQ!5Q#6@iXeF82R z**-%92&4YHV2cHCTdYBe%E>^^PlQ=Y{~h~R`;S50VCBY!&;c>7OkNsvxbSui*sx{g z;wbUE#lG*u(%+>QnKFzJFIFk6ErzpyHd&Y;_Ge-I`JJEhg-xo;8NaFnvza~vXI!f@(HNrK<@ch9QL zcfef+TQfzs-oHpSE*P&g8Dg76F*%+Dr~#za%xpRSA8@rG4xPg zKAQ0MVHZB2KK!}&Q1WXk47r}cHnc|nIbLr=l+f+z1u1f-@o-a@Ics6nGYOQHr!|)N zzVofm6qxpshp?MFZAoLr@NwMnp^{No#eOhP%0@8D*WS&38+07~^x2F(yL`xO{!7Oo zgfdP}hJN1j&!?aEjZdQJx6=m&B53Ey+lcEZtWBQc7IA`jzLw-8q0>qO(MO~a-p$LW z{EP%{SK<|}3O$tH1>i5z{Q6y|Do}VgT=`{k^9~5E$daG2&++a@sQu1yQj}lRhqIER z_h`^AyyL6_lA~{0YRy9 z4TEMeAV<5I5k+h6ZGglS)QH;t3Tzy?#HB6;Ed-)w!1!)+uP9tF^>7UNy$)iX^O%j? zS^vCb1nW+f+I%shA$vhC!wT{xAF&nIN1NXQGX*mYvq}h#t=}mc8oz+?s82@$`J*d= ze!K3Iqw`iZc5Y+=eeQET+rMt7Gnqim9=|lP=l0nmB%jBW+6OTRU0Fm<=NfT%Hlfn| zWHV0Ppv#R;msgzPQWSa)e`C4zGRKHEMbh$5sFw=l6IJmTjfh~X?B7V6$xZ=F+>ncU zf*FdDe*aKdi68Grnkp57=F$nAsis^*-fO10PrQu1&Fkvmc$RiOw#L}5@VMG6Yh!X4 zNw1?S2rIGKNB%=B7LD)wf>XA@jw= zn{E5ZgB`ckJG1b6iRB4Pvd;;Es zJjDhF!@rO3!}~Zg`=?zw;DM}?Vx0*4C(hp20$W1jhst&38VT!a-lW^gv2533N(on@ zpY)p~sLOpQ_)Q8&*t}Jane6hk$RT;E%xSmBfBhnG*~YM_8#J%~-Fi}0P^Undt1OQo zGzQGgLNe&n*QnFtY;jn9Z0~uHg5>gQ9@Q`roVJs(UrA2>URx#q7YZrL!sY9&4Uoz% ziFWDuYzDcT(h5desPqXWF_N)cW12d5%Dv-9*|{m$&hY2Goe(b0*e%j&FnQNjm)bKongy!61Hz&H!`a$IpcK1U7Ma8>gW z#7R%j0q>&7^H8{!3zyfK=V0|#Em`>$-ONsCD+8lOIEKCKhRh3rUb{RMn7_+bkk`4N ze0xII+|PuG?9tt9Q}qqx-_3QAyBIO1E6sSey}5c;pFGVUKZoK#-113lk32vFb@HpE z6*$%rDc~RaBz=S^KUrx>fqP)Hc28J=XbK7vE6Mty_`SB^5 z3mPJQw0#8$1Um-H`tw95L{u^TE`fInA+y~{B#4`#Yp1bj(A#}xF#vG zOarfyuA=Y?jgl63iZ(uT#%kU&YW1+A5Ne^d<7Dcnj2YDpgjH25+X+b*Lt4>Ef64wC zyy+*6i0_&ygu|4aU**FG1!`9Y0~+y zJ#W_Sh6Z8j|+|#BNv_?E|nXj9)6UBI$yzKWD>(#z%H9d`4{g80ET50`)9=88= znPx~XY0RQ@D*G^|#v08Ttjca{R}h#krK?4f0`ldqMjnu;6C3MG+=lRU3c)9}9|Xpa zK_22)gGIiS0&ItFxgRTFbPh`sAJ7F=+_%eE>UGT)*>$jgqh81uZc}%Vsimt1F(_?^ zR~<}sucIh*wbJlHoL)>hzj|k8y4Wt=m#XMTymmngnXMMlQAdJBx1s%iQTE4-bEdI4 z_v&j+-}>j#2-xMdqfa>lT-Q3dOk4a>@ADoyagmi*JN1mxzw(GInjynmWr^)J781O9 
z`$`g2SgRzWPnGY+DJ0qfn3Ti=+Av`Z;8mPXT_&En!vl!Q>ZixTJ(&1YOl2_;* zK3wy=Zu2Vf#B^EODq^Id;ASY`9L}jo2;_AtV>o~O(YGA^59W6_ZJE7a|C2Lv_L7{C z`$7O39!?~c0A}8X#isjS3l@bnsJ8a$NIY`zg5X|X*`zH~=FTNXr@N&1={J;tkiWk;%0ERe=~Lmu5*Q)+J*1tGcRK}OYQcSSD4)`+BfJT&+#tI%T7=iOdPs#|5naNCT+?n-eJVZH1>I5l1psq zT0aK7Ye+Vl81@piwjEZ}_;Iia4{mQx+OOpX2F&b{jWxl zH%LN|frOA~nQe)9H=s?SLgt@Dyh$ z7zc3VsU#sHZEvjl6RXdv!PJ#FjWeXtM>tLpGpcReZC3P(M)Ct%$6uU|snc(p5ljtK zVq*J_2M;B2WT}g$QTEWi~+K>3`u7BF;2ci-;r5|miYl8?b^B*hv?xr0eb!-`nt;4E&_bzpvhKwh z%pgJ~)tqab5r5+UCK!Vf{}=I26r?D>B)*9C{{|X_*uX#o*q+q;R{j5Krdosn)Bnvy zYO%Y`3ao(v$A2~nZ@MK()&KKYkedBJBZUJ0e-?}RJOCvGBo6~KmPzuUjrcEl{D86v zy>dqTG^~d8Ot~NA!5edb=S_GMuO+&Zc^TOwIyKYL^#>=@=Rn>_wkb2lSH;g=saU1B z?oFyV6@yBDZE;c&?Emi9ZD5vK4Ll=a%O_KIf02;F3vi&kS`Z__tJ3r&c4RV_& zaaOeD;f1?P*aGMS@3gIAIN5v`hVX8V>)GGDGq&L~z-d2F>n)OEEam;Pc7*rB*C+U9 zDt1uZz_3>Ge`rg|*W5r1)Z8DTKK3cKtXI~C0NA`Ly3|bl$s{;=@YojSWyJf)L&Q@F z>zGBdQNTW-pKm_Z3Ak{brgx!f-H$r>aA@YS>X8tHheWJ#+)cesSPyYw=e}0k=9HE& zd}>#~x;I6Cx+TJqeUE$}E4(WtR+<-5kB}93am_MN^s+JPYk1{~E2tmpnqq!yWR6vm zM)$xFw}P>OPv#>Gz%EM$945Wa#-46RA-M1X0qOlQxdiRmi7&|tk=3K>c(N42%djW_-{tepbSqAPBoy^>U>w?qxBj)CY6Y>s>hh`&GgFxsBn&Rx}bxJK%}vUC_>M$H3?Hh81Wj;U8&V?X}>?mQ*CMm$0| zy;7dsG1Ku*t>U*7Brq$p%vZy4M97b52uwJ_SPIAwUgvo%1DF$c!c#lYwef3)s8^T&lwq{bo?CNiIZCT zL&XuwHHeGcETnyt0X(#}_j)=tZZZThZbPPmd>5rla5g%l0b|m5dc-Y}!KmV!IHm`w zG;7c=(?Z$@MgcKe2DOlNe!0Fsjh{uS^1H7k!30gaSe`Rlp&mw4 z8>`Z7T=Q2D?E*8)TvN0u^i6~VpOtFZ6QHeGsjF(H>B>DrR^!eK2^PXI2pC}|QN8D}khD+;tn+}iF&B?0xyV6`q9{E)$ zt2;bi0^_i&fH=RVEUKVtc{;((3`;fr9-OT*azKLw4s!`9OgRQ?`sP+Ya8rge6 z0OZOB4m#VIr`Ze@$IJKbh?F6-g3Od3FFS^5kM{JQTL?A~VY!Y(CsMTCO`B{fO7{*> z!!o>U*D(L~c9KBoMpVzS|IUNSdpy(kp&6+76n8V{50rnb$0fc7K-ygR)z3$<3T{;4 z=`Ez32@9>+#V!z#&V@<}~WttC|dG+HTQ782&_Q>&wV(y`$%r<^h!m! 
z0hTN{V)3Wpk#(Sp!6z>!GC+v{A5p6bS1u7gS zIZD3)=p!4dL5{Hd-U~5gj^g!hGM=NOSd7)EGv4%JSWoiPhQQ( zuB1^jV!V5EdzU4)RG~w1oAhMW|3kySxPz*;tm5wa2MnS65~dlV0sq{~{xydP zFP!oIgngJ5P6QE2rwMw7M~c6!9AR3{psH}c3W!_a@(G@>Slvnxu2crr`)bi0yR1Gd z21ItInm~;x47s-XSsmOXqT0+Gh|?x}>B?}E>s~4(G&8U|K!b1!_xd1k9djzJb)vGa z;(C$=E#F+D*DgXEtyddY9*BrR@7=|h7r8;lX1V>aroZ5f)u-a2MS6Y`rp|M-T0w>( zW&d>+BPD-+1N`xa-XJ?%NSRG%q2vi#EtQ&8C^jpaVdL1kuBdgfj43$N#6&XJDh7`l zv%aiuxjuBxHRXF04^!|S(miZ2j}AJKlOLszyzJ*B&ZD>LRo(HP?8e%@D*kXs)QBgT zeKfeqXn%6^ar0e||H{=1tfRL@#7U=3JUfqTmZc4I8|>mi%5*SMl1f%!i0D=Fx5xO> zY{kjt(0#h<>%O40%T#l}zmQ7|8ZCb`37PZVxt6_dHZ5MQ-X=Hi_4De>5+bwSLA@CE zrDr79pmfDBrRb^Qh#gV~QNwW;&UI)v0-wUEPEgY^Ofx-AEkv2j=dT>FobA#eh4&>< zf@tMJE05ZJ8r6cqJZR33jB+?ld@QpV5$p{VtHEu*oGTpynjC(c)bb^vBya}{m~vd) zQ~WKJa1CSpmIR=ZUqbXs^L*FRQ2!nnB}b#5^^?6m*6(#E(dEj+9^`Nr#6xH8Jn!5q z5?g}>kG+j?4=NtBitN-Wm`J>v`TH+n&GBhyBZC}|!m8wOo~r$R+3cptL)uo9X~Bv= z=6s)&$U|oO^Uv7ZDMhmK{rg0AHF#YEGAn$aPtjel7dEdakA7axYo<9};6T`L_xYfASgRU~ z()aG{Sg-|{-~}A*47)wek*Wg6EMd?2ecU;&LHm%T((t6w?KcCXK7dp#X}i803QN~` z-U<#5e+-!R@O7i;t0_0=Bf`Tl;W*d>a?0m(e`M?b$V^(Baj5&l_hx|%a}!VAqIh5+ zdqc8RDb89>Z8AwU=2p^WlBh^}(mG2OkS;C80U1C{5)HF}=83NNc@@Q~G}m;hS?{w! 
ziF%;U|4*tEZB6=L!ylM4)c@Z}0JXC=sfcUhztVnD2hc@@p~sv2M>8^rafs+8H3!_$ z-T!w0$Xo?PQNZ{iZ%NWKhiej0MWwxunIwJ>gpQS9CTIhZB8cfL#bfi?*?bpXern;w zm#mTi*$qGL$x-&C~7XR1z{McipKJXUyP=!vf(k zz0z=kw0Dr%JRFTCQo2a7l9%xzaD9TN0uHkt^$n(Pim|`Y)MQ??R%MsISLpkTW(2GD zq10&p@myxNn)?NRTYICP{AllP~#s1R~zK-hkr^w$~wq#%}kaVH!I2db@68pZAdET z-qUdSNx%DHiPv^VLGU8-uL)ufG+3FyIDAx)(9q18+nOBAT;+oSLP3X5O+SZo-=p7U zo|fh(hmKw3zc-v|EO#?d6P+kX1e>)XEf%0+LS>1Z7>T3Zq-jq)pqSz&o{h-djLY98 z1BWi6SaSSP98LGRU&fVR(Z)&@5n$Oi7yK(>o0qGfxZ>;Y~LbB`p2Mpvprkv0&+_srLtc3!C0xzvk`GOfHmQkpS&J^EOArS zYf;q4H;`7{tCA5TW>#v5c&W9TRU%{YZCwP<29hrN`Q8Ty^*8}t*-JeE-S2Kko6GT2 z0oKXPcV7$$@6Voly)}Arnr)86J4TTlYHvP4(`-2Ur0(n(Xkl$4_t>NA!Q+d>#Fx`; z%;%N2X{f2=u)?`t{eZm4GXm=1cp~ZZaU1SBlm4$uoS#bB4w1ipkr$b~2_5AoGs<`& zoNy%lY{YT2rfLrR6*=c`c~ggS?4~{;=%`OtQC>`mzdXrahJOunIZ`7;Ie1_GpWj9P z2>?a;zZq!PJ>oi%9%BeMMX~eBOZkdiHxa~ySRsg=P zKP}C|jXS)wF1XL{0o_meYV+&j+z4KQ44$3Y+HO5#!7+(tf2OuRLzv2M9%Pp5ojdkO zf+-SgI&8(|*8tyXgThZkHVyKkR#~I>4wT07c|(#;h;l!nMu?S<4lVcWZoT*{&P+f5 z5K9jl1clyM94iOD?HNil<=MR3^pe$_)OS0#nX+#C_9Lc}VX%F5g#tC3 z-&&n0D`db(xF$65mrSh$WGJUQ%3sm5<&3WhLEu6Wy%tJPH|ex6Iw3{-iV! 
zFyjuJ&fr&_Nw1aqs3TIf&oy>m3G;moB(Rjb<<*4Due|&@a{qPYH%k*6_w_4)89~tV zb(LYp>0#Kb#N>zSJ6ZGXK9$aZ1l_V5`$LObtKG?Nw(mB)Hc^5i*9SU;S0DIfhjh*D zr5r8hEyKm+)Mnz{@0E4maP-`nzrUu%kA}34Q2ikDk&(OigodLGUY-Y zZXUyd6f=@7L0k(~n#dU=m&G0aXwah#JnwVlY^ELMwDrpVf`WC*f~&rVk&kAP2LWfb zsKE$0u73WXw6E}(QadsL$eZRnb*V3DveDFeTQNwjKXk1;Hks086l1fCh-)YVI~p5> zDX6Z$JkJrV>XQ%@+?%U*vttUZ_P`Aw$Hnu0RbJRqxg$q50ILmCZ(&8}0jC)vpH${Y zW%{Y9XtZgL**WHx7BdY^AM-U`UR%fD6QCaT$u-)le-V{f=jo*0Jj_ddt}m5a(jrft z3hT?KNn=T^3k{3El#xGBX5bkvIj0dddi8AGt7d+t-?|`f;1|4J`bn#Z7#!8+UDsim z19rtE?E;NpkpI?8yD%m{UER|($FK;mLR$*-N!-S&hrCt`2p=Wlb?DXc#?s*Lu2nzg=mD#1B4v zgYC8E`t;afhF@ZeLlF^cE{|fy!6RF|i!vfB)Fo$gQgl2hMXhM_{0q0mq}MDiFZ7sAWSFD2+<0sxZbIURs{G=24G1J@mCK7` zF^MHbvIg`fy?kN+h4tktHjnStcGUXdzX1-|4(1>7qCNr09p$5llFi9>G59*Rv-FLh}Wjh;+j z_%6lb6-+ka4BhU;A8qKgXv3oT^nkuGpN`zI#%H5grDu`3%o(oe=nV^-S*YUf#jO4sfRd-Dmc8yOYN*-oIlOBOTM|T7K7`@nQG|p+YqH&J!F!Ex_P6zjBY5gd= znOX5=8oNL4c%HLiW*pKJ3}lRG!##$!H?xNyiPG5mBC_=(^R6yw|NK#odpVxgO#D$i zY_7#P(|xH#f3S$A8T>Cja9qu=-U@>iydG_vXSJ}X)zw-0H(*nR4)f@;xaebAAC#=& zrL!@09Ul)m9lvV1A?nLj#4f& zsb&qpv7ZTE(8W47f_(BMXo5{03T@<5X~Y){Z{>+Map7YOgJ1OA0I|;+lhauWi=wpS<|z%q`8aM&Av>=CM~bPUzKkR zO2vw3@1<_iRP{B{IF~<8Nf1`X=Z#MxP;fWmvWfk%eggo(_Qt(u!j*w{P1tC^%w*CUwsQI7=dlDt=MG}fzJo&_9r zQ*mNFcGpq|V0kA6+j2liIRhWV&bt8hl)MrN+@a-3EzCiA(A|*9<(;b9^ab6J7FqAK zBB=@f*G$96>n{&il|K7*$xeY8YQJXkUxgRO9V3A^p5QyA^P{Ev(K4-gJB+H4`t3e` zM6X@Pvi*Suw}OBz;g@jJH_fAKWW|}s@0R6lrB{fU{~SzB?;OV*M_}PD`Pf4qEiC^2K%O8YivN|VC8bJ&Dh6=)ONuz;_y_PQ|{}z#hUW9KAE@k8zPVpiYUfWVKfZMi5!b544`rf|`bq1eDslQ3A@)b&4BAQ? 
zzE!w%y|VwE^!5liY4OK@#s{geRI?mH>>e$%UfFuU6cOSwiCWD6Zwg>Q4yc5zYu9Rn z2Swh^mM&@39?H2;Ih;|1LB=eqEDd)U0m!`S-gPddA9p+55t5Qk_io5fXy5%lQ(1a9 zXvwpRDnp^4vf+ikatxj-WJT!mrDn{RJOO4=m|cE$EwxIK$dl6=oQlShL~{ANMLvU= z^e<_ca-KeY3SCI3Dz%(BK0_A^W32NV6`4h+0qdrJOVQ|9WYC0ZEGTrWh_`eGL%BX> zw?I=dm_dL^?mdwuUo10M7Vt;}yUrB^kVHXSsY6U_2l+cRJr+2mP3>#S<^|ozsPp2bf7pUqFH9hh!KhIFQ z6bgdDkinG6B_KH2RH)RBkq61PcW19C5SgL*2iS73%67P=$a||DTgzXoLCVx(|5e>4 z#hr}psYE1`RefDv$t=@nl1N^7O{gZ)twX>zc2=08I{J&^l0v?WX3`Q%eLsqva)c7n zW8QT2unU+|s-3mCRI+~$2m)ghou+~p|82tR&9V}@zoGMD>=Xb#nnY_Uvd{Bq?6TjB zH7qnkwP`HGempsQ=PLl`~}b`r4mx_ltUebH?6`hhp=0&`>tmSapc?_Bh2HZ zfsdxx$QZ&>Ug@+eaU13T4{d#n3^ZF3$u5y1##SXpVvX3c!Ub-XcP}7gGO_m64P1o+ zPvxC;1)8YJ`iW!xv^b119xR28FRui>h)S>_tm|!&ATS-0;5B}*SDPFT2|6ps%KBR( z%f2oo8hx8y z8+Cx-;)Ewmf90R6jFpp5_>&WJUm(Awq^rJP(*Z9wPna{?#B8cf>KoWyQa7FY!`W6~ z&UMGvtPxoimr0ivD6~)_*1F2%kBPT@bTeC5BiUy$mduB%e64s5VS|KQ9mfb(Vi^etrGZ4EE!LzPDB(silwK-b($CDn{}f#Zmj!)8Z$4 zjQAJuXmlb}sgC5dGrf>7lLrA}W49)oBh}(4Y5e)YuxORP`P3 z5i-_4^h^GBtos`y?hz0rc@Qv%;acjp(b76$i)sP!ExAp|)>!oWMknDSrqz7-5cYWS zySk9$!@&lg6DXv{03MzB@vD9mAqAHm+g=ohZuWc9@|H_k~bUtiB&%L z+$_g+#r2(w)WTOPX%H=cTTM%k!E~?qO(0>(A5WWrfkZ>n#HxT-J9Fcb6C<_{?H)2z zdp^o>pt9A7B=1xrj2xQt0oKiQ64}(x)-q}zSjhdq$dxr3Q52LZrc=TII!1aH{&>`m z(8`SNw#Ijy)?{^7ePZ2js3LOrkEJ#rr4mih)`CsCUyyUm&s9iZPTLY_Pr6V~mqkwf z_RZF>Y)Lf#h6TO@Gzt6lFf0E}sjnq%YMXj34J;lR2u_m?Nk3Ix65r2$W68*=*8c2> zr0mD_HD5k}GODOsT%=0qMs&%$BTbMJ$1p}sfI|TM#-P-*e2yGB-FK8=&(3I2;5QaB z5xY{dGy*p)zX&h$N$DXp><%~ohHoF1meS^Gi*gSmh`wJMe)dWfQRHA*k?xRol;=8O zgR8#l<|bK^PM91aVae?#wX$9bu47U)k}EoYo680!`*b0+a;ZJOPKSlhZL!$5pvyHN#@sJyx5woDn)Qr#c)Fw97?XbVTciP2PF*{VxV>CQzR zYp-@XIOA}$C%+p*=oQEamg=LDNggHI*sqlmR(MPykYUj^G-8nbp28yIl7{x~PvsD% zifvOp%&lHGTxg`&-E<;bqA^n3DP3MI}|Q zJ-CF9A3fj!>{?2ebwR^3<3SNhwCBroO=|PjYdI(D=~_2qrl??FI9VZ+GfIv$3*+hu z`nR_t;VicqTNC;=i}(`}zZ9a)ig}os$tA1Sim)|qr-n z`=~8ft(eWYcoYu;7y#xl;Od9C{o>(08PV@{pB2{8*ouG!D_^@kQEK{F%f#bxpi{1s z`~uRx&v*_q6k2!pqg4~AwImzoC{)5Q#|wyGvyFLsS^PSNBLkV1Z~^wh<#oCHwiF7s z5VZM8uWHSHirjpB*?KMZpw%yK4lMXoy 
z5qM8^4&z7y7e*|xKI}^}r!pQIviIZ?Kjz>{srkqLPVumD$cZ7gD;@f$FQBL5GO$JW zwnF!`GUWm#g}xC1t}sd{ZR!$_Ejxm4*g{ALaam!?gy6GQ-u1lV#>_vN4q5z$@_%-b zPtNM1D=4N)P$>XYD(n6t0znVE3)GBHK!h_8tzE=K8a6zCnNK~i!okhutg6#{JX@^%X zap8r%#pYEy-4k=$6p=`1Wb%;IScAUG;Y3$?M!s{m7B?O~^)&V5o*Um9b}v-VLfPh%aUm zBB74&21S9^ITt;f1Z$II{X|$QAj1!)f4?a5e~SC=_;71duVSFcj?&wr1ghs$!laxe z?OcyHG^-YgKs*3qP^@MVfCz=LiT!sjon{>2AG5|L$t$HpPCCLCv&R2g`oC%Nag>)9 zWS!*WI~X2!3Vdgl~iYGc92K70P<`HZij?@Z&%5e*`ED*x;LDCi(?>ek?}vSb#)4wQG681C!9V7&ReFDfN*5_-t9{-72&Ve1 zTU)~s7ZpC85~x(ieEf0oQ&F8qtd}K&OWNi9P9GG`r@MWqGUeN+nEE)O{6Cs1BgV$Oaz6fRZO*V^$jQ-)8Wsj|Dahi?=+*sq+yHmf{a-03)g!1F;kNjln6V1!j6l?w{ zNt)6YeNDCa7T@AkOjd4|4Wn38sQaX;02k=~zX{C0+7^08fH5omVKL)<{AXOz%5h-l z^)LQ1q6UpGWmKtt3(UpS&AQyR6BmYzB3_pqJpPpSdy)4}#V$DNvrZ^Jx<6mX`s#sP ziYzRm;lxMrX1kS4_w&g7%G(xNSi2e)x~;MHjQR1@Amg@ugns}iQX#>Lg*&~6yh#wR zPNmTyJmZzX5wt>3SCf1^D`6|y=AU?XUBeogAt+LexsOnC)6}njJ-FgP!*P|po(Qa$ z!(ZhqAv&U*s%Ux(5C0$9VK&u1Ngk-*coo z#sl^d6=+W6cOR3yblx4=R5oIRkMVp#y zeG?yONs}_%2^`6(x1o>__dPQ2TuEPs{~8Gc5oJv28N$||4W z2xmr4vk&GHta_pnc%WBAvO7`@;P6(XC(yd~c;v|XkH+H`{}YojempdgW@Nb*56NPI znf~+)I*&7UIu>{*Pnda#N0F55Vp0ARAi{wRHd6`@q|)UzIgYF#wm)<93u1b>Pkq$O zy^ZxR2qZaBF=@okrdq;WbB#dj&=UQ8bb=&alozh2s^OH(Pk5vkH9apx zGp z+FbIAHX!;vA6uY}{Iz_+ zWV=uXH@=LkqjTYSNZ-y%^RzX1?r%u$dD<+nm zZ5zNmDEeM0RV(tt3J)tf?HX&P3a$w(*wszYQWZYUvcC~T_%A@e(oRKRl}?Y)U(Jy`scfm7ZRxZ{YH<2m zf27M{`?jzY&@Tvd{YM*{6U-i(K-2*TDb}rT9KXbu`2)xv=c}U8IJ+1?{bgk}KbBRB z%;Wrn3#AD=mHmW65ZUdDUhY-gE!k@*vhX`Yx$&~RM0)(l+Fv@)WR1u~F1UlU?mgTX z^V(89Jgo|BT2YXvSpcpxH5MS;BCWS8$sb|@08BZzI87gXAc4we{(Yu_;gR!{%{yc% zC&!);Q#Ug4NuL_ow_VM8o7bcCq}}gj$y|C-QO2F-Z!b}>n;1)8PI8Tsp&eQyyAI>V zsAU_XaQ!O8KUd7!+GVxFsa|GOhr|mx$xjig5A7e>Rt09OB{1+nV>sNy{Ft5l93zGU z&nAyI>tJG&JVNR?>Fx6uocL+Fp|DH$c;~MdWzud7vwJw-<&1P%9VA9BdYyhuY9X@` z?*^+0h6i(r$R&u&S?0#-pkY!G7RS6tecY6tzipLj9OejPK!+x~w5gh6}0y$c~Nk_lxWDud7A zOtP}MxtqyECTRACVZt{~dBU%WTGqEah!BABsmM9${&}o3V=?nWhS?hh-Jg|PA9o$cTCr>zG8Kr6 ze+5esjQ;>mwddk6^s2`3I_cDubdlCuTp1NY?HTE~@M~d6mOm_KJsTgwE0?#7i5V^V 
zUKs8?fP0T>+O?U3D!$YM01(7+?kneIOOjsbtEDA$L2(g|W?iIU@t;bFgIDhp=28Oy zd$VUXP_H%a(w{UFJf|H-0!KrNaCi03){V5_wWl{?c~d&y0&Blx)DE9=*oNf%g!EVc z0PFPptI`PF!RcNa0R$D`jzRRVR`9O3{k5czIFGeKg_L^wH{)ISTHMQ#o+e?2p5S7vwg)51K`;=Q3J3=*!Um07}fhww_q?a}0OUj*><;xcsXb zc^&j>S?m3NpOMUL0iGLGXeItT3y-@00Pd4n6GWxjD_2;FKQ*1uz|XO+lKE}!BJ!l2 zT|vT&kWu44&;B)$bSH{Enw_&<+y4Lz>w?NYqOs;Zsxs61{=coxe(DvC$`m$|p0P>H zZ}dH@Hh4;$I+Q;AkHFVMBtk^lV{n&H{{YVkha>4(v&NC-GS3AJlVu#i zVk}XQN|S9a$}Scn#T#l}v~8zwu@>5JTA3ng4%pPO^ii6!x@@Yuu30VD{j<;IMah(- z9oa}NO$DU4Er?LJsq8-*wm!uG`6FwV^?#LrDrDDl%)1M1kFp@bikCD;H0AIwym#5` z?D@EVwMswboM#F?rlGmJn&bp*Bun>WL4_2+xg1i6{cAT+sL@yEmaK#>Y2hkC9)HH3 z7ackEHItdq%0{W{`BTDw_37b1A8LB9Z_=}KD#HT)eX2!FQ^I4fT6kRatR+x(7#B3~ zwD7eerEczyJ}6eyJ~rh>c^ftqv$YLl}++bW_SB3cGJ{{P*mg|K^^JaYv&t1uym8!pe_Gvm(`@C5~)*t%_=5U()4)A2H{HrK~lp3vUwh- z?uyinpX&EyDF-0_6;4U984<`bq>?kh82u`R)ycMyu{()5FQ2#%e?wfa+F~WB zl{@OqId6bbv72s4QR|AYCCAEyI;#LNv~mx*_swEG{@GcE(zzMKe8lm{>_t`*$t>kV z4d9P7gOGq9*S9s@PcM^N=sUtEQyjqn628;w=YTyFQoYMEhL43OZbwtaV!`I5qwO~e z%OT;w#{U5DHDcoQAS;}xI0JS`6~|Kw)ay1T{W|STKPTU~L9>jesHLrhgGav{<6aCZ21b$zeBwa!AY_L(uGpn zobAG*^v8Vhj!iO1k|qq;+z2bzfsS!kW3)(a9k*>nB#4W0!`F}gze=ZlA_!G?Z&l9O z1~9qj`BSA=nxc$08?}ZR%tW%kazgR-1bfq)Q)E_X@xvS_9X`K}Q2-=r?xcYDJ#&%Y zKj*bJQ!L74Q}ZHay~k?wzN-m*NXy#0XkM1~X#mLQs*HPj`t+>%Za&eC?Iz;b+y^{% zr1G1Pvb&saDaXs6{EteGNlL}@E=ebUI6kAceLp(sr9zxO6k|G$+LEJl^726YtLi~cQ#I?$IMQQsz7HW>6E`jb_}?jzjrvEyrP>IZtS zED|v#Uwm6K5DcHCMH<*4jAP1@+5eP_x6QS&O62M0TV9Dko`PdpFFc*=Ny(}(Bisdr-ikhEallDY_aM{AH#}4WLZ_`V+P@Z zfaO^Ffjqog*3N)~&~(u`lG}QX6Sr%j?Z=!x|)PmgtHR za9K|Us1Fcdob6TQj&dtSL3IU(g4~AQ%HGhcd1)(w@)qMB*{hMnRd+K9Fmm9G=jl|% z`uV?RBrK_xB!8ZGsAJX@tCbrmLMHSwebw^Ml%pkwRy{cX06nT@ zoxzPC^L``s2Cgl{46KtWUAS$_fZ6r;u01t>J68K6EKQhz4Z@XS*0ij( zIE~;*QIaUe*(7obp~nO0YZ=po%_-uM)(ep&f4Yu2R~uggpvQ0iwIkYC-rC0{)vuOV zY*gKYkL%j7p}2}e<+QT^0Rl1rRX@T(?lbS+wY1$-GR7`;m&h_kzhBcM{0CMg_WBri5K?1a9@TI+&fufzma6<>+pMOp&%3j*b zawKd9UXpDY1XajplkD#I3Jeb+C9#Yt@1CRZuOky2Y;DTyYLzSOLM}Y10{q8vjpbA> 
zPC4n;v@Py8?g#E4x^vDw>Yc^1mUgvqJR3<>1QYmGXO)%E8A#jIH-4N93i*n(xwqW( zV523Wzlh%%k7y(}1A>0F2liw#Tp5`8T;u=&J#*Lbs4cDuY%*Xk7<0$rR?BB=RCPM&pa*S1Ek!2n&q=;DfRyVR^Go#^iV-i%Zw3_M|$_@)>WZ(x$=0NJh1gSUfl|k z$tE%67ao|b*~PMw+a|Ps?;zr~n|U86=G9(NakTD8-F-7eqC_We_;Z`Oj(KBm`1ig+ z!3?`w6@bv&ETmdXHk&_qpOin)*I9RG8Trf-Kzlq#BC)RGm6sDFcd_;(h98A;%`|%W zdW)y^`W*H3uHJ8~!2bZI_T4_DR%EKoxQ6OGi2E$7{z$Fq79=EDUA?<^j#2XdwVico zBR1=c6m#9+Y-jNASt(fcsmr4<<+k$4EpG6iSlc40NP&+Ms~`4;4QtA<$HH7sZ|Jfq zAXPb0-0>tc4_=10jYFcwYeIj!pP{H(2<&S6z(-g}{{VHW20%Nw`qxZqg^Fc8jWn%V zWcM{0LEkm9sD)y&T4`Fdw0hG@dymSkR40_LiZ;~yJ;&!p%D!<}gKZ|-pKsUlqi?l7 zbR47`XvIyp+x4KK^rM_yq&TG8?Ne<&xT9`6(awUzK^XK?P7ssSs{LA>voyY6`#+@| z=uI#X!an@O`$oC`bt1L2e5JPY>oflVK~2XUPv9upX?sFFn|rcGy|^p3HW4rPVo;*1 z6)Z_}pLRK^c6ypNdepjw#*&W6ivy-}^rr>(6#c%G?Vi8Nq0}`{7!;nI`_uLhsi%eQ z`U-eVb>LQRW91^*p1+kY%ys;#P`T;HzpXeX5B03wLD-2<=hCDLj+H1+!k)4J0QIX0 zRUL?xDnOMWPtKeZ+PURaX$(}UQaXCaQzUiGI;EsF5~K;mNU2WSS<}_snN*dPkrvub zl|I@+wZAdVVjYxhskYI!moqBFTPb!_p=sCvW~Z1mAlXw|TQ$|pP#`48;dt$nM~ki( zNs`_a3z(f^J+qa`&*fM2tJ^I)!0HA;=gi9uvcCkDDmQ=MB%1dYSynTO-91kS`(li! 
z&PztFtaCpRZyJF%wu1n4ARI?;GhcUjVn}Lj>@%vcXiyK!1~{ukEGnO^I-n~ zQ@HZU{oknmUrOxu=Dc$9Nvt(=ytZQ;n~N(B%nlFo{cF{<9a=qaOOEP3AMVJX-Eq`^ zTKO#32$(5 zTWP^*`)TVN5s(f&K&W%DSd1l21-OyddBT(H$@+TLxNSVh_PT6fbDpIAM{2O{u;ESv z_6oSi(ANcQU9^Z=`XZ|7;bq+;WGTo4jQ(_!+$aE!J2CD0R96>U0o#{SaHWp}@vPad zq>zP200Gc|N9)vkSDh-AD*R0Br$N10VrhXTcaW?{9FbI>a9N!b9$COI+n$*8_o`P@ zM-e_`rGvH=fW}XyRk*m2#t=$AYEHhV ztyFI>o=C_Hp?6?0AoJ5{BehZ9zI47~JcA={GBkDYtrr{vO|sJuAA7 z6PiT1Mmmlv<{2Y!*pM=us^cJM*RNk{>1L8Bk#fK|8(D(qdG|f)oSJ|P zWKE}_E_!jtPUgARWmckTN6@8Aqy3Wt!ZuHd4tFsv^NrH5{Io(|E+ z2e+@|RwliUHfYlXF$9y3oBV3L+KYzTgq7+rMsf7$D5R_3np*mr=#`4gqMw)TC6S#* z9T?*Spg0*PuOr&3&!J0iEH@f-=^W7Egi?@qk;(mg9xF~uw+fETLpBZo&N_a5dsNab z#nJNDdc+1;APQfOKnL}%*jL0xSH6Z5r3ouDJ51KCB@tL%L-s4BkdYe$wTa_`d-lgU z^{YfdrTBu?nr*9if+<%Za!<;C3=i^Y&*A$$PR1y%V~s7>K3tJ75(f49=e2Y9!s^b| z>@{a;?QlNJr~prv%gTd}MSB=(_|XMc^?}GBr|c3a7HTOwYSwYn^%%VhE_#Bapxoq9oYA)H}(-+Us*!_T1^hp z%D4tr$8+dN?b9{yQI!e~-F3gqsm$F9Xyzr7-S;e~ z1a{myXY{Qrq=si_W^^M4G-epVTw^%oXSZs^)GrLzNQ@&}sJaFiIUSGIy72WSSz6!l z1uIjtxPiRAKqC{gv_0tHKA*|Hu*%94AIIH=~f zFgD9nakS-6$Cexn+iQ+}n0afDz9J^v4yW{gE7V#_tSv>ZfWFagsPB zjCJC?YT30aGUeRph7r{AI6GMHHE3V#;malivmE<_pL~9m(^%;7tTJXP7tKZy%^(9| z2m9SQ!5OWn^ti1{O>b>$5<6`V9$2!URmZ7(hqzvBHd!HhKYGPBS^@O;gbw zG;o})N_w-0z$BGy!`8E9qxVO<2WE$I#VlrMI~K=J5M)%aiTSI#rLfZ;#~wmK#=87&Va$jV{-6N}VG(~=PZ{q&|>(ga`{gfQY zgf{HtW9e5Kb++=1tU@;jJY@P-ZS~^$U`5cK*iqCU{=IhK)q3qevSD6S>8E|llTOns z7)l=~jmIB**i~OSA}t#H@<=ksc<0c1R99E6G*XD0%JSFez#K5`{{YvlYt2SKFJx_! zKqmtYy1s|qrnEkOwjo~Z(w_Zd00~eG<}=^iR$SLmw6aGujQbGma&h=pT-Q@v^6XDR zz@`}AXlCeoRx+&K$DKSwlhC^+s?LHz#lsW5fEZ!gsxvk-2r0Sv~@M#R=tysHyEdJ#lj! 
zQXGYl@rBP7dAub#ko}V7S7;eBWPU_{DyYV$=CGq#>XC;eax7x!%-rnVcYNPkmK|2& zDCAZqK_~(DAG`-_f1Z`0ec|(IBoMvKZ#sPA5K z`h2jVM-7reSvwLxhOHxA+OtKui%N>3PK0Nl=}t*W9klT^A)`mD+Ujo=n)%EkMqE62 z1<-$Z4E(>5r{CK~5q`?0ow;uBS^4pX5PXqbq@vfQ; zUT08@8_C8d-PQAhyXk@}=kfV`Wck*N_3*g1-HlYc1Y$D9BW!(o4r=^YDX_^Lfj}Kp zaDKIkddTe*mid1(?<8Xw1CyUhNbaOb4*anTyeJ(9Q=jwAe9c@7lx~l1HIv-x;%1l6FDq`Fe*XX=;t3e^{{UqF07~TXOSjgxqh3^H%#M6U9yQ_1BiGqj_c->b z9u++AZ2EfFIpLoYd7&(I7y0@{DL;GI``1#*kKtd&zJi?LT0GG{R;?+}pCpW{l!bHq zxlsQAdN4(0+^B4ruQs=&$UmKQax9qnb`Py)UBxrxG*Cz@Dv&m#QqTJRPHN`X;rW`{ zD}~2c&mytzVzi9y5>IB{gb`gW#k_H3wY=8n`_UYV%$PiYi0>nrf4ef`xTm8%nl(SK z>geaCS#UyL$13_rqaU47meuo@v_}5`YBsIe?8rYglv(wTOlGsDwpBSclBc|FP^V+k zgp=6Dk|bWz#(fB=8GR-{_swa_5E2E%v7bp6pmbyRWsUy;3|6YC(PDv2^pt*88Akr{ znz+j+bw>PZCXIb`$CYS`sSzG=^ZZC>;lvF zg0Wmj&kLGehx?#Z+kd>@r6E=9Gf$i>MYr5iZ3ooU!4G9oOR(dU`BeKrpKlqY;2#Nwye zdjs`6Qi${K^r&@1l!z(qz^Mdz_o+m9>w!`P{3{t%3lN0-{i)(Qm>?YmI47qyjH-x; zC*w~E`P89%3UEroRR>}ql=VuLQl(U^B~ZH&q;)c;Dn(3GfpJ)-=Il)5qKNijQMRPT z7_POV(Bf^kay_)0O-YI|MZpp6qisowF-qkO2~%3YXT7(Cf6GZ#e=;cmt9o+?E~UFa zcGBKS&)~QpsTI+Ors~#=JF}k^QBt8f_Dtt|Qm0(;WtE!bt5ez0M|{|4)~%yNsNCgk5o94PkfK&X}6MjwzlM+iM0Mg zx0SA$(-pHLwunjLGm((2-_QR5uUZy(gG;tSz&ptNaB9Ag1Nc_$Pw=t+LY}8OeTVz( zhyM9$9*9!a+sx(ffVr`qhgdFH{symjCs;afh!);f#l7YuIp`bsPxSn1m5}p}p#kdI zBC;3}j2tlU=xfTxQdMd4%c<;PDmqiT=zHEWDK^(R;SUurK77ERE(!Fge`Z=llg?-rjFh5|M(tkQ3?0>P>LdqbjWLr%EtaWY?FB>;pQaN_is$ zeJeU!h1>%wu*MJ{y1go`#pM24e1{__(ct7U?bGXn?NPO@#nc5_Laf8(+z%_b>MHi1YxdaWbYiMV+^fdkm>m0_l?qyz zqC}CImPZH63P66rf$Vz_eQS2k#`8Sw-`*Fu2yqQ<2Pu$s#Jkn%kZD5BAh+{3eWZ9C0fzuyKr*U#h zw218)=nt=@M?9+-1~N+X&<5aa&nl#1bTjyqE=u&Q`3@2A;+h`wOX`}JTtUS_LWhUYyf(U z0)GLYTK3~g7{?J3a=WZMB5y4)1 z9E^Z#Uc*m@TZ1g&pxGu29&Q^wcN5U#HK_%K&8@WNZ=G&=nq|tm4xEFWuR~kZ##5(j zSmcszoSH5Cw~~F0&C(#=(6Gda20Qd89DCxltgKxB0Hh>>OHA)j2?OcRBc9c;x=@Dd z3!63tRG*PYEP9f>bUg9KD@GewZQe&kSk=gOP&4!6u08S1abmL=a_PVIbpHTGOex7f zBQ9oSfF-m*?;yE6s-A}){pzgNkr*a|_7ISq#^NLdOVgffYDrQ7x+W2^A2B10{{W3r zlHjvp_OK1Xm*v}$UQ96!yTw|^XS2GqJox>)!nfvoOx$TwkV>~adLKiM_0Y$AW2|XX 
z{f2ZQqbl*Z!>1j`^%bRdiz_-Z#=(IBN`tr`Kz|zK^&bovru#;XBU?_nK2dHyneFti zqoa$Zinq4XYp;J(%$^>u9k)wZzMZ`YbQ|Zl@cPQ%^2%0)mxGaw*vI?|lH*bPC75;n z)T-^}xJ=_60Q{-;+FE|}PUaMy(o-JqyN;(Ggyag&)fPh>d(Qm1QH1}Z$h^^7SRTCNWkod`A#cve1 zf-q!3>+>v6pjfB$v{%nl7$tt6@I7$%?5uK%D z<&F3Ndix5AuA^AOLZ|0qSxCYIkK*(gsU-5|iPCf?bZ^~t z*m8Y%J+obK!?}77nJ1xBQL~x>=N3f-kQG;QdSlr4$E{?~8Y&I3fw*LpBjxwwx3zU| z5q6a#fdmLn)|IdiuHZ+#V`~oyauP2sSA~@q0OuTb&3&d55b8AsjtZ3Rt;}fMR7VZ6 zh|x#!HyQkD)G`E-am(cIRryKBb5(Lg$%Gqu8$iZ*?Mkuv(1SkZVV%Kx1s{iV-n}P> zRV1&m!|k~=Tb8D|cos113}lc4FWk;~&0$_$M*=X2uwZ_5Imtevmj3nrc-eyygfWnC zS2!3ooYuD&ZanAY0fCX6b*{_ClzC!#HEK?*v?7GK zxDuyt$N=gqU_~9YL@F*7_Yw{{r#;MKaG}&^l33#gx?@FK-X&3jT9jbWAa~B>GnkZDmZnF@(iH=OM zJ#mkx@~X|FYBpB3_fHn|h>h`&y&w7FyLfK)#O1Lhs-u3Sj(SjqFaHOiNBwm52BsjXSg z#irW_2`r#|pyEN2%rT4~LO-Q%N2Xz7*pbN}9IpYBx8+q6MlSKQ7fpaF{G$Y9b^ibg zzYv7%@>g!oGC{*}gU8`qn7Yzwa!Bi;Mk%|*vMhYEVBQmO{{UE64DrqdQnsz+TC_u4C=O`RH`hR=>0IgTP%N4^Go>U+xP!&|1dImhz>mMrK z9g0XF#q$iY_0O-?u&;@QNGtAR%_xPel6;sryTIx^;o&xpgqym@4Lt45466p(v^Rc3)m{{UCJ>N&1k zLh&ovZMVKAOMZ&|)BgaiU4pc8!m`08cYHl6XvSXsfU%Nw{sfi9Dl2k z;y#3AR$axbx8H^-z0}~>OEhlEcDP~gD|t0&1cIJkM(yhhx@K8hF3?oN~VSsq1z!YRmY@@ zRV24>;({~ornaWHk@~DM{{T9nHLzd31IO;CH9YobY$bClNYr(QA4aH5YuOsKT>$&C z%08=_h^hYop1+#nwN*{ZK3Mc&_)+Em0KfTFvJdyxnnCWv@~c&X#ZwXf=|5URhx>!` zt0e*V{3(Vt{_7vYsZ|##Ut{_THoyBKr}EePjZN4;=b5YafyzQq_i;(JJCbU_RsR5< zbemhU3VzUBq#H-@tuEI+IQj~o59}z}pS;BVDSJ@mE9^hS->n{6`V&?}evA3>OR$co z1K3piVc4uAIp_fV>A^GHlloNtSUrjV0C>}a9-pGrx}jpYO!doCghX}&=~gy-{sNcF z9+~>odr%+|kFUR4ca~4JP7~D8wzG9aLL&pUI3{XPnWqG(bppg;DZw*Rgr|h8rBHSv z2~SkTN+YQjKT5^a0ws>6RK-XVo);CIZ#GP)$}v|Ip>t2$Mk}f|O337sav8Rbl>AYO zzG7@gwvCkhQsSqWs|?vj+LIKaXyzS;1*FvE`zM37N%}O!AP3lypXXBrs-7|Z--a~n zQNg;nEPeZbg?pJKJ*F{v9v*p9lLq78LqA7M&8*zFRJ8~`t&{%%veyss3gdT-jt?-v zh&_%o{cFC`6aFdPMt+9o>NuC*8%Omu(Tyi&Y_-6TH`--jD9E$W$CwD?4-71$_>l(t24glDcG3*0FnOy3eisK zGqu9b#^#JMqv{d*F#cctYGiGlJ^|e>Q}yHe(rC~Ai*!HdqUZ8*KdnTMw%SmCh0%x9 zI5n&IOl|Ko2TyI9)Pwuks+x=*P3(#~XCvrp!ymKjr#x+xgY?Z>)vdh2<^js}`d1{Q 
z&lbBK(U&aOnTCwf$gYf0)a6-D2tJ_KsdyvBPpl!Z)Ejl=OO=zSnxp>!W*@KTUSEp4 zq-qVNYZkZB^@c#qJy)S0h|PGo%rtQ{oLbkR>tUPhvkI<*tfag~|QUFq{k`L`SQ9l!&SKEH)SCx@c)6v)8=JBU`sayyQrkzE9m zEIY~h0}u&bqOxDNyZs|Y<8@cB2iX})}=YK3Jo@SW{gPkq>GZw+kR}|WMiI& zw1u=wYN&T1UK?@y{{a1ZWVa}an3rMCBOiFxDXz8&0DZ)hy-somt!XNiYv{B#PPKpPnuWNEEU*J44l}#_PqlNC-^l6}N-$yw-SaO`=Mb!<-dt?jOh+ z$9u5qeA$0{l3UI|4tfrK2;!+rdfzg(6iLBVJqYiS{eP8r(aW#79PxtC(7L^MQ5i;J z7=n=kayTG?`2LkpU71~DfmPcWDn3x(O!PnfdWBu>?98x8zz|p}oxxAPbrkt8V3QtX zE?9D8w>iNa@J4Iar-e~gjBavI64SE@u0D21h+-QIpaFx>40h-1`qk#XjoK6wi~yiF zQOL$g_C3Apo|fT$-Y)d{<9QK|R!|VhpI`9p+O%|7Zg1}uF6{0E5;n$^CgC1=+SvUM zy>(W@!U+7VGIVwQCV=~*6?SbKh{SIgm@{+ou=|6!BfsP8RHVFPvN)PBtO?piK|iNIg=34EBz@^1 z9DUuH4Uhi4S7{}7$_9Bi5a`FAzy7NBsbz{f$(g#!#?pTNNeAyjMldDF;QRV?#yzVx z@?s$ia6asyfs6(pUfBI=7~Je=+j6cB*93#Rzh1ReoMn&9Htk>u=y(SQy?0W=^2yxF zRTEkhy~3rdZ7n3HQSc{YZH=g{_7`-C!XVYK3rwDI~3;w`gg6n1dSmZZB-=C?wRDr z2l^VrfRtrrR*>``F~c8HJJy^+$`xCumAt@4+^$zXnCto1COevO=JEZrYxwL}9V zMs3(>IgpOutwSyBk0gPn$=i_^Ab@*y=~h?YIxV@N|` zvA7>ojz9X<3fwDY|H>NwE zt!I+nTF#pz^8WyK%)qut9Y8&Y=Sgr9WQ0vNSUB6a0J8Ta`}+PB)f_b>(rfzuyvcQC zYob@Xl5O#^@{~T}@4(3E{Dna_p(Jt3g>k@PwBQkrNykC|0M?|Ic;Z=D$a2SMMHyc} z0X_ZeJ!F#AU(GJWE?aQiBhOBI;C@EC=;7ei+)iq>9rSvg>};2b<;={>pS})1Pw}dA z*~HN>TbSf#`G0!HjC&4;-l+4$UeWy zxD^_(T-08NZ!*24V^0s*<(f#0MU{Avw&vV>fzuU}c_PITM|OLO2G`mAexHXm*-so& z#tI~lY*2#eK@07-AIiB4n@Cz`me{mHAP(sUaC=~S{cGI7V?C^OTAp?*4`q0nVg-zk zH|InLp6YYfrGfmwbAqe#(^Pvj>AY*WbtsrI3QY$*}tdW`kOR(aVOS1MG4`@(P(j-tC^ z2-8}b)T&AyQA@~rA>c0~*m|0yE$Xa-D|r+|qs)yNUY0#kFt-iG)S)!zh z+h-2phbR94ty+<5te$6;xA!LwNe7a6!Ny1x$QW5wxFc^~DeW0VK(Yd^GJd}Fds%fV zIEssNwS7Y3+G!B7wY##jFWoDj;so~1Ul%1@#XZT6Uo(~?8h!^ZEW&pLnltG4qMoK;(`O`8AL=rc2hhVYQ&;X9wjznfmcs zcbbSETyi$|WCm3WoF8vN`S-6PrdZUkP3icawLEHi4ua!u1G2V0P}_<0$LaO0DDKsq ztZtwVaoqkNUMruH<`WqEI(d9>3G4LqKGgUxRD{C+2kzC_rx9S2^ArC7Ee9W+a}eAaprN|@OMk+$S zl+q15jBY5!E)69BE-Ob$)-H6ZKXiA*Pt6fLAE%{fb4ynl$`a;_B6Q=0NpZ0}s&upBN8DDidtob86gpgy+I$=ZT{{Wp^ARpptZr^tNs=cEbS7-Yt@igBo_EGuOqQ31&W6xvw 
znx#+*#R%$g^rYAiUB;~l&tM1RM#J3VpS1$8j~~JL)3zT~A3;-W6a@C8+6aedxh9RC zz*PHa*-P3mAPMy};T=o1lqaoAs02ZM1un{%B{(OwLzt{V<N$mS0UbOgOSYF4q-nRSGL=rmdvQi8O|+P;)`-FCGal5q zr(%~CR}jomijx##mBvMiF;ZfTQE`j{lnQ<+aZCn*oB_!v09I$jC-OB-I@3H`C@1u; zi%GWg^$pB2mgv(r`~+lvxUNIu1DlT)%NRVCNQc)VzQ2bk+ES>`kK!uH%AF?w6RVDZ;QNZwEDhp!0fGMj$4zAlwz6U=B+oFRy{ezZgd(n zpXkuHW=048`V_ZcBHZKqyNC0tx-;iZkJVU;w|C1p{`6#5Vt0x-C8XLgb#=z4E05y= zhvQm_Ah@-PPP><%uX@eY{$T{4-c6_Q{{Z#Wnq0e~cu!S3g<}V5mN$~T6YNvIxAG@D ze(Q27u^zRte;=D>U%K=C4P)0eSaQhgr7mcmzwrB38lQ%vh1)aP{^f^qzruemKMLwf z*M|6W#1|eM)dkhK^CMxlXi30z{)Bcl?7A+wW3SkYojy&DhscDCFpAmbkS zuU`+Br|}!gY9^cAl#+ikMI}J#!BL;Rj^e0XUbgVRGb((A$0~g|`qfvCRjoMo;E?IbG(oV z{0<1uT8qU#8<$G(!{05;R|z1=k|u6JAOpt-pXXGx%d51pu#)yUWw(wn?A~Z>D8Ny) z41wPr>#mezIt!9oC)a;NmQ!BZ-bTf$+v-z!wze?K0|KNK3h;5B{{VRPd6hpoUnCWHx&GlhcwhQaGB;Ko<_nuoneE58sb^ z@NrdYH^~>udeo^#$+Fa%qB6-GN=syd0l+?l3dOqAU6~ZfvLFW}U=n)*Dy-wp1*4dw zh2V{&e?Ot`_*D-zcDI^F;7(Pt82V$@wZi4p*SvJPsyY^Cx!%hWawLx`!5buSdH3d` zl*-BFJB2$*EE^!>rzgH@EUQTofEU{NR>#Z^GC4n5SS=*FNflTI2lt7On72$5o_XnB z%xOjn`xr{AWRb=v^BQE%0QuM)NA3L2J!(t4IV>&XmNC5}0Fk$x6VE;I*V>&8%&^F* za~s6l4*kH8Ja=zZoc8=WV-?RbeSP}kkU}nP#EJ8q9l&*9IO45aN3!B=G4{BN=Y`m06eq3@GDl85Dh)zd>@EkA z7%mExJPw^f9rH!eu;)6j8imh5fCsar9HW+ZO}9(}P@phzvE+P}X92H=3e_wDUZ zy1S9#NfUA_3Jml&15dl<@R zoaEPEBwD*Nb7|QVnVI2>Zc;789G*Zuah`F_M7jGJaPd402*6=DkU-O+Qj9Xt^n^Iks$KjS+SI1oFD$aDy-H7%pTrfEawb_a7Rx6 z0Mf8+Zf3V1F!Kr=0IYfc0PEHG^#@r(0O*^B&_?C_`u8>GJS9p}Zks(-UNs}{BJbO0 zfh1XFnk~GE3Cl0()A?65sz{${jRXrMxZN-V=709GKR-(8eDf?NWXt2AQ_)YhD?7-A zR6KFA83Q0Z9izYFTVSG;^;Twhgyjn(COC|KTb3d$6T8DFWH@1h0po+y`B%Ts8cRdxYgOggzj=M-Z%C$A z+aJs^$S2aV9%5tL4(++g4N?nNGB3)-n~{#frxvn#h6sjS9$Ou~tDaRH-lr6~=2B*s zSb*q@(=?t_O1TUOQOM6hTC1VUDEXxF+dnAmMtaqOrCYz3v545j77LKnl_|&-3BAlE zg-F`Li?`jttvx?Q^Fy`VZWnLLM{j!Bj@k&MjhQzEz(qZe_*IcATZ}O%2{{?g+USlQ zUhLWwBAw(`w9u_>qjwC_z6N)W-Rk3M(A-F~BeYI~0H@txI}uJ>h5g<+9Z3h380%9s zqF#tphT|T-;u*r2e z^J^>Z!n4MHP=sKQ{evM%Dp;1wL#TBr=BWXWEpBBSN?tt7XM>Fmh+ zB$RDP+ybZ9r)uQH#-$Z_tDUqkl;-{CV}eGOA^ZGDvk{oru5s7(t%x9!TSy{p>ar3e 
zeb~qQC-MIPJl5UrgBrk+&u*?<o4?waW2P<+9xD zrGs#muR?-byvop$U62M0INk437^DNsDq1DO0uLkGp0%y^Ykw_#&I*hpDvm(upTqjq zDj-wC5`uLPIL18S*onVDs%5@alOSi0J?$llAt?* zz|XJcT2^yp2*M4uoo{VAAW0)^l0oDgk6&X~ql!Sl7L7>xbJHW-SBr^-HKb0cH*HRv zO_!s9SC5$HwEonEzV#{uS`($GLMgHSZOMKWm}jRS zhpi%BkoaOfiRq000EKh1-2BD2mEDn#Ppv&J&Q(FiMn}!mQ}?ZM{;I5XkG4;Kvc`O~p!9lWlBbM~Yli_R@u>7bDwC zihkNLNG?fjbv>hQw~(%Sj-dV(s}8Rm!xstVxA(gm$HguyNYs>0dX!;3Ugq408Fxn9 zdX8$Gv6J_dHB^pSB0nP$?Nj})cly=*MPIaw+{sduUYeC=iF*RSN~rPrF+Y`Qu?{{~ z{3^VOo~2LJR&rgAm`$0F5%&D5CXD-5gTw4U15mU_(AF;JTxL=zM_Oq^R?L3brulx= zH#O!e?KEunHD}AD^8Kjh9m?A?*wSqD09CIplgp*d7b|S~(rl|fT^?N=!E(QsN0(MM zQL>ja3zha$f>h!B2f%v2zu~bt1=81f|YCFHzvkyqisKJ7^~(Y#CW9JQthK{9Km8ew7Y6ev~8l{Vm-8C zq}xW?E;|uzqispHj8nMAVw5dE6uU{`dY+WycI<@OO6=6q)Ip|iJU!*YMm^L>p}$Sl z!0Ny0e-WUKx&Hu441eGx{{Znd-CSHp7Nu`z5d>JrE3o(60RI5ctA)4y{{Y0Pq(626 zG>7od{{R(VrDe2zg&toc^9*f&tYT$Qsp9*CCAXK=uCC;E{{VFkKlWPkncIEDjq1j| z55)H#Tyjju{yptg7yb9-{{S;ybJDx2Xmns>Xk1ErKkh;g?-oCh#WI$%JqE6AXpR1a zNBuNE$X12X`H{P42l1@UB;wWmn>Bv;{{W-09+|G#d?%k#e(=k_U-WE$&o>9~BOlVM zSjpwyr#v^xYW3poNxkD%JiTR98~@k!9b8JGxJwDerMN?Z;u72f#hn1bU5gYgP~6?! 
zf);o8;!c4g#l3fa|La-LtIV32%*u<*%sJol*?T|7STHe<_LT}&d$lWb=V#YiZ!lvD zQDp@P^(1gpM2FW6IHVvfLASf~$L2L4gXiZsa7A5pHGyGPq+3y*U5Y=|0^aRN!!y@s z|34LL)PGO)t}(YWfda4*L+*MmXbKO=f5t6J)JI8iUGxs^W3Qu+trTwdxh#vSk_vI_E4NcZJ1J90|Rair#voT*^a281jx$js&2 zw8)i?s_c+hrO&L&xA5*RpQ~FRa`4D>wxenry;mfN=HM8T1l6?UmE-;XqP*vRnUX-S zc}4c2+kgtq7duOorS@alA*pD?O8RZxh*JaC^GUa0b(?3JluF2Umx z%dtLt(l4?_YCk)1i-)NM)4*pzS()BiqF;`!uiB(Nmwn4+);n&{VAalts^a!2h3X?D zSU?iNsZm*%jmEt2EFD)@=hCNHi>L>x)2?5RO!a99F?-#JE^~HFTkv_zl|FvoZ70_v zrOf5NItHt`Va%o9!RpMnGtVKao3Ra$2UlJsv|hUJBf#QnSn#*EmasC5dgLWbh2dh0 znqz{XA_WXBJJX}4x@U`7Gg5tVH6YO^TZ|i5Cc8ogwDW}8bn&#VC%T^0m~fh-0#okg z7m2Bw_I}(cx;Tw3e`Nc5>z&Zwjy=z}-Hf$MIy8<0ozkz^ed};`*N3{Q-t2iM@;h^+ zHuO7IbjZoE_+U#1kf7n7TgCKAO)Zed>^{za;kTBX_4X-m)ni2_&j&JCoZFFUv~p$l zfuh@7jUTcODp$uL;N$L5dXQ1S3m?t2(zA~mYw?RU z50Z1P7J7d#;~353`Unr^vlts6@DrCbmyW&bZZnvEj$hT>%RI~b94PW!^c)xqL~-HX zlzUJSEak5fct9tI`RP>n;Lm1-Yp1Wt%j!H+4DTbKeA4Y$^LZ~}Qe(s3G;r3}yG1uW ze)jKm%gsZ|jT=YgKY-U^Gry|7(yjNi2N|S4s!1_=@UZS}2wdf}q(d#6MNiD9eu_It zYUKDh=}lq9J8OqWcZ)b{mGxnDD4iw!LRGL9K^wx&^;d@S=An<3E ztHh+*VN*_a#q&&bX4zBChEd!zzJ?N$E3U(Px721*J%3ei^Te30G)h^0m@DbF=NW56 zK#oiAVVv_@lQ?4A^v@A)FYg$i@^?^2WnYuukwY$*d1U z>`n!YRs%dSM9u%kS4Xd?WZ5z*zyJstvUiznIA{c4e_Rh;E7vo#cnF_qkL8a1UG#++ z*PsntB4vf}}r-p%=Sh073gKOTh$G;F2&FkpVsjC`d()oj3_w;{~c9oJkedBNBE`kj7ve)P)GpQH4A|&9HJw-4y{w zi@N+h%+1_|%^p?j--8-ftDk~V5e|V!iCS|~S(`cOl0qZ}%q z30CX$dR>qXt9gIYiYQW|35wM?;nlbfcm-sp-13&7L8fG%F3DkHNb0edbw*y~)Mp6lYQ57q6#b2olM!lp#=Z6y;8swSkb z3vQ>ZU#-ogUb@$+{Jp49qlfXMGw1#M*NQU9Tt7b_EiF?WTRcin$1{dNYue(!T*0UlMlj#0RVkSTr z`#S$o7}Lz!0RadrKaI%(gAj#q~a!nb>uZSKnW55TA#P7_;PE#?U^ob7gkOshAW=HuNnyHaFPEb9wUww`nq4gA0L`i^d6I z1^z=EKFTnIS?1Zu=m%N3LOd9SuX)G1>VQ(Lb)`&5O|fQGV`y zx@d~%SOlt@2FEJLD>M7g7pL@F*a&op=%d~~@q^M-(ZkL`dYR5hv^6vMXB7qgobQHmUoX;_4BR*s*w3cIX%Rq70y+D>=UFil< zUDHGzE14AGw5(bjRdCkEuj>I_ViQy3k1=)MxdZ(>?NY(v%$pq;joaO%@bs$MmT}Sq z(Q+8MvolVUS5y5fBzAa5N2B1$Nt!c3WjCvHilF##A`WvPUPS_U_F|XKuT0O+B&IWw zwJdO3X4awih8dCAoimePVc!dqG@)hw3&u{t^-zeEjhVf%v9aJFm~4Pv`*R&GfI5Na 
zG7(v96lXrx65g_*@@Qvx@I}b zm+sFk=ULwHCT7KY#m4KpAV)zzv>Qogh0FRN3IHkWd7QnHp8&j9J5n+)6THrkB6N>D z5Hg9aGFwS>Y*LhFs?Uo}9INnQ;`8xu!v0VmY6R~SD*TO_sEnFGEYyw6Uanqlc4Gx& zMWaKfxH|BSiZ5CmDY7g$){$LW0t8(nJZ~=UVbx*M{ zeBd+ymtA~1`(J$7e`mp_n@ndvVqN}}qwx}l187+?M1|>UcH2)!;Fv9%M|1oy**dbT zJG<0r2M2C1u|&?`)4R;#kkS#qotk&@Kr8Zi|7Y`V-RQDGwZM1shbRPq~#kH6q*N_Pq7{EK~}j+p-fQIYA`M zq;fQu=!ZP8nlP(rgE$EL$`3Y11qo-iCPh_lWLkliU@pCgm6D^i#kA;ojlKi$a3KYu zAYdzJuz2(_mP<$E_3qT;PWoN@6bWR{IND8+5@AR}tLWl+dJLs#7m*>TIIwJW)g7&L z(DSDHT2~r7>%dm9SqfKoSXjx5bgK2P8t?Q&=3v9{6(LpyJ+zkkDCf|~ENRPDE1X9w z;k8jD+T8+uDxRCNF-o^6TG3nU3`#2(m_z}cQu@2m#P;X>km$rs@blL?(~4IZRb~Lj ztLvB#R&SKk*ZnRFGysl)J8#2#M9OlZKmg%QsyBA}lFRK^iEVb~!G*nXGD06fLTjjM9-M9#fn&!jsZ z-7?<2p9~bxo_y6k+8MEooX;AvZ@Wg@UD~> zu)QalYGxVFi|Lmv$h3Ghf-YXnA5!T7Cv|i?5>fX;K0PVLbi2`*^E*J7irgY* zPOXaq>p4e>d%A%o)N_$F)i&!{~`2JqOQ`V9LXH?R2Rw+ zo!Il*f;`y5D8rH zC?3x*b^#|Y@a9I9tjjI9*t4Kt3E83Vd)l7HSMXl4wy7eZo2q1vYJp&viQ9TW(M)vv z<7)pq;qx9zY%UywN0;qdwrZBnk zD3ozvJ+ka4AXH%6hVqt$#hgmQRDk4EZHy=3<7E$!QWrvlX&w~_W?jRR_?_Q0y*}b9 z?VE6N!02HT6bn}j#wV2Hv&Q`D1Ql70b<&JbY$YasAB)`tDI{PZ>Q{W@KVNyi6>)!N zNlcL%jZst>3~t^`iQK!|#1C4g?x$R^Y=G<^xUN4_azt|dZ6G9DFr-r2TuA>9Qb|u_irr^Bu}jZf!-yv`sm%DG%PWe=MM?yy^ zl;Hwiu!`^425~SRvJ&`9@w&0N$_A;od(&#Srfb_MTFhHPMLU_{?&a?!)<=*Qi=KEw z7l6nK+(A=>KwY8ULZpst86h39%D!ah`5GdnP^0NXh9(*2Red@9{XUagzkh(o1wKpb86m>b$ z+;6cSvm(C=EJ!rmU4?u6Be(n3cLWA7kG4%iOj^dBxe65&lMPzL71Fj>?xjVze!Emy zw9exvO_J1@MbA5f`B9|o|3DWTX<8s#g4@d%@>sdvaYbV2z$~>LWE4}(!y=dEPzgoJ zO5Cr;mF{BBLllHn=*BNQ9hjkfN$wP{0%c}(iBR7#JO)-7&Q+@Ux+CX)-Jw=hlW>}= zbD1Fv5s`xF7)Zj#7T2K2Et>5+zHFiSy5)!4SuZ6}+Q^iS+v#a~640fTIi%?gPUqZD z@1J-k!Lp65W0_f1NB8q8+VjUHl3b3GjIQmWDgL*Sa;vt`9?FL2a|&QBJ4;$0W26YqR{P0`N9?N?X5qa!=bYHcG9iWTNfQUgL=77t3ghN1)N)W+J5VMdSxbX495!H1*5B40hhlJ-aLQu*0zqc z-nWe9B7`}q@Z+xe6WRdNWnYG^Ec(umhQ8R5)qYKGCzC8pKIrfc+iFFaL?hit0k?E* zCX!bKLt{O#4^eCL@xDDbYqZv4fE+r!=TF7g*t#8)*03`lU2Eu@QMvq&7RXL4-BgWC z7UT`^#0(Ca*v>hxI3&G;-$JJiFi2R9A!>8QIMo4|bIt9-wZkscXRk?Do|)Vsbu%NR z6e~cVoF?e3ySr85w{Rd8 
ziU7Me4m@m17XbpvK2z&;3VE7X>7)?F*kLYrTF=%7;$w;OtNYMq=LxMgI?<4-m;+mR z7KG<7M`g%~s1$DQ4C@?)=bF_MX7B zkVSdG*GzV$_`j4~(`d~f`~<0GoHE=>@aok-Gl;vJRBS%&{OoS*@AMugM?0yy4}EE_2Zzi=cV_J1dvyK4NEVPv%8aRCjGV=I*7^urmM6Q6!#u%4bC$8$UQ)Z}#rA?~K8Ci!qeVQo<){qNoS=tgBgmUMHL zI39oXt~UGj<(H2>gT<|dO`@>I%E(n4sG_udu2#W&I|8)Q#AoIfeWyvlY373T@+7pi z-EJV=k0r-}w5#c-ZNRj~Z#zuTP0S5%lh)bi-thi`U^A?|aFy4cDx*OPy0PKk(4&n zE7iB@JsEe+d$%_R*YE+fH(HE{2Ems%;4Ip64)4+LBjhH5$3v?mJ=gRdzO&B*l4p?C zVS~TfX^$7;%$Fzou69c?mX&i$FqXazg_}$6O zZ>JwR#6m&TOh62PSkx=1%ByH%Jdt}%>6EUaPB?Y1rL4x0Z5W;ulEf<_VhPbNX$5_j zFSF&>&l(%^<4lSJxN1=aYKf@6Wvr=+*3?QKiu2#8q1I0pD-gMw&8NC4u`r!eBw^P5 zxh{RY@!Gdh>>LgBA|z?;(#@3ZH-}GczJ4)W)kn=1O{EAEb+)U&huq5fljyFkt+Gbz zTNvOcUbPsR9A)2R{WT>H?|$!Io^LqlR1tSFwU~RsBl}8tn=Ur}V)3#eM}2}mMR{`g zcd>uKRXIm7WzKc~&#yY(wTxfJ#^Y1t#Cof2@=+}Eejv9Yh9L_jxtk?!i?57{f^GP!9Wkt5Io5mPN-!kia-H8QPxuJ}{ z%965j9q~iQ9GSmd#S$MP)iA9&3At7cpY60aC|2-U?;bUnh>J_2pB%07Qa07XtgB1t z<@OSBiAGlZRD^Q&RoCGKyd}zeIN9(_@uE|5QzK3p@%5*N&JI}oO6JFri@d@I8;5j3 zboSH4k;Jo_5<(Sl*@}hppHOU^k;_0-5JTy6d4&PGmAoU}a(ft^NCov0(#gYud#hi9 z$Z}E?1;5-)uca+Q<-zN#n9+!Lf(xMd3VfI2sfe{8_KTRzc zkG3^Mg8hsSM!=BRLgCtY$G(x>bKAWBm=3k+XVHT@m*R z8}PWO)mIyn?@UoUNRG^3qAzbLOi?XElF2yfpQK!^=@sH}{*hD>>+h?wA1V$8BmC`F zMo!wYLDi61*YeUIl0dL8{|GPFlF|Pj<4QAZ$Vc2S@flk?{?>1o- z*Y92Uc6;v5b`G~X+xiZQ5$w$8R+Y_eDqQrN85hFQ1eV^rZg>^|=K&9JA|Q)2TXpCR z3sK;aEY@$rwi#OeyU>wyPqaP$X6m~94q<2AzNb(}jb{s?8|@!p;Xym@vukTpjg*yQ zqgNhYV>Jyj^PnsjbUv==OS%CcP#arjHW!|SSJ`4iSN<30!>8UPK@!SiDMoNJk3l z7g@>|n<2Y&$(FM@^CkVrfp;-r?6DDGfzNJ9k*PduOSE|k!+}Bc-xflfst>T0n&gxT z@>ub3Len=WrUdUwBe`;4b|WxiY$rEWwADvaORX(D5w@`-NE5-sD&>RK2 ztF-D6cRinqLRxB&NC-{7s?j$3B03M%94UPKk=E!`7YdJQwTIu?vF3YWl-^o#A7Wr1 z{jZ$5;iOTAE_Kd6og30@x`lGa)F?)`$&6i<$*g{&=YSsKDl>Ckx39OAO_{$1g35vW zgDil$N=xr~*_>q>4~Z(;&p(M=nbA7l98q~K(>Q(z;9ix&s{PfaK-!})vfGRSq3(N! 
zOZ-ORCKZ1#Qsya&m_ha-Z+c=Jm)dBhE-;hAv>^C?0!bO;@)tI;c}UJ=O&Z~pnFRwr zV|+f16{go(z>3%c4_fZ}9D@J@eO>6hzZ1Bi=>Sp+e>`e;hVN>d!ayI1REC<&zbQC zX7AKHimWnCLpEZaH~En>p{E8eg*%R?54bh-e5reCnZDhz1ruD3iEtt?5Hp5h$Ee%$K7VNFs` z4~^+P=nD(yc)qJ34dDj_gUo~U%HO(Jp-hp-I76d)8WTR2X?kt1m=)l+bi1TrYdti(|N)Nmfma2E zU(K%GX=Y12x*>K5eTTwDH&(1=SCra0B~7c3v@Zv*Nh#GEUU$VLVasgaJ{xIFC%y(` zxxWjoo%5jjM(Y0D@Wpd{Qc-Y>bBx;ig&NpB7%jMR7dPTEZeBj30g@=rq6j6yo!78o zIkF)9U{!aemKgn`ppAFuyE2K%eVhVRf4mv?QM^1Gz8(;+I_iw0@O;QSW)V{1`HbUu zDK0uk@jT2*s zByDO5dEVQMR75D+XRY=5p?WcTcJa@a6=?J0KY)FJ(EJkZ@-+XGaDaqn4L;-v=h61( zWYe=`!ALY*(5sagdssAa-rqeyQdCsQ>K1__fGMFuFk;!6_YWWmDl!ugUOmyj&H4Mg z;6zVF+;daIkWdWSL~+O5lV^Xl)@0tb2o7DP5B~AJcgMrop?D&Ts8SGA{~7T(C#
Uwns|}YdeWJ*scnE+8`qcBA&3R> za-%K`gJju3yeA;b`uK67IHhD9BKe^GlaX0W{WWBXdNQrX_7ru3T=u=g0lD5h6wxg4 zW=LV%*T|&{tc>05pd0u$`96L zcB(f!aX361)Z|V3$YI-F-Ug<%(H??sP43>u8E65x38DqHj9Y6giJCJ={;=DOx`p4SFpDN*O!qLIvNd#-v6w)5m>#R{vJu6S3q; z7w9q!tb4rDr+(WwY<_QO<4wECnc${wgcL5?@JcxTY?!>U`p&KrIQh=43-KQvQ}L8caume=>4sK1@j#)v_Ql=6qx3~?H>&F5QlJm0Wm zwlkzNR5NII_MQ)TC*?-YU%lsg^^V%CZrvKZT1;9%?+?*LUzG8n0UdT<&Rg<02S%pM zO%0Okzjj1XdDzq7kqf-y2E|P_I68>*ZqJReEcDQW?DqmR^!0z_ukgj}*`#7UM{*U! zzO);|U3(}csz0n$;=?5}7$AS)r4dCO<|_ybwTq;@G|CApOYl<&w%~g9I;ZX|;Y519{$(M;Ys z9FvW)VE!qzA0%+HT0>I(=&EBzs;$Nyr8^X) zuaI?ivVQ#F?1lCfP+n1Y&OkdJ>o zw`2&V+6+}+q|DF8$nkg^EI;-3{9*R!su4Otp{gw~@x>aMp`=U{DIfncAVLwOXF|( z(ZRFaRvyl>f~Brum#3S6aLYkI6g~BTCVbpv)l~9uasAY1#>fC<2bD{bkXCzc zNP=G(Ns{&?|1JO_Iv_@}hBVRCf>5)Ta__EkHA@@Bnu@r^g(ewID zi$NnJ@WtQ}?(}&~Jdo+deXhb4QjDSvYftd>H}Y~@GtgDE;-%)r^m&>VT#?O2G1GF-s{-NR~ zx3a^wT}~Gf|5day%jg?@iDqEUC^XVQ_J3ZT$1ef5ZX0r zUFs14eJ@k~ir5b85w*4SWY4$$<=Zdri68Qy!{q3m08WC%hg4WZh%^gf&c{CJbzJ{| z_`b_d+jU%Q)cO${v7k=wRg~dr0gL7d%nk|efG7RReaZcPmF_$uYo#_)1moLitQ;JpN;2_N_-d;xh*Ag= zg(a))4-ON`9ew9aoKU*GqD@KkPz4OJy-F*wWfesI=_vd}D2YvSw|f+#&&Qm+G@Svv zjQv>HESUhk*Hmzqg$O~U>7$ErAlx!)Ay!ZwNQq;U5Q&67G2!GrC7 z9Mb&9+Iy8Z9!>DY3c(Z-LSm+*m)t%>C$A^C9c*CdgruQXiSS4f*B^O5uD%gpxMLFV7BHFd}kO_Smll+qAVz;cIr?f;VS7e5eY?VJ<}~AK||9aNj3* zJufJg`d>q1*qC9xk8S$@^$xWU5r6;d0}LB=;S&W;cxM11BL4WFE*IWdG+@Ic&>|Bp z$gTHH#{Z9&S(RmhfA@b4!sve&gwGq?XCVbi;ha?e4HUjt5m64B{;wOEF4RMmPq|*4Mz{Cs@{4Wv`~$UCuw&Y@oOf* zW7D6}d9Cyg@b3eFx@T!i0p9c|BnR11shji0Gm(eF)%*lojH@b$LBv&ixU0q9L<4F+ zOs(Jo$@xjSCW+%;#fn#Qpu0fc%FVX;7+xELaDrU8)Z?^L89t-Y%z(;z+hFeUr90^x z#OW{2`HFi4G|OvM{<(tJQSF*Iw-Wntxr7GAE~U9Oz}$Psyv~hsULbL*N#baeq<~k% z-pf0R7r7db_O|feFO+gS4`&AN0{;Q%@R;m) zo4S{HF7Jrd1zeh#KmIJ|T;6xa_*8H*37Xd-Sjqfz3(mGLp?`Sn>_qJ4EVSX*@5)zH zIyOveyP6bFa{jPaA9ze`aGV}5ZUw(J#|Xsx12B($rJ;@iW$ZaW$^0|EIjo8ZyDJ>* zg*++j9IDTgkIF>1Q$OtD7>lnsRGE?Q|%0P>(HiX4alJ?x>;MLx%@p#EFd2{2^ zkoy>KtfyLS31w;4*EQ_m%gYS;3#;0J>5gwApPo{IVG-)PpHn 
z4?TXKc>YqbmaZ(T*>Lfaa;x%fJQw1HMn`>YN6Hfw6^eJv^+06oz>F14ia-JJfmL1n$mWC%X7ysz=Fd_~b^ea|^Sxn|{MfL8erfVeiAjXbv%<8LrL?h@$l%p%Cx zsz13AQ1d{wK^BsD%c1Bl4EB>06z&=Q7F+RxOoxmm2&`}l{Mm(`?KxvX#KGuYXKa+8AZf@A@gUkMm8Id*HT~PISx1| zHZ)1;NJeN{mRj$MqdbG(9glX3o#~Z(pvU{~2rTlFJm3VFs2TJzCGm}ge$f%dI0J1P zER>b`RLEuD&}~!wDYZ9FT4w7DM*=WoVf(}xc~8OSHviN(+77EpjAnZzj+x(HS4sX2 zcTa{#)F9Eg--tD-Y=4a=6Cn_1GWHTzTh<|Px&&oh3(dE2V;VAPGw$~RXzXE~%Av_SRkMUqdO|dMRT~UZkU$PPt)bn-H!DCIzCG6_5&H6HkU(Wv%I$*a(V-vJy@qr}4ogsMx!P}6o^9M|-@!z9dY3$9HbZ+p@^mmio%dOC_AO=5w!SJj zwn|e(@VAy0S?1IMem# z9dM~$kbslWZQdRSMW~K#bQw|$A@XeX{7R6XIht6Qz=Tkh$L zopybY*JIa}_Au%kG?GT6C+2%;b#5~gK3kpwI~eX-4BKW^`tR56;@+&8nt zMnI4qm*xyg;G{uIY(%NH;k&Wlx9J>C16l(30DB>yj0wj3yvYY4NAI`XV#6sL8n4h)pGmwGFOX79Cn zdMqhHwG|R{rfOlt#{FNRIYn-g;aB7`k>J$RqHi~hgHT+xj@@g*BC15o8Rqi(T;X~d zW$;@PfC3@leIP>!D8>VW*akNwR@jflb0!jU#~UM1{7dFeKK4j;HDbp?P{gg=u}B$_ zWc6^@%a|p%TSQn4{Q+dA6i-HYMf9n4zrIrxUY&qjRWiY?W1Ssr)TtPlg2LWq#a<_L z*`}w+&qaJURISy)yNY<@g>pcR*Cqgo7~E~mgtC&^w_e4A0&cftp&*|oq;ep_cs4vj zr02Z)vjau9B{Z4~hFp!r1rKF03}oN;>qFj#^%$9{vrh@obYqM@!=`ps^EIv6D;0_C zVh(ig>ah^}GRYb!xTJYZWv^&V0t?M!J&R;VaoG7-WfqYrG>?5t9#WicSSFbtT!n3B zW184j17Q}zQLd)K*bBxV*?NTA{0Xz^>`z%@PmWZ;som0+eFEYfPPT?T9)pEudHHlJ zvRX*Fl*jX+(NctmT|mx1fLf*74^UjrFFcn`EK> z{9H>@V-u?En%*XBVdi0te&cuC_nw8L)w*6*f|lpUf`ln3=cD|_$pV5T&j{C5x*YF4 zdpNO%2Df{}4VJz|73gzhxa8x8Xn$jBT{~uoULD;8n%_LmZ|IEw16Y02aJ6ukLs0jV z@z#Wb-^ald2O=d#VUvC@F?@pW&YK_19P$FuXq?`9u1y&Bm4NP1pUbM0z+>b!jjWej z_5#xOr{bWJBC*k4y{Mc`)~ZYXvEZ(+w}W<;hV?__7&^`{j4P?P zQ*$1{n$51<&Uie& z2JkryfomWY=33ejKdwQesUAwd%nHZkOJd5lFyhcXCW68Sv?UiWnN)&6q(DlRgqnuZ zj)?*WO48}0mVBL_MBCkcAjOuuKvib2Copc`ps37*wizsHD7jYyj5lO!JYs7rXCjv+ zrSv=bE#R{6b&#IA*P1KvQ%Cy zrm!ae16a3`hPKAY?>)V+ASu`LuY6U-j`lbEExeRJm@lyK&P3A~VOb|BRe>wAc;PX- z7);94_HB9mFUrN-bsDDAB@P>k*A2uoNA=0Fd?s()$+uwbu2TP~XZF_heC=Rh;WkO+ zJDCDw_3n3WoyMmcW)R!d@Mc~)j}9LL4wf>5f)97W+lXwk@`|@JbMV?6tkib3y?l5m z@etW2?Qa&*2g1>s&Hnwuj}sBm%U$ZZUPC{@VYkgsXug4eSMgF1j+kUa6@@F?<9Naahb$np9 
zw`?CeT5BwRYbc9nz~=%xEZJrmRSWg?UVLpM)V;Nz!~ydEsd?YMz{R{eos{g8=xloW z`>h!l)Q4A9$|9#T-`%g$;s_IfoD0-%`Q%_JU_d)Y;7DkPoi$G3Nd=Rl-jR zwuZ_LBxp0TSEa6+;$1(U5N^ucaFYUzaZdCGd{X!W`nhNYOmY4qGOd@TJ_OyB=m;*e zv3(%u_Kj&(#n~nZ`7T<<^r?8HDYOLP6Jc4db&LLM6Qp>#+A`}l}a&@xtfpu^~& z`3sK_^S^BK32TbChFnVrt*xT%rv`VgTC-my@&`q#$mJTt7CiN;X|FzHyi|J)INAlz z$u^jOB$RzP)lp_QvJu7IU94iR zujf)5Xb;Gj!g@RB>O|Y@S1V9G*P9J!oj=c85Rs%bjyQ{Dl4pHA6h(3KCJu3}LNXgB zI=-UwnvFgMR?nQCXtUoepYujs+Xuv`m=fM|d%0 zhT68_$%3r8RM}~-O=Gq?_4g_L2W;YJGK1vql(@2+0l!%=p+DPaY{wG4$Ie(}EiG$5 z%52z@2IJ@4KfugR*07txcd}Fs6FL<3#CW^COPU8#RJ{E`FXoW5HTuX3X$r^flkR-NHce%dP&k}OuZAPjj+_4`<; zKNudiyMCwf4^S!oc=Poyd-0>}U(1Bz^WuMiV6Lj`PN@N646h1@<_2~5PdIe$#mDLO zAumze!}aJZ6T~EsRGaz@0cGnaF3malA(XHOi`2gZYHbTVH5lNgrf=Y=+wDDvp_iJA zJi|_%{1-h8wt_3a7xbU?_8r_M85y_oH)!f+GNPcCCg@$;#74gR#=9uer)bP_KWUIy zr*lh=Z7US&x>X5czG(door?qM_a#wl&SUEHGX!IC9V?ADT`S#{mgNcI&#vr zo-UcmWjKK`NI$4+1#X#V zt^L(7AP@v4$vLJ5_*|6IwK4ZOKeyK<2zw)FZMZb`3PoRK`b{Ry{W-f4jc9WyPX3Kl{pCKic`!wB&n+N`Tbc2HcJQq7LqoD*93qkTbFaK`Phy1%W}iA(Npa z-7j1${{ZM$SeJApdsP1bDor!oLn1+-vrNkVq!Hkn`=gHdu9wK>TOz4W-~^6%1(UXcu~oIV4$Dp~uR4x7 z;Y=49X#9fnbw)F?wDONb&yR*6{6_c#H+Qyf6f3J8wkE4$Q!xG>>k}s~jr$$TVMYWOJY~;?!*8Bt?3h-#Ytd5K4GN&bb=Ts!&Mo(v^f;+|C z30f%H5Ugl%cXxMpcemi~@cjGz{^R7zQMRmm%{j&ud79F_JM-!~(rf}_O9BTO#N&H$ z*jkP#>2UAM5|KuaWqmJt1sU3j@3RW`mw3_I9jI`;&G1SKmrR|z)SX>b42G0}l%DBJ ziYtK&QcwGwVG@ul_^!Ge{PP`aQ?s%EtKoPpcCAxBZpg&Q4E%g81d9`5)VYG$S+)7A z1+PyAv();%WO44n9^M#P;-1^MnP?M~1$XA6I8jrTu_GxWHz+7*kfg3{hgg9qJAOog zLZ!<6xtQ1Xrf}MFP1WR#Y%RJV?zWhoLNp>b<7yOBi7eHvPlfw=M{6-2b(UtU?TFY6 zLa9yeR6fc6F_qQ)QJG|YR>rgBJ?_ZvUqW4ElH2jREt|f$AfV7yu?5^REomYoBv7;h zJFf^~FqPZiV&%}_)?Qb%GjZnu{GzQU^S_O%WvMI^giZrG2U{H~b~Fk46Ail~j@<>& zgYoLHeidRoR*3(IxK1%FrMSwQ@YmMAdov=e`vL!vJXvxq`hjns2+LWa$kWvVLw38E zJe2t+dBTlW$G+d7n(LXOSL9fM1x?$vcj0Gtqkn32lf?2`ZB1A1)prxl=WD3{WH92Z zm)KK4a7bk*ZGK03$eVhbKQ=ha0hhNt$annE--~3)Zg61OrQy1iPP-)QnjSOU9h&9g zJFn62dnhxS=gHsWO6+tzVLcWtM_fFZ+Pdb?griU9JLTCWy)2AXaw$)9Pm7rq_iCj9@kV+b*o$nw 
zTK%LL!=q?1`%2q6Fkh9;mb((S582FAi#fi<$%c@8#MkQUWEy8rKM0y`6;LEr%it5F zb4%SZ$CUMHW%yGU&dZC(13OpAn~CZGnjOp{teTXS{x!d)M?=hsN{rRr@5WTp`{KOG z*1xzn?ci@3`3b^QN+jjt7obZ(cqd zX!UZDnOVS@5+l)$xS~p$Kfd(*8lXR3S#QphWSd@KkF>xon(Xrp!0Dgqgl|W*)>Xv! z=Bw;Km)$i<^@3o3B=%BCwIHd>K^!@g?ppl_?|ivXfU{-Fc3jeX7Hl~p>7KyNN{^0< z!b@R9+S6v!#bY`AQn<8>IDpeL02T{;mL{q)Sh{%vrPaONeHR#jvUvO2gh1JGf~<#O zB<}pQIe!$|2>I#`(${NW!1H!(`;WM;G1WWt&>-St1#R84>q9B4%f-Hg5~ zN!EphmuFCVMG-J!T2;_*$^tKDGUqbB-Zr_~CDo9^6>QU{yPKy49DiS-ELZbJ+w=hR z2th=zS=b1c+OlAjm3CfNX^V zu8d{~x5#^Unb&FXD}8z&&c12&GS8fs73J(rC2yo2R;^oKxjJwx)80S53@a*f>a?iz|@Fin=@BnD&FEc%!MqiIBUBAWS z$0sTzC%s$?>At*LT?i*~RQpfRK!q9y9u{D!#jGmfS9gXF{rnZk0j9~7j_krf?rSe?C%`G3 zv7DXgTz$oXMqSg(tE5PW8_S;u5c`96tEpG^d`h}O8fgh}e=>NPuPj$NqItA2A7W4YZ2oMEOSXJqi>BCA=H7h)kWrXlA zT-(WFgttwLnd8kr?2%8bzc^)df)~DQMtvLtTGf4);*7cetu2X}(gU_n=|Mm(%1~TO zOTxd9a^P=~l^Fu4>ep2{5}Qy`x%C{jt3P{eZE9Pu%R14;XS;@!#cO=%7Q8DyAcfe{ zm)JY4k0V_?dH>_6DdsZx7vK^+CFw3Y_M_FpFRcxX)$_;BeTjLFJ2fyZ^NcKKx3VMW zc=t!3>^|TBoYcX3oa1Pla!jy=Xj~<T;m_Ze6G3gmE9&5^1L}F9P$AU&%)Uz zh6^pWaa{V~akl1|#vwQ_FnkyJzAPhw z6`Bhiv*frG>Ra>25S~<~<98hpKNd0??mlyc!_i?(@V~A;9W)i~{cVGtQG;l82yiC> zpuNHlZhX7S0OJXw=B$~`A9ruq{QQ!T6<+;&^q#ejM0@Lmic(lRK?A$t#ND4#pOU%o z8~Dl!#)TIp-)-RBcbagdLG4PUV9)T9b50zCt|HlP;{}p?Q#E9meC3_2-4(7Ck63B2_@^@%^undSsU0Hv$_vurQ zRHrjd4?y#)Q)A-8nswuVQJK7>cG-dZ1r?+d%%B&FO{e43xa0i~j%aXf>sA#KFrMKF zb$Xa>M@uC6S|8;OFGI^Spm3|!KSq8iZKmItEOH=pOB%wYEx?Xoo!4*H4Hbsmm%E=E z8uZs@oIfU-X1B(q{@w_hITWUwNX=pSOiNb)7a%SZVNLgHm6n2J1i_Z9TVoEA)G3t5L^ksPIGnsT&1j5^C@ zothdA_|j9KoHUmlgk4R1t3P{GSFGW^o$^h8I*OJ4>C|9@Nr#QEEH$=~W zg#CN9Juh{}Ote2Aj4{)ne{2!yp@ER=cvpnW5zuZNl!t_Jq94>D(gTSatSXCS@g zv-NiYjGBvMd-&O0d#!nH%cY_NQCgOX=~NN=YL3k>8fXdCbXN@p$_b(xaXh zlZAgJgR5h+b_ZUf7GQhYE&pQloWN6-}STqvE+)fQ5~4 zBp5yF;z*EN$&yJ+`ik7d_B|?Zw;voFKml;l4k7%+KmJ3xXS~e7td^%SQ;SP1!b95c z#56VIir)Iu`!mZk2<%SO=89_z(u5BWq;*oxTq{slEbBj%$Bgj})vmGa?(QIXqsNy~ zmep+}RTt8XJkK4KOF8>B9xS(e;Pl?b1>?b1?+dqF6)gg+)UefTS6j8|gp=}ZE~FSE z3`8e*Opo~e>hQkxdekosJNL8^Eu;kbvwBjCT^(Hw 
zY`=zAF$FJhpRlx@eb%*J0F;FfPoWP3LM9vMP6>v}UYW@eR}oSj5zFg<8J!>-kSvZV z82U;xJC$#B6!G;*f|~Ff-qXDGs#g^)Iieo?8)wCKl26HyG;%byY0bNgW4&I;;7B@K zj?irxXmc#yeA;rVo@aw2I;Mxs~>RJSq?DcH*1x9<1PS5HP;pFwCP&7dxv!r~T1i)aF^P`jrTk zkeWYHSwIh^h4vl&LYj>gWR9FnoML}F5RbF*BWb%g4E8}EG6FL}u2Xn<^nv`p3^FPf zm$`~C(t{1mzVj+Oe;|JKC)lhjp`N*=h>&D^pEVcOMV4|8p_gKyA&acHXAsDGxomBp& zHCdWz#w?siR2mQrTAR8AY_S!J&UVe$ds{*Ahl2FFhpvT7_CFA^+23cmF&JotA}UoC zy(~2R7OY@x-kT))r4UmuHbar)joDQ-yB&rP>ZOtzWqW*5E)9yOHl{YSA!XoQ#wAaQ zHBKwS+HL=$TfOO`vFtfk^vB@?D*(5u-QhzG4-r9WF{Q?XFUmgj8Q%ZPI$-eQK5IhKQSnv-Z{)Msk4 z_z4Q0&8GFq*Mq}5py7YY&`G8^fR8_1B%1pj?9wcybq!k}xrH3jSzmG8$+YzTlItNC zkkFQmmBCx~_O;)B?C>8WmDmT6teF=_&iynxlp4~1zzh)ea_9>M?i1~FMu`geO?QBQ)Q}x}> z-q2~3(|Rf8Y8=ce4i)UBzp~Y#5d(YjShB8ypxh&N=5=e0j z1mM(}4)5IC*s^H*84pwlf(d-A7+bTnT8jg0`5kouRk~dcE7aKiFx~7&l|gTS)9Fk` zp0++&Lc`*U2A+_$MdZO zjfE~$IR!4wx0q)dQw7m_*`u21+(-`1!&0P??(+$nxS#fx1{jiTaenqs6u|&}Q<`0Y zj^eu0mvcQk)*X0c0qB0qA4cb;Z_-oRop^(I6CHVW3A5eZHD#ksEu_rpie=UYlt|w4I-K|Uqn%llI3OHk9q#BLO+OgVS0X9g=2+d z`*NK`bnx8~PlIBf;Vue?e!jfjzrBo`NV2@SvfO9Jy#(l>z~H1_kYZ227@-1^N~GmF z>+M5Ih{$H5#5RdAX+fd2%w(dXWmP;sJ(jj4?uF?PC@jRee+m0Yjl6hQ`Xt-khPRMM z=e+dK_!2V<#A!Nt>aJ4XRHE;Gv^2+rCr7F%#q+eFM95H88s=QehEeC|4;eYyA~;yO zmD<>1y{U3)6AE$%)M3EXbbaFC^2II>Xp-cvNQ92E1GzGwW|H0IELdw-aGJNG*yYSv zYxlosSe{W6$uhuMvf`SqJOjRG_3T?DHf7W}27JslRUcjce^b-42BZd2B^HdK!?03~ z1$0YF{0j%NVQSWxJI)YE3>Sb^=eZFkAkF#j6EDb=?CJ%kmAd!*)>PiG-Cl{=SH`%C zw}loEbpOw4)gL_r*oJDY;Zax)b#ucnE*$dO6Y%J;wx7G~9V={G%NE!{BUJu*#w}tg zY`pzpJv;dt>Nv%E^7-API)lF(Ccu2G_9wVYFQ8*yC1PW~H+^4Ij&8S$*vqtL^f-4kfL*?XK8K_7k}qISJ*Xu(SW7IO@$)EqrDaR7nmjWEgeCk(Bu# z;hT0-U$;*!tM+$~MGL0>@CVT&+~8Jw=A7_F zy!fy5hvEBICaX&(ia1grp}Ha9AKIo6c1+5<0*S}y4(M9i+xZdWPmF{Mt=wqThe{UwoZw9&hwVb|+jHq%}=oJ5i zzk9@rueTJ@jOSfxwunx5i`vUG=^Nqm1qk@lu;_LE5n1Fg}hnv?L&V zSNG4xLh!U(ce(dxig>UDPFd!IlJWxHXffj{ZZebkmdM>L=dlpMMSMswp7+lUa^^Ai zsI-nwZ_S_bKWIuAwf~asW_t_i>}_OoY1@5*m$fsc@~4PnmlgQ zj+L4kL#Dr`Y;}J}chQ?K;z&vPSVx;OpoC2OLAdo_l$pk*iF(DW8>x5c 
z>QQxFeM~ct6bA3kRcXJs4H7**`#6uqanP$!nhh!F(#%^^Zi>uN%TAhXE9!Za&yVOi zVrB9sE7R({=pn?_CX?HdB^u+bkodS_>6zzwcr6j+)a25#+JKZsIV$F>{qn~Z|LR@m zKncNtkIW@HA%_hqdUg%XvN=3ThUhv_<^23m{M?5dWl<0!Cox2y3J*Q8i4$q10sLAn|QmxKt8qr5Sd(g_3Z)3JGUd4 zzjeK*1bzGGe%VRnuVx@bSCfSc5(=F$vZ9+ygBb0ii{S05?KJ-} zVkt;^TwMu(i{|srxGacaKSI=F#Y4R8A)DA0Es4>wi!pa@GC@5Ne1h`%-yR>7m|Ceu z#7lV}H{HZ!(eg{^^n2@%keU64dk_Tw+Q`sJAEI<2D%JO?3MdDczw0U~!)C!T`pP}7 zC?)7y%*(WmC{{n&6Rn86z_~X+jty&erCk|?grHwgy;2juV1T@j*sY`db#M(i{J@D z#y%j+%&kU&Mt}799D8Qa`DmA_$-WJ^B(Z#+dVz2&F^+Rf4td|G{;`DD*Hflsjzt82 zc9xG(ihQ$Y(si*Mw()C?uaZ)S*7dPCjEwnk?6JehP7%qJYi>@Tit*)sVE+v`K^Zl1`>I=yxQ&T>|HuRfhqK!bUtY#|><=CJl853`M9M;J`(Wxrj%*JNj9#`-$ zO{jP6{rura>6)H;XPG;a_+vl9=@>@4Gb(d4Wr~LIfOqfk#&PlR1Tp4YM_0aHcxrHV zC^a7DNNO~B)k}s%X7?26j$o^@_g4pA%CZ%T95~EgLxWOgzo|;<;ZpBOj`Q!l(Bb3n z2vf~MEC_YaeGx(FKINvoD?=2i2OB9baadt#I#4wa+b0zDS{Yk;DvfCA$7q_JS<*du z;H2@pvOrG@#Q{Og-j$Jk8xMzlSKtvAg_8B`Cjg zTwWd}<4(bpw$IRN1g7|L&oLC18g*>gFjI8KBQ&rdyMDMajaLzbMXUg5E)T2Ta1yQ= z7s6b(Ti>oYh40t>%WMv-yxy?BKrxngd2e3VTtv zAMr8H*EpfgxBPHo!t0oaNJtk8U6Z4RX4G4w9B?}39?2K8dAOEgusdgLk{9H;g4U6m zd_~rV1qLN|_TFnOzJo;ROtxHnHnStJfCOH-y@ z4GPty8=K+n_l{A}?AK2E%pSl8s2gPymFt=-({;TI38*B829E&ckg8;$?*n;bc^}8L z(_*)?qx;VXc2qX@kX1osp`;=z3q1EdFcCC!&g|PH@1O=R{By^vwcB2oWp*qlGIF_K zm8SQPDFqrfufRSF3UBvI7;udo&->s?(#&ai@Cfqlmm11b)Q^~Y5xGdlWB!pVsn@GD z`flbS2=h^u!JE%$FO{gvt6SZmnhJ25jfmxUlhy>YS{(4eZi~vr@{(!07TGWySEmCP zWT$H$ow#Nv%8^AihvSH2zsT&B@6)Y>3HSQaY3=zl#v&k?0gr$eIu-kO{p|g? 
z>G`=@v!8}DT+saNZ}m!H5CTGc_b_swfO>8Vo;$z+p7#3(%R7M?^m^R+5`VMVT80p-V2w^=x*Acs5VRa(sC3iB&E)x6H8i#{M z&V$;L4oHjA926!JW^|I~etRV{^~|Mkiy(;(;ze7)GOTB-8wvkP=V7Cr9`&TZUTTLu zm%HPr(U9-M6Zu;Ii2o6+ye-0uNs=?GKA&4G+5h)o@Sg9FcRZ9d2|<4qju*%GOgWV~ zK@w~Vz`p03c?XB8=ws&7>7-#8qbsK@6ezYaU3sPVIzZn&sePbLc6> zU=2c?6+AUL>=ve;vb`;-9$5o<7c_i>Cp0IRcaH2gJ<3Ot3-;r5ru~fLnTF*>_@^A~ zeaE@iubOqHpf|13UMR=^9K~;u08D9cYWGV1J;(ZrX>Jt$w82aA-m=gLi?c9_b4{LM@9TUfV>1wPR!>_dlE|!nHFSPI= zS8k_FWwSwtRtyL08sgzfLMSQ;a-H9=;IYd195*KQmW=ovHlm;Sc4K~h<+br2n;~q* z)fo6UoHTC!&0c31)@NDJo`nobUj1})a)#cmtqD~wF<|SXKaeu{S4DzN4A>JWv*hMw zCzVoC0TT@aXTK(F%>IxsrPKsKg#$nLZBbfduw-$Hr8$Y>he>z>Xbj{^rx!Fh^>^WY!sE|XA-)7}I zX{iaR=SyS!T6)4+@x`k7^NJT2tVNu3bR()@M_c7#d>iN;gX#lOOJI@p8TfX+zj}s! zLxtnL-V!aC8cO@@CmKdgrU#YQR7O~VGqBazS90xJ&%}{nwG*(O)Pu64?$aB`as8m=6?1gl3pEu^47KbF!+9UvJSiG_E5k1(s zxeCW>Y3=Kt!-jAr>2S^I__Nr+!!Gr0QQqIMatdHU|Laj5)bsVCV&@6eCLde z+Sss#9GaorG2Ub|yQf#!ox9N;NlZok%r|_2YB?NSUx$J_1aTcC{&;iRd0R%Qrz**w zAaGao$xGF)dNcNipKNV6Xt%KYKe(D>?FMd@KSTBfzLnZp@UJ_wZ+a*l9*&GWHr|rsHMHw_bD*p*m zF%as0>xv`BA1DB@L)3XKt#UjbO}nXd&{AJSjX6#mJCZpawPd1`2+E^_Nlk9bn`!p8 z)i9H$u+H)LM3;>q4ERq$x zd|=uze}vnzG&>qCCi3O>{`LuF{?hOd#U)xK0%3Ngm}Bl%-oPcP^;HGdD&^Q;U~*e( z?bhKG{WSp3+qnZUnluzY8I&oQIVjRO@5zbRXDHV*j=&A) zgdAx|8I!b<+GJ7UwpCBM$|TV5GB1@N_@E;r{2LM_i{`Cf&pzQ$=Bz$OBUY08K_&14;nE- zqkXRlRg~d(MnyA4P6ya{Re1p=+UxS?e{&G5^z;$eLHaFW45SbuXZA;sPhtoyxouj6 z;wY2!9sYKWJ@eYqH0uTe%OFWANu5GL{I1G3N3S7+q}wOWmXKj}(ssw$K3BsHzY$o#Y)2yZQkSGZoWx21gA)1|GRT)FtKA>nuZzZ zy`PfSz;?`(Tr~##i!C=F-maA6#^C z#*;bnu9KquY;NLh(bf({nwx!BZl9Bv194r`BAa|HpdNCWxh=1spWl$FLdD#Cmmu)7 zy)JU|PWomf9Ip_SdUVtg_m>X6D>J_RE}|{tJ=;MPZ>SF#eXb7GnR+hmMkkV!DaTb! 
zaILN^zR6OaB}O3gXPnbG?nb>gsMn9oMo^n$4WBC3os1PT78||kOi8D!VU|?!qA*3^ zPYI0BQf-1WXNwWZsc>0-9wwZ$NBqZPgl8?)6d9iP*uY*WSL175d5Ye0nzlYodJYjp z8iQ_pXFpvU`u=hTa(#TmX>fJ&ody4!?)fcpAovwgSXR`-+Wg8)gg71{GjR15?ry)L zne25+{_nWVzzFpAk&CiF%>sWw-B*G(KTe8;>5L+oK$-+-dV49Ub&^L7+!PsW?}e`| z#Ov({<@`F;Mc3*WJx(}W@sBA!wL>E-wYxfC&M=Nh)(kQ@v2~~AY`TuztI-C~xmU^; zzEY+tNc{NTEM;w~q;-pbf=OFLL+%aVs!ACaPH;>|7%@h_pFszD&KtT%eVHbXu>E!V zs7EjLkX!VmGDbn|2|gVN=cIjZz!Qb}0Ys9%ZV`5hci*D)iU#@*?&^$wvY&oEjxP#x z!#7Uty-M@48|!d;=j~I(a8+_`$qrWiq1m1m$y!RSk2vy*>wJ>)#b9sL!-P;RD^A0W02w!@XNu`Z2)i{F_NaJEoh*}^YXqW9VCb#Wh#eAiN?cv>m zCY>soK)+-I^sCCglw&ofcJ37i-?!ef^qxKf+_9Cok??3DU633a^Y2zT9 zk7k&KJs9U$r-rDJTIw4Vw?a^&Q~Dg{{seAr|DS<)qcN<#xlGQ^tQ$vO012mK3t^V3 zC6-@p|AQ7o5+2!S77;<0jOjKAiv-$lv~=C)_Ssp{eI$kAoItKCDSO{)8+j5h0YOR` z#^%%{NITB{0eP<*2D&wvmtBA`2iaWKh)c>rXE4R^1{79^EI<_W%TPp*tT8w{89UhV z^ZKhfV~ZsbiIkYj##KfB@U>z2Iwr{C547`DM_>v} zv|D42H%asxAEzo`s-?$+!i~M~&2UGH!I`)GpA>}q_rBoRys$(?X5NcZ%;va~3n!-b z8F`&;pcd`!6>^?#T{sm4XIzFH?y7xL%~pT+QsI9kfKtJ>NF|(B_o@e;VnC*t!1X(T7QNHIiW{1w_}|8gTdA6xfNLd}Y5G7JsWLLh-;?_qa@QLqC-sIc@KuQqXn zCkj}JvQ;Nq67^2EwI!U3TD+?UJgtsdrFO+=+ajESsC47}K+jGy@ zf`?=rTJxNdnhMZ$$=Tr_`dT*zs^*-ko}j+a@H+IDX!8$q1zTLr7D5{EsTW{lav*nP z9lap}%6)Czy{o0FI^B*6j(nqJ)jknQB=CN*ZDZp<)DWYd5sDkV&(<yjJ#^F=A%-#ZpGf;f)X`AG{$N)oMO?|7p9}4>)%iD|FwbF4Y%Xi0^ zVKnM!48!r#bJXN7_1E$65K`GOs0wHOEPW_-gZK&#c3f@DKINm-y2iMaEdWtgzql$A z0=dar(WeM6j}s`dylzDn6!IQ}G?l%+3RA`h?j#5*J)rbOtd(b(yBYAPdQxWrG6G$R z^lQ3~#&8j2cdrWba*PjKp0%kSJUJ>w{QnMAEoIER@`8({|B^%gM9zdk>2>Z2hc34e-P~!l7kW1lQ;A z$2u<79@&8MaY6o@vvc3aGnWCS`8KrKep%VWl`jf2X{BtyXlqh>fQ79(B@&p@vtOXf zvK^RxOF%CtGiT7P5Q{m^nnRw}EE?Wv8%gR{-6CKs$)w>54Qui*(zo_dZ~7B0dFb%wW!9t`uqw0|UC5Ky;_)beZ{|G%GQbzk~vur1-P2!XG=jmsVMBZx>ccM!X z6D;@K!L~J{uOr^Xrn%OGGB?b&rh!DG84LyZA^!b!FQVW(cSiHFf5(;iuz?HcpdbJy z@70&PTWO1Ma%5<52HDnsX0q=6dd^WoB(1*|SX?*+ znB&syY$!)Bdl2j>Vp%jK@EMeh=vRF(HVGX*tn`)I98%9^gfTAcUnSj)2N%a9qU_(&N(Z-8ux}1v zJ?NLdA5aoilZ6xuf14{KkR5Rr#(5~q=g(#;++g@pu`bwPI(tKsno_gn|A{K}-Tdtd 
zVLxfhpEoZw$C9JPd{3pG4el3;=C@fCZe1XO;ZFJXCa7e6RC=1B(W1MOM~`4}|AqVF zKe$@|aaS|KJ9uUE2})YKOk1)r+Dm?}sf4Gtfd>n6SwXw)lR_VHbPq>6F)jsCJ~( zaNJouKjf0%&C6~>5Gr&@`=-&94GZV_T9r0m#bT4}f6#I}Fx_0hiQ~7i@WMKV;$a>Y z>N?`f*Jtr1%tV4p@KxlTob;Tey}MBf-?ON$CCJ9|q@K0jY2Ezok|aH_nUugra(n8; z)(rKl`|!w%!G`QLEL$r<3bf81D0*(-(r;X-Fx=X9wx;ABq8r}Z=S}n6HtH*TFcc7Q z0$E!)C(UzAxt(lD;bbH3ym1=# zIU%mB{URT~e;W0z^t>s*8ppYERzvIvEw|FFAmFi5v_7sZG~UzvoI{LHy@DM_k0$Ie zoTd3JFYZED{}PK^m^DvfQ~p3gs6u<0z#=Z!=o*Vt2eNPooS5}D`kRzE z)iFQ!`{~{C;>2)_vIca9ERvE3PdvU+CHh z>MZEXi_{*39H4&BYi)unc9SiNZ$_o~?1@0~0#UOW8{K(6@TeM}FawsosbY?- z5E3v!>HN4l^V&7ul@y}%&TVCehkWbd~D~#I|@Rt+sGo z4x!>I(|W$Fiq}zzQsWt*RbKoSa5au>=npp4l`dAZBFDp6K?ZVQ)3v+JsMcX(ErFVj zJIFIIU{uc6GYDD!qAa_pb&qz(1F&~fjX4wHwGHa5Mv9e45e zBp-S{RD{S1B1|d0$eQe)u6J(kyxeAe_0>6CvTFUdt=-9^G68?hsU%t5I#>2}cnzZb z5p|5(mdMEamA;74A=1Q#Yk%Ut_Eq~pIsAmMy=feeKY|_QUd?`%*Xp6K^iIs8W$}wjl28EQK%)|0d zUv5x5159qp&mt-!Hi43I3OTfla`sm};~&f2>z~R_tvB#DWn(5{AV#Lu_t!=^5Z^gs zlV2uBGx|omsoM;Hh+xv>TzWPzTN5N_!=_7T?9$H zRXOoR!bK{AwGdOJD~s@|^aCg`*&KSl;U(Kt_e0;tT<^5iK#i(E3{FL>zb%=n!-^W= z=f^3Rn9de;iWxKw-0!iPGDbId1dKbztz*{|ul`DqTgQt1x^_pte$!wGBy0?n|Ggtj zyAuSHY3I>26VfT?ow&H1Z_eJ#vC;oW!j;JC_2*mvQV2!Vb3tP%{fe56>*cKhP++yE zO1?`YS>ApZF2WyrUn%`^6avhWmemS7wv%b49}aMyDK$Lv{eJS>cc4La6y*JRthbBZ5Z`cj64 zCpI3^x{cdn(Z=dTas5uG#{i3Pjm`6UAl7Qf(Fl9jec(aknJ3?SQI9Z4%|W$S6PeuJ zxeXT|dA@FImVh%-E86A0YG7e`{#LxYG@Av`h&KSi_=eu8U8L~YKt)vAutU_~I29g`(zebEqE3DH;RO2HCcu=$)Uq#Ep zAfb4$bj>0X;L;XQ)CXO<`zH_S{H5=n)**r4ZhKcttRt;<(7qTeAVL2geK*mKKj2xT5+ z25yR-zGmFp+)EP{MmX8AEV7996SkQZbCrXI=2iw1EJs!L8CQ_er28?-_}0-*8~;X^ zrPt~;#SVDlAsiIwrL-k{J;XgLM`xY?9T(#i;X+FxzhEDzM8SvT?2RG29N0hq6`iYT zV$C=`J*39;pL-SsBuPjbj)ZqiF_93cNS={ltG$SoqwuXxN732uH#aAAZW=*cyYh)Q z?VR0#&~6a0i2WkpB|iLtu&O5RVJ<8mnJ}D23gKqrrfja%L((b%hdgTEe0lVP=83Nj zzdYS{GkAL{+s0Q!fr;;Ir4cS6D`&nKTN zLov8t_r)1Oj(g=nf@iTSI;r)IdY%7Ok)Mh z9f!UhIMfRY9>I1jHKRBTi<%2)TG*1~vymwIqf;5uYWnz3+ts5_ObYN#cviiGj_5R} 
zA+j1jm+{i!)>6?9j=;0Au&I)4Rzkce=B%#!gCMPDq0lVTIs<$c7qY*5L96=1%1a1|f!pl5hu@xigdvY*KA|R(wpJa0>=9W1+p?JTlJgd!C7iviZcba_TlU z!3b5I#4_c`{e0n#lDZ-9H+bSqyh0ej5GcANhM;dr^<3|WtjZ^zesIwH^l<_Vmjf zBTWvrz1O<0sa$B*hEDX*{8o7BtXqjUL-rSno(Cz>Th3?!InM)BZ1D@~rv*EP_Bx5t_?SLTmMod2Hia_K z8zZ$`V|&J#NN^b7ZQOP;!0~dUd|hquus6`wbuqh=Deu|F2%}6?ic;{xZi%7j0>cR% z0m-4wpjgpUNW_zT1M;mWH{()|PDg{aobi>A7R{*$Z{e@=^;*||@4Q_R%Jx>Wld0mK zV=B~cPXx!nvDS1aUC)Kn(;N@mA^gw1m=4q@7c5&<^OFC;8G-!)Vo9;6?`zRES4*QQ zWnGv}2!lUxBs#J4Qsm~V9SKQ%=ab+3o`;7}kRAY8 zFP)sYcIgcEeGi~hSD3LC%2iX>WQ5*yqQnZvhu`J@QhZ;Cm#OjFu6|%PPC59|6UTaa zdi!agUYEx#-3lF(XTix-v%2X|gKDGB+QmXpT@;RfSnWU6siNLQOrNTOTZcp53dswL zt~`EbAMJzz`k#bOfZd7w5#@G8SI!$%J#ZTDbQ5B18YxyBmFIO^CCewStT18kU0^{s;dSeH@wUQtAI;z^`Mt!9FiaNu|&LgX=JmaqI2yj=1s2YB|3QF(-rZ zt1hrYHt*DUyH4+!^eDF~lJO(k6)m*n=QxdD`03k0ee9bC@i;AOt*^?z1E z9fbl5=sKTQ^8C-ibZ?{wd9KR|1vWXbey=dCNxL70+kydEk@bm|(S7waIS4z#-F$Hd zrrQc$xBpHDrGr0esjW0U&CTQ=k31KureqN^hsWRQ+XH_URhyB7(X{WP_ol=F_xB~~ z_!kdz<-)aYdQ5579$eSK&iuZ}{==D*0)kW(tm#{=|L_!lCS9QOl3CmemK28CHxHqq z*tyCn{+Ssa5c5wL#u_Rgu2I&DeoS?W;%Lfv;mvv95q75P^(A}8m^XrJXH3j@cicc))wqkKRHrrElO!uyei%E zpjpkl&~&aB4bk0C4<}Yid#;^ZS&P;F9k{OGpvk`(knx>$T1GH5^Y+ZE^jXFtPDNi+ z?axGe^TcB5GtJrQVWvk331c(#{{aX=_r7a{wuGp(vlvAO0f_E@BUQ90u61o~mSQ~l zhnD4kffu*aiYVggRg+Pd{4vp2F0D$Bw3JQl7Q$<(Ch<7ne&J6YQ~Uisq{-Dr0kVll1BnPLH4nDMyv z{{SYq2pL3x&m-eMe;l{AMRytn+*7`M#NJzlLDYQ(Zsel|<*Mv)QjDC^j^^}I?p?jl z1Z~J8>Dr&?5tS$o!^u|q_x_a|Ng4t2xMRV`13yDhv{OkZ&gL~^mDt$89dV8iYWTc< zBil)|xng~;28Xjs`mxg92<}G5D2zd14qHEutwT1em3a1T{^~FQ`%xyA(kY!}j|$vv z8NkLcf1PH}XLoGIRvu(ZxfndD2lVajT)28zDa)6eQ_`I3C3_CJzZ0xW<{aej;0zp` zReM;P?sbYnPs~Gnx$E3}bNbMb3$}@0EL?yci5z`9_2;c^*}!dKU$idwMi^$v!1ney z{VTT-T9yu@!4~P21D)G|-Ma+$+u#2H z*IE)zi5vay2{=+eI?TGfNfA848CktXF^^u=dkARw5d{x)fBBEf*0K?yhLFL zH~`~r2=>6o{{XJ0xx4a%qB&6gFghPk%BZ!{iHKlAg&_-e_WuC&RVk)3!h}BN3_%#d zQ;vt*(!PP<w#fHx zk)i}qMmp4k4E?MmU_FcKX zu<+KNmhcR|K1he&W9n)8b+cMpwymoi-MaqiPe@nW-m-LEA)4#^Pgo`JLn(2c$G06x 
zJ%w1+?@iR8T(OP~+Y%+^-1=aDFItI7wvy2z+y78?$u6rP0B-k&T8*@qiXZ2IP?OEVLOJDicXo|vrT zs*8s52JbYs6#&BnkO$VQ%YQWY2^1)N&=~C*=ngrh48FDPEj>>{9?mN0(2`xM2_JZo zo|xvMmNx3ou@vP8t}8Ijb@qhY$pm3K5=&s5W1-{ot02!LDq;_pkj5kDk3(9^2`X0A z8mXpA*Sj1h-KCD-yYmXoywo<^I>pfec2*qi#&i8JDc2?Uh6UtwA!J2dm&rIjoR0pL zPVY^=ia)iejnl~@TsKBvxS$?+JXdxb4M&-CyIjJxSw&t5s9+{C;(YL=uhKC1%swKI zEje`gpD&5R)Tc?sqJ^YyCxEuaox)apdf<%z04m6{CA>jp{{UT6ADwhQ9y@$EjKhfM zV8b1bKgP0k3ntR_SZ+`sBuE)grVepkD@tq1$B{ZOk~1#qH=@^8dwZ4r%&G|X?fF)G z(8;GvC4ldNh+*ISn&|bdDK#huJ7huoxj&)yu4XHV^!W{){A8fx2e;GOqD`smu;mvQ zuEmW)(mg`hUD*$qedX>wsVwyujwD5iLMZuwz&~1IG1KN*pl>qTL(V&Y_3KhiKHm5w z$&Z+1GMr|MH!H0fbm2EE!*awexmHM|P(GOl+N{PTcFqEjcw%~rNK;@DB86VScc}>| zxdD!Q08MEtv)-o`mE_4$KY6}GxxnCb6*#zMWh>MSbQK6k+mVoa4{CY|H(?WgdK}j@ zzr4>v8Z8S8dOWk{5Tmnk{X12=dAzsY?Ywmj)EdWVF8vF33bq5<$or$j@3WZZ0G(=RiZK&m$YS`d61OdXeVly}ORd zJ0pYmpz<=HbVX8gx;aS=>6JYWD<4EAFALbn4mRF7ACMK+d`^kv@ai#Ks=jhD*X!1~ zZ8U$y18V;O+_@}q?IM%xi`DXDE{Q#3*Iye38)2k#$B z`Yu&do-FV#>Dk2OVJ;=up}9CTPTQmf-*YL!;=LMe6G_&z^g*`IR@=HM!hsy8(SZIg zyw?$`*-xfkNo%WF`BUd1(}2U&b|Sc^Nx@j`qltxIcVz zEK>%JJk0eYaOs>>8eFQkx`VmGbz|+#DK*U>>!9T&&j~PuSW?WZ5Oh?-@$*&p}U8kf2TZ4b>Z*d*+l?^(vN*?An3F zyr{&s^*jO7@vV3@P3jO5yMMYcKN{tsY)KlVxKi0r+^#;gejC{1kVczgc2H;Q1!`7-neY^=U7tr7k z2dB1cTH1J-x`iDNJxzR*gs>Gk`G3guoMTctGZN}YZGt*;eX2Je(M4?%%h2<0Qh-!qj4L+X0aB>Sh;Ul7s)th#7(20+;BLS_TF`>+9F#0^#!vFDJvAM&MkL&%ADvqS z!0adexG|IFf7X7H?L9cirRxvK4PDg5Dy2AmFHl2viwqWS4Spkg?Owqsnd3B zhsIijIf@w}mL@nUGfA{AdmImLxvwnMZ;BbM5yz5Pj!+%LJy(nl{S9}XG1a8IvoSDE z56mM>vnd{@KAkJhua|yRp=iV>54kd)*dwRXzeuSyIH@kE{L2rAbU$}Bsv)qK$zV#k z0Q|so$6jmHJUioyud~H&nowvGMTQnm0VEHX-1Ywe3i7){B63u5{qQh8xvsB5noDcA z%QCs+_ilG?@4!7c7~p?8%A0m86;3l+p4P6iMj07cciP*AdyM^QYArV2bEYlBTX`|L z%8h|eJ;3N`w|7w5L|Wohk3V;JbJwSO>`hqtsloGE8FqJ!JV|wHC&(WzySd+uqagYP z$LH-^SmL~=OV;j(jQrW=iGXGd!z{a7pOj}kvyXc8O&?HO$*)j3Z!T=^&PEE1bM?<2 z)lqjw^{BWs{EZSRDa28VvzI}XVx5XHMZ-ZT#XA&jqys>y?QUnYm`QCbM&0muKT+PJ z3rmWbB_(MDWa94~4#Q9JEwt98-Y|+(`^pF2`c`(M;wi3k9j;)E_s`6K!m#6sq>uy| 
zKGe(oTTRh{7gCw76o06MZejK8KN|C+fuT;PG*#bYx>)Q*Sg0c0R^~>_eQF#1N)0yE z2OHxz-14yaS zK;AS@{{UBRx$1pe+Om8>;)Z6H^p6fR#FA=( zVuC-JZS(T4yLz9kbH{90xOE?CW!nTp_yrA0qf785 zN;K8h<}Zyk##B0TxM`)qxGT{|WEGv^zYWW)NG7@66`(&e^fF_x_V=muFAYh5aN35n zaD?wF=K~(CTKbLQ)S+8FJ22S&)^awPfb0iQgSq3$r1 zP68b!*k(r@EV%yw*>VrmkF8mIl}lA<7iFv?F)8EBKi@vp3@LAM8e5^;X&K300ho^M z?dZJ#82V7Y)S2x`f<+G59k>AFpFv)3ClOB%N>h{D^Zx)gdKhdp>(q=A-}3(eBP&wX zU7OAMwmHEW<&R;3`28yp^=U0rHnOpDa(1a3dE|<%s$GU>Vpwb^4Bb@p>T8(1zGY$o zlMHaD1YmoAg?tpMVsX_uq|ekaxEwwjm6AF()EU*5SzGT65y1oT=N{s$Y1+%#UL=;% zh*V>4)jWWC&)2Ut#!sk)o+%_Glo>0G9+?Cmr}^zr!>AT-G0Kuj$?KlHdk&pH`t{w0 zz}KYt;OuK&G-^kZO6c@WQ&@dPM3=I&s~yL4V*{`ro%t0-=8dKi++mee82xdMJJthg z>TTmO?#d`;P@%X#$Wv{#1q{XT#DS62kH))ywlT1clGyV90I=|_+~1Pk-H>gNVa9X2 z1M$Z-Lhov$HvE<286W~Wb3)%LKsJ1&6O8<&c=hjDciMtU8+na@y&Qj@wc>kBPBRdy zRMhsUQHC0yEObX9t50&*&Af?~`ZB)b$Mmj#=V`j!-*lXe4z;tYMw48#W%d*IgJqkJ zod@|9ktkT3JdNLb2ER*!{n9@_#Qy*llfqruNJ3O{ReD!XqUv_X_htlwMz3{{VNmjE>BEA8P3INI|@gHdRa<7SBij0PAu2)`_JS+07K(wA|^f-6T1N zNgcuCbVMG#ho`k^M!Rhr&OkCi1QGK30a7Kp#HeQ^hU78>{zq=KlFG2GzdDk8703h7 z^Ix6kILKG4$xCE?{#S>cDtDgjsds$MapXv>Rmlg;8Qzl=_r%{IjBl-@k5nJOhU9ny+)IGf5bl zKvXtCa0p&F>E68$sMV{dC5#nlN2z}1>&=OXV<2db-O_MaGY?N%hGu^+=gSf|G0p>fR2#wDgl){+BAl1@7_VkAQAd|L zjG8$W@1r*>kYoMe^auF9z3Rn=oh{tIYnDfb(Uc*O{pmUAqx$r#)*2$f@wTHE+HQa4 zosyrHG3z6D`hWVXp%$K7thcAnxqcEQ;D7a$S4BE0+0@Z~XPB`b)~6*Ue=@3Cs#~N` zN94oF5h?kkKCIq@*Yc*_-dkyqhq_|Yu*7jjK4a{w*n8Bvj;jWjZ1M(4WFwgm2oJFD zj>5Qi)Gl=`L6cA1_Y)j7%l+(oe{_Do%D0_ME>-e6s#REc@~vcCzPY=HSmXJwZX2tg=qoQ$ z)#lY7>>5IqfPMETZsF>E1x}QqcdKU;mnXkHWLeg{PaJ!#bkcrMLrxDT*9ZFbtq%<7 zl0b{8>L?oaR0afL#k6F7{C{|lx;q1(N}=J6658hI+`9SJwylE2f8*v~?t$3{Q|tKg z+5E+h?p?`dJ=u>^eQK!Dl}F6>JUm_}R|UUQKUke@^lPTwj6^`m?s%*p4Ed)1)CpQS zjBDkvP1{Uo)nyNJu9KG@N6#cVW{3aeV#-1IDD); zsoaJ=af8Qd^K&eA8l5>ta?zu^EyC5jTvn#_uBH+(YctGlV*s&UPt>V72a&+*QR-GN zsYx7WW?wAtkYt?tf-%AA+PAH&Z7ib+Wdg$r{Qm$b!BdQ5>-yA!DAN)~qieBuQgC04 z41QJlJh3pRQ=W>qnfeSRc-lukx+He7#Utf{5A~9dm>#2#O!ldckc(}Z{$|~h%DWV? 
z=b$|~JXVbNn8q1?UKfskAHtw#-Z2*N%gJ^)AFXFBRcY$vZwR!Og=sX{UgjAmA3P)E zD*4)J8a0jC(Y|&75y>Bb`c=)Dj4Q~&Sd1<@pIVtLS#Z;aNL2IOx4mTQ$*+0nX>}c0 zl*t-$W7=B;=YrT6ryFgsD@xF~PC{(Xcs-9CR+P3e%E8l#7v)uBk&b%%^G=pHBqY0T z+m(?)``mFywz9p$oi_Aas#bf;yn+I%2U2yOQERutzGX zKQg!7-JZwkT4@xptCKb+Uaf(&@zbSx2}=yrp)ll&x-l>G8+&z~6&zdaA(aM7@xjG& za$3aj{hH-Oh(-ZF#5)mP%(|3DGMAAR%HTSlaqC>hu{Ev69@$K6J7bT=zWXK2E7twp zXun>Emx;s0M5aoFz#IXLRV&>--ufMrr%$@Wg1?Pf4hoVwjtcryOrI(A#eGbond7)8 zqdCK)-Py`g4YwBW!z&zCot=-_L5(COTL)_l?LS(S%e{f@Pvueu`EY)q`&Kc6TNg%! zI6I+^KSTDb+r z7s&)B_T=Cb`BoGcWmOFPFnyablfbD3-f`wmh1>2G9KWV(mbE1~*wz@BrK#3Oa1tWK zs-ZdDy)o`8{4yhM-G+@@1KfRUh!^uqGb~p~%$;J9c6)8Bw%V_h@yR5fTi}FTWdZAh z>CJqmGbxJt0~%` z{{RhYK`HrM5*bf!)zGRrZgEtgx^LXc*EA{gc|#4-+Ohd$1EKUjhL|nJp{5{&k2*8T zp8QvQBuo7@C6PC*fX8zmVn;{(NA#{X{{X_8y@m6AhSK7AbC{%!3CQ>N9V-fyRDI|z zA46&whb7BB(UBu!=x!YC0D?WLOKF`%yFmf5oxN*DUk&TGEVDkHAYw3bK*{{-wEAA5 zrR3jQO#p5KGVo8a=~(hfJK3FZs|PD4hQ%kDbT<6M8R?84QYu7b@_%;B!2GT|=ds07 zFCcN)o(TgPs}aqX3c$05Xw-P{%mH2PUL~ zQ@NGfW;$n{bK0PX52{JLD-4oqG5C9{V|9B?;dk&M(a zC=#fTb4E@^*1#G5wQxGixC6@YOB`mmM{H7R!i!uig52!nHhUU#tJ|hqd0c()I#g<8 z2wZ@}b{8GHe@dP}n2E#fzyvOEK9t$CR+savt0)8q+QO@=m+m1QKYeOAV4*4r<=4De28S4X>g2v+IM3BN{L?oQHGmIR5 zRH-zHBQChmJ^pxcpQj1RL_>4z4wS4N%<80+f&Sg;JAeAStHQ4xT#Bykp!#%B)tdmB?x62S_ z861G$L0oOCgOxxC=oR}P{iZ8DCB@T4Ds|Lxvh5gOAS+n5=FTD#mQI5AgTIi9@VsBqkA4yDc*NEi+LeB zfHHD73?Ngv`Nu(wam{z$5uVoDREju^&?^i)P6!)^9D0M-rFpYX@!WZ_#krIPW#=Qe zUtfCZv|UlzVOZEKxe>@≪Gi1p0C_L?n|saS(EW?2)?58p#s|Mj2Ke2&KhErn=3h zLnLm}As;M`z?bLmWO2<{BDcBV$ytoW7^i{hONuTQ$ueg=l5&Iak%gY<5V?0 ze@M|=%92BWb^ic7VmMHJ%||}ATC+&fl(sFTPb+PWOCMoVG!WX2*Ym6+`=I5GarYi8 z)-^AZ+hv3e(PA9ps>^h2;{l{;u3Mu>a!0wR$v3`+@uOp)*8EB>p=)h4eVnlQpR42g z8sZ}}YdV72S{;gcmQsG`?!@;sTJq*u=2#%ilE`}Jt!NV#i>I4dTQa{h$~&L#k6~Iy zDhkVUvM^9qTaxP9oDf45g^$T;YsmiqZ1-dDSg`C}*L z&+wx1TBg6b7o}M}|*ou3HYjlc2T3e>z<)6zO zklf?Df1FaAg7eLEdIN+Xp#3VilkCv_q5f$b{aT;rSF$ZcTbVqIbhA^=cg#nxVNFx8 zL_7WCRYpsE$1KMr)81d+u^+;i6)U;7E|wy>lPJf#98{3}kfQ$pbZ#|Vg4;)7%~{9p z+w!H7sS3CdVns{EFAhifqOBy=B2!oJqx5sN!0f=AI{hk$;k 
zy>WX<*U~$GU)Sn#)vYh<`uzUl{I~Gw_YZX>gHqJ7jpX7eeV6>^vaVlL@eHCE*`_4q zx7|b?`0PFU)r$`gyvi>1xjcxom5e}Xf+ z-cHDl$4Aq=F$lQ4e>&)n4l>Q_w08G3JhDlsjrU|lYy1$Y$$xhL01DLpo1PV#4=PYEgEC5c)j z?PI5&=vy*PaHjxb7#G*q>s41wRk{Aq))`sPDyNXYhqo0l0x3o9{{YQ1ACfW7J%Fk= z7bM+%t5i6*Okp~xB;YUmM4Z{>4(gQdB_c%g?7ou9dW@vweXcGt8?Nc{{R#9Y82^Hicmwu$5pv2 z8C8>lW;y3RpnCC5Z6Y|;Sib2w!-1TW-yHzYHE6U}k%fvBP!AhMO;&5Ah_Z<`kG+A6 z{wJs5T#>6$%lA4Xxkl?#pT4qtY1%bsWkulh8RI7-sOwp>GBLmfKqa>UkVkMUZsPv{ zd~T-5Thz#cWw=zid*{tEmgf18Hu)zvtGs z#Np}Ei=4DFrB+gxDkS?uh+0klX(Sc^f2&$fEkQ~i+xZIMeR z%$zB}&-D6MMA}RZbdy`i3n3?VcAu}|Qf>-9@y)t@_{{UTYtC zF$9y2hi@4F06vwaq~1dvzs`UvpOkdmM_S;=Mo`1bRF&E2W${z36;oqa%+OnWyH^JR zf#V*&{{ULfy}TwaCI@INLjno>&%G_h#IF-DbvZa~$L{CT`EyM=9p%i?kffx)Glt=? zJC9-pE8wvB1gFniL-Yq;(6lvXG`BN4l{>eV=bU{GdJ3*!xO^;%`+@l|q+`=PuxoGo z21#I#?9!A0m=!8^oDM6Q$iuP#ajVVpPXzX$i71~@r zW5`w;i6d@CIv-AIv5}osq={X;(p<9){eN6nWbbaUl$DXPK4G(rcEGCA+gQ8AjSy8< z1p+<+9eMq0p((ZSkt%RsY~yFQl0^y_#Edu?`=dQRwG6;4&a<#+*9T(~GJXAPs=d*z zrr#`Aeq4=`Nl)f}zm6+9Ukk0=L^afE8zXoGh5RdMDw5L21|krqoUUQPZ6(#(TUc8e zWj``y_*3dfOjk*!Xt8NdQUbRZ{Uy3Tb}vx1dfq!N6HoglmNHxYZPSmQPxI+nx4P6; zs-Vp=ug;TC1kXE zey3(vl|pg$wUyh|32&yuZ481%hB?|b65K+YL$K?|p`{x@WJHZhs2SOUqx_%NqkE-M z^NrhmvVAjD<+*lImN>=$sa4)g`${-|U6EpIg$D=-JF<5A)k&@xla?UooOC9u zwBOx1IVb2U0sF#Tv9uqUj`cBx-lw4&Y9z*B%g#tA0D4o=o<2rLW3@&}`jgY}s**-! zjanug54=YQ@b6OF+ZhyzRX$P9@wln&j(XQZge`W;lSvwOsSshfD@;a8mfT0a2NdaU zH!+Oi+q#pUYO=g&p<-oDK42FF{#8-#Ol$yc#E=3G3FoQwu6GAH87T8Rvg=$H`PGO~ z)aQ(PRAOacn|2!q9ltL1oYxG9&GPRA0KJLzs3g4dos5zK?e{|PPaSJThjP&ya->_5 z$BBAPwjHuNXc zo|S$bM(X-OC9EW&ka>s8?e+8(a6_qi4YnXbU~pRu2h`TMY)&r$XsM*TI}=u%DsFk? 
z(LJTZhA-wQ=hO11q~cI_sY8xH?eAN9u`_vsUpLQAFu)QAq4lf=Pn!f2yMjUg06bUe zI80C0DJeB(c$FyfO4lpR46%{+VYvW5E^Nvk4i_>^b1#od%xGF_7GpQ;*#zZ~nDd1m_F^>@qV>5+1-{a0neM zIYsm|RAA+HHMH1^sgNPsm;|0azs{QaC)zI7c%(#C0FX)KgU9(5X(ikna^=)=GtMf+ za^Xo}P6j~fisGp1En;>13Nu=vEi@N++j6T)=tBdZ2T%d;kMe6p{5xpd*gT0WiVBi* zjCK5KDD@JOoxK6%4uhVxb{%8Sj(x)+NdZac{{XGj*Ue)kk52J+UPnYI(b~mH^f~P< z<6C!Wqg)0>;eqwz>sM{G$qGi-Sc%S5$VN#%;PKysQrzm%+1ewLso<%`KgP0` zRU1-M(ZJ3RwhMVt?UQRB#B-1Eqx~L08fewoPDV#g`OOJuZ1VY=P_{UHb6jq8Y`LP-CQ-Z8vpv)v zcEh$!+1xj9IrONmEUfP(W}LFh(G|*p-?;{v9-C_bWLWkt2MRiVH93`;LYL1wOJf+V zFxa`_YO0uNG||V2!qcx$l4|JkEqFDp9YDjDL6m$#Q#vjw^;Fpy6;3=L5H`ezS$eEL9a4LqYR0Q?IsOsAZsYpaRp~2p zz}pFkpEt9q@-oDQ_L)kB9mwmNOKD73vN{|d#~G+YVYdC{2LR)!^r9#7t_zMxt|s<9 zZf#Mw9pCQ9mIHC~e7~+g8nBm9mth-h1D<<+6_pIoy0gjhvB~MzJ*n2RDmg4S1kT?< z*k-qk&e+su(i9Vs832GXjD36m06Lx-qjm+#Gl9bq)9|d6m3~9HcQ^pB$Sdu|OeRk- zle6xeA6E4KwXZtrY^qqi$@{R)nR&?CxHVn~tCW zk6K5XDPmH7`RO3f<^KTJtm)xn6=$ea z#Kv03Msyb1w4y~w%N1T1j-Z^@qUuIeRA<{HjDBq6wt1{8OAD}KOLFZb#Gr~K$s?%b z^!j^Mw29h7E!2fBB3-jCLa<@R2d^D?&nB^}mC~%VTAYhF;w+^sQSB;UWrBILXLqFmumff0blfqOKlPW*Ol}rD|Kn z8#*$bjO;?85@G9c2Y7UcT1uWZN@Oi1CL&PYpC$unZLWYHtQYCo^HbfWFK6djBq>Vxq_K^ z(Vr#0@bn!(&1q@VPipbc3zYf3NsHreK7+ky9nI>i#P^G9c_Fsg*6j>v+ap%xih*g%W^0-7bhBSxJo8=NUCyv5C_(bZdgmkQ`5v{|n_siUCe&SRqhs2Njiz-QNe8ko@Q;3`x!X%z{LVW5w8$eexp(L)<4&+dT>5Y;wh@gu+9vk0 zvMfn*mQnCGO5b&BBI@g6GdpjQ&*I10k)`|6%6?`U{{VDVc!)3dsfPf5?L8@?qXivH zR@l>HGB6Td$~Yh0s#g*{#G(~#+562)G^KJ_=2<&`bW;Nhc(T06+zKyZ;}*T=xhpr8 zqTUa@e)D=#q<&^1L$)!?zpX_Q75PUj-CCw%9E)g<(fnj~9+diAs}~IG9gF#2D_mkU z{{Ut&*Ym0p%a)L@-2f>5f|gl#NL_eT8&FhRe(Zhz^BRSt1se!gaE>Fw0lm+)Rhm;g z#ri1WN1&$ikI{QFw`&G@%LT|JZ*x>^L%>{8t0?<((+zO7Ntx*WAJagV{N z#p!+m)b5pKzS&_82Qwd-{{X;6bhcg-(6s%nDod%zIsNI|9)5$b> zb^<*B?^<^{wVsP@^+oqKC~(ONa>_as$uF< zpCexJr+0n5##yGBxC9K7?4z)zC6sZ_}D`xss>)XcW=+Rr4aQT}v?W^RlTwZmLJ2`Ys;>N;ofLC8ON+t7#Vb4dkSS2#rXFpacoIZEcN}E$0dA-0KND58^4DfRCh`LDoy@SE zzbN+Y-=2F{tB9=_(vx!Ne5M}R0RgJJnBRC!S&m)TDdCnbEPkC7C zjzd}3f68Ud511L)Fk_IZ)7ydD>r}5UJh;kkL9mdc9ew@2wClZ1BwyV$fP>dK=lm-Y 
z?&-HYO){3oKpiXOt2(&q+|7KCyTZ_o2wSl&-SIH6GBIX6jt9%@>;C}iteeZd)2cq) zoQ&s;4C9Yltt_SfRT3A$1pfdp$2C?+XK=3IfWtrU4xfd69tSLp8kDp%#A38@i5s=S zb=sh?W6tQDO(y0`gpoE?AxwT9$m$GoaJvY1GI(uyUj^>yPVNvCljbqDj5JP^Ey*KmBH&{{XyB z9e1vJjA#66o6fet!ZKZt0hIj573ju{E$=IyEo@xx?wTg${OyH`?i{%1B=zEwc$w8z zNh4|f;XMbfQ;BlEWyHU{VSuH(A53+tk9J`JLTq3-Svk+;TPWLO$E!I`TbZ)ynuM;w z7a_UHAObp65$M-FSe%odGDhEAo}^ZkmyIK|Ou-9qLw6tHLZfX8BkDO~IX~y64Rthv zl(Z|`>BijSa0R|nSa<$|6{&Bg9l?+|IR#{3aKDG+Q^jP=5E9!nlb#E4*0ms%q;8xs z1EB+x>-l%~t}I3&Q;#e)XV74>>B;lYL^97yf)?Bu%Pt8(a8-TD{{Z#W^R#O5BWKGB zaDeso$G7QKWxi7gRnI*L>^(YhSkJ0Cn`FD<uL$VHqWfEiDq!L2an7hQIV%_;- zK4EO6CCAO4hd2kOI#pY%w@Avur*3hTJSoqoe|mxM5!B_|frd~y{{TMqJ(@|VV|MF! zT)v#83XbagQ|ChFm8M}Xm?Co&C7M}J?`9*n`OP{_ zUdK#fZDY90l2<($yS97O*VDxuZ5#;GI~;I+`x1HQw_cTzZJ@rZsUf_!nIU2Vs)BZa zK->t=s2D%izevI6s#I4x<+tK|Za*nft90*kxbXh~iFFM{S!A5uA?f5M!5^_FkIJ`X zxsb=b8MYEYFCKYho`vqY!rdypns>Ij0F88HQc3y}!>&FU+1;36P+WWIO;l z9Z!C16|~(#@rLFsV9Uf?ka7WC->Cd5Ft{pnk3C5wk4{y29iojhcN}a!ZXdU&9Z#ov zY*$4{3PB{C?dUy6{{UaTU9!=9j0wmfE=r-u&qK)r)ba1$jqt-6g$~?sqi#a}ed$z| z9uJlam~p8o6=Jy!51Gm)MgtyL8g!Q%Av@TY+%~Bke!XaxRW|{nCkzG)CVv5rF;uQ3 zisCsu)+DIOAZIJ=J!w;dl(kwK^d9=6$s=LQ2RlFj5)Vw~qie=xb%!Ld;n-tn&p9Kl!V#Sn?`m{k z@R8cZ9@yJx2aI6n{N}7CvlPH?ZLHk!$Q)+`d)EsjGQh$7^>oSHq?`~0H>TtF ze7RG*j>o+@N_MdS?h?T#tJ&Dv}U13}f4_0Msd_W7B_+@8X% z(3fZ-m5BS8IbTCgZ#_tm722R-RA+IgM-Yid&a1)@GC=gJF{vy!W1nx%mZp(|dzIF6 zl8}tikWdr3iR^J!e5k<%LhjCYXOmKhz>j9d3)=$~grsb#Ppc?3 z!^1j9b14fU8;DPrr?2KJ+TAKbs|HZA{L-s@q>e^^m032CO#zNV!45e?f-1y1Rk)E) z+I+a9!I5#bSb`3HG1j;%)~6MDqSYGFdTe7Ly}J>-#p4r_xK6uILIQ&XTk4 z8yG%&cOI3}N2A)$`^PS$JGQFCijARZ6zpoXQ7MCru zyS$8J<&0yN2u^ZOTI%P*;;H-6RtN2AJ0x$#s}+mRoq~bE2N}+LR)wyk<`AfP9Z&SG zQdW}J%wf5ezFnBxfIDOQ)u`{*84o0tVYo5pCme8j73Siynlp{*qoSTF&eJ<+r7obH zsSUyH_|tD=l4wjqZzmbUsq6KuEjsX|LF8fbw>yqHo}WtGgHcJNL6sn!kK)`bAcHu>e$4ndpA-CsIOv?V3~4x z3~&hUD+2!jM2dSU?BEdvm}h$dfI#)?ImLaJU!OuzRO>5xY<#9ODU*|(HhFYX`4&+5cqwHBfe{6fB4oI%T&rq}0be!D6vS1H7HUQy=d{>)C zGeA;kvZ@<;WB41>>0NG@Yjr*1Pi&hZxFdR=HWU0q+ppzaF|MG!_hX(|IaZgrwJPe~ 
z5M8fzqLRp?b3#sd^;6!k^o>3BElbKp;0?zI99OGa=(9wdUopA)uuoBpoK{`0f#!$A z?>I&KB&oQm$mI9*_xx+d#%5S3!OERQe#fzbz+(B7YR69IJ)eUknl-umKwsYM#JuhG z$Q}A)wREVJXtwDS6K)uYZe{{RI`qpMz9NM=tnl#Cyjo;&mDT+-Ox&AND@ltt!kRwKSR z9M`(DD?%g32e?sCJ*}ilRpN+3Km%%l^Apt9Ru_n-sq)p0-*cL_Sr;y7yCcah+DMg5 zggdYRM~oC5KRnZ?l1OK0=5H-d2Tnl8^{-BwL9x46X)U1=s}k~{!91K}`PUz+XwP8) zxsFKhLmbR^50P*{v~txVDpL?nR0CW&T5!il;hmwOpR|LPc!#&w*jz7$F4;r zw~k|A#{`|hfapg~l>6&K(aj{PpCKRsFvs8DiqDECo-zOrLGZre*u{VjJ9}5DchK^s zDMj+5MjMUlA`gaRlg?BEIsIw!?U7|hWmiQ65&T2isKpva{EW785cWCg^sN-T3dICQ zR*>NzAAkeY3fed7TxRX51KYSNpk*#h27IWwK24kF(w$8{#Rxl zvyuLOl%n0r7g3anFuT8t*m|Dya3KGydvrPadehEgtY2KO-OIU94IxF2Utxn}7l8u0O`8 zY91I_VhwI~Hzl%v0oU@!wO6yWRANRW2T{Irk&Uw4Z#zH)4&Pe1k_3DySu!#6U|kL6u`4>c=D&nXyo z+PKSP^Uz|oESf07$ju-qz=czl_Q3wNoal0gEV&&3QGD)==nncc3hu!U3 zQhHd{s@CPb#1=Fu)uim4B(Nw%nRzy<57{5<61L>}65Ys5Q%56-tYWo8M!) zjtaGVxGf@NATGv7Je~)&H4f*C+KKHdW?n;}i?dh`70w{S^ke`hD!k#>T{F@v5*U)HtS?lfdg zH*8j{s&@hNNPFP=R}ZS(+)JuR(aJw~+eu-PssP75tIx*cB&pguT=g=1x1SK)`d|Ex z{ykM7A7aq0*akUiPb|mSR2TMMMx8Cjp*A0OcIP1v9I^H1rDo{Y=G(&;GRG=KE()85 zIe){_sB8M;&7%uwoqU-{F{odd;POfJIQ?i?l%T3Ezj>Oca1XFJtgjF2>8D!B701oF@`McRIOOB< ztZi3O8im7eW-135WAl6dwaoEXmWx;T9>olus?AT~TNkvwN$lB#QXk&udr1434lqs! 
z6)Dx4eL^&fP=&wNHqXEK_TX1MT*VOCw7U_K1_=KE8gHD1(`oX`@~j6V>MNd%>PD|J z_)mKif~8uUO?%s1-_`XgPM}%mUy&4LI2}6i_|jc#vAJFH31=(!C0)ZKIX;}$6wU$j zS>1r`&Pl9G>$R5na#BDs!6*4v*dA(?UG`=;tUWkTihpL-Huam|GTu%Ys2OHZG19rK zEltw#7%>G%Jf4+q_yw^p&aBPT1dMvtB97s*4?&(!wRO_M#yrNh2&Oa*onslna z;iSCT^pBPT&uw!Xub2DOMc@I{`u%y$Mvy$#=^U|%6`L$`f`{u?tlmjs63EPvqXHKh z;PNHoAk1Ubh)OJ#Wa?H{IeBrY^5)Z!>4c+3hjmlk5Qawdd^Ch|5Ap%fFaB>Ddy|L?G zdxNIv(n^03@mSnPw5Z&6EgPuqlM)iYHcJu5(wS{5PKh(bf-Ia6GDp+Wt4XHKJaHK} z1Cmuw%nmydR-Oq%ubh0i(KkBaWFK!z>bq?7Wos59l_GE6;47RJJx9Gzd#SFR#cs?1 z?s?nv_x7moUgh03a9pbSWMqNsn%mLrrGhnOnOZ)4)xg6z$0PiGt2&h&b~><_S6XQy zY&3aZ6_MWsN6PsJC)6I|wBw2>_zJmE(BN(7{P9he^%rc6Hz?$h-l<8b<~EWh%5q;F z2Q`JKQR;mxDdF&{$zAs?$#J!I@Kp%mPgChtp7ez>L_o%IynW-3Pw}kpwaYKe3c8Mm zp$9$3dd9oCm`>59!+eMuU_i>Az-Q}P!ZhjXV_O#~8*<)D6g#)8HhJ2(P=C*VrDRJa zS5G>mjMD5X;F1aNfHCja+ObZ$!V=l!uui*x9nC&XOJcN8kkU5;ZWJgyoS#~65e9Hd zvt_w0t)aLNBY7|UD*_$NJ^i?=_I3+pw8R+kmvP44+z;pY)jbl^P}HUdC}eWHG=uK9 z82}UgE4lFYj&Em40F^=?nl``y^~bO0Ts5(jD8bZkV>M279g$8QF81c#T~<#e$KJ>J z#Zx!XqXeGE~I;oOKRb&Aa zmg|gXj1A4leAmkV0IhGE-j0W)vSldUJx?`5EcX`Wq9S)cBt|kv=U$7TXxDadx?V=I zLnz&wc`%=d?VjB_Ry@cwyO`k9#sXmo@&twcZgP4KJO2QMYA&XeN4g0a6^}9ULv#o6 z_WD;AYnj!>K`LJhI=B~v4}sXtF|#=)-$Vb>i% z$6u%a0IICqSlryM{{S`7iu|eo+Pq847-cd;b7BCKF1TnKIlJ41T#M`QniwbBQk{2_EHDm`F;I+Y4T; z8Z_v}^W4IuoSl)Uq1zkL7-Ffh%zzW~6OMx&55IiZVPR<Mv86Vsr`?WJMilU? z_i8u0ET96S3_P1U{hCIYof{kW zlBeYgGx&;g-9>I%A;{cD)AM7VD;=-?X*{*vyY7;C$UUmG*J-U4hZahs;u`LV+EH1K;tJE{$909&Qlr6<7wbACnF&C&OiFp z+qIto&=;7Eqq46DkbCip=!O+@S(TzgZ?D_R*o?ACoVGE*^dGHf!F&jxckc|CVUjV( z=RVbFS;X;0_JC0i8RE$xoMg8hbLm;>X5&XE>~FN*QhLtyW}G`|=e>Qa!s? 
zvfL_LH<`O3Kmc>}@5k$16=<$XZaVMznZl!UmBhLhsOK@3kCM#!A=}p;na><%o2NvN za0C&@MH!z7Y1oblT@{{Zz=UR?Tw&jJNx9JwSeagM(DKaFl$EzH_{k=w;3s+9&j z07iOX=e=^*sq85 z)g(IPk;57`=iGT1CuryY0IyuMql=?IX-20%%(=##)tWeY=bAgGj^SMj@Nx)cq+8Tt znRC=+wBWA+yXqL9eWYhr3D;-xLkWsLMn*ckU0c*q$qeXZRyDM{A<^_o!Tr08%SVq zcpOk|!u-R7jBPzCn2t!uCR2g(fWtqDt1-*Z%5pkpiq=CrLY=6_-R>nWXYVt4t>6n8Fd;ol70x*4KaEzr&~2_^{lpQ* zK0|j z4l7n>>Y6pB?Yy9(i6l*>%8c+q=iK7Bjb}iT8@9M3%>{sLVU<&b9FMOx=m%GVMnfh6 zCt)D=>`44-o&2!sF*+j^Skz^TE#mmt_rPzn8OUR^BQ#~SJ|kDG|@;v64M!?CWEcxKh^{OgHKYJ+rVeh&m3`|;_06@n>kexn}SX@pp<>a?d|#1pl00@%-i8m6e5B; zbzD}}GMaIU*36jV6K7itfUq4o`CGsL09vSAXmY*Glf{1`NW<<)hE>I9w71ZOUoD&X z#tAtDj2w2W(CU6$Z7v(1m?GnDeSK@nbSlx4l_sC!Y|@xT14tU)=0JrAbdgwOpL`E) zl~2UBS2ubO*{qy-%G=Cj51*c&%C>Gvy0u{uiDkw?VZ~rUd8;c3Z>?q#v+Q!9o-@=i z^sTUtq^f&Zw48ObIjdUSkE!L_qe~QqBP(aCqaIZB{A!h~?HLNhsuB)4#d^lO;TSJ8 zxh7vSSzUuOU=l!Fag1d5#tv)Db$udDB3FjtzkD{~O0tkWa7X#i<@Y!|!j>&wJFbVy zV=%QSOPiq?kjZiz%q}y?1fB(L*v6MEv0TZ7bHP!J9@xj%rE@~yyD7#Pa!4IbZ0Rya z@+g#%7YiU$${x%=8ulr)a#Naev3=n-^F6)Vt1u(yBRkiz1oo@YZip-~U__@N5ud~9 zR=k680I|Ko$iMOLa7Q0bdml=~y_)G}due0aBDWy+_xfVGx#Jb7(Kxz#^g0Pn*K zH(k6JUIKm{b60H|B;2v^Knt|vJajm&e@z!ZXAQJ2KKVK4+#g!f543!e;DMF_az8Oz z#?sU&)Np*r**Es-hAKH>yl0Nw)q$uaszyt$IsgM#Ke@J$GV9%!jy+9J6wyg>hz1D8 z=HL>3hO3nu{bo{S-A=(TEei}YsLwnWHOEh5JW32RXPdIJF z{&=ZgCAbJmDfxqBj)t(DN~wd6BUQP~Sz5B%1}wo#1M?`s`~^1dEzEqmHz+t5?0qYt zp4cE}19GZhWBJx)>kZpo*klan7(bP2wT_3&VsP`r)p3$C2IXDLfzMp=_zJmb(1c$# z8NhCvv60swokXd*up5==y*pK;ggS+cWru7Y1rdw6$2lu%X}~PrH}XSU$TI3=a7P1? 
zfHR!dZlGfklvffQ<9ZUg`ii_FNa00Igq1si0GxV)dLL@eyGHXDbX#AHuw+sR$raC7s1RkNn4 zwqVH-W^>2PFgt$`BB3UeIysY)Hs`g8qE=nm2ZQTUc}pbGtEi95IQ7WKU&ey!!{n2B z$1TSm*%`>Dw<8L0P64l(uTr$1B&B`q&%46VqN1k=+T&g_*DESur~@AK?5neFdC-{;&RRMnAhb5ai3FB%(00} z5E+ylWM>@prSlOX+Eg6<<505%IB(+ULF87mtx{{*!uH$Lh+(Ox?Ie}NoTIQSl_Ua4 zI2rY*T~*^!=W!hX<8kRyNTNgpo|M&$FbqH>9y43vSFuW6iNwKGk0a_WmSU8?+%kiJ zPebcknqANOP>v#z#&Ws#C!CtjMcLClamRYHhssh&<-3m6mNyXPQ=U8YIq>)>#tsi> zxwC((i<&TPM9hTyEpPy+(LB&){ngYPA-pMJzQ6 z9|AV*^+EPKb&!wTsX(#~>-UN6>~UGr%o<5q@q>f62l@I`0(>uidUH)?%z%c+9FyLn zbmdX2^e&w`benPNP3F9DkKO{f#uK@s=5>}Rm2;ID-JVG`T&NAtPNSL}lhB;jv8Spn z*QqhiGl@K}?MJJY}k9ti8y=cPu;oDg}fVx!E)?vfOe3Z2;lpeOFr z0o@=jbA!_)ky0`3>ClQ{$za2PJJyj}Y7&r0HjU-j?dG|e8_bxya@)BEL;UNZx{*>6 z-!S8hHZkv9@7$0&5%V19sr+lR0S6T(d3lXR?xf2%Ov`MeDtC2HG@ZYPrBPl^6tF(I z>-klS##V_i``aAlK>cc>4o*%htw%%0#7^=y^!vF@xVL627BE!@IO=)-06i;CO-IRv zf`PN5vV-!UPPL7u$>vFkNeaAfPyioXa(ydn%8P2RlYn@~Qcio<%x0?(N}`mbt3Hb; zb!t;-J&M*!ZYEbhHr(#PBP+)oR)ft9DCnXzIU@{lkx@_O!3kJEjN>W?0Dp~T++Rl= zEScMqSO+7IdhnGAW2#=7vRx0L=Tu|wlU_?U)?!OW-3b`m?T-Gnjd5_p3LUeP^W!{H z&vPxpWV;pvfH^0vR4~X^LdAF)Ab(#<`rIB|sYBk}@~h)6^cQ=AFiR8Z>Fd(0+Fs3S zK2>xZa#S8ixfP!rGQ8{&S>FJHI0`Xa7B&;y80WfAmmGj`pRf2=wMvSK^Vr-%akCq5 ziLMLC%2568NGEn+aoIicQ+hW389glQX1E zE45i37L(YyHleaY0OLKWB)tPFm}Aw69DPl5_fj>9VDc~^l2aKUol9qBaej;TNpMLe zy5p}IJ%vl1jbTB}r4wc*mhxSQVRA+aXYUX2u8S=anAj=>QNr*Kt}8oB(ces2B)lSK zJv_hRAJ(DPHC42=B^)}300r;=0P3ze%i2p#kE+5^cse}O(XrRJaU`L3GRWLzjyOI4 z07}Qbx}VE&0RdQHfdG{Q_>@*v_PAw{qm7iOc0bVn0F7kI_djSQm=*(ogF8oMCau*` zR!0R|%2K*7U)@izO$?jac_fX$E<1YWv)El6Hd;p!g=9RQdH(?G)8)9?F2@^+5Lhwk z?bD1^wYynkkbJRZe5?jB?^yhD+~$_7&Rs4SrV4ur1*LX)k^KMnHN?i!f=9Ny&w_%*}J%w{uYK19Ju&MI7 zs4k~j;Tfz=syL%r&w+9^#dsS)l`=)|MxMuQ`1gAM9_6Hrh({$}FTdCPY#KI%ExdebP zFhK5j?nkG3+MXJeVa(mnLAb#hI$Uzy6ty@hh);ozLjy2CL zyG~OBfzNUI)Cnb}vnxRe^S10@+%9;}1B~{-7!=vAS!BaWwE+Q&^d6?UCsIw?o`}zu zQ+L?Ab9fZz19)AfzyyEws-43zjdw7KNZi3dC7DXI92~b_ zn~dY9Zq;60R&9?0EZNBpunfyn$SDA(;1ZjRzMDoZgS0HZk= z$m7zl8<&|_M=59vU@$z7U+Z0!>(usY4_@TPnsOd5esLYD$g9Y~IodsNc=V_JqzFfp 
z%FHvF2=B&t^!BR8GOAGmgjJuUcs<1=GKn ztrLI_P;T#@eGY3)0>>tBy@phFt~^#fHkCez+MI2BnR<4G3=$-&Pnm!ZL5zEM{{TLf ztf)eeK*>B3I#Yb$jh;a2$2|wP6%=y2lbntTIUR`NysEV-)^1Z}sM|zy5$mwLfu@Z_ zJCvk+7UbZMZ|h4Zh-|Iz_uD|LE4VIHV?O;oJuA=b^-nfPQQ5PMe8=tqjQ;>CxgMg9 z(CmxKWy7m8XPlgnJvsjXIIq#(Nl85$x2fVsGlca$8rQ^e+)eW?cMzd~&PQN-Ra?zD zH5;KdmC8pM^y3OZyDPwVa0x+@KAJVnH)yT(G3kUM~T z*NIyLR|O|hg16*)5UWa}(9F8Ly0W;DS;5+wDYpcSbr}BuJa?>%?OpC-MVJ{1^k*5* z9-hBS>UC$fywkQ^l#WJnv9@pqJ8s4Yem@H0FP2MDgfYw*U`gnHmG9+FomI^!+DPEV zN>JJ8Ol1%Efl-b)=kcjGY+sug?YpI6{h+4gjD~g}ahpHn)X?0>?CRL$9H_^yetOr? z-=XF#LcNQ!#Qt7m=npDST69kSQlFH*dV1objK%%OLZ_7{ijjj7B#e%j#ya~^V_5er z3Cgw@5D3nFDGk(WHx}UVK&m25@}Uk;0!jLN)awv9`@DkK$5MEyb~KGf_9Qn!Gqh(v zC_QmV*AjV*st)z(=udtscoIFM%L*45$E9jlN-G3$)iJx*Aot`y|<5bO|` z)T*D0?^-oVq+O7<+RD56li{{ww?F5;Yg*q_W*Y+*=Q%jz{{XG;T*bw#mh%X% zV==IgcJcQ|zA@K~Q3cF)U^IJ-irLA>C)DG$an{QySBE`6_y&-w){;9&FSo7b?hX+5_x!7+?;INMYhs?Z;_dktn+1xRVZ^=@5!TZ0Be8wXOQw=V7 zXl~MyIsH-z^!Wz-N*z8)6yT0}kSeSa+s46el#~s)py`ZsC)2HK_>x03F`P`St@0l% zo!-X-+PPjDKnvxYA&X}jJ+Le5vgYwPWa@5tRj*EGRSncr+p61bWa!b5Hvx{IlaG8= z*e(?Bjls%rKsY3N*BunbHC2M(mqDGa)De!Sy;x08%eNUUz@L}^2~(ftTo{a_o8WpK zH1Scjq;_&Ml>sGZ$3u_^MOlQ&D6mrzsakfAy;MrIogmf9AA|`3ez8;N*8Camn_phgX6%kvHXI zDR^CB?3Z8MimGW=Y_=BqmH!ITWCz~B+n+PZBrCb05F+Mo_Qscam7 zdz@6stEbHoWmhLVfzC&*R9$vCt>+G#Wkbw-0)Cavh{R&CF|?zi>=36;o8NP43$&LY zjF5Km&ONGg6`ZWNfk|VykiBaT-tNpYIwmF7i5WEYJ{`VUXnJOiarWy`YC_9}-3~w=V*4NO z5B{}HtZ6H%*{epXw~&mX#yr43hwJTJe}{EREu7oubHseez&%HBKMrfZwYrZ{jV6V{ znK6dxwfkq-^go?-w|Lz8934nkr^!F-{utzg$|3#Jv|=&H8OS{jX{~;3nlyxmQ}Ym` zhGYDzPsJ8S1v;Iwl-zb)CO+$W{x!?ng7XC|RZkfw_4cRj9juQ(5lU1laxl^F2rBH2+z%rUuv}l@~a5h z)O93sclO}cwo+C)>DF*}Hv+6`aT?_G$gHhTO$%Zc8fr0@x}&YhBPc`QR@b2Ikb-Gykq=@yD3!(d>yKt1cK5{!}Y^(a)OEg7d4nF%9wfmBDI zERZ?RtuNW$DF}d&n89{9Qof$`lOCXW`S&`Ua%*1RRlbd5na)^^rxjW=BXr0gCyi~M>MUT zq*4_niOo!K@v*$|_LYU`xLlrj;+jSd465h6fYeo@!deieUFndM`+fCoXG)hI(31dYe}{x!5=%NrcEF&@@e7UYtk1E23? 
zVAW+D^Tk-7AwuAPqatr^>=lzEdB>qcx{Kps630q~VDnvR5M>wUKKy zv9`#VE_&f|Pc=N7e5^Z;J$s+<_vXA@eF#*VbvkT&`CUoVruhQ)`P`k1S8>TwF`vq) zJNb&lmP`($@@iRU<75h0vHSSWMNyVSX#kVu9OYN2_7&5HqN2Un*&WrZUso%!UTC8Q zOEWR*yo!lgt{8a?GtLg*LDLmx-ZYr(cFO1Hf8=xhD@h`@v~cl~?nnWjr{i9fDYlOu zzAZySy_mVQ*i3JR`L~im1L;<-u6C)}zkgl1BPZ)ml1X>8L6vjR5x4?-{VJ{Xv|=`J zL}R-LJpL8YYixMZpR}FiLUlz12$`~@k^DrS-t`^b(M_{DKbSxz0pUl|n6C(kO6|Tx z=jB{|Gg_Kfk0zjjor^%Eu{gmz^!j$J=QU0|(`RNI3o7*#DXmQXF4p5%RRIT<8!sac zp5FD>+3EJ0WwDOhAWLI$W0q`Hh|=D|6%ndNiZH5+fuCXCnDZX4aLh6gbpzDblRQkS zza#E2S#>NbP3t1P>1DNY7{UO{K5#K!d#zkgJ=ES&3{+*nlZEZ;gW9{@K?m4dKQ__0 zBK2eGT)oDEB)3Qjjwz-6;aJ8A1Gms;t#rYAx*XY+3YA@v#d$bfDLdHz0ITdVQY>#7 z^OTa=0R}Ud_VvYS+}cfT6xdcGPngRm1SsPJ)c%#7e|R0hfCWg;3Zn!b=CW~7-1&(^ zlX5vFd89EaWU;{qsc&4@W2EU2Tib@2cgg_X&TA$NASQr(6wVQn$0)Jn56?MF7A>7{*E9cdtdV`vru)MYaSi$ID=)nRDzo9AhTD zT<0*SMw6v$wvNoZ2|AS&`XfpxLMjOx&nG9I`2A_EHYpzI zac1NX}!2 zv0T4_w<&5_l@tirv*3kKyPVcF^~CQO^4Q3LVS=BVt}s7J4fG0SkxMWPNACuAejko0 zXVukEBuTKJ^>{m(zaL8Op+cl(q;6AjOL37|JXUY#T^mwcc2#o6r&HKdKFMIS2YaIK z#H^c78OKxU^sIUH1%2B|4Cj*USwZQ?r=b-aPR2ld!ZJq%MjP0X$^QWB*Sj3Pyq7mK zUWX&81kHmTsQZ^Cc)(qmV#B}x09vZEBS@y|SA0Jb1S&^ik~;MCs*nh-=h&^Vf%8iJ z0nb1^IRlE)dv=e^EeMcIM#%#JJ+e9hQz$AEqI3VYDOoDqK>CH^yNakJmPwqsZbjPpOqx)i{ zO2OM;=@3wH$2{jF{Cm_*sh7wbSQQuyGlDwjIPG3Tr&e!B*yvMBscK8sft?^$nMY(% zwN5!7=ijANl4&m)LqO9-4o(t6G6y(6N{U(Innm*o51b)E_m6CL{Ody6+DWC4%7ku= z?U24h2G2Pi{dg6>ENI7?OHo}np#)4YWQG&vsKY9$UA$mm{{UKF?60)KWS!(=jIQ!P z_Q36m-NGw;(T%OpH_f#ARE;tP3<8DwpaK4IUSzWg-tbms@~w(?))R&D<&{xUKjn%0 z-{3u}{7DjE0IeWwl7K0y&31_1Uf|1&<2}9Vqv}y4Lwn^}h~xU#G?i-Z_6}20Tb+c6 z*;Hpdnu(>%M|TR`4l$GJYc;RAPGP6+vVJqM*mZkd@Fj#%_31Ppf-nDUl~5<4tp93C6KNOZxH4{LAdtjvCkw#NKU`*sZVJYve({?iqP9A8$F~QZ_pF3_XEDhfh`??okp14jtw$~1 zW=2zi7{CO6b?O)P7MhK$=<+N`%glgs0Xh7*u2;nRb(|J9vsuEzXK{uDA-M0)>0hi= zp~$YbN95Ri#Z~uhW?|cEBsRm3EcM9!YftUvC0~=uMaI&+22Audm33;%F@|ulOBT}X ztHIse{(`OB36eP@o;>*@4gT+H=jB^T9;7jIk5gRg(hbQw6B7-;?6x`|O3uAWrbRHe z;G^zzBCD*g1Z7z~+g3T<-D=!&7yv(dGstgBxKe`nRgSN%N~griEpA7aV?JD&Iox^* 
ztjb}VBW$IK4o6e|6|nZP+&1;VZlST>sZ9rx&et+AcEC^vUwlz0GFXdsI)jxvK-ygO z2hf_GZe)#slnuQH99Dd;(XlAZBWCU@$Y_=&3A+pl+Ia(to10UczU7N}yy@pGLv+Ba z;u!Z0{{VO{(Vp1vS(X#w=G&2+bM&jkx0?~%0goMwTjylviE9`2Uw&sjC%mM@Qs3)IZ)un53`<=|?Ly|sWl0W*@&z-@GovO>z z2a1vz%Fd|!hTe0Y06v2if6be$@!O_bFgEtJPPLLwh-U6 za3i{f+Rf;=?OHaPgE27rQxW3a#U8QmI^*KJC)y>CZ^4t<55d-&-o=Eo3 z^{JOp-3Ufz+nBt97;w*;38bfcEK}RckF>&cxo>VnZ`>6n~9wMSP6tV`Jp08^KZX zWOn>(mbGkd9XQ{9rN*5ais=>C!d9zqwtg|01E=17)G70E# zGC1$rry9`niDi*?u-aROW1f|AJyl-O+HWk5jHH4aJvjE}yvbL_Eh>A-n<`LR#Kn6X z?MD%5Zp20g=XT&=^#>0c!bA;fr$4n1u zNIXFsLXRWvSLIc4xkp~Uy(;~-E-?Mv5?_g%r_A+cE&iXVL$PIr(n&a2IhJoRpc(f1sN=(r$0(#>k!0zpE0t`HjID( z_B|_4uHuv&V+5Ne;o~N*_A}-1#-kB&6ze0n!xAyhdsHc=>UMFLWhUUJIkgpds-D#)8%XKGN}m8E-djKZp-XCjFGE` z0Q1x3K9!qZtK*$9@AN-EOEJ15e)=>RF4!HtMmlw`ERr{hLXs&)9YGw5{T~U6hANVp z->UOI8ykmoD!BA5SP9bb*;b!;tZuRQWlK5X!JT+HNcSoVQj{4R`m3+ce zWaC~W48>as-C8*jv_T<%@fGPP^pKNq-w<}RZ=$W zm#!C|LTb^sirviY;K;o|&PnZx=92B2Rt7xbe(rwnJk_+lAqL4oY+#J%Z{hSc)f$H{ zG1S{#p_}G^-a7oNlk;O0s|X6)`eKOKBt~u#r>an$iF%;`A#?|)KLZZl&oF33&wfPUe~P*GVPWhG_fou1E;lN zM#Q?O-5ZANuR&bua*IAY6+tPq(QX@;l211o0OWht?Tzs>t`(addF}07^pP}oN>Bo= z&;wS~$Qb!Vmr;TfjE`FAgxWmnl%pf9wYh}Dx=98%e7<4(hw0o_s%kc^AIQa>a0(7d zC)3urdwYpYZH5_Nyr;PKsY6TTJgDunfsX6wYgsonsHjF%o`-uBX?F;c1`;Xmywd%V zmAUEs>TidLnvL(-qaI|kedJ91xbO7j*QXx{5@Shq_IA< z8#C9xt$K*?a`(0e@ufZwL;nCS%lXzH+BEx->?)pGEePpJ_I4Zr-n~qCH9zOse>x9? 
zV*dc2VfoYk(WFwRS}#M&#hc*ZQvH$mHtq#n+% zW6N6Ak5RzuN7+tt7rtxMPlK2Keg6Pj2ZNXXeaGiV_KhNam0rh|{gjM*bffI-ImLRX z{2~MXeb@68;o%Y9-9JiS+Ej+9)jXr@r2hc=sQWVm(E8V`f5IX=K_BTy{2}{?`l!Vp z+H`~1)VbvU0A_DODEl*De5V!bBf+WsO23s7cuvv?jF%3Ppzg;Mo@Y!J9&MgXEu5U~ zH5BkN!r4>M*L{C+;rjyha0(Ude+qOZgNRP==z;V z#`4h|HOgi~k6hDXE=JAAXdD5>EUmgi!ie}&>U}HFYUs&3GAVJ;cKs?GbBuFWBek04 z_Q4Z5>%jiCuW6#iY5hV?GE?YxRHE?Dmf-?PfX>BHbBufS z`cs{J+kqNjs~*FpOXZj?)VCn>j(XGuyq5De>|?^FIr`R#D;axPGgVPi*efGPwD84& z&(^5TD%-L0&IljI-F}#)eL$pao3}F%4hKqw2$up;=JB3;dsdv*R!1U)o4Xq(pza1Y zxW+yI0Q%yj(;&LMx=12S$i(o+A%3H^YU#cl)Gt*Xq7Z^|OOAjXbDz*xXKkli>2~b} zq-*A}%gjbukJh}5UTI4VtexfkZhM#B4d?sLXHlcHzoS32L{{UasweD`= zv4vF;erzmpFLU%2X_TZ`8F@Pn=lln$tht1CgwH&oWX=qvecxps-M#D2qgtjiSE%&t zeSI7)93&sJR`V-)mk%p|Rc2q7Ylh`w9)Nn&*^<=>=%c5Y*va*C-2VWwDW*HCovjKj zoH73ZtMf)#f7&b9p1ku;SgjzDPFm*c96ZNyLF?$F`PZfHr~X97PEVPBKjc9Y%{gXK zmpvqvepUAi-}zL@X%s5#GS4r{^0~{f^y8&Pd#U}Mk_c4KbuZkEbwBMf{t-@&Pq5n` zwaO*9VV60;9>dsHwwr0OdrDl|>uujf{{RG`u2?LPd5FQDG}?)`mO1`a!fF~>x7GiDLRa;EA6JH{=;Txl~9Id!L+n!BpE?Zhm z`D}e^Ri@k#&P8S+Q9avvN!3(+=KLvcb$g44`8WrSxzFKOAknY&hx=92c~Npp$Z^2_ znf`U74u++~Rw_%n@Nx4uzxzLpV3eMb(H?YY&RZi-!#ahIlWwnhGelXqwS|fEPCAj- z`d4!vv@T&uAtcB~Bjhgw>-zE9xjj2Uw9_MJxJ9{?u3IN6qq!vY^cm~craGwe2}NRZ za?QvcfgF9_Yr@B6^y{}M>i+-(=`eAX2&Gd`BVzvcVyB$Iyy`@f6D}9$Y?3lQV4(dfD~TqSL~_jK(3A5J8v4H1iR)5PYkH(D&lIsnUfdccITx zvu?;%lgdKVwm3Wi45ranP&2^5dbG0{{R}d7Mk&u zj?}Kr>I`FmN%&j>;d1xkyX>&Z$!#gxVbMoVcAP>tu>$uW%D|s;TNfdx{ zwG$hMzoEu>{A#{|X(GiQSiP|yBJk{g9C2If@xoPc<$!UuK*oNZ>*6zP%2TM}O`e?^ ze6boVZ3M0g@4?|l4hB7Y41Fp)%S%<4GD8t<-zjz{j>De*^pegMNLKREdJg03iiSzy z{{U8q2?LPcxav)K5tUo+&ZQ3J8?7;CxORynjy;5Yr*>Buz!=7Pta)_77Gx4IIbq4; zwa zRbGAg#_h^LP&nh;+v{EUiZv>)d-5V2vDn*djke?mA2=lAs~!&(QtMJO6jeLdaRi|! 
z%NZYsddi;tY9iz}Wa0Ao734 zl55!%;Y%@YQ~~!18UFz5el?V^m7J{`ThO0r6{{}XO>qsXWzWi|BiNoOXO0wKGYp^t z6rQcz{{Sz_vn82sBaklQTPJb!_x!2m;F#1)ji;*g1md@NH_Dfure4`8T$S!5X&k!* z+z39L_T%uX^U1O@9rBG~>}) z#Li8lrZLG35ioe5XhR`DO9vl@Kh9`TY^~&ZF^?~6C?s+Ze0t`aAf73~CP@*&Ef61c z^~fU`9XLH|4Nmp#+6fcOl$FOTnGxG@z`vSIf%yP~`4Lpuyr;{NInF)D{{ULM*yXa+BQe|+WNw5UXWSmfv#vZf zBS=~c$fk?d7XUm-I9rA zjg<)|Hpk7!%tzL(B+QG2kz0;<`G>Ho?%r*%P`DKtocSeJvTz`#D>)}^KUBE~Wl2Rl`d-THpDfLP;nVO)ciIPXpq1Y)7i4V*V7epDaHV&7fr00?arg??wA2h? zRuTeWZY{_@qxGj>O$EdZB#RP;#uozv83!2S@UJ5WMy?9GNqelit~Ft1bJmXEVwq)> ztdSp-fHKFSJw3%u9i*=7hYtNvr|}Nh^ro_}o}pa`2RshH<60Is32XBR*M97Ry}iAw z-LB@KYsfM-GS zd4v$G2V8zN$5~>iy<>Fxu7;6tdovDbZ!N9}x!iU}Rb(d^9W(h=O~}QTCuNBVAz6N6 zz483}cdZ#Ureh_njFLp>%PbQkY3O~iQECRj*{i~h6u_J?=;yyqI~wkzI*F*%j{S^g ztn_7RXN*e#hGz~`Y+^Xbs(c-(^H15nlf{nOqSnCyu7t`oeW5*3i3}m=OKz}N?9nQ;v3eECl{IAIa9e$N02E_4d*^zR5#$$#x^7U~w z`MI9nhoYd7K{UNd%n)25jA2PR-LtlRkLOa20_CLt07$z-7UDyJladJiD@4qI%w0$l*}ZKF_asEwlX=# ze@f7_`G}O7mrrP|*xowfNb-p1VdFoCw`xnBA)eX4vnEt!UEW@DBwla5(9a?_Mk83k&OAVo$e_?7S?4_}ACh z9eUTO+Rc4yGF#lbpmzBSoxuGw`BYvjOF8tfHuNH4NI|s76rAvI1_9~#*KSu?;V~Sq z-csy&nEXGtrEA%v#HrD_M+~O~9E0go>QF}o>j)W4ZSyYDcn7yWl~%sy{Za8!T&~Aq zVX6NBX!$8Ki+j<!Y#N^6s}vV7iRrXz`Eu5dO92I(@`y#@_6aZy_0B z$&I@>{=U`ITi!`5(b-GiKdjZSrDcVV$IgWUAX9Z_C2M>*rK zVn4>SyhGtjOsEYve)01z*JK7f9v}1R#-$rOhe3h}_0O$w zcPaJ@!m&DUU@)H8>P9izwRE}KOrwISv>btqd)CpJtr;~tGh0zu+TQ_ARCUJy*Ec4Z z(hrtc*_-bToUrya(@84%1$L>E7y-YC)*165yelLsT}H)Ken0(nDP5g-=tiSVVbEss zK^tOLAYq%R_Ns|*JQEOAm5C&0t~ysoZE`1VtibIV_8!3ZspeF>jZ#stspB6tRgNiT z@SJZ0GTzwFZ~m4GdOCm(Jq{{qtYeN3^f_F)86~+5?NQvw_U!Sm5w{9)*B-T7`$x@2 zK>JFsAYc<$bc{WWA?~%Q(s)l>xbXe#k7#3#NruTJeyF(TZ`Y-Jw}dsrt?3DJvl*2A z;z9Xa&}X(koqY2Jp*HFkK(dF)Ryp~5X1i@G#z(_CExneT7!It>Jd$#*bHE>g#%s&W zF#gsn*{^urVcU(8J;QmW^F8a&^k0b4>N2DoL&{}f!Hx((IKamzIUj{~P+g_O5zP~2 zBv8tIjeNa&btpf59L|iRxqpAX8~y61o$p7RT>jXfW?SxBh^*yW4b#EE!cWzcoxZ#YS zxUHT)3hbkYtyx_y2O3HIO(W7}@rIs<3e4~6jla&Y?>-{zL*zt}oZxRcuQQufe=~g7 zEYXqopN0H5%~cxGlm7q`_Rt)-b)D=K}IZFW2mrgM5eCh~aBe)+sJe&!j_Wt;sSz=?}RT 
z)jT~sEIFlbm9O~!0Fjm(7c5myN-(#c#y!pRi5Kh;ysywm<0KEIRedK>ynV(pOLch; zKU(Vzb4{A@O}ug}YSFfScWeMXm#O~%>sJ%Z=l=jMDg4{i_SRbm zZ*M=i+$o=F^!$I`ML5hTk=xsN+kRCDS|0Z203^*jOicc|^YM)M}YZ173y zYk1inZA#9puJkQ#noYULWA_yEP7?*V5j13#7%VyYxam>JalTKR_k z+4cBLzMUv1C3aq0(JaW3?N@aJY3cwOppm!b8EpP0s+)@|26+SSEOOE@<(t>(MY&*F z{`45WQn13@V1Ib~_53Tz_9^??r2FpK>U&7TK`&=Z7!0l%zaqzbk#6hsKc@6YWFOCPwQJV&t;+62oPX5@DIN!^*+_?#+)Loj~-a*)VsZ- z+_IBJrK$Tj`bET?w5`?8_n7*63d6kAF7;Wy&7~o^lR1)He1QE=Vfy_l{icsS{08G$ zEe*RJ!UZ_brsMr7*BYV%(CIJ%0uSD@3=cq~AD_M`a#plAIjZx{3jXYRF03@|GT%XC zId8S zwe-yxlbK+2i$8P$^MQl;S1nvc3@jX~GkQ1~No{0dTj`fK@_pc{ar?#}D*GttYhv3@ zj@mgRfe0APjz;EV+j0Jt(Ek8uq?V=#{{V6b$Yfk?9^aK(7BNKtLI6f=vtV=XYvwT- z{3Y<|ER+lhHw;-!8zl<;mvs%y!vwYXsaS)K|N);kThFp)osi_ zDlvTR``PJ}-l4kFRx+Y1Xv}I?_>KvH1dRZi6?MhX_=f$8ciI(eg- zHWIU(f=cJ79ZoC4SlZ3*&e&7F=;7~{Np4@tEXw&HsLH4Z85!%xOk%MnidT>og`OOY zfOdxbyH}u3WcrgVDB)A97=bJY^r^J%3M+fJl5#TNC{#e8FC6-3BQ^9`eBzxNR+jyY zYhjyu91MDO)9kr;u9(}8_(k#y=Z3M>6mi<2OZYkFnZ4`YJxEUxu z_D_A?tC?HKt>G|3CQF~6cy77I4u20yZ1)qpl3lU94bOqd9S%EI4=Zz>*2dAR7_Cjm z)Bsl9H<;1z!=};P0pCB?sJ5njrbx=FZX~0z`MrP1tb-(xsg^^rG5})Su=e#IUrIzt zV~Z^)A(xN`QJmoYYtW~aQiYtE4-nqQo83xoh6e6-6=FdH9Y;}7Ava74hB7x-`?x*x z$4+roBDQUUb_^p%A1k&GLEfHabwWcsNKODJ@dNR$pJh!eQ?oU%>UJwXLvMNhJN_Ki}Ka*b~_2ueE0R?q%3yRehr*{{Rm|+MJV+8728Z`LoyY ztYwC*?RgfeL$PVBahVomNO6!DWS_&eW|>G~8aQ<%@&_NE=~kqZ%ntY}hwhc%tkZE^R9{xQ25=*sjtFRVsfK-ReYx-p!NFy0EI&|sAQ&P#4m@dUle!Jo=Jx+?q?t6*3>T0b$U4 z`&D_Q#8I)EY^=lQ&s>~;Dy=99ChRsy+T?xRr=h51V3w|`+nIWdo|)<^enrsbomZ{R zLh3fh6N0Pqle-1t;tbs2 z9+?@=X<0KbZ|@|Hadn0&o7J=5f`7uDB&u?wAmir8HB>W3p_?U+a&i4BOuN_R#y^RN z1b!W8RMqZwQ=7J@O{g^R=18Gb3^&MmAQkDr835zAO2F22d!Ov&+Y^YKVVv>CaseLW zzp$>@g*nQD*yME;lc;H)eZcY#3l%a3P*kWm{P1httxipQto*MHTAGsPM+tRw@>uUh zFrB~;FmWJI4}RdE*0UkIcxNvm-;shlA6oCPv{Ynto8_|~y=ML(M_=h&J=Ky&dSbGZ z%~m{R2*>AMU21h9XLH=4QWY1vT3hyPDJ($fGoC$1_*R4$IE(< z&9QeMl=u32)1$osjIlOGaMrV!naYLhtwLmZK_Xh>nc8GiFUSsjcavLTT_Nu zN6pO#siilCw7ZhgTxn+3;~4@8e(?2P#<<;cMVCr-mL@Y>slakDQ2H?KUW=z{_IL0l 
zyr&<<7dT|~3VpphRM&Q}&l@G)$wLi=*l^>~hhs#jQ>hzl`1DpZLs12W+s80~i5QD&{sk5gv zmHOy5&Iuh$#z{bVAKvz>(aAh=ZHc_VzcIBmHA7s;nnp@Qv>ccD^wJaPxFMN1<(#&h#8(>zx>9m%$kFa+dha3p8ev=>!LElJu# z1>^&d#;r7ZG_g^$O_MB)v1~>Ji9r>7HvQJJBuFWRK}mKVcr$pyG4ALkXtTAf!faoHUZoNcPPD<87RZLS!A*Dv`<32bCkj(FZ6a0>>&{C#UI@1Nx$G3%D&Xz9%;R+3T~u-OQTMgsk7D%3gQX6*cn zjP=mm)D5@UqFZ}`6o^@qXvkk&{{V$yCGhejw%vxAz+^$3kb54~^KUUFq+$a$B;@`> z>rvm@c|mTiLWv_ea65v3rDI19sYjK&lUj3YnISb-w_B|^9s2 zYrFSnZVyKT`5e{_)GaotV-&0bXCa1gMm=k#(`=z{E(^8^9&(YY!5f^}xqYD*e5T7ED4UatSb? zsKFlJrCg1z?bPOAy!^_f6-@hA&Em0&dQ*mnuTGPOvSlAO=6LtIY$dla!NDI+-DxAg zJKHMzP6=!rob>0nO3_ENIQu-CazSzr-}=&B+M(tdRTwYM6+zrSo%>gp8j$AYlx%Hx z&_urzNUDYHI@kq5|q{W^50t+iPd zqx)D9E_RhXWMF*Q?ZK^Om(Een zG?^_fl3}onwh8k33^yG!)9YQej;|Yktzx|GL{n)h8-LbKKrZmVx3 zJ5-JF%YnzHIP2?PMXq>u<4Kv>F+HF^Ea}r3<35%3SS-qxEn8`JK3f}?)WiGBRAXlq zvt=RFti_>hT*3)~kMRzrKd7u~baFIOE5=H)oT(j+deN6Fx#lenze&~7ZSdTt{{T1z zxr3(iI!K<5JC60YZ8VxvtE>I0!*>p&l>9p%(ANudFu2=$7t1C`nxnINkFWEtn?%-R zxwa+%t$Pyw@+KcG)P4B=@cn+Z+Nvh3{{Z37Lk)=Kc5hR=hIn;LbZdE2a`bQpK7@K# z4dN>W({(1yN^Rx^lZ@?d>z>}Ve$>9&up+G z1M#kUXl``t*)606XOECa@hR>-u~#F#D5HCUmB|DF$o~NKS5#U&x^+1(%xm8*jIjLN zF95oyEOGSuRXCDZE#r2GDJ``Xk1P+rYz}c%Y_Hnk9$E535sVC!^zGKREsT>ia{ls6 zgVf`0eUJE6T|H5v<*M$6!>N6gW_6KN3=z9L`qq4x{{U;aXB)QdAxaEjd-~Pets3G< z8bh$;00B79zt*Eo_R=VLf-o3h%gG+bmD^*KDs3%@A-eLTSkb;>+;+(7I@atH`S!|7 zq;jdx4b=WM5lekS5U!y@`*i30YLtj{*^tNc#K(r+`PQ1GTIQtheFp17Uo21ZvZ%ua z037=N0G_pcN%KnN(*05zBMHJXSZ$7UomDvE*Z?wks(; zZgN%SbLU4xZ+yD2I~~s8V~`J8=WW(ojuQbC6PD^vw=~z5NK^*M z-HbkXCa;+kt2B4glV0BXeWM(kc)|4ximNV-ZvOyr zq>TGEf1Yb~ou{nnMY7?7j6NB(cwyD9t;6||%`~hzlHxW4>5O9@->p`CSM5v=r+DRm zxe{#S&{w)XgvajO@#xWtt2c)vZgFcb`|%3aIpr(!dnJD@4W1dbZ)x|QB-)*{Ir{`2 zK<_u-lkn^+llX~Z+$`<(GyVCW=6Cw$y>57HCDvxrmt2`sSBMH~^t)rdcpHIkzlvZ!zO!5?i4e|}m+nTc+K(=N1 zh~sLWMr+e#(rje4c-q<+197<{IUi5TpJAt=$2#jN%YtOG!6^pcFtsdXzdpKM*3^%=dvRy{y!jz47 z@E9*WJL0l#by=Mefh4cXwLQ8j{{W3dWx3QYf<|PxX$dT`@t<;g4%O0WxU+V^0e5Ph*ev!rZ-#y9gT^L+6>&aBD z({C?BpAlZ39yaRB8jhmPBgiFE!%Kiw_ZH=A2A>7kAGURXJ{^N 
zf=v*(oq78*HX^qkvNx}{TI*l;T5s9ob&5W+I*o<>v)MqeWywKWJDxOSR~tL+9_B2D zEiwV6Xb}0bftTCJ&-b0F);c^E5BA+r{?l~ajPrx-??k%&jhc&1IXu}I;E0C=_Nq-K z#q*VEcKMm!TK55q?JHRq-XCQyK~bx z6>~__jKVpEp|iGM`DVm?qzrEN{*^wLqNJZ?l@S#eAgLJL>^;46*0v?Kj=`H?c|&0N z2c3gC!1Nqfom(AFFWFPv^eEupH$6>BV6#IEZ3fu(w&jsd?=YS+Pp36*#0YL5%(>k> zxZIrp=cpOrR#dakcNj?IKmal_fHsmj+CLB0wCtg|Ho!BKfv^h5a>Qex`d5LCa`tW1 zn%AM|D5at`f{c^%0y}55J0oU(^PKI-=Bn>VVPywsPyrQN~??}o`R`2PTfXUr}o%0m8ObMmM>vF;Zg z2sPJ72@0>8GSG)EUD2a?sW6S!6)|mW1MlhWn$BqNFER{@*#W*rB#ieX9S0*KnHH0I zaT7B;tEw3z1xW`OIL~v}kAAh%X*xSXV>hv^Zlw8OHz+ZajP)MfE4p*T;URc5*^e}( zxtC+%yPMfzl33ameD4p*DmvijA5P;w)uV0UTLn;VCy~rq@|ZaJKBK9vQ#4lbBy$%) zTZ5iQ;fjsq=W%Z-K;6ey9ddDAO-#!ZH>0uzNiwwhZMv&MmazcD<8bN8Img$qtZ95a zBeQ*sF(YBNvCOBR$#aid-FaY;fD_GO%Mx&Md!K5CdzBeqJIfit!)NK%iWuBQEh65f zx>08@sOS@0Tf3>;tGKKr7E_!9gOC2W#bSM#qmyF%sCiK!&|o2;sU&{K=na zPjD}a4KX2<#WsRf*$Wp^I5g6u+LE6z}IbL;I(>hhZAlGI++Efdgu$1^g^(*<(8hB^1DQ)~)Q zs3ZbZ=NUb}l}~ea(&ZcssmUN?0C%cSdm5{UB^#jbKDFOUE-K9Bt5PqhF?NlXQa2t! z1GlL46;|kjVWKsMrI+n>z@9VS?_$p!algeXXVGZ{OU6Q0BDeY@NV1J92OluwJdNAq2wEn z@UY}(*i~J(p}prf?w*bqa7pHdJ$rll)mOIsEiND)YzGkZF|JRhIIV>tkaZtXKpp)!tmOqQ%cnV0dK^T$ zP3PJGfz)}57^AK>4xJ8v&TATNDoAx-vJ>;OV=6QHSEkCFv+5u6d*)O%4i(?v9HxY*w-aZ;wz)Ta|>92PLj4jbl12LvBcT6Vr8xk*0D zKF7FGg)X=fulG-_VaFko5gUK3jAVO{YOAR~n;dfk^REPsrndIdg1R}UMin2tj+RU0 zo^(*=3A({1eLAC5`j(w*uh}ckf(x`569xOH(Y;5frC^(Rn$plpH+{5xjovpM8}c=y zV0B9wic4oK%%^Oj20<9kaopCP)1OH#4tYb8(u1={Ep?{c*G=rAyIiteJ(V&JhOf(`)Z)AAfupX|$WxG^e| z)TbP0{{XF5^*KFFD(Bc$G`F)n(^0gUO*?)O=m0|_ZrNwRcCiAT=RKDsVLlSi{A|0{{X8o90QU-?n(ZGlUS43No^rer)MB!S0t&*FH^6U>zXd;&4rkJqoE-E zYSK?EGc;smvG316;Z-6ZF(7p2tSk)dTPN$(fmz1nQ|7jX`0h5V5J$>>Tny~@saE92 z7Uz(1x!eYS{Z(Gx&9;LZ1|(x>>U~8%ds7{km`O&?KELNRcRRm$qjFdtZzMm=ON?Y3 zU=H8Namgy&ZR^^+j7B1!9dal;_dPg5bssZ& z70bkASB^(=r<{$u&pduYw5%j);Mm}nBpj9bbN%01ouWxmqkW)*{5)gO)fT$AhHolG zAv2Kiumd0DSA$l!)>kT8uIFlxDlObaVQjmuk~Ve&JAug_rlE#SGUn(kmWpJ;gk~8B z915CK4a`1OLjkb#Jvi_5ta)NKw=DLJB*HPdV^t^1H5giIcZ{UezXK_&JJ{Eo 
zLb14Fw(l$lAmAK)!;Bt34AB;o3^Ovcrz^=N4o|KJde61i-6KIWzUDbABJ#ODzsk2Q zFVgNcV~0%bCgfQj+a;y(?{(ILU;=M+A^w|E-B&g9yaI3p1+>PA` z2ldj|v6D}}cxGH5ycj78!_;xosbZ$Mnn~m@D{eo^ zn;1r8<}p!&&O76Z`^9@}x$ttcdIVCK*yLqcVTY)xZEmEqx>=xnIyO|*aYlVA(`cQJ z(%(~7Q>s{@+DQ9dvYc!`-&5MPkNRhqs_Bveb00ouK_~kzJ$>tn(x;l$h24wpl0GGb zkG#LsR{gExS%IqQSvIV3ac!UVXZ-iB$jg~(dK4@4mfzN=L#bL@c$Y~_m`{~FWRIA0 z6L(hj{{Z#Z9jNJ7Fj>sf#LYBp1fT=*d;5dyU2VLG+0^unvPUxQxV9%~Z$=;8VNd&g zwc!#*7+gV=k~SD(x#&lzts?~{;>V7Ygngpd^*M_Ppjcu^qys3sfIMclEpNQvls_x{ zAZI^XqpEm(p!;5p%y07;zrwS$>ydAj2*OZs#GZQkRjbAF$r)j>s!4JkkJ{8WCHX-6 z!x_iYrI<%|7nLf9Q`4~@jbO)iOsDNZKquxH11GQHS7N-+&SV~0&O!bfa*tEkr;Cie z*_7^*Z7lqfJ%&vgTWj zARjcWh{xt5wP$v{3?}&`?W{_-X!>-m~Pe8t;y7S@abwT@MNy=ztxb7Etem5~U*V0vVG zRcm{Dc;no#yvhFnOwZgt!?>&K;w-6(Uy$_9%pb2xZ<%~0(XC5WC-)a^?ydmdW@i9= zpz)u_(wp{(gf4E6mn&qAukxC%me$z@O+nDNBoYSi=~l*-r^=wSi5uCne_Fn3ah2L4 zc&e}6j;POH5k&HNH^Me2feCcm$|#yEUcU87~@U|3#iVnHFZxOoQ&Aa5@t z{h{8vm^Hh|RBka42RWbT$EFT*_02xp!*(F;xOpdj>T*x`3e>Xj&9u%I-oY4mErYk} zdw#X^c>LD|RqWgTyw76`hs4lNno7l3e0=t=koaIgDtcFE;jb7&ugN4TKX{9~DmWt` ze_Dpu!ZxZuf3)y8jy2=)ApZa%TGpCvuAHy-Sc0?t=OAuBTvvrx6Na2^2sHgiT1s2l zze7)TChDsLNS4tg^7+!L0P=0yyn)HbY}5Dg*10*3eo}qZo2mmS2a{FqFOnrEa3prm zT9-7IrN*0+vM#iNMo9pAn$Nt|)+n4q^EW_G1EphL>n3JjE={Y0jB}m{Adg()v0D9# zlnyql9D>|#>IFlTrKC?njt2V7zj3Rulo>`F?)AY3*Xf$6Hm0b^3+|EMXxqYy#=N~| zk{#C`TziRS@1IJkF08RT!{#G-XFYSbk6v@qy6NHPB%0NpoM^%({F&;!HBrL59u*TB43=c}dxxKx+g&klCl{_~ak6s61$mXM%nWJX7ogv#E#Sy+j zpIr4h^sf3?2AXmz)Z(sb>bjJ#9lSQ)VzPisDC%+B@vPZxFD}&@ZXylICnx^^tyLk^ zX10l8u~{XRvI?q}5A_{~=~lyCyhcq@>0-e7V~K$SryV)^56-<>bSlw`j8a~GS;H!{ zTlb~-kx8S=hKX*1L|XxI^D(9`hSEuiySoSu0F4L)pM3Q1N_9A`;U)$##-M`v%MtZG zK9!g?*Se9T(_(pv`^=fa_axV&3@$DFQn%)CPaT4b_jUY@$Gx=DBU^hIQ?BNG;1GId zv**@i-{zUhw$cQwxC!c1A5O-xuV(Tx`n~)%xaf*^x%>xD#<4HFPiug>eY;9=k2d7z z@*mQ*r-P$P?YpxH=L!4~G_HltqKuX>O)@C?q+_)ddb6JX)fJwY9m+K8Z4o$jgXflC zUz?(jrA4G^TF;2C7F|whmeGDsnSm7SpXR&F7NR+~6Q2MI63~q+3GCZt|O3;sN=WCA|vv#dJ27G26^j?HhKGC4fdxn*Bq23cNJDTo$|A zHvg9o~yym4+5jQy$Hp31x0S9gJAjrQQuvS 
z?YY$kdk#mhHGw_fnE)vuNY%(mPi}kH1{(`JN4-;XNjS$=Y3h1`WsMQ=G0z-y{{ZV& zb+z-)I$}3s2`2-U{Q3_5rn0W&!tNI;?Yr-&``)7+smXC2q#2detCiflxX0J2@9o8V ziwg=kepszp#VX2EvD8I3mY!U4EUovG2ar8UKgyO}O%dHUmnb+m#!ut*u4DUVYl}xI zl|_G(BxBR3ukxlFxa5god6_<7=;3*9$NZjaz?M@dK5B9unjNQPBaAOP*t z>C=Ny%c-BVGALzKAgRVPp8o(!!TWoEq((Csj19P4=eBwLsutAbnn=FV#J+b&vmd+% zrhgGy$0?2NV0gGCq0_b8HyDOTX%LOYV;Cpeq_NYZxtceQd^lme?+U~WW2SO(*QIl| zsVdwup=k;o`_ZwU<0SEqPW98rG_W8R=#hMiNe&O3_{rm^IqRC@#L@by<)-yzdtYg} zs`5t+zkR}O0iz&pJazV{rM(3e<71fkGBae8>C-ik*ZUxShj1*)6Lv`*@_#W|H(Hy_ zi3+|WLzNvs81M8ru3SD@QnNIa;}xmC`ix5+5>lo|{{UJCIRn4)#a8~*c_R)o0uaJH zeBR`qz*jc+l1Yufby&FI5yv3l_Nb)21;C85762(D2XB6r?ZYL4^)Ps(bjIyfUOzbx zo3~&CfzN#Q{{SCqg5vnMo8~&FmN6biau0t|&0veBNXU~a&PXgek=mEf-y#J(g#m+c zk3v1`SmiNHhlp&~yS$22P|_12Ty7Z3u;(QA{{Zz?W%T}ivnm`F!B$g@f#0v;_|;=_ z%K7R?Qls$AN4y~ASaZ-EkzSS`0SZ>Jl}fHk!>4lPwt`o-+zu+coW@p0&p$6-zVvQV zNo?egx_RUO0M}4EtZ{)mU@;)|d%b+t#a<7&);_`Clx^ORIdu9Q_aSs`6ct5maLG79{7ta!=Bn z!x1Y+%FYSt{C?G1*D^^Yil=sPpbkmLN?S9URUOhL^X6&FDFb)Rz>MOeE+m#jVpT9U z;d95S6r~>F0;u03518Y=cps1zb5GNyy^?%0Cy+vXy{cW22~KTAbu)ap$SWH%y5|kf z)BY5LBF;k1hEPKc_2RlSVHLXui3xw=3^~nXYc|FdS=1FhBu`&To@Dkat{zmN+CB zk=pAdtsTw3wV2goar>ce|QYZlcswBsY34?}}o)hQ{)-A^@6r8=%NM=NEeUEW#4b8R$n zv$;{821imq!}6}C)5Es$q@l@{M~@N=WGniq#}#VIB#I5O>=EMvG{N%m&JA6LaDp|@ z-^nDL0qt7TqZp*+xy6XBUAsqNVbd+r1dc#vRBgef!65qQ@vA1$W!TKlthhN-j(z<> z^fchXw&404)S1ZZj8{sENb%)TrS%jb0*v|!2YyC>r3HpKIsUb0J6%Tuk^O1IAa>%b z%Q=ZRh24(4;;Xi&5E6-*aD$(5kJgpTRHY?!Y2LjoSW@c_%10H}^8{$hilWWD3<*8zdGmLloS87H;8RHcN)tq-R zGB5*+KQzW1y zh8+R)rDxi>DliG$J9Ma0BO3%mx+BTYW9d!+M!Qp-43DX%bV0sSg{+JHp>)MRck+*v zeb7IZTbv2OWd!xd&DN`FvTmO7*yA!|r(AZclLT0pM4~q6NgaKv7WX;m-O_K=p=ltN z*4i`kDl0ROO}WSGR?_dwk94Ch#F7B$Gm6X8T$r5`CEF^au+MtaiVdxYIcDZUM^bMpAK25zh$z(6&@yt;uH{_^o6B)1LHMGkOwNlP~$)6^wl3eca>osn+IIkv2$4L!GA}Cl#HA`|j$+M=$DXMUgYP z0Wvq<$vwR@Sk9DWtkLMkZ>hHqq)pJ=+1-NfTkfB)<66;Q#c+{%(V@$C$pab3;A@q6 zqmm3S8U7+W)QNF+>anz&3Y-Q9?vec~#KdKgs?&yXis&|s44_|to(?DnGF21otk%$Pi( z+UxE+f1Fnh;>#Q5wIXYKgmUVol}^cUI+4>O*V?++HS=zEO6}$@Lo0GR^y|~!wBpsR 
zuA&Ich|k#atUs4L*0gHkFmqCrF1(I>Mjkbw;Mw6)TT2rV=yCwgDTtw)<~UzmbgxqJ zKZY$@(tEo>wm^sFgnYanyL#v8USYV!DI*Ls$j1i0zXyt^il@&L<1v_7;it6j>a1xTj}%AcW0+30pYdDAW)>rWCJ zN9JpM{PBB(?b!77sqQZz)b0}JOmMoKL+6YRCtP&{*Bz^z)AhE1tanT1An3!Y{{V>g ztCto@bgOZsOdO%eLBqy<2dMO}rBO{@4ANMJ;mIwIlE+mZQJw+57XJWWZ^8NtZ}4Cm?Ir`b07pw5bTrl0RLIOy5aY_^PJVtI^$ zN8y^9=Te?qCbxCvk1f`{A~;W!rWmbc+B%pOO)=Va{{WT{btg2^~8a}PmALUyXnl_uMZTmV%&Hn&Q)xW~M0Y1=Fe8|4SG5!@eS>#{d zT(S01TvEqSg4*YGJhrt>nsKZP&%71PX!IuRsC1M#QF;5|`L@0~E;dnug$WLI(J zBA=dTC$KGwnmATZ^-*p4RMp^v{g#Y3>B+5R z@Z|TFqC1y`pBOT%EV&icLtx_t+#t{0LH%m_Y3x^p!1aDft_s=wsl6?8Xk`?4lWgVm z-wNAWLFJZdRC=%g*1f%rm9PmEQRn-y$C7^&TFa*>`RCst?7RV5w)XNWfo*80J#m5l zCcGR*Wm>ztlYfEeMvWQ^-9|h%*G{;G28j>RfBN-cX}DkXsdo(h#Nd7>t!xV`hW`Lr zDldKpYG|yYiMKXnZpFF}pszXBlLuBSZ@{Nvruo<|zElIEsKI~!wQoeJ5(J4tsOSYi z9-JHAqW5<=syR*SE%(~xu$DbKkSy*>c?r}K>8`7eu zElbz>^MRP;39$UjoR&SYSbCneQaSv_Ah5;{9QyvEv#&Llkr}?s>9~;_bCJgY{vXb{ zsqSWWZ#5k7epVb;&}I2*t1sT^W6+eT(S_o*H*R&wWRyv0qais2<8DSzw_46OGfN&` zHUMm7)c9CxmTg{8cek0TMA%0BHaPL?#hxK(@COIU%y-hO5n?0?Tn zrtn!o8JlUX1)C zW7u>RYuUr$(DA2<#MXZNv^z`B8rxbak4uMpKOS?hG9P6a;`u; z(fXQBZL5F7ml|!oNMA1GyBS=bdFlAqTcGF=+x?dE=~_t{@g$*WxOCS&Wv(MAAgUfSB5D^mperuOyuSA%m^7i;|8y`t|CW*6-1AAM0;_! 
zfyg4gDNoogTs&^bUjDLW-8V>mGBuL-=S~1nc)+RmXx;S{#jgLCMA(UNPZ(Ir@lg6Fsde-HWQtiv-% zm@>w#AT1*~`?>AUKsfyCoqz>EfH9LfR#ErQaBHT}blX2N3H1h(?9TZKv9R3t_pUnA zj3DO!00QV$sX16Jt*s?lt7*vkkEg2#Eu>wHA>UgWts18GJbXGj|+H2!6Iptfp#aQ?0;bRx?9)9-HX}4zH z3C!!XzQxA%VU9o^zsjUpCWg#Bg~(#r;P%1k_>5Osb!l%rXLMy2Mj!wQLH54jVVGwm zWOmLgBL4tP32yCf>?LIi$`~r`13c%BwCQFv>smDyrtXW2yiRWJ>Phb#a=7yc3!DHC zdZFfdzFcOCx#T83Tb-Mpih z+w!sYuDF;xM^aCjn`1HB{qachXMwvaaCqPvgfluj{{Y`T`*rrHq>+TN#=%T(HV?XsMbQR0Z>2P?^Wb>S&FMDeS;o=`~DQzt{Cq!4YDA^udlsU zh>vZpi3=0w6lDi~-K6%wG^}u;m=<*blpgg2WfiBHok&s%Jv)CoagDB|WruRaMqP+K zGtg0U9%j~?i8bVFBQQId;db@Oq>WgOvYnDjK5082YPk-a0ut@=jIsj1ci-QjH80q0 z)AxM2;~Rqw!f)$l_hwNEyZ}J98~g%8oSZMK3c# z_VP{Nx(&xe?dVNp>XJOR#0-ea=aZesy-zD7nfuuo$R`;!kAE?F8YX2WPb|F~fGYV^ zQOSz0qq#noP-(M*#aWvJ@~CXxmcrzYfUbDyl4&&N+ii4x6kr-lZOdhNpx|LQesrSr zUo*+)(myiUiDH19fF#P~`wZf)=o)O==A&14bW7+2AS2`uytX5f4DO7WjgN*T!(C|9_YDT?+B}9>ck-k+5 zK+hTAp0&y@rv=1u{i5;YWa4*hpgd%iBeyvBr=4cOm2IuwW`;tj$XMnj&H>3E=Zd8? zTB>z}WbvzKZbssp%PJFb2t$mF z9Q$YS#WesCgs zYBxs78!C-s5UZg-@SxOkny~N04)l0qy?)>Zqr-hEFk-5KQU;kgjo#b3?wz zHYW!ftKC*+3=>L{CKaE80wCi)gr3!Y-g#L^o&Y%H1E0q|>15N4`ho>=tFvQBPu{@n zIv???jcc$X<(rOOk5lxg8MV3d^>T^Sx=4j`%F6p&jBUqndX6iFb}PMD1A&3p{6!CK zmn`HF7~DpAJn^4uf<}GPJ7z$u!l1xhkUqYjt!>PBb#nTYuBL^(yAW*oLjp3y@&NS} z&R*^FkJ-b7$j0M=&pkWVt>~6moGS*5AG^1ZM{lKN`Q*Bi6%}}FjOW;9v8e9s&G599 zHGZR@wr}(r2%_3Z*xZcguhyM4&I<^dG-JsQ zN6cu8QA4TkyIANg+-=+D2IJ-=^O~ElB#oz)7_M<1hWNbOWDu3Ss7;n1DR-^}K&EQF&ouwB{5 zvG=P|xyd?Hrz@G4nnX>Lv!e3MbCX<-qh`?RR>)*uk((qFfyW<8^d%dzgz!f>9csLm zRzoA~(S^s(a6P}z=Umlr>UtyS@OkTmd26Q5FATA;TX`ctoKtMArna(0c-bTXt0vxgCyJw}Z}Leyqh$mzBbNHtA1g~! 
z(Tw>hT?wJae#<1;q%Pem`3T%`H!``WKscTkCJja`_+%M#9#p&t2xL~&oy1F#`NlN1~bnd zhO7v7jmWB-M=E+|uE%5@O^Xn)U{-9QJr{O%pI>^~)7~J%%!ylY05*TlE1rU7GkJ31 zO7c|w+;r+H?YyZR$>oJ9-9bOgxcqCM5k*ylS39UwS2}rg%UhOdq0C`p>I#6T*Cdb1 zu_i3c;Y*RhAY*QQbL&>EwJ8O$iHK<1h6kQLo|S$ZOFNkh!6%fhf#r5%{u!^IgQDa6X*loI=@|M&^Ns%O|MEt~vbrS1~I^G|wPVo0}#) zbRN{`?xccWoIFyeAoL=yvf1lHD8k`%53OxZohokB?Q_kqh@D4yyQ4SAJ9mUH z5#xIC_!?_NcVmfyy2g4q=zg`YXJ@NvGm?=jBk;knQbFm@TGfTF*FIg2VQ#Pj7!Rkn zwQm|~OIxOLxxSj5>iwTlOtgX{-u4>+TJ7llW@iQEk>y+*wZF zz|j(!{*`-A@TK{Qt+fv(@*m;boUh_Qr^zk99-lNc`y8AQX>&)l+C@40;DSfgRO_P3 z51FOitgJc=?P}@U;mxZy<8drQ`=qHKnX5Lw2GgN&w@{ps)60=bKT4LOm%f+DnyPgr z@TPMjV9c@W_OjbbKJD5rGxh9gR>o@>T1bD>wAbqK4DI@k-%9UOO0%_x%9?qlOkl@7 z&Q$$Wb*XJ_tl<9uk2@=9yY3Mv&*xI{G}gDfUw`})rWtl3OKTop)>B!?ZuI#BLI}pz z$p_ON`quuPtNpQ(JE-KgkPakeBxmp$uI)Vi#H5WDdX$VVe@d|pw6eZF(II2hDcr~F zj+K;g^l9p^Z@|`BMOm+Tqmr5RYhsUU=E}#XlH=wdOxE_5{iAnoB$m~om$+sht!<@^ zx<1^=B&;!j$B)CBxebtvZHmNzIt_pm?n(au3gxfF6sf1n8~U_qOCgiDH0|h&+bd~i zkbSNoz#c+A0sMN^*3<38xf_*xF7cnIPHRbRh#$T?2lr5aTCp9u{{WUT1^)nO@%q=v zVzX+srsl05L+SANT67?y+kULgO+5!YQAfLE)wnF)Ks>RvKfK*P0p7GDvKxa;-eDaE zFe=ij`V7~XQyDqwdz(s8UkWS3X%syM=Kk|_{3^mkUGSji`Bc%MKf|AYN_#oMMCaUn z+UMnO-jg|;+jiiQ-*R2_o4sl+)Nc2qm^n$e((kA@ zVvY233zV_Gjr9iVH&CP;q~c6fTfH{U{2x9sxc>kW@YLYf@Xfr) zSIvB9fwX?Uwasd}6WF$T$Jx|Zalh1&@99z8N9RV-N;hsDan`#jV=F>ez0ss)B<#;I zp245)6)o5O%8phU9r6+}wS=wLiDTIyIM@7%-)!RvK5P!$a zsyztY2l!MCAIpRKGKZe*)W&oBK*eEy*SoZ2nAX!@*Wg zn`G6e3JyvOYy^Gb%9yyhLioS{mdEs}hf`P^Y<_4w4bKt5 z^*O~{CpWM8e~^7g4ILl#_>k%T640&KPc@!=f*HKmG6U?xrERFPg7f_b7~qmcBV;H7 zzB*?es)g>MzSr#WPLdYJ+$PWuxvK55(sL|#3e)*&Qb&9R^*9_1o|&&UR4P<%q}%dH z^r23b1Bndz%d44b zt0o$xlERQK!ImJ%zzh#enxk_wB%9s(5T7q$S~_i%mvwDqT(q<{a8?%&SN*CPQ$lVS~K#j4lUZny+=J=lvT@4=k_u zoD7nEhxMuTDW|=0HnDQh#noa#^S9A|gnOE8ghu*96_vTRv0wG9tT#U#ZgcDCIH;*5 z1$M{ns#Nz;nQf!q-bsXsrDo)^wm|xt-Ozj=s$XhK8Yv-Yq{A_fh5T#M{4u3lJ&X~< z0xWnWLK(kz>PgNrbMIXi*zCz!TObXJicWF(SI6Txf>fmG(~hUJhsz?;a(bLbli(21 zLnP9y6E1#e@XX&oRqPHhIj!3d59#&=4aR1gEx|*aZ~3x1b;liQ<;JB1G8rDV@?o)VlkM$N_PR%MIj=Kvmh 
z4_eQfa!|au5Rdt2yZ}8&&o!Lus9w&Z2+BZ`0q4q2Sbul}*C771-?Lu5@QmhhCrOv( zRbJrxk7~j=XvcXYvMwxB;1jR0oa z=b7b)Gcli!~#H|Gs+)>o-y_R02=Eq^;8lpWy}%=18TnIooJkRy7V^wvX;N@UcA&(>$a3c%pgVv_Fh&b4%=is5o-N!kqwgY_b2;??1=rdMc zK?{~6pdnQH=ZyXp=`D{QF7VV(HJ0CjhBA!t_i^9zsU)-yIJ0Ld60Smjrzpr_=`X;bHujJg=QG-*BM)xlCqmDmqrf30cShnLTt z2Gx+^3CBh6Sz12v9pq};cgnHn(>eZ?(nD(F9$GM50E!NFdSrTf)TzcX)tX@7=LKXz zw6+k8BK*Mn{{V$Zk?&t|$lKI<)bQBL<|xO?TXu8n+ZAFMV2N9A&5k%YIq&{`tDdeZ z5z%VTS`;5$2EhS{gCdgTao-$QKRdKOGGiIXoe><4yl47W^IbDZ)uU2*$iX=4T+X=O zUx=+_7(F);-<%)kSy6QyuQQV!D8`&zdPx^Aq#Ko)20%QKjO1e`uvS8sQ?NXcA0D-- zEy6ONm*@vMHIL>7)LET;%r9=|KGoZ`k1rQdIVPEL3njaGzbvc)9)~rTs5&*k5rrE@ z30k>xF7l_$WQBq`%}{&q?TvPALS78z+2mujext%lRBUULF3Z|dgq$cnh7Fyj@7M#3>Ffm zNj}G+9C2L@&Yf#c$m79#TYtB;p*j)ZfPYUZ7( zt<3UVEUZ=jPbrDYkZ>4`aa$%_YHDsZ$%-&pcR3$%INW`ON*fz_+Bn*$__(TyW;;&h z+#90-(>=cmm3+~>qnBoEgp;5Hur$roi)SEw$7^*s90BQ?Xa^kw4{r4u4U9k}0D3oJ z=nXYxb_~R9bop?{k8i@A%jO?26Df&?0qcX0#CFXjeq%E%n*rOF#xSk)rcWo9qxVfA zKQ2`L-r&@Zkvw1!94X|1-xL$2#%+R6-N=KpEew5F$A-8SFp)-T9V`%oN^8R6D zWn<6`ecGu#x!oPWE)BaE5ynFY{QCM(%;yCqs<{-}OhzUMSi88(w-`~@xjjn4TS>os za54h0AAdge*g#$wqet^4CCJ>}M|>YjuX(4PO4ZWoBaPbx?Hu49nW&PwvBy&bPD=1r zXAdj0Zr>nPUz8{xd;I#;a$HEhP?go%Q?-Eg_04K(+Iv00Gbh=wn}NY)R8fJ?rh9wl zxrs!!*IO?#l1BT4Y%oXE{VR@6P(0b;`5vg(jzA{K%SfMd9S7E|Lvqo^UP1T%@IC64 zymG|B)-usIBXH^uPo--^YV#Q*m2>5ssoBEzAmgqLZ3QNBR#ArJ*0)dOG@TVl+7Eoz zs$4J;kT4uzrza$ORV!k|hw_o42vv_w*~T$fJi!(=6<)`9Cm)aHU2shvb$LQtBII{w z7xIq>qa6PLI+`o3@O-#c;1*-lclWIMWKe`icJYoo)KJRlaL=$Zh2VkGwC1xp9u-tq zOH-l;V;zSekCl32pno~$aG!7h3x+v8$i-2EP&-EbyTbkE(cM>eH31MRR2YlNWX1V1u0JpF><9NOYc@_|Zjhm0B1o!$? 
zmXIWa&3?HGr>LZ`9!{SMzj4StshWkjXvD~N$ru>mbIxnYSGAA5ij-)kijcauhyyP~ zP7mS#0N1N((aSXQscd6xeDZyNTF8fV_wEyd{2Y2#;f64Zf;=GTd-SP~Qw%-le_Mh| zh@@nv%aBhz{{Wv_Y%?)(6cCMu4W5I&Hh9cRKFV9=`HBI-stiAOGD3GMzI`$=`g2+v zRH-OQCTq3CO%YQb+^MJfg#AQMgDt=O@oM*mjSCVIif{)%K z51Y96p|LDdlDae?x_Q}{p-`j-&s4{#_w}vYYnEAK3mGIfRN&<2)7R--tP(s$;(WVI zGTxzwzolD(?mdhZ+iG&e9-l#6)oH1wj+z**TU_)VH&I!mRGE~_xhTv?$j3rQv8-F> zk?z=r40e*SfJw%F@}A?{{3{;T>GOnaklX%YjN_4?dY*g4zS!nWu*loL8%{q=n(=60 z9V|yQR*#|RRmMlzK7$)YgHctM?m=w1AQVnX@5itC6%zPoNQ0HQx@KOD8v#-+wfxfv zR@vocU%woHH{^Q#DTht*AY4IUs>_f}k|_Lfo(+8*BB2{eey7dWl{w3r*C&u^kjET1 zl6kD(b#8|CBiD>#oVsq9IT~%V4_%Butx;bFYLm(*)SfNpxwr!yeGP8f=TrUM-Wj0IRrdnDnmm5-!w ze~nKZmht}ptK3JcM8D%)vc#^5p3OWY7R&G;h{_w}Y1{q=a6dYnB$5mkc|DL8{Y_X) z&EsroEWX88ACMI%nRG|YRu%UE8swHC-0F@N&h|xdr_1-6IO8AQkb-JOwk|Lxl0n=? zxF4Nbf&`G{D^ADwh$ra)id0jnz0_7 zs&MxMK8iZk#J5xY1<$EsN&f%}nPR~H6dg9G^n!5&@yA3efy_EGo;a2`=S8NUCFu`6pt{RxhP1;SeWy+J} zS7#w{k+k)FkU^|hSGRB{&mf2pAVkq z{M_2k@%eTiJzXXQR)cO-PFLbLPQmuzw1AO{}ZDGpDrE8+C&d$+j$0`FMq&ESfjssYsU_i9NV*N6RW6{cA3LJSog9$?Si|wiZDD0K0Z8=-!5y z;gpn-cMyAZtrKflvBpx1vChwJH}MdE-x&RCHfusjEp7+fSVC`?(Bf(Ek91 zRhrTI$pT70yq_tqx;VO&-j2rJD z6J5-fv2+GV&!et?QB`HIJ9;Y*U8npj+{5QpY42z1j%v6XHG8j_z?}x@He!)vAn+D+ z!2M~*Nz}^&GPHzxvXS)bU3*x3`2})4MMEut-TT;}Bm2bxuTnYOn%NxR*@`allQ^rb zKT?^bCeu0VhZqCX=|p-f#dqe-GLq`K5slrmf=?OG9r{;ebzyQe?ba})(sTQyV7z+& z04nqSe^Htf6X2tQGv9rr$5;=8%=?S-H2l&)UoSZ_PQOW zp6f2M=I-S=Z!z}67u5A9*jDhIU%<3HN|UR9dRqPLVqDo;Wi(ALOUM5Jp7ZcNw3Eeu zEMiS(R$C}rq!z>GKGgk3TQB>K3LvOBX_Y|7;&>mQ$lrxB*Ta{Wpj_&*c@dM)oU!~nR(fAbvTd?OfHwd} zK4bN6rncae$7l|vFYeQ>TP4q@upg~s6skMRU&yMGl2%{vOVC?bSwPLF#IdTc&vP%Q zwLzxZ&*F>8BZ^kF(%|{7Zdl_Z)9QUI9^&rr#5eKS*dY>61;eQ3qa<=yuGKgS}!HP5U2{DYEV+T3vbJL|{_%_uZ z-_M5KqC_CcaP9L--INR-J*&Sul(9szLbd=0xhwj96;k3?4<=?`ojzU4bL+tFJ*ysj*IA?%ILk3C zS;o_h<0SPTC;xi#KWikeRbKi^#*VrsVQKPqCKbf~RwUMztr+0BE7VHG7K4i}^ zj2vaVaoepM{>rRF7oF8itWE%24mijk%+@SggW6jKFsSno0UI|t@00#})jN$%mKFW% zg^Y&^-3a&meQS8-GnBbvtsl%@+ACITUEi$IzUej$VO2%}KhJ@mPL-W=t3>VO+yl2e 
zfzMycvFEvIyVAP}Aut$m8m+VPB`ObL$Ri7iR%lTaxkWRz+55kbUh5?y?ZZnU#eg6Oo z?fSJo$2Yf?uX5~~gv$(wrKN&H^PKe`<5XVtqmLzzl5!P@&p(f~Mj|USs0U!fM!Dyn zwELS`B|^9%iR;+a#+^7l5tOA;acvn^PE6M-fPCj4sjD|@y5bKoWV3C^?}9qhG&S9+ zPOO`bGq$eZ#$nW~*qom?`IdKW-7LiXqmzztp5I#L{6I$dT~0tGWPWv}GncfQHf$1y zwlRvx)pq@cH4F(&yaSx^&2)0v9zHUsJRC_RMp8fp^1uX9%b#kjVp~h}6BaM!Zg8EB zdRDxXCBB+xjX-^omXPvBa4^1uo@*LbKia9h@(=@EGFFO_<5a87DWq0+k?Hm~Wsy=z&d6LAZ$QkpK<%GTN?CNGb~q3aftw&F-Efh%7I-54ac;k6%pJT?|E3(x}c_*jjEa!D@3CGm=D- zTBtD*ByepdUp!!S$2|9`t?qYD4aKaGPb&P*t~ec-{$Hh4zOlWrTZwZH*v`{#H*Cf@ zBRzSlb`eU;=EJ#>VO0N1G;`N}-JHr((BUvO&{CwIH~?XpT|IT&G%hx|oP zZ6g^sMI$N4%>%9q@(ID|{KY$)#%6?|sYTpG@%`2NzGrTGA6lv|L~Ih};qw3vtCE|E z=O-Ue(yWN(Sc)v9u>q9GxxmTdhqXzeDC!q;43NDs>G;z{JM;yXc^K|O;E>Un+W@a~ zpUaB55DuWYVcX_hFygc0wA%tg@|OyCvxBvl=zCK}q8(YM^CAfTSRj(QKZhiDp_Lh1 zb6+Q(qxpVxgn5LJ-&MtDHlSgSA;i;2%oM9_R@?n>Jw0m7b9w4AzE=KAH$I$VgJn3f zSq4Fk;|wv3FwR9|-RM>_LoVp{EUa2mib?0P`c}az6U*Gdsm@5|nJi*(HjF>;K6)H= z$MmG*aZa3MwMGOsGuy)qCM5BJEP( zHbZS-JAYh#YnZ#U8fz`1!C76~44S}@P%?+U{T zG70P}J5Fa+m7*XWqZv5kH09osJ9OKIb?L|d0Ism7vCT%Kgi)>Ll~#-p81bFGy{jhb z32f)sr-$gnfKPg{1;~+rNZwGRkf4$UGVa>$<>N$(T=YQ5!!>#|RwATl4L37fc}!Ia zWMRo1XB9e`Zgzw}zl?mma@eb1X1SZ?TUmp*Coz$=$A6&|@t|M5qFmbBPRxISBW=ik zE}iJm^R*(j)l3D}@)Ij#7$kx!#+!8rW@rI2$Vf0A0|V*q4PTGL+J>oZi`_cv-suO+ zDTLw0SN{NnGg?_NEy!Q*AC!Kkj#s+X4OW}c^e2iI=I2df_$Pg{@JF}*09vcY(cQF> zt4S1&xM-QXgT_66m7yPob(?^XD@AW|qltXs^8xAy^Q~jx`7bQ+StLM`r{UsyYbQbAR7NRKngiZSV6R?>opZ%u5TD2TY_hp-& z=>p{bb>r2?BYAIh}F)uB1x4-Qr9a~c%tLN=)Gz?i?dS-pA+dKZxGP*1Xsl+Fp_ zo@8R7bA|LA3gqTy&OM2+s6J-}y%(R>lX%Yq^{9cT1JfOSsa!dGl5cL@eFY#t{<=Z} z{{XCPdk#$kZNKW&FS+E>Cf$kSjrN_RXg$p}giH6Fx%3qQc5{|u$K29pi-#nyt0pr4 z0C7V5o=q^w#PUr?x{T3sxlsJknr7-;(jA9xqjfVhW{U-f?xQr^(VAgl`>8`zd#Q6p z!oz!KQtUdMQDl;bGRS?Tx*UwKvQ3JbTr1_8M%>ywU^w z%@G6+8FEEr$)_@8B#cz`Qg>(YYU)3I7u+Nd(wg$eD8s2^Shr0K|Q#J-%-D}j0_0)Wyj%+)-}h&c|YZjq!0ad zgpcG+dZd~R>NiB&m)O-QZ03ai@mX=|wdlt@_M4>t0IyS(;ULKxU5^5bq!q%R(8hD$ntkK3;VI^ zYqg)jnp~5#@W$}W+?NbnC+OL%yYC0y%OZ)jM!JlgEGmHy=g<#&^=RU#*H(`yX 
zk!zzy$zu}Ba7HATZy!KEx;@V|TIyJu=&TC*jp*vJaue#^7x3rayJ0~+UdM-18r$Th z_x}J}6=c*VOL8=eQ5+~S_N#&A#)JKp^rvZ>WzMD}l6#3B8Ca;D)T*~1!o5$_@l%^% zW=7WJORIo$9g-j2#63tIe;V0}{a}JYXuH%Lg?7TJ=uUIT;h&{) z%WvzcnSG=iEX2Pkg@Xi{i1P=a2d{o9*3GG0D!skD%!H^?Tm4w}f8)A6odj*o;;sS<}CpKS7~J zG?MqV9d~Hg4%v~Ahbz3ca=7Ge=qoDz8+%Zq7KPt#)&%j?{O9S&sjsz>GcruzLXWhE zjJCsppTnO1qOmVE2fI>`mm3_cWVTmn&*nI-@EB}De)fpktym@IGD3g^x11CV+gXPP zgVXXAQHswNOfk1AE%bpD>G=`};ZKEN8FWN1dMqenMfrkWVr|Vjg$>n7ifzQp3zV(+i zhnBK6yZ-ERBRZ@IhzEW_I6b;ni{D~5LAAyRB#izQ%;|HvxOOZf-yCC+ z!N4OmqW3{pYXP?h7{_0&S1sMm@c572H{G)FE3;k8{;GRkM zsO8iS^Bf>Fjm~lW>YU3dnf83(@-jM~TG}btnzfbv!$AYJq?Zz8FDg6kz^tp-e2qE8 ziTmBdcl$oy<6AQ^h(l`+3mS%ZBkueCeJe9pl^V#Td@8UX=iaw*QG;)L9yMz6o8>Go z`bL=&xx%)>$LY`XtgER%;$yh=Be<(^TqD(;veq*+|Rhmf?4*Sn83H!Z|Bep37kS^5Iq}w{NW@io8 zt_R~>%d^1EAjGA{WmCp5Mt?fJ71K;m29+`b$icefp!YxhYS)%2=zVN)QkS~-Hxp94 zxpN8Jtl+sl0x|w$^Q#GS9JmU&ln2-WSS~ zwgx)m>s-x_p*l`Wl7JtWu*f4k^ZI@@v2Az0;aWLW(ZAM2$`q*Mp{_cZcNTX?6B#7R z`rVQ(s-*sQ9PBKz?nfBNV!gTVT=2IR(k--7Z1TBw&lw{b9^RGNNKmfk*iQKn+qN-_bDGX3ch_0V_VRvWoB{W@NT2$hNddKki4H}ZLI1{sLPQd>G)J~ z&jqquM9}VvRSJG$cKhSCWZyw`W2i|4+mX~CnR+nw6>>IMAV(f_uZ_nCIOE>Cqf+}H zJu9g=y)0|W$sA!PcYAaoDN%-x>U}zMR;?tRHiUB}vf~SkG3P&fq2m>c18i27GWQM! z2rNfENc^f_x5|<&&A$0#AQNxD7$AS1D_(WEN=HDNjE%U*iAWokF#iBDk;dH9*HL!0 z^W-8CA@aud&(jC%Sgottz**3RkN{QWLD$gZuUdxBRDd*YN!^wb^Ni=C0oY=nv__uO zq0>uoBL4C;GdzkF7Bljbo^ooG_eX<~1Ckw_n<3CF0EiWzPY=bh#g;o9F3RwRD z5CW}9aMGDo58rB}@J6z&-5wDb8?t#vFxrnsIRn9M*Z+Isar=bEI@`HfY$#@N)h z^0_CD7u4pS`P#t;%6Z%XtUK%1g3P1lX_IV(Me>;kaxv-86tMV;!5&qAHa(F7z@Y&8 z0qAJvNTpRLW^QjHCf)!E_rl=Sd#gzG2n1}(8zXFkB;+4YN@N$GXofQw+r4wUuHk`< z^HQeSa;=!yjD73^J+Nu+jWn9GIakqt(i+0en-R9=eV_(DfcNdk*05#M|{eB zk2&SJ^y~)}=}&6~*OYU$qiyQSF_zDMDP^>V;5=?ZsUJ4pgSkJUu4&V`!z>hD$C1h! 
z8Cp&W&euD8`WnBcYHtQwQcO77LmpSt@b|0wzM58B+f|9Bo-iNHcjXhNHi7eW>s-X8 z2l}|$J)Hq;riNxOG7dyQ6>Rgg{{WxTuBFHBa<03Ty7e4(u3FaU z$K=Xe0W%Zl`-i9NR_2wW+=(a{0I?^i$Mdebb+j>fbt9mL@s*3pz$xc}j2}w52B7{< z*&l*P99J_9<2VYdw6M+(HDWu4WGaphe~5p3`c}Tu<5)F&ojTqb3ZstvU{@{USY)}> zqcOyHND!Vs{VB%%;gUHDvrFb-^11T);iR8;!+nNLy)oLoPe{|WwQscr^5r0txxQ7;arLKa zJ{QxqSVhIlt-PtyXod+Mg!dKV)x@klRZd%XJqmezs!`^H*Rkf;I;WK+GHLQOED#n7 z#{}0wq}{#z9&|Sn#XtPCja-bMP!AN#uL|knR(U48w?#NAU|xQVIHK1{(XHkZHO=WO z(1H|?%vX0eQ=SxR)1SPRy-i_teh2)0O4#~jQON zI!|wy{v_409EYh$NB#8G=GDeY=)cSU38yb|53{pIKGMwV{olB?G$TJHy5M?5zt*IV z7&D!XJ9@d|t*gto3$``xK(9KbcGrKvr>4Y0$y5fq+N1lV?qgSN?6-i@4O?EPG6fyDgW?PUpF%1L;tPrv|Jg zCnt6|(@|Oz2Ih$-5~`H@pkP!xs3xGUReOYio;FbIn}#rn2c-9WiV(VA}WQ%j$ERsnNHX{6F-m6*qFqcqwx zM=Jr@qcqwxO29ibxu#}}(P7wrXw5e?^^Kp9Fg;Hc6707VbuQ{mM!i_M^k4C#&0+Tx zC)2GlU$G{_Id7DE{{V$4a0U}Tza8vsJBtO-khZ5 z3MmiG1*$UCgwzhiqDEeaBhrH+Ki*s#hcwc|IW=)tdIO5wmjkr~4_bk)p^SX;sQhZA zy3Ld;x#m#5wcATC#n*qkrr*h&m2sGQKg60}GX7dKlx!bP)IHMqXUzbP;<-&r$2x7Y zF^)xi43oOJeM84u#U`P}PQSJiH#`(4UGq1bA{u9viAIGQ&ww;_GuQg-G)5eN_Hvt$>jY7azESJf7*xkZl5lxciL-`T<6!H{F{{WIVZ?CMa+7*i1S*iJmUf!hi6_a%@hxBZS;(bEnu#oZi_WUtem-_A9 z{{YGIK?vy#E46?6^ygaJzqrbd4h?O8U-CN(-x68dzuE0wT35h%Pbv>`P6=aKl6aQ( zd46|Tx};vY7#%%2^{zaUdYUqETs19G-bnUv&+5_Tf!IxS!rfNlSDG`4UR-Sjy}f^} zO>uV{MH*YHFP5nwb>#b;bpp8&doIUUV%vaTPeGbU?q-fn;n=Yodi5TalcubCiyv)~ z)ZgF7W9A_l)&1hL$Pj!WcHvG7DL1)Xwuwn zP{4A;;O9ND-ngZh;ncZ#j-`dEC(UCT!*sw%7v)wd<>P_=9cuo>KRX;17#qKY`qgAv z7Df=2`4@AH{r4YFT8>ASX~PN2YIL=WtLIV{pjpj8=?rw9XNeki&T=g&v<;tD##Npq@sMBvFtHcgY>UF14m* zETTJA*%=Pm|LK|L|s z{{XF2^i-AZCwPNlda|4m&#g_VUY8zRaJ*3CX>c)-?d@5@KJPO$qgg^0>P-#2Wmy!Q!09(7rR6?t708j(58MntFmiW@dFeGYOes+P1XIy^Aw3n#s}O_LF-fi z?cTYSODjaHDQ;~YaJC`0K^S9#?bO$2;XN?LYWMPGOzVU#)nXmbyB=}$u3N)C7?)CI zyt!SLiu+U^4A1@1>^iaR1$Xz?(A!3-A0co8fy)p$82*i>RFsY{h?j+`wYX9U?D<@vZDX0lQu&nLT|Q%tqf9hpquFK{!~oR@Jz`)#7a zj+yW8RxMKJP`-*7GFo@qoD+{z$Mmj66z?e=iY>b$cnUS*F=D9zC4*q)zr&j8^esQ_ z_sAdpR6o9y0xgY+j?>svP*}OjYFT|G_Z=lW;R`Ihw 
zzb~TY002S%zEQ?q9;YqhlGb|Ul0?q3NT8@y$Q_MkqdQrmKn}pg+Eu_g$Dlt-+40$E zr?i?gg5i|^08i&!Y%M&QS>gM{eFfp2G1dcJgs33GT zWv6JZx4ca#`E!QH;sEG<>jh+B#uCaLKH}KxkyG2s&aSc&jElV#a0wm#>qR8Y<6ojW zOFdadyKPn~@Ap#~RmVJb{{ZV&l3lKRjrlPohV^E6ipC;`eGGJX}c zYGi0vYBYrg=(0dK`t)z7y+op(sPwS-hTWLmcSsjfwe5_^C-TU6v-kaehPd&2J*>?T z7y#juJ?plt6DAU5`J8ea>OH7alhE<6B>7o#+zbP7`>nt~Azzd?U7+KwYBX`nsUk}? zyvJd5Dlj-53C~~guCB|&A8GhqJZym7-EO^%mFMOAtN5rtoq5-vEzGe5j|i#vF~BFS zI(1Xn>Z?jrR9otWiADmhz!`Jaug@e_QO59w0YE^2hA7>@2O_!a%S)RJ zu#-Yoc+@fTGm;NZ54Bi`3GB&&0rr^&3gds6ew~eUDZ5yhNw><42~n2z7QB72$T0%} zwf&Uw*wn=?Hl$Zew(XASyY z-^e%|gZNOUj*nqkYs$vUb8v1vyU7_OfDN&xQKa`$IPKFFrTwI~cJOZ>=*~%yI)-7# zMgVc?T*jd$qYkHP^07%qL`>`$jw?V)F9zG0V|cCHP z1L$f=7~Y2ypnsQdPrNNd?KmH^@ojgV5Br4lXa@g6&D# zeqv8t5Ad%`pG2^pP}erm%69F^QODEY-ns2V!togvRaqmBGrM|WnR0p+;F0gzx#xtO zRpY6i7crw+YCAIu`ZN~uT`u8=A$jU^)1GQ5MbDBSWPUBP93r7_6&pMJ16?VM8C`=ug(QgzpfJC&^bBC;%8frnjwwjMuxl zjXYN>5sHElrul&Tkf+V~{{WFw!3iUHNbSad)-m5BeFYJ3my!V)``tfJ@vE^6w%EX$ zUVs+b2h{c+)!<@nMoum3{eN1Xge2!4VTo?4Ln@ujaI#DbGkp{awy@hk;cu?h9h>Ef zX>fne=3^^!E(-h81O#|#nA+}nl+`wU+W6&W9yDH zlUtBPVvqOk9leK2`3!U(8my_w>Y4PIdaNinf{`=6S54^R?^7hGB!PhFP&{~_XhU%=k=yy>c?p6 zK#<7eg*f#*Q_Z@@Gc`(Hj_i;(Og4Hy&B~VY@AC)O8y-rV}{{W3Jz9R|_0qgHhw~x)=pW#y2t5LhMCA%xI zS%`hHj2v^fl7AY1mdnfSj2?btNfVDG>>oCH_o}8`wq?mZ{i)fvptsnt(nhD`%!kzQ zKPrr|Z@hC&E~5tqnA0vY59wB;CbUCVPf_I2n&oahOLGIr*AbX-#4IffZeTidj`go^ zsJzi5%Wo~YWegF=%mzExr(Y_>P=seYH2XW8v~jenO~NUr}C^_PsR2+Vv{_oha4*$G5jmfTT$0Fbu0+eAU#-|$iw;n04mJ7hegwJZscj_ zJd+{b?0%gG<6ldK;p{CK^VD44Z$&E!<73|T~0rCw?-7S3-Bw*<>0<|^QJk}WH8|JWTY+b7 z2!cL>C|oEX?>AHURa>2A3nwdNf@_cBo$_(?&1uU`Z>@-vsPtMJusYjY?9**h${zB` z_nZ9t)>ZbkZEhS!{{U*7j0QY`{XZ(_?yv4|TQ@T^`*!@QwGyq;AsQ<8VwNn#vB@VU zl8P$>q$r}6fr(CDwD{q9It(c4$DppiMDXpDR~}BaxCszCG+>j1fHT+*M?Uq1deNgE zW^~ZQ&a}B=a%zt!W@C(#*A;fhN}Bglip?1#k^!2E00hl6xudecn2qAIjX`$G@! 
z4uq3Thecf;@$TOIxrzQr_79Z@MalNZ<<`7k?aEV&j268QW*JnIT+G7LyagO3ZZ!#s zXj@=|X<;UM5WxPGtezgR)0r+WZ(NHc4G~ZkhdKF=ai7aI1?IIajF#r-mz?_T&!31?Y)>Pl0y;qhkA1* z!bd4!&Rp;@jQ7tUg;FQY*!V(<$8zHbHR{GGBZjS4Vt+ZAln@#6LX2_Kts_iiokW@b z>i{~ROnTPEzJT$>E^dBv8tz!)Oo=no>OE^xMS(wiZm}$n^EpsH_BqG3L~6owDn8APY=9!>!N0P9kCqDiIu zJP)~fsxjq>3Jy-}>GbP{ zon>u{3{2x{DB$~(OEV?GVE8ffl6^B={5O)qp+U+6ou-|0s7r9601==V!uO=Ar5#G- zcQvF2donGic!qcj$UoMdWu~Hj_EDDYxa5k)w!NCz5gGYUBpTeaH@d=Yw2#Szf2^Il z52vZ82}Vx#)Ka=b?l1PpAhV3kaNH^=&OPd<_EN{2 z!)`oDH}N;)*isv-ct3M$o8&!KF~)t`w)TA5$mjN*B8zA}Uj8>3!@?qqk_nMc( z9vlAI)E(}Rk!5f5+(>xZKfCoceIvTOxLajq6Mg3+?m)h$)cRJYp>+m_qsMOuDG)i1 zJpA)F_nZ0Ww?R%cRr#L8%CwT@j_kdt>akilj?N&CSIL4E{{WUqJAF@6`Byo6c*Axk z+}$OV9uR+ZeM#@{cYt_fdO)_2{XHjALkp zEI%u|+_v*uS~+5ZY5IiDRzI()sy7!>-bf6LT=c-`K9yLU@=i~Gdby`st=z^RI7KV* z9`1zt8uR5QdJ%=%M#qPA71gFhlH+0mcOrwivFdt%`m4Foye}7r?&iH|RnSMaHOAtk z^T|Cr5l+zNu+wyS6ptmQBhCs>J&Sz>T)goF5Eq4{P*4y;edi;f=cnslU3!>`l62y| z)_rah1wxb~IecBtPsOnLUNE@}$&`i}>yuqn2nguPPf_?+t=(Nid1)2xqlZACJ4+6>}Z!Z zI;K8*In=WO&cbkOn^@fX48t#{iGr;i)vxA{K#7(EZiq1)YN{95FHh3Et4Z*)&*Pm^ z(3eXKD~9tJ9By6TDtS1`x!}nU}iL|A%3iDk;=?}ofpGeUxuvq+mAZz zkKSC!K5xKw6<jiH~ImEJxJi{`nOKtE@67*{wf$cPByDVD&zgtfNz! 
z-4N;J^r2c++TEG{Bh^}aud!P2P<-qZk(%)U?q= zQJF5JU8m+|b_03!B=)Rd6Zo#)(%wrA8et^vHcN3B`B8lo_xvkWD8@RmVrL34b!+L_ zb@VxnZYU<#jkToeRX$gljsrJ;!lAcNuOv27hb+Kh&H?w&*0Z)2Bm>f>Gdwo!134Hu z*vU>Z*5|^`uRTXmthSqdI=j&F06u$_0k(9tdw={u!?Ee5W(`~O-Nc`CL$_EO{LBXdqC=?jN zw3sB5)w$<2=T_b>g=8-z4FrepG)Ecw`u;VqYvSj5RKX%hLv9gCm8tadsJttu&ul31^oF$Z|*pMP#^E6TWnL2e;;eg;PwKj+f7V4gin+s?9- z2mU4gB=#b#8DSD4V2Q&G%_3tUV>mrAQ&NJHyiaQ`&!<8@#=f>WU$Cm)$vlyknSfj$ zLG4K-epCm_UU~$;$MUX<=FMWgSvN?rxcRu;U8piyM=Wtz znueIkkuhkC9&!n)x3hL-MykX|o}0OLYxHzww{eo7ayt6fOvmPs+R2wLH?ismZ|7RJ z3#XOLI1bqb+l*(mI{Qh5+CLyN{{YWP6}k@GAEj|hvQxE>Nl=|iN>@#ogvn=iLX4Kl zJasFN%c!j@8_8Zo-aN)9B!>M6tm}Ry_*OZ1IrMzT^CS0KV0wX$JKR9#%4N0t~bLD>lOR8MX$;mM|3;lHXI) z6}KJI!-rA~?ZV@t4{G>)XSJPI&Yq{yQsq?r(zdH*X(~kvTSp^A4&`*+u#@ZSR3g%C zk%3!i7oYA{t;(cVQYP$^gPisH(ro9P*EF$Mh(^$p<@^g+YA;!&*!y_ZBMJvjoNy}K zFnRBT@}lxhS613H`BkZ(%CuBo3-zxhLDZb5K8Dkif>DO=L>CkJxLEj&e(0va0~P~3 zDLj2DC`!n|8Pzl0j{=@%YgTCX9I*SxjE`zuy4{ON$JxhmyUr-xPL39s6BytQ{{YIO zE$dl0vvnlhktXUlRC}o1QOY|KZlg6G>Nipg5^km4QEsK(NC|gQnv5D;`_ckT`q7$? zbsMMxT+)h+HB7NFUCu%;t5VcWsO}&*r{f)dU#&FAiT^0F_u{YjpMWpi1z# zWaO_xDjd>>#WdO_#WHf_H=P&VCzbR*l^w&0hVS)FkG+kBKRzw++Nsa!S@Y;tN;5W{r^-E2*$46<)}Gx|cW?Up{{TY;mdYAM{)d+}?1msDZ;^ON z*MU`Jk>)ve$@KKEPkkpuaj~@9Wj@IwpV6wMo*K}k`?h6&>z%_N=bF{dGt-!tm(_{q zuv$rQ$6!#a_nq5nuhg2>v(uwk{MfCU^BrY>k`??nel6azWWQ~OJ7ic9 zxH~$Zsq0;H_+P{5*v6rcLc{zi4~G0d{4r`-{{Vb{g*!?0Mk<8kx;T|~jaVVuxL}dj z6wa004-IG?(@_5azluMlJosBe*}O-nWB$4+{z9kO%epmGX8If*8Lrs)cR}5Nm-+ty zfByinp!jD+?6<@I1V8wiJ)80?*iW&<(${DI00_Q}M_ojI1Nw0K_N$fqDUUVjSOhVJ9+ z*D)}RKqKB&a-nnn(Hx&{mCcBh=`VK7?89MIO=KsB{4HxCnJyp!;9{ZK&dG7nfX6*? 
zpZ>jdvft^NUY<*!MSZfQK=Kw)1+sJ3IOnZ%vRy}QY+5vVW0F!gu0|9LV?F->`qdF+xUm;I zm>)2L7pBqc->-V~Xl3*$waXI;W2E#oWx1C7QIzbG2qXE6mFFiJ>*@#PS=-sJXG#A6 zeJrnt)V4mE$81#@Z=`#syluI1IvnG*GI{*y(_~>*Gq|{1WSaD%lGxyuB}-nV`DPYZ zi6n9>N{@yGvyRvv)pFwcR8}v7R}65V?dP$qDXyAbsS+j!C5ZfgDv@1a^PQPwJr^oD zByxU~^uD5wBF(7mT#Ha|F5#qqmu?EN$i387{j}tlp^Jr@!qCQwP=MZv6D`t(Hk?~M*}+}t1i$IFn`qmI{rL< zVw6J5(waW4!NyseHII7)@@9z&lX6GAKk=H^EAudLO9i@MNmf` zWS@0Mp#J~?Tj8*LzWmQS9h-Yb=}_3`wcFVA+vx5JthW)zwiIvSC-{i`Ym&RVp7Lgz z>PC^hYp9+28*#Rar+`Q|@~>mUR@{2h z+}fapqE*NGwfRE(Vz_#IQAsoGg&IcmQJUH{Ec8ZGrlq?&=X8{Q{&BM%Gl5V=a&B)U zvCTSU^?yo zb+jcpy{wERqXoUoYPOmVn#@~tYIherwD0Th&r0Go4MOi&zLMdQoxi%HAqS_@xAkk- ze86t^Pa}QEN8Ue&VNzIV*LsmfmLU9YTPRKuJ>(<#b^24SB-NX;D-B9b-7CqQ%zAzG z>Vd$858sjSi}=&y(B5eoA)RF)bwxNojdi-Whi}q$v@!XLrz#*AKs|;sY5IPRGz<2J z#75s~Fua&8-;s}gmBT4f2@ikK;CpJh1l$K(fR3~po-v@ zDpaFsE6cJyh-GxDNjPoX=k&c3QJYUuC+>y{mfUg>b^z4(dQ=dqn}=ia9jfD%^!dB} z4QpQN%3&}g7}-h6o=@OwpS-?XjqKBW$}))j=Z@4-`psEhS|hr&XifRU&J({{TdZw_uO;j5~cRGsRZ&Yr6W_K;dE2;mm}8dAT_KKPu~P z?x2V?HiRvnpAlzopwH$_aIJW3w0G1T;^Og(vF>ts{VDRN6s79LjD#wCv)g%kEs@5^ zHnT5Xq>7(U)8*7|zRzwlb?|}mD<^#oB*yfk=vT-G~W^0YVav``P5^~P8~gutxt3I zltUz2ZllLk`CNV-4Qa`4htfPg6BADSyY)vi;b}Ds9WGfKGjVGRl(>K&ok#cA+;&LKztHcM zl1OvHNsbtPl`(>PqY6@Zd@PhD%@uU-x_{-}e9fC{o2^#v*6gDT8%Sf=ydOGEgD2=I zn$D;!{{Vj|^YsYH)#4*9)y*k3iKeS&IC?igAbJWsLJ+Fq>?OKL! 
zHA%-DgSXgv3e88CR#Y(&_B`C)ZS-I8dm`?Yr{BqE5B7}a0Hc+UmOqbb+<~<1H8_bE zJz1M2kEzcj*FOjLrlJIwbH`^9=#hNPN7VGI@LSnh9qYB9ypGv#%+$E^Jz3pJHx%NX zyS@JauejceRTB~9S(ypw%%9>a^kOm$ldK4OoT%g6@myu+i!{AF9L{Hx{&~U4KUS_k z#2ziwwM%mo{{W%bvA}{e%%ATbrlvHMZ)48JW;kp#TIS}LW&LP&J|_5+6|%`?;gp6+ zKq2o$ONjpf+~bq)UTZvGXjHwsX(Sg8`{pc57Y8Rk{Db(`d6l_UJ5=@+-*_WUgIe%@ zt7UXDsWLjB?vgQ)?^O;~?0lM(^BVk*OYPX>TTEtM*0Ut5=y){M7xrC5sJ`5av8w5p zdM2XQ)8sJa9As}Ev|H-85pd+V9;SuMe#elu4+N4Pqz}s!R{8e zmE(}4aX{PH?MTHr(ttE5*W{#hnDQSa+rT%5bJ$)|;#NJ??q zJDgP0*-vk_6drG8ZZS$MqiEFH&-=lg{M;Y#s}sqkPbnpok5GhvDvRtoWSH48UraFm zqL(hc8OL(wov3O$T;6xgHjY{`xHr^SRj2sk&hbQd$Q6M3ij4l9N9$aKb}m2T-%@2A zff=bj#XIBAn%dxh(?r;RFe|E^YOv8BekTowt?}slqtWee>?U2dH~2h!q=v^Hhnlqw zx->pp%8~=YbsI(e89L2Fbb3E5B04al|^;ASUg>PzFd`6s{@R@}w-@}nw zNm=cJhWqWw_Qhwem#Ex962}oRrhj^KjGo`6ShAg@lMfShJA-2eu3Q_L6nQcf37jWk zp17`iMlwBFMc1cw(6yvPEsSu&Wb;a^0kwJ)*c#PMM{J~Lc~;xI``D}td#U4B7ohp< zqbeU69=}?x9E8kd1yVH~BstId^``05pH0Z8T5yV69l5>|+!+L$_Z~(%aaN(dX`BK` z&j69uxchsTd66OnV;h@2exKu8me)>X7*&k)`3~TJ9@UL}B`MN$?WK)rVkOUeod`RM zs`5#zx{2E&AR~pStt7RyvPpL=v$*IEaZvfMJf|N!HzyVFj*6uT#r1!Y>w9Nfq-3^_ z;w4*%FtyC>wh(f=xykmb-h39v<|1iFL?3m2zO>!b-O0XeaAj_PxV4LkYSGaWWe+1O?s?58(BxD~ceW8vuoW?l zVgStJKinN?RGfz9qq`b8{3$n5Jf+tPHZP#>QI@QwO(&tGWZvYPsdrR6sY6t96>@FV zxu}CwsD;7DLDcpYE+V~$15N`YJqM|#606haKE0`~#m~*p^r_{zxv#NzDFjIC^Sko9 zyAN80sw~tvw8s{h z{nX(2_a26$3dCdrN3hKR;L_z~Xihes`+bcuzE;M0^sDx9k03C{PgXS?_ll}gDE!QI zEznhaJuY*5Z+n#@kvO0lYcO^v0dv@NqXwFZy~$EnkM1!8)3r>G6x>N38`Xcps&kHp zg534Nqf}<#p2dkQtDt*Fh4))X(YL2#QA@G^0ORu9 ztAEc(xIdSA>8FX3DLeU`+?Mz_XrVttgHic0r$Zzs>szwhvjgQ7x=%oMxcoZ*06MGW zZc<||0P3Z80bZRvQ)n#~wKAF`A=)H@Vd)}Z{&ftxTH`Naf$IzL{cA!+gd9k!gVaZn z{{Ysge$NrhTV7k|({{u9*L^HRx49Ch3mGqI4sa%tA7_t0%AtKYk^8%dB=$L9<|}Yo zYAwiBk$$17DQskK^|p(i=Y{_O8uV%6<$GS|Bo#MNKjF9k0Ig1sRkxUBCro4x zMQcf;SjQkOVQUt87CBsgBBiz#u;kiUTL$eez=`$XWxky%sku*BrgYcD)Wa{{xcV*3 zq`G-ALc zN{j+~9;fuIof}rwE}`2cjQ4-tnNw)T*QIPD&WwUo5tcalh~w9qZV{in&qEJhoszz# zL8mm0iDjBZBoc##C)A2}_Iy_3bZ|xw@g0?ZR(?FTKzsV1tya32tOcFC$drEYFHk*t 
z@+#v9t&!Z~mZeGAAv;bZRUE3X1HLg-Uq_zh-HHIL4$g3)iOz63n%cJVF_<>U2P)u# zPrgk}w6O+8$j^Q#lzI2qpJb%9Ibq>>*;!IZkTYe~o1AvwRp#)yxrQ`mSc0ew5N9~8 z!+B|Kv`ai_r-8^7Y6ts7VeR8)i1c|-dflsYbWhfYBLY)t0taI;N9Gg6X+{gQl_5A ziC-HHsK-w1r){BHJ29}dY0hwFm5Rha6W8z+(OO%f2lF)hkF)7< z!yNmy4OVU-iOzO=?>s$M^uj*b*k0U`akRL7XIxc zjNIS4tZFi-qk-%PbMIU}r+Iy?xsed9x*wHufIlAQwyvOSmiunMGO91!fE*(Gd(~Ie zUUbEbYi|O7%V|eXa4QPZb5_}QI_SYeRjWCNuyRsd~x|+LXq}<;jl|sh?{pqATRDX}* zTlPK|hQShjV2NZrrK%kMX0+z=n`_?{#DD6okbJB7S4}z%>m!rz}^Ov~~J;EZ>3Sob=tDoB#%TZ7OlaH6#R z-7_`qCU1H6wy=oHM+{@4D8_#Q*0ZlQDH)kz2HgkVmO=9m!?)72VQZ-r$N;p9Jj(pB z`hWGRV->Z$VoBAXy7|W${5q0-X>-b38p4uJ?@{nU<}aBRVTqd$bt%ipeH2%Hp=r08 zWslgcNL6e+?Jh#5e}@_6i0fQ2wbdWXyZbwwpDlMO8-KfiI%k|8YVLHqZAC0&lIm^D zh3tNqHN%C##u4_d zp7m<({D~U6dr1J_x)%z1KU34cZ+eSR)E$>ABY8mm(0l!Ajnu0ENbSdL79AVCQJUZ~ z7}@aI$zU))3iM%ygr)5o60gY)R8_y!2b@|dn3Z5iZoNHyYoEBil35t;;g3&GYS?QV zIb|%)a@|x606j^pYx}!Zk>byg}&@dj$7RN zR@CXYO(WaOZBB*W{{WD9w&gUR3!OR>nXV7-U%mm!{{RZ)FC?E;irsC{v~k9(f$NNa zN{?UF+g8*iWzUpIRL@oyi`Rl%}OSEsr+7Emy=zEAnlp zWL>n-;Eh1krD&yN^2M}|)~_azbWKv%QnqG$bZ{mIswv=~r!_Ub)%3QdteQKxh{279 z89s!HiSO?g+GwvJmMB>YmH-}c(zJ0&XuTQSp@xk+#xCnr?fRV+#-g@=YnoH?!)>}V z{?R9o&{rR2;qSIueX8?yD!-MTVOyeWCtVm7FR>JCaQ_loOW4(Tp&3-Bl+W z?2T*vQ$y6?SnsA0%8it83OsG<4l5H{)i>$l&Uxa!1=PcN6AX+VLyuei+=H{4sAW&4e)tnFi;O@JFEZtb5NA==!0FuPvQsjD7r`zfsgyKCN@8ll_^gu>H{l zFdukh1D^a<3%jP5?8~zZk%-72#fq)b!->r8?ox_TOH|iWtB&%{=|AKtbL=F#BOigO zpzzD=@nj1?{h!Pm0T;Kqz ziU&26>se@zLk}5Or+U=VUqAEE^DEB-!K1*E-(0PrV}&w0g2&svbNBuX)OnuTRlG*z zVDpNf#QKHzj5X+)~4?jzjTd#e?yC2 z(xkPQ3v&r3C&xqE@HOSyZkHwBh^@4_l&iGjITz4ppU@ih+ll%i~?fs@*i7q3B+6i6B_`O~Ful0sjE% zu0P_9!bxvpzj=$k0P0TMe>&Qh@5#A`Cxtm5k*;?5{{U?GmIpuTEJ1c8^<&%lR~{ae zx%rOlbq;Di$#t^)f5^Gv4-LhnDqrdVJ^YLEgU^x=r@y^W_^Eha$4#|dlJdKNJ7WX- z)~s5gm5hfRoG;S3>%Tw69wRSn7-%h1%8pn2pywsM&2`E)j;AIo3krBotI%4@Mf$UZ zO*-dtBSj%+`>MDk{uR>cUkbXTEiIxU8cf`iVxf4wX6$NU91 z#9kwhM$;MM+Gc;26+gt@zd_ccm0IT=JdY1X4yGaOdGCmHeJ{i1Y@)i7;{O1G>HrHE z_xJX!Q8ZZluKxHWRk^M$B9Y$We?E65q=ZuR3$6-8k8F 
zBNHe>62Y9FnM!LOo9w{$SP%aI6;@vQNNmN+S-QZ@v0ctLpJ7mcZXfyN2et|S0NI60 zalWtcFmaS#s6ixa&)H@}>l|4AWYw#EOv3w@?SY3vBLI)Y5m1eBPu|=0x5UJ#Clp~1^^A+TC3Z)lN+lrNxB`}j8^^L znRgtP`%1_&EiDA)@kL66YxcfT~C*?nM0(c*5Q;NK2{oW*RDn8(F57hUrQrA?NL*?1P_cIRQ;Rn@?X5U=L zJi9eJxvysjiz^a#gXm3pbu#KzYB|+8D(5n%`?^bB=UF$1wA)YJ!zh+R+Av!u+Z9S} ze^i~D%7Ng7aMD}=gY~Qvbv=s56ln&Q8-%yFUpjXDdwpt_rb)oQxtSP@tR=~F?#H+1 zTSozfiodhJspq`Y7gAcz$Drw+A+x);OKT^EZpp-o4C zPx$l(IKjw}GXeFuc}k7yQC~0e@;-+Jn)BM7SFeBb zJAB+a;@TfAJwWUD(+#?(`q>Z@+c`B|wF$122_rZQ#;~2I~f41C{Rm&FnnB>jny9SdNWhXXi1t~ z)gJ0MRM`P8?@BjRTfOMrOOy%s=dW5#*r<0?gXCbEIYqH1>NiwnvB9L>??))^SV;;T zslliDk-uNvpHayCs;=rcQ?n(-JxhxlHz~GHbZik%^M8gyKfW>ypTTPHnnReaAdWbRk?=QKlph%o3!p+nR2sY^jNN=mBVm+4Kx zJ&f15{pIs3nRj(6-%*NPppF3i8lJ(&-T}w0MoeGCn|_9~ab~Jl*ifzh(4%vXrz6}` zN0t0C75a)sVh`R7Jw+!HmaLRIpS(7o(t?F>z~FXa(xBbzPFa}`$k_GwrOH;l3(((9 z7ZL#?`_+%NTTHlcV<+(yoGuZ*`fWa`$LZ351OEW4l}Yq(l>Y#QLxGcoci6P{XySE?JIWV%a?)sec_O60kGQ6uiNAm`%Nd&q1)+tA+73xyVYA>~ak}{PT&06LU zifpwFOUO+yTK%35RuLdl&FXl;HOcB)7Q1^YJ;t3P$b)K>bs+QWn)Ep$TzvNIKina| z`t@9-8TnX)=*UHTv~!w~hqL8P9z{%+oaN2T%vf}(zR0UQph_1Gjf`iw>(ZsN(E`ML z$BWDU6wcy4o|UXTf<9+l`W2;EBVoEdtUWP`C7Vuhags{rv@*z1l9XF*8Bc9_42qVv z+I05nLo(a|Nj?6Q%}UzWQ-{jBd)qR4{gtx6%cW|xF_VDJ6Z#%%w3gEY(Ip@3oEp}i zJ*z1ZTP>khPEuOG>qCx{z+0c?)}?sAyCe*UA6}I_J|BZhJMA=vypeiKB+BRT9V?-c zAYL8hfd2rsQ5YG&)!d|m(TD#4TDzf(^1qqEQz@mhZGT_aQ-53{5H0BB7SUZz- z-ROB0GesRJC+hzIL}y3g-4{kHb95T!VbSgzaVObo%%5!3)P;~(LFxurkFRRNzVTIt zohujz+pfRoq?uq3wriKvd{wGyPE^Xu8=^)kTV###lT%q%nEj zCy@UD6wmjM)00U4EwquA+eL})?;nI*ZH*7EKdo`Lx&_tFE;Tv6$qDY~F5f|26_vbJ zcE$}VBWkbr?jMzhD;Kb=>x;N_ z%*$|E;`Uz0?-Tt6Lv^ZK>1tuL3njt)LVJ_k)-)@5eQ?6jT3O%k&fUJAl;u$`b!yG& z(z?6b`uqv5?zhY))Z5OO{{XADL&E)iD#Th`w{iWaS(ohg%gBfv3{;V>o8iH}<%P}Z z{t?Ls`hPm9BVOxMEZ0##luz#AT!7#GZ+hq7N2~4rX4kgEq|^(eL!sOG5HH@HxX$ZGp7*j5Y>#;?+J%w{lwB@2YV@feXvrBT@u4W;9SpNVaSFBjCR?Q!Hk+(N1^8=qz?TmgkW^H^dGBv*Cb|86~`Elu7t+tDJ zf;q0+W(GDB+db-AiIyT!RC#Jg%j!pez8hpj8EPq2k)a$Z^&B$+18-d{VIla2?k zuhOW=G!YWX@-y(|Pj5=jn&_(+QgTQqKDAU#^05jFb}O3np++#!^Xg&~Wr;2}sS0;w 
zo|R4R3aM!r40So^Dz2q{o+BzwS!#O`O2m8(a1#>nQ?JTW#cuTty#GXkf`qy=OjsEUC z_3}MCPOJ7&ex#aala>w2c~*w>s73wdzq35kTd52I}`8h zI+|Y<>NfB|o*ja3kUG8OUEY_E6zLf-h2=Hep_x`-n@62Ewg$RwJYOIMCUpb`KF zIO$ewf3u;TPNI^?ox&Vca>={M@@iG7UQ60b@7T}gwY)SGYu4q)MovDsuBJ86WV2U} zD^-Z`lDR+QS+<%?16CK7=_D4&*^O9y&-C;kTCZZ_(^{T|JPhg8grd~7TN<7xvP-=% z%%}IT@~6~#RQ?^)ETPgg+nbwTGWKF-4E+&$1NfS$dvz?*Eu@N&EZljfPnAYTUMr!x zzP5^W5RlME8pRgQbF^b0jdZB%bb1))My4@K12Tenh8X6yGatV-GznE8!J z{?NEXz#sF;Pk(xb`&gRCO;}(?j{arm9c|R{pZAaH)}qlFrPH5I(&H+CpE~VGz$dc$ zpL*w`MpF0cYa0)WRhPnXg7G%2tUu?Nga#wm2e08>bEg^T_UL(7T(Ye= z-7B}dBhY+Z6xOQ-t0Jtaz>$YvLy=uHS4QqL_Fb1~Mi?eo29=(g}8O= z49Sh-xaO}zsp!{kBe=CoYos`6WKE?{)OD_VUr9Yrxxizw5Q?RUX*IR_{PwwPQoXp- zG+T)7ZKsGwwXNiIIN*aS$_4df9>V`q%vJknK&#yBS# ztJc3gt-6dA5w0=xtBTgl^eMMG)2An+iu~E1aj!{tZ)4>y>x-y>0C9u&PwEY6NMO+P zM|Y4bg~>hHzx{R3cwtseR{HVEgtl$5ZagkYB-XB_YYx2}ZE(((UT$Il;iUtD$Iu?t zohZIXCA-+8XIMdEVl=%kU0zW_4lp&3!fs=JAFPS ziZ$x^&pzJ2UgoQ?x45+OVRH(o+M8D)N7KJbM_Iyt>rf!$04=&IsQpbhO{A05>BG~N zB(HfpZ>QX@^Z++O$i79}y;9pqvuW=n>)Sbb52bmoud7_^SI={DcBtVHebiq;TH2^}y~2^r)0u(D3sKVzJdd zgk#GT`u?XiqL6bVFKF0KJ?F>^LysAQN86O!KH(%jl?tmkq$HTbfm1WeKWx@HsxWD84 zt9QiOg|?gF>$r5~SmH?V3XEOOd#ak`KH;#-xr$wgftfp>D^a_x3dT zoOMt7^A{=HXuc;a7+wB!Bw>LWWFGnIzvqg-c9!>6@=0iROrLt$mpMM7xZ6DhUbJ^H zO!oGz%QS=L9-w;HMGf*W{{!*=16kb9;dLbs6o|U^7)etMDWtRC-Ue%D;edF z{VHE3G0D$RKai(JAa>cNS<8PY;IR9H>c{$319NXQ!|jqCMtuCXmia0VzCW#LzNm{5 z+v*Z|Dn=H<)2SYeM?c6`(TpRaIq?|GMQvur%B?u^HB0gaarP?#*VvKI=ZdukoiajJ z)+ls~U^3D%4=Em}vi>=$$54{i*qCkY^t*6A{GT)b0N*2w{cAez`YV{km-EVPRPF*( zgDL+2bawjInj?vGJwLCw*S4C{7-wxDY=wW<>Qi#>>JvTv>yf$EBbw!dM~hy76V5%) za%sA^h#K_-C_I~F;UQlB=di3vu9{p96&IqY#d6fDwmaz3aaWP8ajHu+MPrj}dBVgx zpRcu9g8dQ`CEUBDZU`I<4uAn%*5lf)Y5H}}qc+GF<^Cr;DvwfYIMr?UX8frmvGDJT zbtoig;ZrrdVmvc8JxbFt|k+pUf z7cc&lgDa8;m2d`v6TQ;48s5;_j1o4T1K z?JhHb2So#cOy5}hBcd(!CJjyvPzI$BOWBoj+!BRijBu~PK8LZZVbbLaP4dd0#oT|0 ztgY24mA**{nO}u4K_#>Sg+h+ z+o(U*xur#>TxzTG%W<2i-AyRrcLBM7yu6BFeDtXHW~rvMCEZ5qi**~RdoZ~!X*X1R zsNF{($%EsA$fI}T{VE;QyQubJCEf2v>W?(JsO1>_)SIb=sL!FJkarwSAvE05XD0)# 
zH3r>;+Dhz|WFw)W?)GPMbnBX{ad5DrY%9J-2!W0=Nyqi9O@3*WlFG@0%;PgTIOo&a zzM}<@b1rN{x03x&lE-ljVLnVUPtE>@((86DFoetllYkFQ)$sZ0URiwB_HiFN7h`= z%DaI2fC#9>wiA!zwQqh!W$rzVF8=^pl}k-Y_Gv8{vX<8hH(SiIFW0RyMqZb8zwjjB z)?9PEO_)VghIjHLM-9(!W0OKBHNc}h3@^OS!h zRo)5XKfQ&Q(2ha;Yff%%r$i?P`!W}dGU0-e=xVdtWBp5%_9r5*h$J}u*#ovtC}=i} zv#-!|Tf(JCXo7O5vt~rlTmgrOI{Jf}r6r6F$%6;m71AJc{;6Ulc5&9Dnc08V+akC3 zk$0(YQPY9=PpTT3>@7NoeNOOm8*;2nX)d(?8xm-93FP>Top{CM%U$ z_Wrw{ADnwiQ@u$y5~Yrncd1O;hl!_i4*k*Ejs(Z23NQlu*G@&%;(_wnz&KSLxgSo7 zDk*LwS;&D+)aUPD=O0$>RIYDdYYU0@pZVtr+KcsWroGy5P*z&A{;inU;uESb|rgrfP9`^!n;XyO^YxM>&(&np2iP zPfEwW*QL6DvuTLR8;pVTH~0!-+G-lk*{;8F2r(~E%ES*X{Y^@5uP>wfHj8ieOHsj){G|Rr;ZBx4G7EzD zbN!~{I3?S6=r|Q*-Nw*~H-QKodQwnd4ej}aruVU4&qublJA(bL%YxrNoGT6G`+=zr&8RVq=ltflo(forbJhEgsf4NFp~(YFP}Sy{ ziwQHhZESOZJu4dGMwv!gNhIWh#ZtJwNY$O6C^%dmTB!{|E(!9OBB;x`H~?3%N;K%M zSm^*ah)tPZSTufbw z_8I6ZCpFOEsfkv!N8Vhe#Koh9Vqb2j>s9EW44ewW=brV{PQ5v+(b$NPOMHB_?3wwE zJ}IrNQ!x)d`ik!5;~S*c|FZ^$>J5cw}#tLwp~Ug z00?f-J1?@h`IqPoTaQE2^*N!n(_F-hsqguE;{{c=+&-eWscT7NW+{CD@VofNqOS96)fJ>ZW&^ZCdNPw zvEfz1bTSN?tg6^Fcn!T=GwVkTQjuu%Nl<~@d0sQ$D&)X(T4GA^6OG>E+u7%j#W=ztrk&Q>36XmS|2t!dIdc>>G;$} z{{Ux1bymf?7kEWze!fPt>Zzi5v2G!|x4&gfI{9C_&!Xq~*GiJv-6giMYoN^@rwpEG zw?_Td*zQz;fMd zYpOf6{{XL3=J6AiN&80iWqbSCgwoBc%1~Sq(z10R{{RT|BW169Tt3AcS{{U-2Ev&OX*#w9{C!Db3o_hUiwAu}>(KWlU$2msZiq)qpC1YAo zvD4|AhN*9HyHN22b4EuoJY?tTftuEm?@YWKWSR*W9yuU`?T`&}wmufuFQLV>vD?Cb z<*Q_~Rp`DRTSRlH>Ql)1`Gjq^HI${xXrtW2*R2~WaQA%Kt2M6BuV-?SgVjm-g<qEmam__#t;nKN z1X2l34i!NMoSrJB&ZsZ1lI?K~%pi4N^_=wlt1G2xBRN*X!NR3IA6sd4)R5XmkYR;2!6t`v7!@)F73~apvDOMk#Kb<#I(-!_Ej#n|sAKa7EKhmY%+8&%; zUt+zB_m|wn)2@cJ%yzU-w60jcPUfIW1f;Yq>^QMs|$jcd#9Q3V&l)sh8GjQ>Ld;`_|O8ezIL@ zvOyDVIm){C9+j)28(Rw-OS?FuM`+GkMbDDpVc9d=sV&;8X;$&8yf?CMjZV=b90mge zAp4rBtm>9`*9oM-G|6c065>d{=p905o-_DXk*9rGh6M5309M0*)z zAwq&nSv^^7%YoLpUlZM|w-H-|ksr?fneANz8J$SBg{FSWzvEe!{tdgjXPZ>JSeimv zf(^&~E1q|L>_wSMwR(QWjBkDW9E{=<%vlHYs`#%#J_yqj8It9`tOxQG!J=qZ_ff#H z2`7=Zpl2j!-!+t}%i@)h;D2M{7PD!5&O+NpyS&;hlg)PL%Roy3>s_9c;FE6@U)m2H 
z!n0&9epuUpKDj^2yBkd&Jv&Sm>JlT&pJ);8P{*+2IO4TY^JOH{pk*H})xz$^Fjl;J zxYWOfk4A=5N-Fczw(-4P7maLt0P+Qo~1I!_E{v332;O&g~zuT z+;fZ``SvyGaU@8DMJo{K1MfM`GH^W(cp|9YtaoyGqF>&D<+Pq(D}Y8Z*yXdD%llTm z-KDXo_C^&eBzb&szlYtMhLs&y;`5NsjHmMd&LsYq@7~@;fmn?;6(C^jFdKD58;NmflANHz6Cn zv(TTV8uy0h(p$|7m{^Q}4r3$(gZb3DoYq&|=lY#U^E-Sym{lYe(_FsTBQAZbq|*E^ zYkIK8*K!NDi_MY~9^I;XV?B7mtM8#-X;zn6mJw?ZC1s4^nC-~psHjdn)@95y8vcv*yKa50-q{PX*82U#G2CGP5@3Ic(&bWfeTlY*Ht+x|YuBJBw#A z%Nqt(9gTYCgRDoa>2cjA1O3@he+zg202=Yi=zJTke`V_eDO(oTI7iP#J({_3G*wvn zqtnZ=PK91q@Uzu(NxGO@6x`yxg-Od~eS4CMyJWkmchm+&9as;>mvbNnQ~Aw2YMJ@e ze2wZoD?J|y7ts)e!J0m3}bWIwxjkYx`eX3CjBZ3t220Jyqk>@9pp6n%Qn9*d9p8R*gu;4muuxI`=Yc zEPbP`Q>NahgP!7~sH!xd#lNRwXs@oWZqo8m%N_B>*+=jX>r^$pU}@1ZZ}To?<<5CE zR$mZW+Ca|>iKStM3);Cmi-|7o8d+O&H#s%*)ob!AM|0yT(Q=jE!>%snxqZ?(KGi;- zs9acF$33%@k~Ye)?NCQ)aV5J>kVw%X!#GkvKAHS#PqRq2>nK81$r(_1#c;(+O}DAv zA9eeuws?D2hg;HPy91nUkpls>hf&Yx_|}<>ZoqH}=%t50rFqB0rN2wbt(My0+e(|f z&4+5R>66ouk@;7k%WH8GjnQNtPUDRIE9EhmDAB9SYbUBc*9VK0YD!J&In|K-%0&S2 z!ObK-awzi{6!d1vU!kQtAECkaBE0EQOJ;g8sdcDs_oFoNj*Pht6^bvHDbrOG>!ZpR~vxelh` zLR_GkJxRk4#zoul{JmNr-uG` zIEq#a{nN<&jYqI+;W2dIO-r%}(zZ@A+ECQGe4sGX85^W#+MWca&x^$Nk0Py+TW43xA>AI3L_& z{W?`^h(zm)%SbLga~RL^ud8(xMe#I`n*Pd_H}0f)eXWJop>QFzh31g(HceLaawe8VSo8EJ zip^2J$30k7r1gt9%{RmM+KwF~@}lo7oDcrJX>8H7GD&Z@-H!O+8o8@!-Y~l-&(mf| zyGH>8eulXT6_MFo6<|nZJP>^iCn&>Z^Es93Jzv~(3$OjMW^1?dp^Ri8KYFO&Ym?oB z965#_m|m^*HI+ptR&7|dN-5lveg*)?zj}PvQcD^}PT0_4P;{tsNl|ubVQDC$kiv>u zN-P4Y!3WUhm7Kd0{{S&QwEKHm?w@76Pa*VCn$t}h3tT?0bg+;47t8WXqkIlW5Dh^0ff%%{5Q%8L-lq1nJ)>y|$WgoAlG4$(;rw^=O zlm7rr(B%IBfUTni(O>d0bAE;EOKma`e#~Md51Gj=kH)U0^q~SnY|9YnSOpF41!O*< zVWlUPYZxSRjO3H7vV_I1otq04y7Xop1yP=LVdcUK#twPv$JEl@UP*Fs!bT6ui_m-fnypz%OQ9EU zRxKqIs-@Tk1d?z7?^z5?X0@#}DP&JJ@Xp^? 
z#pCXtLfr7Zhv8Zzmga5A{3e9AdUb#+r$gsS{{TIu=klz%?j^XBe9l<=_NdP^WAmjc zHmnLZG^UBms7JwWN~z&TsjF7-pi+?_$@|hbB+@0k(n8XWskr1WId7@-qT=e}+6}%+ zfz!-Z-CKM<^GCC2({6H1lJ0n{t;mm=SYb!?r|CMpscu&8)h;58?pJIqgq{M2>+g?h ztz-6yW)duMn3tE5Xu%bv?Hp>1W6n51w;B2m>srA|Qs+*{##AMXeACh=U8lEu)g!_3oLaGCpI;CpZI;e-m7SMJ!g!eXQNJ zv}!+f8k{it4(6k87rTXq>M3W2W3({c9zo;SAHuq1<9~ap;A3*y_-c!mUhikRBJmx% zT|KUqr&*W3ibq)p&f;^DTRx^gNx3a=Z{`Tb9^|km z%g5dKWjOkQSsr$=sLGMN#jWp2+S$V(>__2A(u%x`gxsTOvqMnOw9Py$7U?g}95l{) zFkh5swoOr<+B@-WHO^hO1pxOWxAUuUThDgKLAl!;7OR;jQ_J^Xr>#b^Lk*;nJf39M zuf4I?7{=->xopnt4MiI1Pi99Ix)doAtWkd(;0%vS=XGo3xYbpmA)|u|o`)4`=Kkwd z%ob6UfN)Ptk8}MhG|>@vdD5SecR4}AisGv`KXwaF=e3HI@e$;sA9udSRE%T2TrPUm84P}72)As{1-jOal((=(KFWv6C%S{WrV`fd@?jMza=*h< zx3JZ0d`YAwuCs@@QcoOmc*v&3rmm9B9-2+ee|e(=Eg#moKQiLxR1a<~_MEhlt_b{l zS7YIQcTUhWn})w>E@funk3u>CO?F1ANQXkEANP<~MU$^wTSh$lmJ#EDjsWxr6-@Y= zO+r&`q}w&sz{cV}>T`^-?bf;L%WIDkUtVe&d=ect7YE7nFiv{x_pXOWxVMTQD@3zl zW-w*D=ogNJbuIj=uO|J}b$xO+zQe_Z%%lD%H=B-fZSC7?f$rT+WLzejVRJBbjws^6 zR2-5>@AMt1HLnsb8q-N)?&JoP43d2Z_}4veVLiYyT$W_qcMS4ReALQKJ1tpwd|ahF z#y5UttIb8P?tIHA{ILinS$HO`+R10ES|2?;yOkd?PBZURG)OK9bxS5TJ|LKW0-8XUdO(F`t@{_Wq7lic z-MW>9)c)c`!N@(v4GsS zp}Mn*+B0!2)Oa`q4huHkc_-I_Rp6Q_Sb$3?>Q=La7RiYu!x7P>ImKE2)U%E~zHaaK zSDgM;l&0I&pJ7F+Nk&a1yv@k&;zZjT?e^=R(x(@5AC)6OKfGHf@)gd*sz_TFmQuFv zGmWG3G}#UH*Wb2{aNV5l1bdD%No%pt@u}$zeGOZ`?K`;SNH)VJKmpq&hp!w}eKSCo z6^{2(`$Al)W(WxRln$SkYhO#2-Q$fn;`Im4JGUIFr+9>qJN`S|wLIod79=?Z>FDnBn2e?9u1bt19@5N8Pu6r&BeZoYGm% zBmo}Pw?-9yQQN2a#b$V`R*y_;X*`6PWpE`jC_gA5AIF;HX7O&Mx}*8INYpxQ``dCc z{J=kjTuX@jS!)trNLuC$7Rqqmxg(~3rE;t{BfOo_v&B=YrA~&DI*Vnumf0>e(uBx( zX%m*mVUMk0>yh2x=yuk+cb32gH}kQ_mivz%P7g}4s7VHwZwzEKRuRX7io`qQfHBT{ z5^I>dxxT-<8g`tUU~if)y14EvY!F$y4@j_k9t}&Vnn4MQ!0u0}lT6 z8Lg{L3TxeN3n<~3hGh~Gyn)>PJ?WaZjrJXBEg{~~%tw|;$snA6I@6h7l=)d<=A!3q zFJm5xxqGL}XEqreM#TAU%Z=mKn{#6=vju&uNYBV~ifN`~>B%;l*sVb8Q>B&6jNu@V zlwHg=^d7Yh1hmoU`Ui`R{enR=oayy?&uYW7RziYPp$UbqN8EdjqR(CL?DMm`iU1@f+D&f_$(E8N( zZoGc3n?AC=Wyepvqi3Y;7^PeWLmbdn7o&hkzbeo2lo zv=RX%cEQDTKV}GKdR=z6WV 
zJCVb3M-9`V6@1(zP+~<^2cf{`smT(`*&idG#Pz8o1tT!rkCgq>+O1M)a_n=~<&~zs zXh^!2$Uc=Miq1GW_Nt7MTRv3oPgA(X7!){8trB>6$4+yGyrHe~C+fD(K{ZGLX0{{lnA$0IgZ_ z>5A=$oW|eWYWfN|s#A^K(>yxZ1qZ6o%Sdc3;%H%SE*3vDQI-JxMsfMn@kt7?`!%BF zxh!MJ{{RZIx=hO0I3JB(wy;=E<~xtmirTI!ojBb};YS4*D=3m${h{Cj>RB8U!WQ6u z6>=rm8|Ei*&JQ@I06WpU9qZ<+)SY=ZCVd4eai<d?;ZDq$=OBJm)J^N3HCNCR z%_)jiKRapo7N;CP@E!1wI!3xQjyz6KC(S<)!KbC3UTud^X$o~LkgZ`asKXoSTaw>h2 z4p-5aF&Sb5ZX>ds5%?eRrj+@y8}2{dJU8TO190rSWW$g4hJRXVY>WfP_gm&4<43Yg z6Otw;4Tj+Q8cft)b1LBHZ=w0u{6zvlMnJ&zQ@DSPKV-}2CEZK9p%0OsqtsIF2fZAY z9m_FYO##d@GJOy9t81%n7`nHPPxjN~Z_|p(-A4L{MlW9*Er5d&NDZm@6bFxHY|VX%tdoc%-#){7K5vDb%)N-ekLv`>X*!k*}er ziEUjRv!|xNkM(1oFB6~N!33V|7|-&m(rFqr>{?p{40^Ii8xRy1a|(iy-|7 zu8`OQKXY*#`xAJmf0Zl1U)k@u#7W^jYBk8R^9QVQD}P#}E|-4? z`s+F4`o?R~7R7U$dz8=DX{!QFQrUmkIbTM@57wHD8H}(M7R?@86|~b%u*DO+tZ=BS zl26jQD_;&nXNJ^O?rw3BX>K=d{{VQ3><*zh8*VJ&lz08uQhzF>(CG3;QS{#+H&Bwg zgZUcP6yepJwJ?~D#s$Mpi?Zf;w8%R_58ooQ8urH3kJxSe*+1v3=W+b2tN#Fmcf;;D zw%PCIyMCNgKj9VA9lz1xwwK%O5+VI^DasRjG-365$8>V5sohTh0O=Q77tI@5;_R zoY7rV_&(A>{*SAGdkJA9^#-d+;JrwH$Gn#J1Gd?4PvC1mV;YBUQ8o_H#dJl^);g}+4R`_n=|L#w+%4KQ4*V8{Ue^XDiwRt4+^vKL(`0i!u2lTBJIvF`OVoPri5~Kw0m=Xs5;3qjDe4p zex9^Rb7`W&%;(OXx{js4`qj=}E%v!TnQ}@+oT`8Is@%C=nzOR2R8^rH&!X+>W@|dU z`pw~*QbI2Xoq(+?4-!Hwme*Ic7g59qlv4^<6xr;ow756ioz8yjW2vmD@BEv=aTG`y zPs**xA6ne;I1_^4r_m z!Zwva%F3i;(1TgeW1$@dX8T-Eb0ofG$0RTr#v^b-C5))MN68+JB?wYaMsRF`*n(0PD5mO{{ZXMmRA=SI=oS8YQY{YxMPfi?^Aet zNw>b%B({=uo;c9p^in-ef0Z|0i4~!i2^GW+5XhMv1Hq`Y?abfE)!Oz&pO#sCi$xJc zhvq6edwpv^TfRtb{=sTM-OQv9(=|2TrEv_V&ekB)%D{<1&f-U0)(zFX@my`61<2e; zInHYul6=!t6N#{ zWn#c|9lr|77h4!%)Zp(HXi2IG?U(HfxfGqTj*NXztzyq{b9%=k<7!- z_Vxm-v|*uElmtRD0RR#{f~}`6rqyFQt5#gnFC$Atu$xkn*8dNus!z^XB4a^)7kVwuk_zhw>XL!xOy>m$d=I5j@dfp^pKkF zc|nkPu7kpISzSHFuB8e7&-*YEsA8L(bQ$+Gm7`tiesZRr6qB)Gx+Go0>+RCIeOJV- zKAd#>U73)l#-mj?ae4}qID8o0+L2@Hoh2R(A2PamU|n* zZW+)4k1Vcy#UsC^RJ>$|Phl%7f2-ukP$h~PaN^z-m`Suf(`Y}!mO<(1_;gp$v=fuwq=$z z8ikC`eWq-ZPF5d{#Y;{5G0>(m=$^ zzcFqj+aJi)gQ>%3+KZ0Fh9o{ms|s+HDv{?&&7(@z 
z@@-R6Xri7eAa@zJljhxnj;D|gdez-SPA;pa+T2=N`E!@uESr45-2$9;SdNg(6&t-U`_pH;B8dzieHAcl#TJMcLpKXm$5H5V@W8^SZ4;Zb|vfzW9; zFCD3|xh3x*C0R^ZV}dY2@1Lz@>bh;7sjk^NOfCM*Op`9seo#kWL!8#)Ji8L!HH*uS zVJ#!#Q|-X165auB=$nybkhGzR%X<$@{{W46?rIZqM{KAm!;(fOv!~iiV*dcz3PrgY zh$3KTpkmnhefm}(hcp9muQj}upK7du4C{7O}zfC9SJE zd1hB_rPOn`uUz1B?_7j_9ELkcUgjnR=6IKI$C$(JPH*8XA01(NAmB;YlVEg3canBX4I87@tR>RSo=6jxPsOZ-^av5d| z#fiTB8i|TgVhti)ce-El;M~EM6d3Jd-hEb|Uo+*ou9uHhPOJkjTk!haeC>ay@^< zRH`2qY(2G%C)plCRF>sX~uBJ)C}UTN@y<&(Cf z;hgL$eqt9R+g>~)SlI6OqE*C_iemxCx5Bq zKd3d#ss8R$euu4Ex{Y|!UZc+(8cp57!R?B4#ALsI;;R{f+yLltn&EV0rDM{J`IXDD z-ZGp>LHgCTyo(rc$)ChlDSA|qlkT294@%v0TXPxEjGfkowaw%<@MRoJxP?{Ea&b_x z$vwR*GD-aF`&jXSt>!o3z^w-PScA~=D=PHd<8f17&Q>j3O2;VYIOH0ZP+BvNwM``Q zn{y65MOhL9%AM78VS0S5<-}eUdC>vLrsk(*Sr0rKji6!oern{YgPe6*owV_k=cCYz zG>ikT7y>=$Adb8mO6QDR8QAS5sH+zP%4ydfh8%y5F7D3kr`|Ftp@=m906IWF)<5T6 z4smPVie?I%i}zTg2c(LBF-XH5X{Kg9S&!7y2^~fOtT`dk8>vgE^>V#|9)_G5KZTkz z`HF{iChAU_V9Q=?y~@ZxKmNTSbZ)9t`l?Tx}*jVVh(Z+ z4GxQ1l^Uu}-AC@Dbw!_gH&pvGOOtM-$UL6Dql$}l8>y%Wn>^<^KD`YE&)4a}sJBtN zj!_c_&j955bTn?LOL2q9_oH{A{ONKaxi0EAR9mRsPmp&d-A{_@crI(*!(;ChOVO*c=teb$#AQ}!Fb{YJ8h&suLm$4^7jw2I}} z=9LA!U5zPi^-E>jmAH@5lr>XjhF&Jqz<*eq2lAwf=4dhHOz{rD{K{%~)Qf;GtyWL= z(BRiZTU#0XHoAS<8IwZ_ISj0$w)MfLi+zJ~IZ@byTM^va%DWbB5B##6e_oWQOojey zXD~mY`IP<@x~rv`Ev2I|iJSg<2Of+8sGo5^G->z@R%7bDii0=epnXjW+d0M(tI!2bYBwI!s;xiOYsRXG)0j@73%%2ABA z)L%F-{p@4?<>3DSg*3vl{{X9!GJm@w?LVJNUB87itu;1@l_>Pkj!R!G;%joA>S@1Y zSw7_d07Z4d{OZ(q62-tdU+$hoNxIfy``FA!)jEDvx|;4`I!~(miUuj5;cj*x?#QHm zxWy|+GYrWV;r;kXKc{-Uw(<}7^tcYc-9O<`JY`Ss?3X{@i0!SEnmOGx_A}biWauMv z=vT{c@T%=*=-7zJjonXQ(}P;4%Wf9o(E3Wb{b|2vorn6Et`K)#qOD0Y3DS+VGg9*U z*pVFb$BxIRQG|P+YTWh8N0H%@P)yGRy^4vo-C$>o-QVRr=n`^ay_FM{SE{x)- zg@)}JhS$UMN{>F3U$zeME`+eip z4u49h@IYH@w#M{fkLOd%(o6I><%a7Y^Dx&#mcd%%SPu#)`@u_>K7qLvS+xUm`>3%5 z1D%=c?#H>VrX{(z=GN(luslo-Ld=9mjGu_J}^R3=6Pb=kZ$Drxbvy_@z-u{Iox*Z0ib0)KVZEX(NBR@NX!Kb#S zwume=i1#=cXBD3{=br(#&~0Q$zlulu#-O#+FA=vas*T*7kyFhl@T>X$i2D_YsO5TT 
zw)bnI?2SmQ{(m;<{K-9AwMTQR+uDMWe)dEBDbRgQA7Xc3^qZL^{{UfEYSIfUbm49B z^#1_s)~lT>M?N ze9J#p`WnQ!wl>ok_oTjsO+_9{50`hS^{I@xU79?pQ+Km9tl47J3tPKK1~A)>2p*pG zVtcVZm-cycB$0v_bDh9vt_kT`&@@u8S><^6;Pt1OaWY^$*YX#?r$S-0&VyJ#dT%!ii)@6~waF|jJx_o@8Wm6+Uo&yHQY52a$? zGPTNzvl^viT;q@DR_r$1+$fSk8VnYb`=ihgTFiIzAC8@cU$KJS6WGXrp2b4EHTS>aTl2qD;Sd?fO}^>%~Z6}udgNZZe~O-H!x=8gI43zoLIqy zNg1F0S}c6fk9{@Ol*zj;BmtkOC$0x?Z^F9y6IM87R;zbr2BV7>dno4D=`6>u{?3y)@{n_7V=97-SQ|6 zgOH;ibo+{*f?UIu=B&qG>QOYR*1lrS9J5GI-zTZh^r%SZTdSa{;jwAMqTa`u}gf*I|)L^&9+4g)pERbAk<87Bu{+Qw#*u2K^bD|RF7|8sH~f2idT{+vbLJ? z>{-U8i#W;83O#Fn?#>%)SGKif-55OQ?;QHoXoA|Nw~>8lgK|Ld+vQ-4~nrP*?jhY1`w-s(k{?x)*BalP}5kI~D zA6kEzajYFD;`MF2&$hkTQttwD2cwurAavD#0{Vo!KK8hU<+fnNL@N`QzH4n>PvOTb@{8f%=MtB)4|m33#L(4ss~nX$l#e!G{6TTKTux0obT<>RT( zN@eY`=(;>;g?`fP7|5jOkzcGcDBmM zr-{39QTLKbd3QFUzqpP&IWDFa{&x>Lfcul_&poTGv5-jwQd>o^ykS9h-L-Sc{Bc|y z8Z7rM35wx=*;al!_O7BhCWQ;#Na8UNK^RfPWM>$tm%f^3r-hwcK6TjHYlpS7D{(8_ z5cp8Zw8RM?H=+8`d9Pd9nP8kY#d5p){MZC=J69)kZbkAej&53FRAha{KBMw9`w3&S zn5?r2-B`lqhg_V4&!uJcc=b2>%jV@u$5C%Q_Yg*J6r{2{Zv*&-Gu!$0#ZE3QjEb=# zON8hc{L98MkHFRp^2%k;oS}DaF}dmLMN4z3*;~r;iNuj%Vr_>65((t`aYLbYbo)r9 zt1ar5UR|ziVl2ux%PYu0m!5Nuv`H+|M`qJSY_i0=IW=mVx4h08n9Mv|nwQA8;y<&cx|z4G-WfsQ zp11(=2Y-6u+XdTiO!1nvJogupGs?{)bv^M>BxXqhuTpVSCY#XttX+SsPU%>S8L90S z-|bQTxX2(U<}u@TJ+oRd=>eJ+?kO3uKG;ke#Z_{ohKi zUR2xN6D%`?WaRAELbd!6SluL=eC;K?gLG`ik?IX->xTGCZ*7?xGYn)XB#p#!G4;(e zMAPB8wrS@{9W2R~R9t0`@Tm64=DRyv+bAq81YdY4Dvpx3lsfu`NPz1k^a zk;pa>4@Mjc>!7&v;|}eXQ-fRqkjkWvBvLXx7*^$;qOhcMJO1s+SwGqIHFJubY;S|D z>}p@3t*PEJUt3$EG@D6~qT}X0v7R&atxYcXeXB(>d2*{3DZ$zgsT^jxQF>nf>L$ZH zpkhOeB96z3(bDIJ3rQtZF~qqb6O!2P{{YsaP+rN!nnF&lGns!#)YLD;Vq71V6yE;l z_*R(-ZzThtm?+I)YE!!D5r^Vy$$ZEE0C%_at;9GAJ%w^jD_HlTG^fhCB7M~5CZPtG zb`+2@M?gE8#>nWnmZx>(^5=yi7x1klx)|Fa%Krc=%!yhE5;h+<-eZpR?=*d*1+kRf z`qr?GmZnZOTAH{!a}nM>JBo{T8!)!U?xw0N%$%ts=xSkjABoUExZu`q2@>NZwbZn? 
zrFfM;5mU!?8~xWGLsbGS%6y3fdaHG)9U3;nAqUXaE3M9VMx6H1nykUNbq1JWR{M>Q zV_ETBvYc-J0D!mn)wP%&xkKn_%9JLxL@#ScM8}kP@*X|w-yNzlp@w2@!_&B_7G_?Y zidHer%csriI5p1j$>?nyOq07R7Qw;b`cay&(J>fS5)R7a2lJ}Vm~FhHe|5Y4J!_s= zX-8wS7>LL>QWmBbC~)Hx;TdJ!8QeRL;-ONL=xFLiZX2lGOeBt@2hh<|$W_TVP#UAn z8>r+0edmmm`PG*q>OV3+^{PH~_4gH?9-NZ0Fl8hW(E3x1dNVo2yEyKNGm%M}kg{BS z%Y>J@_G*Q)t%evrl%+|pLr7Fo(`Gv~xv29=np}n4mo%HIJ=A87K)ELGN?p|!>NipB z#d2)Xkx=fX-Aj-zNt!e7QEsDkKV~bE=94uZ>NipB$8u1`f5MNK_*}++G5J(GsdJj8 z288t%MIe0YrGLD=DTrNv)wds0OSprTI5dTU`^pLRUJYq$tw<~;rFjoVEIkj+QjJ+A z`EIS%zMuViqn!QM{1{V7VfWKNN~I3QP7!?#AS_>)CVO`DrE7UyWUMzU>x>`Hvi?K& zPCk?ec+OWpRXD8`%w3zgV@<;yw9+yGC*pOnJ_v{AS>p;ug zftm~!QjeBbi25+drkg8;8Ml>WUrN?8YvDPiMmP7r$QB(IX_ww9Nc}3z zT7+z*N2$!D_L>!c=~_qo8t?DN61nclwElFD;oB)!F$MtlNi$ZW()U@*DoJjvaGH=h zf0g!YhE$Yi%O^W&V2b8s4w{PO{{RRks)b9H-60R}-l|C(Teeh^zN58py%pZat2(xm zm7g!J#@5Y6I#0jb6xX}dBH*C<)?8YIsm4nRv^8!9S;ym=xXKpNO&1(hHoe)&e87QhdWOwJqX};&bJgUY^K8wG?0wyR zMK1pUE>P>!IjQ89DgOAwe|QWZ#-MOiZEO&ERuz`p94yK(CnVA6juzg4MsCS=5`s z`|Z$rR)v&NTuJBNNwyXK0sLS6YR?Ss!sBT6;8umJ#;T@BlN&fB{o|hbtEQFg64;*R z38#W2whGanLZ|}xWB-2#G|Dv8tEcFuZghNadIJ z5U|e~&kSlOxM^6Z>5M9oz^Ah!TcM6dEWDAsq4unkOrlA2=@HMA+oVyF7Lqyq&W+_< zF94t79)_$n?A}_u5-f*k48d`?+oe>whD>?VNg0sxcABSUtVLpd=>)NTqA+1^ynyfz zsHyb}ozW+mBS?$PYTV!++3Q)580DCG#DUm)(Q?~mAy~4Gne9$BY?>pHb6mG%B}qKn zX2BU7N$XeM{Vv$8z}m;}FSs3R87~rGX%HRVrITm3n$9F@sT z{vxUax-!0;ink5hBK-LyIL>-~eQLD0G8RFAdjrz3moD)Zqt7ha?gd)4pUH%xs0VQ- zKsf`@8m8yd`=HaAm;#{i$C@J2W{KNVLTmMxcpNJL5q$`#B{3zjK>3s&ee>3;&he6| z2;oO9>S;&LH^_FP@sZT~3enBWRwpLQD=dzpD|O@mSoT_cEp2S)%`Ecps z#B-kDdV2a+X|j?@7{;Ju1cG{1wzGx^S!Ovo0rCiF&tvpGD2G2a*~wCK_+-jA_JZ1c z#B&>RzTTpV51pqgkM)qY576|fUg0Hyp_i5i8;@VF^s4r!5_GR;?g`T*Er;WzQqMXQlVqt(q{q9Pn2h8X~=t?N;5KAR`X#=~WjP z@2fiK%8Vn*^^^QOjV&&A1*5k4qfwBL+n#os)Sps#kTM}VIK+gVx#RKlthQ*Q+Z0?V zKP#xuYL0nAvmspkyANy^IS1x!Tj<$FHFl1X(X+*{?@ zzXCSxj+LuxrJ%JflG3(BsUaUQ>{s5f@@I`iueD)b0Pbr_);Po+^^L6X`fwvfci zFz2&+^{2skV`&_KI`5Ho{oY4CYfCGhq&1&x4nM$m(3kwi!$N9)(IskEyt@i6icr$)@1q~wm+6xI7j_F{SR$1Rx03)Fx0>dH$4 
z!jZ$fdVkAhTygJJ7`HBFIxtl#DwB79WrVSs3zKnUBSsv4^#K@R+;iTpLLsuZFtVXc zuBx!4|>0pPuaC8E3zB6i_TlP4r7yv0R9t7vJbLJ82P{(9YN|kf<5XBOGW{f z<*+4>a8uW>TDhkSczn4f`P^=d#sDKdja)A+UY9;$)0Ex&lUcmUZno|lb-)bL4lx+@ z)ue57P80ioS0QJbOl(yF6>0B&VL|#490$X@|gV5(S)m>eG z#I)Rp0ANU`*V?k>=hYg}s--zd(N@h$CyETTj6PA4Ju94#UXy8AA=_|zDHs^+YOVe1 z%%a)}_iz6ISs^q=5nl5<<5~%G8$(wzLiaN8vYepw0;^l!31w*=xqp^8!N~x7Rxw%b zq!HT0WQsBr4w)ySk5YX;wZErn=S|3wBR4Jg*-lCJ2cXBLH@%kTF26MFx^DcY zq*Q{Oe9LpJNhZlb2eS%|M2XC8<|nsmv2dSig=C8iM^Arh&kV@fQbGPzbtB8Eg&8?U z&wi!&?v(xUgX@ZzVTR;Cm1hY&-@HI?Q^h<=VsDrj{lVgrdIYKe04|1-&w;i<`O{<| zq1-&k`>%ESRWWoKe|iBQ-d>;mYH*R2cf>=lw`#ssBy;k(vPX@CVWa)@Uj0QxhieD7 zZbwx;XpZ-5ZiwwCsvd@>X)P{>To`2j@ab7PX&dvn{7X;kbV!9R7of=Mr?p1*tNaa* zV^J7pfw#x=uVTIG#B(gm#|_+5aZ^^3Gv|~aH8*rsc-cPiW!#{SwJ#NAShf=1Z}_5DZV zRms8UgXl&mq?1E-Wt;~8094z***pSjUv zt@s`(2_WF{OWBpoNxF>Gd80IP515m68K|>HY4VGhmvtMz8jCb$j!?NS>P^)iX){lh zE=!s-QD%(M%06Ok)SIe2(q@iOxgkEZnIrC;Q!`1LIY4tTKYf212mLd1{OOsbAR3}< zNJ}5Ss^6tEDcF=g}y9X}@UQ^2n-v zm{Pd;Z?>gYizAR2QbvL{$}>iP*)?gB0yD6$`=*d=#uDQP(bqI7bTe^%QRa5RjGIB) zF;lGD#@Fh9T7n5aYz{cVslHh)%xm}1?jK6!hKojcwC~HKE4q1a?b@CY@t?w&?p;Ri zqNaj;F4yD=m1iS3sJU1IsQ0G{8BSG*KU$QmEI1?i8j0e^D!`9Q*3E2mIJm6=*=GL$ zS}@<<0;{^Te<|fpL+ejD95pnaFnJXc)WTYjp0rZ1$4-<1k_Bd9#G;jys}S8sy(VZ8 zAY#BEq$=IxjRf+Ja);+^``DSKphV?4r##WtrrWHB9M$J zVgWhlpg5*CC5c=NX(*&2a7I1pbCtz6KJ-un zw;W=VCm8KXNk9)~%CE4;>q=bg+wYo>G*A~Foq6C6w5;Yv^aFrBDTMa?=%V0H=lnE!saoKjnKjkGmmezU9dXj4 zjZ3f!xjxkyJC8LiQ#51bz~?!mcXHghrFK|J+9S2{Hjlb!NrIy)IVMgHY6&HhVhoH3 z_w7vtayks=s_fU>J&|GL!n#}y>_93(>F-gaMHRTn-0ph+0CZF=#PvDj9cj7g?Mmiu zrQOStu)Dd7%Nqd!d8fr35zJGUJGO?;YNRAMZ*hth*>FMR9yzDXw<%Ot=4II~QZYOM zG7EgCxT&GJu!`5`g_>Mrliss;7L&|xT%1y$orwb!`I}|Nyp*nsH1nODs$PNecx(DzN@^vHt+1+$?gEfDB~Tn>(G5lT%GSOJ(CK=^jzlvKu`; zsw;wDa>h2ie6#N9RHkVmouCW}&I5YWqqmkvZd=MEZavL6HL-HUN`CB7U^S@+mXVkY zFMRhkKAjMG4b{?bSilIi3ma>AUNi+|VSwJ11XnUz$%#+y*yMpvnMJN)b4D|dJ-$Hm zxV`h-<+p$Js@0o_bvIw(ljlNxx&Htfr*`Pm&nm&f8v$M{3?UL?$_9yeLv1>Tec}{ZEv26 z7>`nFT(xrY{L58ZZhe)BV7U7#p)l@A0R)r5=B>>e$r`%Ke$2l*#!qUXZlha8wvc3l 
zoYFP>$iRKrfCFefs+6LYo4BdejALm#qU7>_YDiuW-K=97JxxGnM2TRHF>p@O+3QvQ z(nkLPAZ=DT!0lIb)v{P*T}4>SFWhhL`-4+=H)QoK*6SVmvg_#aBysGye>ig7@%VaG z^u0E!Be;F+zs2kS0N1RuDTRnKZv&$WsWjeT;g2=b?!!dRdEz4N?A48%opR-v%Mb-S zf$3K4b(1>(0DN}}v1sZ%sm(s3BIF>z^9Q&HD zJ*i)sGrfP6DN}1yO*;2{l{92wK^v{)=liFF{{YvgM8rP_%9HdpTW>sYeJW&~)NODb zeGMg{(9&em1(mV>s&7urY7xCyjO=mMPeV`H6ys=PjCB4b{{Rs~0;m$`Z%&IU^%Zu| z&*9Km?xczST%q>lAHuCH@Z6ER_cL}Qy;0i4e1uNF?DeS3h~FtZ`gav1$`PKcMr^Ew zPT9EhVbIf@DO?Z;tjMn9g)uMhqW9`5cqS8n$D4@Xl-&TnF zgT+2uAi$W;)9F;Cj!2Kpyn2epQEPLioT*RXc4gCTxMmqvLFjm>o;Odvgb!M#CBfkB zKGk9?$oqcn1dG98*TP0bmj zlsS;>(VA{(%^aiVLy4rM%_e>5+=8V&@`H-|`lQd?F3z-X( zM$k{8^%UIEns#K}G2?jepRF~Se~os~8jR+ip`a-vnBO0bNG@d@I1D|-MYRvH_N4jO zq0iQ;F2uN}xiM&)lC2r_Q%bx40Of?iy|aT*83*wwA4&s8$JVq?=1v8{bv%4e5hK== zwY0;D3KQ7XRh8ra097jwW6ddk>Mf!t^6cY}1x}X2q3y|W% diff --git a/examples/models/dalle3_concurrent_example.py b/examples/models/dalle3_concurrent_example.py deleted file mode 100644 index e31f1cd8..00000000 --- a/examples/models/dalle3_concurrent_example.py +++ /dev/null @@ -1,23 +0,0 @@ -""" - -User task ->> GPT4 for prompt enrichment ->> Dalle3V for image generation -->> GPT4Vision for image captioning ->> Dalle3 better image - -""" - -import os - -from swarms.models.dalle3 import Dalle3 - -api_key = os.environ["OPENAI_API_KEY"] - -dalle3 = Dalle3(openai_api_key=api_key, n=1) - -# task = "Swarm of robots working super industrial ambience concept art" - -# image_url = dalle3(task) - -tasks = ["A painting of a dog", "A painting of a cat"] -results = dalle3.process_batch_concurrently(tasks) - -# print(results) diff --git a/examples/models/dalle3_example.py b/examples/models/dalle3_example.py deleted file mode 100644 index ac9ba760..00000000 --- a/examples/models/dalle3_example.py +++ /dev/null @@ -1,6 +0,0 @@ -from swarms.models.dalle3 import Dalle3 - -model = Dalle3() - -task = "A painting of a dog" -img = model(task) diff --git a/examples/models/example_gpt4vison.py b/examples/models/example_gpt4vison.py deleted file mode 
100644 index 01026171..00000000 --- a/examples/models/example_gpt4vison.py +++ /dev/null @@ -1,17 +0,0 @@ -from swarms import GPT4VisionAPI - -# Initialize with default API key and custom max_tokens -api = GPT4VisionAPI(max_tokens=1000) - -# Define the task and image URL -task = "Describe the scene in the image." -img = ( - "/home/kye/.swarms/swarms/examples/Screenshot from 2024-02-20" - " 05-55-34.png" -) - -# Run the GPT-4 Vision model -response = api.run(task, img) - -# Print the model's response -print(response) diff --git a/examples/models/example_idefics.py b/examples/models/example_idefics.py deleted file mode 100644 index ea36ba77..00000000 --- a/examples/models/example_idefics.py +++ /dev/null @@ -1,33 +0,0 @@ -# Import the idefics model from the swarms.models module -from swarms.models import Idefics - -# Create an instance of the idefics model -model = Idefics() - -# Define user input with an image URL and chat with the model -user_input = ( - "User: What is in this image?" - " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" -) -response = model.chat(user_input) -print(response) - -# Define another user input with an image URL and chat with the model -user_input = ( - "User: And who is that?" 
- " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052" -) -response = model.chat(user_input) -print(response) - -# Set the checkpoint of the model to "new_checkpoint" -model.set_checkpoint("new_checkpoint") - -# Set the device of the model to "cpu" -model.set_device("cpu") - -# Set the maximum length of the chat to 200 -model.set_max_length(200) - -# Clear the chat history of the model -model.clear_chat_history() diff --git a/examples/models/example_kosmos.py b/examples/models/example_kosmos.py deleted file mode 100644 index dbfd108f..00000000 --- a/examples/models/example_kosmos.py +++ /dev/null @@ -1,10 +0,0 @@ -from swarms import Kosmos - -# Initialize the model -model = Kosmos() - -# Generate -out = model.run("Analyze the reciepts in this image", "docs.jpg") - -# Print the output -print(out) diff --git a/examples/models/example_qwenvlmultimodal.py b/examples/models/example_qwenvlmultimodal.py deleted file mode 100644 index 561b6f88..00000000 --- a/examples/models/example_qwenvlmultimodal.py +++ /dev/null @@ -1,16 +0,0 @@ -from swarms import QwenVLMultiModal - -# Instantiate the QwenVLMultiModal model -model = QwenVLMultiModal( - model_name="Qwen/Qwen-VL-Chat", - device="cuda", - quantize=True, -) - -# Run the model -response = model( - "Hello, how are you?", "https://example.com/image.jpg" -) - -# Print the response -print(response) diff --git a/examples/models/fire_works.py b/examples/models/fire_works.py deleted file mode 100644 index 114557c4..00000000 --- a/examples/models/fire_works.py +++ /dev/null @@ -1,13 +0,0 @@ -from swarms.models.popular_llms import Fireworks -import os - -# Initialize the model -llm = Fireworks( - temperature=0.2, - max_tokens=3500, - openai_api_key=os.getenv("FIREWORKS_API_KEY"), -) - -# Run the model -response = llm("What is the meaning of life?") -print(response) diff --git a/examples/models/fuyu_example.py b/examples/models/fuyu_example.py deleted file mode 100644 index 
537de25a..00000000 --- a/examples/models/fuyu_example.py +++ /dev/null @@ -1,7 +0,0 @@ -from swarms.models.fuyu import Fuyu - -fuyu = Fuyu() - -# This is the default image, you can change it to any image you want -out = fuyu("What is this image?", "images/swarms.jpeg") -print(out) diff --git a/examples/models/gemini_example.py b/examples/models/gemini_example.py deleted file mode 100644 index 75553bfc..00000000 --- a/examples/models/gemini_example.py +++ /dev/null @@ -1,20 +0,0 @@ -import os - -from dotenv import load_dotenv - -from swarms.models.gemini import Gemini - -load_dotenv() - -api_key = os.environ["GEMINI_API_KEY"] - -# Initialize the model -model = Gemini(gemini_api_key=api_key) - -# Establish the prompt and image -task = "What is your name" -img = "images/github-banner-swarms.png" - -# Run the model -out = model.run("What is your name?", img=img) -print(out) diff --git a/examples/models/gpt4_v_example.py b/examples/models/gpt4_v_example.py deleted file mode 100644 index b434f257..00000000 --- a/examples/models/gpt4_v_example.py +++ /dev/null @@ -1,35 +0,0 @@ -import os # Import the os module for working with the operating system - -from dotenv import ( - load_dotenv, # Import the load_dotenv function from the dotenv module -) - -from swarms import ( - GPT4VisionAPI, # Import the GPT4VisionAPI class from the swarms module -) - -# Load the environment variables -load_dotenv() - -# Get the API key from the environment variables -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the GPT4VisionAPI class with the API key and model name -gpt4vision = GPT4VisionAPI( - openai_api_key=api_key, - model_name="gpt-4o", - max_tokens=1000, - openai_proxy="https://api.openai.com/v1/chat/completions", -) - -# Define the URL of the image to analyze -img = "ear.png" - -# Define the task to perform on the image -task = "What is this image" - -# Run the GPT4VisionAPI on the image with the specified task -answer = gpt4vision.run(task, img, return_json=True) - 
-# Print the answer -print(answer) diff --git a/examples/models/gpt_4o_mini.py b/examples/models/gpt_4o_mini.py deleted file mode 100644 index c21f3a5c..00000000 --- a/examples/models/gpt_4o_mini.py +++ /dev/null @@ -1,16 +0,0 @@ -from swarms import OpenAIChat -import os - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat(openai_api_key=api_key, model_name="gpt-4o-mini") - -# Query the model with a question -out = model( - "What is the best state to register a business in the US for the least amount of taxes?" -) - -# Print the model's response -print(out) diff --git a/examples/models/groq_model_exampole 2.py b/examples/models/groq_model_exampole 2.py deleted file mode 100644 index 5fde99f0..00000000 --- a/examples/models/groq_model_exampole 2.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -from swarms import OpenAIChat - - -# Example usage: -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) - - -out = model("What is the essence of quantum field theory?") -print(out) diff --git a/examples/models/groq_model_exampole.py b/examples/models/groq_model_exampole.py deleted file mode 100644 index 5fde99f0..00000000 --- a/examples/models/groq_model_exampole.py +++ /dev/null @@ -1,18 +0,0 @@ -import os -from swarms import OpenAIChat - - -# Example usage: -api_key = os.getenv("GROQ_API_KEY") - -# Model -model = OpenAIChat( - openai_api_base="https://api.groq.com/openai/v1", - openai_api_key=api_key, - model_name="llama-3.1-70b-versatile", - temperature=0.1, -) - - -out = model("What is the essence of quantum field theory?") -print(out) diff --git a/examples/models/hf/llama3 2 b/examples/models/hf/llama3 2 deleted file mode 100644 index 1e00018c..00000000 --- a/examples/models/hf/llama3 2 +++ /dev/null @@ -1,71 
+0,0 @@ -from swarms import Agent -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, -) -import torch -from swarms import BaseLLM -from transformers import AutoTokenizer, LlamaForCausalLM - - -class NvidiaLlama31B(BaseLLM): - # Load the tokenizer and model - def __init__(self, max_tokens: int = 2048): - self.max_tokens = max_tokens - model_path = "nvidia/Llama-3.1-Minitron-4B-Width-Base" - self.tokenizer = AutoTokenizer.from_pretrained(model_path) - - device = "cuda" - dtype = torch.bfloat16 - self.model = LlamaForCausalLM.from_pretrained( - model_path, torch_dtype=dtype, device_map=device - ) - - def run(self, task: str): - # Prepare the input text - inputs = self.tokenizer.encode(task, return_tensors="pt").to( - self.model.device - ) - - # Generate the output - outputs = self.model.generate( - inputs, max_length=self.max_tokens - ) - - # Decode and print the output - output_text = self.tokenizer.decode(outputs[0]) - print(output_text) - - return output_text - - -# # Example usage: -# model = NvidiaLlama31B() -# out = model.run("What is the essence of quantum field theory?") -# print(out) - -model = NvidiaLlama31B() - -# Initialize the agent -agent = Agent( - agent_name="Financial-Analysis-Agent_sas_chicken_eej", - system_prompt=FINANCIAL_AGENT_SYS_PROMPT, - llm=model, - max_loops=2, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="finance_agent.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=True, - disable_print_every_step=True, - output_type="json", -) - - -out = agent.run( - "How can I establish a ROTH IRA to buy stocks and get a tax break? 
What are the criteria" -) -print(out) diff --git a/examples/models/hf/llama3.py b/examples/models/hf/llama3.py deleted file mode 100644 index 1e00018c..00000000 --- a/examples/models/hf/llama3.py +++ /dev/null @@ -1,71 +0,0 @@ -from swarms import Agent -from swarms.prompts.finance_agent_sys_prompt import ( - FINANCIAL_AGENT_SYS_PROMPT, -) -import torch -from swarms import BaseLLM -from transformers import AutoTokenizer, LlamaForCausalLM - - -class NvidiaLlama31B(BaseLLM): - # Load the tokenizer and model - def __init__(self, max_tokens: int = 2048): - self.max_tokens = max_tokens - model_path = "nvidia/Llama-3.1-Minitron-4B-Width-Base" - self.tokenizer = AutoTokenizer.from_pretrained(model_path) - - device = "cuda" - dtype = torch.bfloat16 - self.model = LlamaForCausalLM.from_pretrained( - model_path, torch_dtype=dtype, device_map=device - ) - - def run(self, task: str): - # Prepare the input text - inputs = self.tokenizer.encode(task, return_tensors="pt").to( - self.model.device - ) - - # Generate the output - outputs = self.model.generate( - inputs, max_length=self.max_tokens - ) - - # Decode and print the output - output_text = self.tokenizer.decode(outputs[0]) - print(output_text) - - return output_text - - -# # Example usage: -# model = NvidiaLlama31B() -# out = model.run("What is the essence of quantum field theory?") -# print(out) - -model = NvidiaLlama31B() - -# Initialize the agent -agent = Agent( - agent_name="Financial-Analysis-Agent_sas_chicken_eej", - system_prompt=FINANCIAL_AGENT_SYS_PROMPT, - llm=model, - max_loops=2, - autosave=True, - dashboard=False, - verbose=True, - dynamic_temperature_enabled=True, - saved_state_path="finance_agent.json", - user_name="swarms_corp", - retry_attempts=1, - context_length=200000, - return_step_meta=True, - disable_print_every_step=True, - output_type="json", -) - - -out = agent.run( - "How can I establish a ROTH IRA to buy stocks and get a tax break? 
What are the criteria" -) -print(out) diff --git a/examples/models/huggingface_example.py b/examples/models/huggingface_example.py deleted file mode 100644 index 73b9cb41..00000000 --- a/examples/models/huggingface_example.py +++ /dev/null @@ -1,8 +0,0 @@ -from swarms.models import HuggingfaceLLM - -model_id = "NousResearch/Yarn-Mistral-7b-128k" -inference = HuggingfaceLLM(model_id=model_id) - -task = "Once upon a time" -generated_text = inference(task) -print(generated_text) diff --git a/examples/models/idefics_example.py b/examples/models/idefics_example.py deleted file mode 100644 index ea36ba77..00000000 --- a/examples/models/idefics_example.py +++ /dev/null @@ -1,33 +0,0 @@ -# Import the idefics model from the swarms.models module -from swarms.models import Idefics - -# Create an instance of the idefics model -model = Idefics() - -# Define user input with an image URL and chat with the model -user_input = ( - "User: What is in this image?" - " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" -) -response = model.chat(user_input) -print(response) - -# Define another user input with an image URL and chat with the model -user_input = ( - "User: And who is that?" 
- " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052" -) -response = model.chat(user_input) -print(response) - -# Set the checkpoint of the model to "new_checkpoint" -model.set_checkpoint("new_checkpoint") - -# Set the device of the model to "cpu" -model.set_device("cpu") - -# Set the maximum length of the chat to 200 -model.set_max_length(200) - -# Clear the chat history of the model -model.clear_chat_history() diff --git a/examples/models/kosmos_example.py b/examples/models/kosmos_example.py deleted file mode 100644 index dbfd108f..00000000 --- a/examples/models/kosmos_example.py +++ /dev/null @@ -1,10 +0,0 @@ -from swarms import Kosmos - -# Initialize the model -model = Kosmos() - -# Generate -out = model.run("Analyze the reciepts in this image", "docs.jpg") - -# Print the output -print(out) diff --git a/examples/models/layout_documentxlm_example.py b/examples/models/layout_documentxlm_example.py deleted file mode 100644 index 281938fd..00000000 --- a/examples/models/layout_documentxlm_example.py +++ /dev/null @@ -1,8 +0,0 @@ -from swarms.models import LayoutLMDocumentQA - -model = LayoutLMDocumentQA() - -# Place an image of a financial document -out = model("What is the total amount?", "images/swarmfest.png") - -print(out) diff --git a/examples/models/llama_3_hosted.py b/examples/models/llama_3_hosted.py deleted file mode 100644 index 8d4d7de2..00000000 --- a/examples/models/llama_3_hosted.py +++ /dev/null @@ -1,7 +0,0 @@ -from swarms import llama3Hosted - -llama3 = llama3Hosted() - -task = "What is the capital of France?" 
-response = llama3.run(task) -print(response) diff --git a/examples/models/llama_function_caller_example.py b/examples/models/llama_function_caller_example.py deleted file mode 100644 index 201009a8..00000000 --- a/examples/models/llama_function_caller_example.py +++ /dev/null @@ -1,37 +0,0 @@ -from swarms.models.llama_function_caller import LlamaFunctionCaller - -llama_caller = LlamaFunctionCaller() - - -# Add a custom function -def get_weather(location: str, format: str) -> str: - # This is a placeholder for the actual implementation - return f"Weather at {location} in {format} format." - - -llama_caller.add_func( - name="get_weather", - function=get_weather, - description="Get the weather at a location", - arguments=[ - { - "name": "location", - "type": "string", - "description": "Location for the weather", - }, - { - "name": "format", - "type": "string", - "description": "Format of the weather data", - }, - ], -) - -# Call the function -result = llama_caller.call_function( - "get_weather", location="Paris", format="Celsius" -) -print(result) - -# Stream a user prompt -llama_caller("Tell me about the tallest mountain in the world.") diff --git a/examples/models/llava_example.py b/examples/models/llava_example.py deleted file mode 100644 index 561b6f88..00000000 --- a/examples/models/llava_example.py +++ /dev/null @@ -1,16 +0,0 @@ -from swarms import QwenVLMultiModal - -# Instantiate the QwenVLMultiModal model -model = QwenVLMultiModal( - model_name="Qwen/Qwen-VL-Chat", - device="cuda", - quantize=True, -) - -# Run the model -response = model( - "Hello, how are you?", "https://example.com/image.jpg" -) - -# Print the response -print(response) diff --git a/examples/models/nougat_example.py b/examples/models/nougat_example.py deleted file mode 100644 index 97e1f1a3..00000000 --- a/examples/models/nougat_example.py +++ /dev/null @@ -1,5 +0,0 @@ -from swarms.models.nougat import Nougat - -nougat = Nougat() - -out = nougat("large.png") diff --git 
a/examples/models/openai_model_example.py b/examples/models/openai_model_example.py deleted file mode 100644 index 1a58770c..00000000 --- a/examples/models/openai_model_example.py +++ /dev/null @@ -1,10 +0,0 @@ -import os -from swarms.models import OpenAIChat - -# Load doten -openai = OpenAIChat( - openai_api_key=os.getenv("OPENAI_API_KEY"), verbose=False -) - -chat = openai("What are quantum fields?") -print(chat) diff --git a/examples/models/palm_example.py b/examples/models/palm_example.py deleted file mode 100644 index 5a2348ad..00000000 --- a/examples/models/palm_example.py +++ /dev/null @@ -1,5 +0,0 @@ -from swarms.models import Palm - -palm = Palm() - -out = palm("what's your name") diff --git a/examples/models/ssd_example.py b/examples/models/ssd_example.py deleted file mode 100644 index 2234b9c8..00000000 --- a/examples/models/ssd_example.py +++ /dev/null @@ -1,9 +0,0 @@ -from swarms.models.ssd_1b import SSD1B - -model = SSD1B() - -task = "A painting of a dog" -neg_prompt = "ugly, blurry, poor quality" - -image_url = model(task, neg_prompt) -print(image_url) diff --git a/examples/models/swarms_cloud_api_example.py b/examples/models/swarms_cloud_api_example.py deleted file mode 100644 index 914ca9f5..00000000 --- a/examples/models/swarms_cloud_api_example.py +++ /dev/null @@ -1,31 +0,0 @@ -from dotenv import load_dotenv -from openai import OpenAI -import os - -load_dotenv() - -openai_api_key = os.getenv("SWARMS_API_KEY") -openai_api_base = "https://api.swarms.world" -model = "gpt-4o" - -client = OpenAI(api_key=openai_api_key, base_url=openai_api_base) -# Note that this model expects the image to come before the main text -chat_response = client.chat.completions.create( - model=model, - messages=[ - { - "role": "user", - "content": [ - { - "type": "image_url", - "image_url": { - "url": "https://home-cdn.reolink.us/wp-content/uploads/2022/04/010345091648784709.4253.jpg", - }, - }, - {"type": "text", "text": "What's in this image?"}, - ], - } - ], - 
temperature=0.1, -) -print("Chat response:", chat_response) diff --git a/examples/models/together_example.py b/examples/models/together_example.py deleted file mode 100644 index f730f72f..00000000 --- a/examples/models/together_example.py +++ /dev/null @@ -1,12 +0,0 @@ -from swarms import TogetherLLM - -# Initialize the model with your parameters -model = TogetherLLM( - model_name="mistralai/Mixtral-8x7B-Instruct-v0.1", - max_tokens=1000, -) - -# Run the model -model.run( - "Generate a blog post about the best way to make money online." -) diff --git a/examples/models/tts_speech_example.py b/examples/models/tts_speech_example.py deleted file mode 100644 index 6c33f944..00000000 --- a/examples/models/tts_speech_example.py +++ /dev/null @@ -1,16 +0,0 @@ -import os - -from dotenv import load_dotenv - -from swarms import OpenAITTS - -load_dotenv() - -tts = OpenAITTS( - model_name="tts-1-1106", - voice="onyx", - openai_api_key=os.getenv("OPENAI_API_KEY"), -) - -out = tts.run_and_save("Dammmmmm those tacos were good") -print(out) diff --git a/examples/models/vilt_example.py b/examples/models/vilt_example.py deleted file mode 100644 index 8e40f59d..00000000 --- a/examples/models/vilt_example.py +++ /dev/null @@ -1,8 +0,0 @@ -from swarms.models.vilt import Vilt - -model = Vilt() - -output = model( - "What is this image", - "http://images.cocodataset.org/val2017/000000039769.jpg", -) diff --git a/examples/structs/swarms/agent_registry/agent_registry.py b/examples/structs/swarms/agent_registry/agent_registry.py index cf8b6c99..dd746d5d 100644 --- a/examples/structs/swarms/agent_registry/agent_registry.py +++ b/examples/structs/swarms/agent_registry/agent_registry.py @@ -1,6 +1,6 @@ from swarms.structs.agent_registry import AgentRegistry from swarms import Agent -from swarms.models import Anthropic +from swarm_models import Anthropic # Initialize the agents diff --git a/examples/structs/swarms/automate_docs.py b/examples/structs/swarms/automate_docs.py index 
f3268fdb..80e0427d 100644 --- a/examples/structs/swarms/automate_docs.py +++ b/examples/structs/swarms/automate_docs.py @@ -4,7 +4,8 @@ import threading from typing import Callable, List from swarms.prompts.documentation import DOCUMENTATION_WRITER_SOP -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.utils.loguru_logger import logger import concurrent diff --git a/examples/structs/swarms/different_architectures/a_star_swarm_example.py b/examples/structs/swarms/different_architectures/a_star_swarm_example.py index 1995b16e..01fa59a8 100644 --- a/examples/structs/swarms/different_architectures/a_star_swarm_example.py +++ b/examples/structs/swarms/different_architectures/a_star_swarm_example.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/different_architectures/circular_swarm.py b/examples/structs/swarms/different_architectures/circular_swarm.py index 8fdfaff5..b0c8e7fb 100644 --- a/examples/structs/swarms/different_architectures/circular_swarm.py +++ b/examples/structs/swarms/different_architectures/circular_swarm.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/different_architectures/star_swarm.py b/examples/structs/swarms/different_architectures/star_swarm.py index 2b3ec2a3..e08963fc 100644 --- a/examples/structs/swarms/different_architectures/star_swarm.py +++ b/examples/structs/swarms/different_architectures/star_swarm.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( 
FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/example_logistics.py b/examples/structs/swarms/example_logistics.py index 9de44346..88750016 100644 --- a/examples/structs/swarms/example_logistics.py +++ b/examples/structs/swarms/example_logistics.py @@ -1,7 +1,7 @@ from swarms.structs import Agent import os from dotenv import load_dotenv -from swarms.models import GPT4VisionAPI +from swarm_models import GPT4VisionAPI from swarms.prompts.logistics import ( Health_Security_Agent_Prompt, Quality_Control_Agent_Prompt, diff --git a/examples/structs/swarms/hiearchical_swarm/agent_creator 2.py b/examples/structs/swarms/hiearchical_swarm/agent_creator 2.py index 307d6640..5023c979 100644 --- a/examples/structs/swarms/hiearchical_swarm/agent_creator 2.py +++ b/examples/structs/swarms/hiearchical_swarm/agent_creator 2.py @@ -11,7 +11,7 @@ from swarms import ( OpenAIChat, SpreadSheetSwarm, ) -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller agent_pool = [] diff --git a/examples/structs/swarms/hiearchical_swarm/agent_creator.py b/examples/structs/swarms/hiearchical_swarm/agent_creator.py index 307d6640..5023c979 100644 --- a/examples/structs/swarms/hiearchical_swarm/agent_creator.py +++ b/examples/structs/swarms/hiearchical_swarm/agent_creator.py @@ -11,7 +11,7 @@ from swarms import ( OpenAIChat, SpreadSheetSwarm, ) -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller agent_pool = [] diff --git a/examples/structs/swarms/mixture_of_agents/mixture_of_agents.py b/examples/structs/swarms/mixture_of_agents/mixture_of_agents.py index f594eddb..ec4ecbd3 100644 --- a/examples/structs/swarms/mixture_of_agents/mixture_of_agents.py +++ b/examples/structs/swarms/mixture_of_agents/mixture_of_agents.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from 
swarm_models import OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents # Initialize the director agent diff --git a/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k 2.py b/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k 2.py index 9cbf7cf1..4ed21d69 100644 --- a/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k 2.py +++ b/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k 2.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents diff --git a/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k.py b/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k.py index 9cbf7cf1..4ed21d69 100644 --- a/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k.py +++ b/examples/structs/swarms/mixture_of_agents/mixture_of_agents_nividia_10k.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents diff --git a/examples/structs/swarms/mixture_of_agents/moa_from_scratch 2.py b/examples/structs/swarms/mixture_of_agents/moa_from_scratch 2.py index 8b233065..74226740 100644 --- a/examples/structs/swarms/mixture_of_agents/moa_from_scratch 2.py +++ b/examples/structs/swarms/mixture_of_agents/moa_from_scratch 2.py @@ -6,7 +6,8 @@ from typing import List from pydantic import BaseModel -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/mixture_of_agents/moa_from_scratch.py b/examples/structs/swarms/mixture_of_agents/moa_from_scratch.py index 8b233065..74226740 100644 --- 
a/examples/structs/swarms/mixture_of_agents/moa_from_scratch.py +++ b/examples/structs/swarms/mixture_of_agents/moa_from_scratch.py @@ -6,7 +6,8 @@ from typing import List from pydantic import BaseModel -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/movers_swarm.py b/examples/structs/swarms/movers_swarm.py index c4625876..3fb17461 100644 --- a/examples/structs/swarms/movers_swarm.py +++ b/examples/structs/swarms/movers_swarm.py @@ -9,7 +9,8 @@ $ pip install swarms - """ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api import os diff --git a/examples/structs/swarms/multi_agent_collaboration/agent_delegation.py b/examples/structs/swarms/multi_agent_collaboration/agent_delegation.py index 91ce1eb3..89c2612e 100644 --- a/examples/structs/swarms/multi_agent_collaboration/agent_delegation.py +++ b/examples/structs/swarms/multi_agent_collaboration/agent_delegation.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat def calculate_profit(revenue: float, expenses: float): diff --git a/examples/structs/swarms/multi_agent_collaboration/company_example.py b/examples/structs/swarms/multi_agent_collaboration/company_example.py index abdee607..933bf0f6 100644 --- a/examples/structs/swarms/multi_agent_collaboration/company_example.py +++ b/examples/structs/swarms/multi_agent_collaboration/company_example.py @@ -2,7 +2,8 @@ import os from dotenv import load_dotenv -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.company import Company load_dotenv() diff --git 
a/examples/structs/swarms/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py b/examples/structs/swarms/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py index e61d1536..e530dc5c 100644 --- a/examples/structs/swarms/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py +++ b/examples/structs/swarms/multi_agent_collaboration/mixture_of_agents/moa_with_scp.py @@ -1,4 +1,5 @@ -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.mixture_of_agents import MixtureOfAgents from swarms_memory import ChromaDB diff --git a/examples/structs/swarms/multi_agent_collaboration/round_robin_swarm_example.py b/examples/structs/swarms/multi_agent_collaboration/round_robin_swarm_example.py index f3a463ad..a6e0e77c 100644 --- a/examples/structs/swarms/multi_agent_collaboration/round_robin_swarm_example.py +++ b/examples/structs/swarms/multi_agent_collaboration/round_robin_swarm_example.py @@ -1,5 +1,6 @@ from swarms.structs.round_robin import RoundRobinSwarm -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Initialize the LLM diff --git a/examples/structs/swarms/queue_swarm/queue_swarm_2 2.py b/examples/structs/swarms/queue_swarm/queue_swarm_2 2.py index e87770bc..dcfcd022 100644 --- a/examples/structs/swarms/queue_swarm/queue_swarm_2 2.py +++ b/examples/structs/swarms/queue_swarm/queue_swarm_2 2.py @@ -8,7 +8,8 @@ from typing import List from swarms.utils.calculate_func_metrics import profile_func from pydantic import BaseModel -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/queue_swarm/queue_swarm_2 .py b/examples/structs/swarms/queue_swarm/queue_swarm_2 .py index e87770bc..dcfcd022 100644 --- a/examples/structs/swarms/queue_swarm/queue_swarm_2 .py +++ 
b/examples/structs/swarms/queue_swarm/queue_swarm_2 .py @@ -8,7 +8,8 @@ from typing import List from swarms.utils.calculate_func_metrics import profile_func from pydantic import BaseModel -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/queue_swarm/queue_swarm_example.py b/examples/structs/swarms/queue_swarm/queue_swarm_example.py index fa8d187d..f722dcc5 100644 --- a/examples/structs/swarms/queue_swarm/queue_swarm_example.py +++ b/examples/structs/swarms/queue_swarm/queue_swarm_example.py @@ -1,7 +1,8 @@ import os from swarms.structs.queue_swarm import TaskQueueSwarm -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1 2 b/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1 2 index ac1faec9..c9ae8252 100644 --- a/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1 2 +++ b/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1 2 @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.round_robin import RoundRobinSwarm SEC_DATA = """ diff --git a/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1.py b/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1.py index ac1faec9..c9ae8252 100644 --- a/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1.py +++ b/examples/structs/swarms/round_of_robin_swarm/rob_swarm_1.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.round_robin import RoundRobinSwarm SEC_DATA = """ diff --git a/examples/structs/swarms/search_arena/search_agents.py 
b/examples/structs/swarms/search_arena/search_agents.py index 5ea84f53..32f10880 100644 --- a/examples/structs/swarms/search_arena/search_agents.py +++ b/examples/structs/swarms/search_arena/search_agents.py @@ -5,7 +5,8 @@ import requests import tavily from dotenv import load_dotenv -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.tools.prebuilt.bing_api import fetch_web_articles_bing_api load_dotenv() diff --git a/examples/structs/swarms/spreadsheet_swarm/dfs_example.py b/examples/structs/swarms/spreadsheet_swarm/dfs_example.py index 5586a1d3..049bbbb2 100644 --- a/examples/structs/swarms/spreadsheet_swarm/dfs_example.py +++ b/examples/structs/swarms/spreadsheet_swarm/dfs_example.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.dfs_search_swarm import DFSSwarm # Get the OpenAI API key from the environment variable diff --git a/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm 2.py b/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm 2.py index eb093199..8948465a 100644 --- a/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm 2.py +++ b/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm 2.py @@ -1,6 +1,7 @@ import uuid import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm.py b/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm.py index eb093199..8948465a 100644 --- a/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm.py +++ b/examples/structs/swarms/spreadsheet_swarm/real_estate_swarm.py @@ -1,6 +1,7 @@ import uuid import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from 
swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm 2.py b/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm 2.py index ddc23a69..59896dd8 100644 --- a/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm 2.py +++ b/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm 2.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm.py b/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm.py index ddc23a69..59896dd8 100644 --- a/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm.py +++ b/examples/structs/swarms/spreadsheet_swarm/social_media_marketing_spreesheet_swarm.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/csvs/README.md b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/csvs/README.md index 0dbf5e0d..2ea3ea68 100644 --- a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/csvs/README.md +++ b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/csvs/README.md @@ -82,7 +82,8 @@ Features: ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import 
OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) @@ -179,7 +180,8 @@ agent.run( An LLM equipped with long term memory and tools, a full stack agent capable of automating all and any digital tasks given a good prompt. ```python -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms_memory import ChromaDB import subprocess import os @@ -851,7 +853,8 @@ An all-new swarm architecuture, with SpreadSheetSwarm you can manage and oversee ```python import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for each social media platform diff --git a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm 2.py b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm 2.py index d0b6a3cb..4419c5cd 100644 --- a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm 2.py +++ b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm 2.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for QR code generation diff --git a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm.py b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm.py index d0b6a3cb..4419c5cd 100644 --- a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm.py +++ 
b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/qr_code_generative_spreedsheet_swarm.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm # Define custom system prompts for QR code generation diff --git a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example 2.py b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example 2.py index 35b8353b..8aab2c80 100644 --- a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example 2.py +++ b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example 2.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example.py b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example.py index 35b8353b..8aab2c80 100644 --- a/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example.py +++ b/examples/structs/swarms/spreadsheet_swarm/spreadsheet_swarm_examples/spread_sheet_example.py @@ -1,6 +1,7 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/swarms/rearrange/example.py b/examples/swarms/rearrange/example.py index 930188db..15c6a688 100644 --- a/examples/swarms/rearrange/example.py +++ b/examples/swarms/rearrange/example.py @@ -4,7 +4,7 @@ from dotenv import load_dotenv load_dotenv() from swarms.structs import Agent -from swarms.models import Anthropic +from 
swarm_models import Anthropic from swarms.structs.rearrange import AgentRearrange llm = Anthropic(anthropic_api_key=os.getenv("ANTHROPIC_API_KEY"), streaming=True) diff --git a/examples/tasks/task_example 2.py b/examples/tasks/task_example 2.py index b8b81fe5..3074104b 100644 --- a/examples/tasks/task_example 2.py +++ b/examples/tasks/task_example 2.py @@ -1,7 +1,8 @@ import os from datetime import datetime, timedelta -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/tasks/task_example.py b/examples/tasks/task_example.py index b8b81fe5..3074104b 100644 --- a/examples/tasks/task_example.py +++ b/examples/tasks/task_example.py @@ -1,7 +1,8 @@ import os from datetime import datetime, timedelta -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/examples/utils/agent_table 2.py b/examples/utils/agent_table 2.py index be3c9054..e357fe6a 100644 --- a/examples/utils/agent_table 2.py +++ b/examples/utils/agent_table 2.py @@ -5,7 +5,8 @@ from swarms.utils.pandas_utils import ( dict_to_dataframe, pydantic_model_to_dataframe, ) -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Create an instance of the OpenAIChat class llm = OpenAIChat( diff --git a/examples/utils/agent_table.py b/examples/utils/agent_table.py index be3c9054..e357fe6a 100644 --- a/examples/utils/agent_table.py +++ b/examples/utils/agent_table.py @@ -5,7 +5,8 @@ from swarms.utils.pandas_utils import ( dict_to_dataframe, pydantic_model_to_dataframe, ) -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat # Create an instance of the OpenAIChat class llm = OpenAIChat( diff --git 
a/examples/workshops/aug_10/book_generator_swarm.py b/examples/workshops/aug_10/book_generator_swarm.py index 640491aa..36e767d0 100644 --- a/examples/workshops/aug_10/book_generator_swarm.py +++ b/examples/workshops/aug_10/book_generator_swarm.py @@ -1,4 +1,4 @@ -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller from pydantic import BaseModel, Field from typing import Sequence diff --git a/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm 2.py b/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm 2.py index 6c81c7fa..b0a3cbf0 100644 --- a/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm 2.py +++ b/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm 2.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm diff --git a/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm.py b/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm.py index 6c81c7fa..b0a3cbf0 100644 --- a/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm.py +++ b/examples/workshops/spreadsheet_workspace/aug_31_workshop/groq_api_spreadsheet_marketing_swarm.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm diff --git a/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm 2.py b/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm 2.py index be7d524e..93c982e7 100644 --- 
a/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm 2.py +++ b/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm 2.py @@ -17,7 +17,7 @@ from pydantic import BaseModel, Field from transformers import T5EncoderModel from swarms import Agent -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller hf_token = os.getenv("HF_TOKEN") diff --git a/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm.py b/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm.py index be7d524e..93c982e7 100644 --- a/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm.py +++ b/examples/workshops/spreadsheet_workspace/aug_31_workshop/hierarchical_img_gen_swarm.py @@ -17,7 +17,7 @@ from pydantic import BaseModel, Field from transformers import T5EncoderModel from swarms import Agent -from swarms.models.openai_function_caller import OpenAIFunctionCaller +from swarm_models.openai_function_caller import OpenAIFunctionCaller hf_token = os.getenv("HF_TOKEN") diff --git a/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm 2.py b/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm 2.py index 22859ed4..86d3fe49 100644 --- a/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm 2.py +++ b/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm 2.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm diff --git a/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm.py b/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm.py index 22859ed4..86d3fe49 100644 --- 
a/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm.py +++ b/examples/workshops/spreadsheet_workspace/swarms_promotion_spreadsheet_swarm.py @@ -1,5 +1,6 @@ import os -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.spreadsheet_swarm import SpreadSheetSwarm diff --git a/pyproject.toml b/pyproject.toml index f09574b6..4a793d17 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -75,6 +75,7 @@ swarms-memory = "*" black = "*" swarms-cloud = "*" aiofiles = "*" +swarm-models = "*" diff --git a/requirements.txt b/requirements.txt index 8530930b..b1e85932 100644 --- a/requirements.txt +++ b/requirements.txt @@ -31,4 +31,5 @@ fastapi>=0.110.1 networkx swarms-memory pre-commit -aiofiles \ No newline at end of file +aiofiles +swarm-models \ No newline at end of file diff --git a/sample_rearrange 2.py b/sample_rearrange 2.py deleted file mode 100644 index 5829c79e..00000000 --- a/sample_rearrange 2.py +++ /dev/null @@ -1,112 +0,0 @@ -import os - -from swarms import Agent, AgentRearrange, OpenAIChat - -# Get the OpenAI API key from the environment variable -api_key = os.getenv("OPENAI_API_KEY") - -# Create an instance of the OpenAIChat class -model = OpenAIChat( - api_key=api_key, model_name="gpt-4o-mini", temperature=0.1 -) - - -# Initialize the boss agent (Director) -boss_agent = Agent( - agent_name="BossAgent", - system_prompt=""" - You are the BossAgent responsible for managing and overseeing a swarm of agents analyzing company expenses. - Your job is to dynamically assign tasks, prioritize their execution, and ensure that all agents collaborate efficiently. - After receiving a report on the company's expenses, you will break down the work into smaller tasks, - assigning specific tasks to each agent, such as detecting recurring high costs, categorizing expenditures, - and identifying unnecessary transactions. 
Ensure the results are communicated back in a structured way - so the finance team can take actionable steps to cut off unproductive spending. You also monitor and - dynamically adapt the swarm to optimize their performance. Finally, you summarize their findings - into a coherent report. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="boss_agent.json", -) - -# Initialize worker 1: Expense Analyzer -worker1 = Agent( - agent_name="ExpenseAnalyzer", - system_prompt=""" - Your task is to carefully analyze the company's expense data provided to you. - You will focus on identifying high-cost recurring transactions, categorizing expenditures - (e.g., marketing, operations, utilities, etc.), and flagging areas where there seems to be excessive spending. - You will provide a detailed breakdown of each category, along with specific recommendations for cost-cutting. - Pay close attention to monthly recurring subscriptions, office supplies, and non-essential expenditures. - """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="worker1.json", -) - -# Initialize worker 2: Summary Generator -worker2 = Agent( - agent_name="SummaryGenerator", - system_prompt=""" - After receiving the detailed breakdown from the ExpenseAnalyzer, - your task is to create a concise summary of the findings. You will focus on the most actionable insights, - such as highlighting the specific transactions that can be immediately cut off and summarizing the areas - where the company is overspending. Your summary will be used by the BossAgent to generate the final report. - Be clear and to the point, emphasizing the urgency of cutting unnecessary expenses. 
- """, - llm=model, - max_loops=1, - dashboard=False, - streaming_on=True, - verbose=True, - stopping_token="", - state_save_file_type="json", - saved_state_path="worker2.json", -) - -# Swarm-Level Prompt (Collaboration Prompt) -swarm_prompt = """ - As a swarm, your collective goal is to analyze the company's expenses and identify transactions that should be cut off. - You will work collaboratively to break down the entire process of expense analysis into manageable steps. - The BossAgent will direct the flow and assign tasks dynamically to the agents. The ExpenseAnalyzer will first - focus on breaking down the expense report, identifying high-cost recurring transactions, categorizing them, - and providing recommendations for potential cost reduction. After the analysis, the SummaryGenerator will then - consolidate all the findings into an actionable summary that the finance team can use to immediately cut off unnecessary expenses. - Together, your collaboration is essential to streamlining and improving the company’s financial health. -""" - -# Create a list of agents -agents = [boss_agent, worker1, worker2] - -# Define the flow pattern for the swarm -flow = "BossAgent -> ExpenseAnalyzer -> SummaryGenerator" - -# Using AgentRearrange class to manage the swarm -agent_system = AgentRearrange( - agents=agents, flow=flow, return_json=True -) - -# Input task for the swarm -task = f""" - - {swarm_prompt} - - The company has been facing a rising number of unnecessary expenses, and the finance team needs a detailed - analysis of recent transactions to identify which expenses can be cut off to improve profitability. - Analyze the provided transaction data and create a detailed report on cost-cutting opportunities, - focusing on recurring transactions and non-essential expenditures. 
-""" - -# Run the swarm system with the task -output = agent_system.run(task) -print(output) diff --git a/swarms/__init__.py b/swarms/__init__.py index c6ed0908..2624b560 100644 --- a/swarms/__init__.py +++ b/swarms/__init__.py @@ -9,7 +9,7 @@ with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: from swarms.agents import * # noqa: E402, F403 from swarms.artifacts import * # noqa: E402, F403 -from swarms.models import * # noqa: E402, F403 +from swarm_models import * # noqa: E402, F403 from swarms.prompts import * # noqa: E402, F403 from swarms.structs import * # noqa: E402, F403 from swarms.telemetry import * # noqa: E402, F403 diff --git a/swarms/cli/create_agent 2.py b/swarms/cli/create_agent 2.py index 9d68f6a4..e7b29d52 100644 --- a/swarms/cli/create_agent 2.py +++ b/swarms/cli/create_agent 2.py @@ -1,6 +1,6 @@ import os from swarms.structs.agent import Agent -from swarms.models.popular_llms import OpenAIChat +from swarm_models.popular_llms import OpenAIChat from swarms.structs.agent_registry import AgentRegistry # Get the OpenAI API key from the environment variable diff --git a/swarms/cli/create_agent.py b/swarms/cli/create_agent.py index 9d68f6a4..e7b29d52 100644 --- a/swarms/cli/create_agent.py +++ b/swarms/cli/create_agent.py @@ -1,6 +1,6 @@ import os from swarms.structs.agent import Agent -from swarms.models.popular_llms import OpenAIChat +from swarm_models.popular_llms import OpenAIChat from swarms.structs.agent_registry import AgentRegistry # Get the OpenAI API key from the environment variable diff --git a/swarms/cli/parse_yaml.py b/swarms/cli/parse_yaml.py index e7ba841f..de8e936d 100644 --- a/swarms/cli/parse_yaml.py +++ b/swarms/cli/parse_yaml.py @@ -5,7 +5,7 @@ from typing import List, Optional import json from swarms.structs.agent_registry import AgentRegistry from swarms.structs.agent import Agent -from swarms.models.popular_llms import OpenAIChat +from swarm_models.popular_llms import OpenAIChat class AgentInput(BaseModel): diff 
--git a/swarms/models/__init__.py b/swarms/models/__init__.py deleted file mode 100644 index 1a0c847f..00000000 --- a/swarms/models/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -from swarms.models.base_embedding_model import BaseEmbeddingModel -from swarms.models.base_llm import BaseLLM # noqa: E402 -from swarms.models.base_multimodal_model import BaseMultiModalModel -from swarms.models.fuyu import Fuyu # noqa: E402 -from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402 -from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 -from swarms.models.idefics import Idefics # noqa: E402 -from swarms.models.kosmos_two import Kosmos # noqa: E402 -from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA -from swarms.models.llama3_hosted import llama3Hosted -from swarms.models.llava import LavaMultiModal # noqa: E402 -from swarms.models.nougat import Nougat # noqa: E402 -from swarms.models.openai_embeddings import OpenAIEmbeddings -from swarms.models.openai_tts import OpenAITTS # noqa: E402 -from swarms.models.palm import GooglePalm as Palm # noqa: E402 -from swarms.models.popular_llms import Anthropic as Anthropic -from swarms.models.popular_llms import ( - AzureOpenAILLM as AzureOpenAI, -) -from swarms.models.popular_llms import ( - CohereChat as Cohere, -) -from swarms.models.popular_llms import OctoAIChat -from swarms.models.popular_llms import ( - OpenAIChatLLM as OpenAIChat, -) -from swarms.models.popular_llms import ( - OpenAILLM as OpenAI, -) -from swarms.models.popular_llms import ReplicateChat as Replicate -from swarms.models.qwen import QwenVLMultiModal # noqa: E402 -from swarms.models.sampling_params import SamplingParams, SamplingType -from swarms.models.together import TogetherLLM # noqa: E402 -from swarms.models.model_types import ( # noqa: E402 - AudioModality, - ImageModality, - MultimodalData, - TextModality, - VideoModality, -) -from swarms.models.vilt import Vilt # noqa: E402 -from swarms.models.popular_llms import 
FireWorksAI -from swarms.models.openai_function_caller import OpenAIFunctionCaller - -__all__ = [ - "BaseEmbeddingModel", - "BaseLLM", - "BaseMultiModalModel", - "Fuyu", - "GPT4VisionAPI", - "HuggingfaceLLM", - "Idefics", - "Kosmos", - "LayoutLMDocumentQA", - "LavaMultiModal", - "Nougat", - "Palm", - "OpenAITTS", - "Anthropic", - "AzureOpenAI", - "Cohere", - "OpenAIChat", - "OpenAI", - "OctoAIChat", - "QwenVLMultiModal", - "Replicate", - "SamplingParams", - "SamplingType", - "TogetherLLM", - "AudioModality", - "ImageModality", - "MultimodalData", - "TextModality", - "VideoModality", - "Vilt", - "OpenAIEmbeddings", - "llama3Hosted", - "FireWorksAI", - "OpenAIFunctionCaller", -] diff --git a/swarms/models/base_embedding_model.py b/swarms/models/base_embedding_model.py deleted file mode 100644 index 5bcf2a12..00000000 --- a/swarms/models/base_embedding_model.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import annotations - -from abc import ABC, abstractmethod -from dataclasses import dataclass - -import numpy as np -from typing import Callable -from swarms.artifacts.text_artifact import TextArtifact -from swarms.utils.exponential_backoff import ExponentialBackoffMixin - - -@dataclass -class BaseEmbeddingModel( - ExponentialBackoffMixin, - ABC, - # SerializableMixin -): - """ - Attributes: - model: The name of the model to use. - tokenizer: An instance of `BaseTokenizer` to use when calculating tokens. 
- """ - - model: str = None - tokenizer: Callable = None - chunker: Callable = None - - def embed_text_artifact( - self, artifact: TextArtifact - ) -> list[float]: - return self.embed_string(artifact.to_text()) - - def embed_string(self, string: str) -> list[float]: - for attempt in self.retrying(): - with attempt: - if ( - self.tokenizer - and self.tokenizer.count_tokens(string) - > self.tokenizer.max_tokens - ): - return self._embed_long_string(string) - else: - return self.try_embed_chunk(string) - - else: - raise RuntimeError("Failed to embed string.") - - @abstractmethod - def try_embed_chunk(self, chunk: str) -> list[float]: ... - - def _embed_long_string(self, string: str) -> list[float]: - """Embeds a string that is too long to embed in one go.""" - chunks = self.chunker.chunk(string) - - embedding_chunks = [] - length_chunks = [] - for chunk in chunks: - embedding_chunks.append(self.try_embed_chunk(chunk.value)) - length_chunks.append(len(chunk)) - - # generate weighted averages - embedding_chunks = np.average( - embedding_chunks, axis=0, weights=length_chunks - ) - - # normalize length to 1 - embedding_chunks = embedding_chunks / np.linalg.norm( - embedding_chunks - ) - - return embedding_chunks.tolist() diff --git a/swarms/models/base_llm.py b/swarms/models/base_llm.py deleted file mode 100644 index c9e47388..00000000 --- a/swarms/models/base_llm.py +++ /dev/null @@ -1,415 +0,0 @@ -import asyncio -import logging -import os -import time -from abc import abstractmethod -from typing import List, Optional -from swarms.structs.base_structure import BaseStructure - - -class BaseLLM(BaseStructure): - """Abstract Language Model that defines the interface for all language models - - Args: - model_name (Optional[str], optional): _description_. Defaults to None. - max_tokens (Optional[int], optional): _description_. Defaults to None. - max_length (Optional[int], optional): _description_. Defaults to None. - temperature (Optional[float], optional): _description_. 
Defaults to None. - top_k (Optional[float], optional): _description_. Defaults to None. - top_p (Optional[float], optional): _description_. Defaults to None. - system_prompt (Optional[str], optional): _description_. Defaults to None. - beam_width (Optional[int], optional): _description_. Defaults to None. - num_return_sequences (Optional[int], optional): _description_. Defaults to None. - seed (Optional[int], optional): _description_. Defaults to None. - frequency_penalty (Optional[float], optional): _description_. Defaults to None. - presence_penalty (Optional[float], optional): _description_. Defaults to None. - stop_token (Optional[str], optional): _description_. Defaults to None. - length_penalty (Optional[float], optional): _description_. Defaults to None. - role (Optional[str], optional): _description_. Defaults to None. - do_sample (Optional[bool], optional): _description_. Defaults to None. - early_stopping (Optional[bool], optional): _description_. Defaults to None. - num_beams (Optional[int], optional): _description_. Defaults to None. - repition_penalty (Optional[float], optional): _description_. Defaults to None. - pad_token_id (Optional[int], optional): _description_. Defaults to None. - eos_token_id (Optional[int], optional): _description_. Defaults to None. - bos_token_id (Optional[int], optional): _description_. Defaults to None. - device (Optional[str], optional): _description_. Defaults to None. 
- *args: _description_ - **kwargs: _description_ - - - """ - - def __init__( - self, - model_id: Optional[str] = None, - model_name: Optional[str] = None, - max_tokens: Optional[int] = None, - max_length: Optional[int] = None, - temperature: Optional[float] = None, - top_k: Optional[float] = None, - top_p: Optional[float] = None, - system_prompt: Optional[str] = None, - beam_width: Optional[int] = None, - num_return_sequences: Optional[int] = None, - seed: Optional[int] = None, - frequency_penalty: Optional[float] = None, - presence_penalty: Optional[float] = None, - stop_token: Optional[str] = None, - length_penalty: Optional[float] = None, - role: Optional[str] = None, - do_sample: Optional[bool] = None, - early_stopping: Optional[bool] = None, - num_beams: Optional[int] = None, - repition_penalty: Optional[float] = None, - pad_token_id: Optional[int] = None, - eos_token_id: Optional[int] = None, - bos_token_id: Optional[int] = None, - device: Optional[str] = None, - freq_penalty: Optional[float] = None, - stop_token_id: Optional[int] = None, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model_id = model_id - self.model_name = model_name - self.max_tokens = max_tokens - self.temperature = temperature - self.top_k = top_k - self.top_p = top_p - self.system_prompt = system_prompt - self.beam_width = beam_width - self.num_return_sequences = num_return_sequences - self.seed = seed - self.frequency_penalty = frequency_penalty - self.presence_penalty = presence_penalty - self.stop_token = stop_token - self.length_penalty = length_penalty - self.role = role - self.max_length = max_length - self.do_sample = do_sample - self.early_stopping = early_stopping - self.num_beams = num_beams - self.repition_penalty = repition_penalty - self.pad_token_id = pad_token_id - self.eos_token_id = eos_token_id - self.bos_token_id = bos_token_id - self.device = device - self.frequency_penalty = freq_penalty - self.stop_token_id = stop_token_id - - # Attributes - 
self.history = "" - self.start_time = None - self.end_time = None - self.history = [] - - @abstractmethod - def run(self, task: Optional[str] = None, *args, **kwargs) -> str: - """generate text using language model""" - - async def arun(self, task: Optional[str] = None, *args, **kwargs): - """Asynchronous run - - Args: - task (Optional[str], optional): _description_. Defaults to None. - """ - loop = asyncio.get_event_loop() - result = await loop.run_in_executor(None, self.run, task) - return result - - def batch_run(self, tasks: List[str], *args, **kwargs): - """Batch run with language model - - Args: - tasks (List[str]): _description_ - - Returns: - _type_: _description_ - """ - return [self.run(task) for task in tasks] - - async def abatch_run(self, tasks: List[str], *args, **kwargs): - """Asynchronous batch run with language model - - Args: - tasks (List[str]): _description_ - - Returns: - _type_: _description_ - """ - return await asyncio.gather( - *(self.arun(task) for task in tasks) - ) - - def chat(self, task: str, history: str = "") -> str: - """Chat with the model""" - complete_task = ( - task + " | " + history - ) # Delimiter for clarity - return self.run(complete_task) - - def __call__(self, task: str) -> str: - """Call the model""" - return self.run(task) - - def _tokens_per_second(self) -> float: - """Tokens per second""" - elapsed_time = self.end_time - self.start_time - if elapsed_time == 0: - return float("inf") - return self._num_tokens() / elapsed_time - - # def _num_tokens(self, text: str) -> int: - # """Number of tokens""" - # tokenizer = self.tokenizer - # return count_tokens(text) - - def _time_for_generation(self, task: str) -> float: - """Time for Generation""" - self.start_time = time.time() - self.run(task) - self.end_time = time.time() - return self.end_time - self.start_time - - def generate_summary(self, text: str) -> str: - """Generate Summary""" - - def set_temperature(self, value: float): - """Set Temperature""" - self.temperature = 
value - - def set_max_tokens(self, value: int): - """Set new max tokens""" - self.max_tokens = value - - def clear_history(self): - """Clear history""" - self.history = [] - - def enable_logging(self, log_file: str = "model.log"): - """Initialize logging for the model.""" - logging.basicConfig(filename=log_file, level=logging.INFO) - self.log_file = log_file - - def log_event(self, message: str): - """Log an event.""" - logging.info( - f"{time.strftime('%Y-%m-%d %H:%M:%S')} - {message}" - ) - - def save_checkpoint(self, checkpoint_dir: str = "checkpoints"): - """Save the model state.""" - # This is a placeholder for actual checkpointing logic. - if not os.path.exists(checkpoint_dir): - os.makedirs(checkpoint_dir) - checkpoint_path = os.path.join( - checkpoint_dir, - f'checkpoint_{time.strftime("%Y%m%d-%H%M%S")}.ckpt', - ) - # Save model state to checkpoint_path - self.log_event(f"Model checkpoint saved at {checkpoint_path}") - - def load_checkpoint(self, checkpoint_path: str): - """Load the model state from a checkpoint.""" - # This is a placeholder for actual loading logic. - # Load model state from checkpoint_path - self.log_event(f"Model state loaded from {checkpoint_path}") - - def toggle_creative_mode(self, enable: bool): - """Toggle creative mode for the model.""" - self.creative_mode = enable - self.log_event( - f"Creative mode {'enabled' if enable else 'disabled'}." - ) - - def track_resource_utilization(self): - """Track and report resource utilization.""" - # This is a placeholder for actual tracking logic. - # Logic to track CPU, memory, etc. 
- utilization_report = "Resource utilization report here" - return utilization_report - - def get_generation_time(self) -> float: - """Get generation time""" - if self.start_time and self.end_time: - return self.end_time - self.start_time - return 0 - - def set_max_length(self, max_length: int): - """Set max length - - Args: - max_length (int): _description_ - """ - self.max_length = max_length - - def set_model_name(self, model_name: str): - """Set model name - - Args: - model_name (str): _description_ - """ - self.model_name = model_name - - def set_frequency_penalty(self, frequency_penalty: float): - """Set frequency penalty - - Args: - frequency_penalty (float): _description_ - """ - self.frequency_penalty = frequency_penalty - - def set_presence_penalty(self, presence_penalty: float): - """Set presence penalty - - Args: - presence_penalty (float): _description_ - """ - self.presence_penalty = presence_penalty - - def set_stop_token(self, stop_token: str): - """Set stop token - - Args: - stop_token (str): _description_ - """ - self.stop_token = stop_token - - def set_length_penalty(self, length_penalty: float): - """Set length penalty - - Args: - length_penalty (float): _description_ - """ - self.length_penalty = length_penalty - - def set_role(self, role: str): - """Set role - - Args: - role (str): _description_ - """ - self.role = role - - def set_top_k(self, top_k: int): - """Set top k - - Args: - top_k (int): _description_ - """ - self.top_k = top_k - - def set_top_p(self, top_p: float): - """Set top p - - Args: - top_p (float): _description_ - """ - self.top_p = top_p - - def set_num_beams(self, num_beams: int): - """Set num beams - - Args: - num_beams (int): _description_ - """ - self.num_beams = num_beams - - def set_do_sample(self, do_sample: bool): - """set do sample - - - Args: - do_sample (bool): _description_ - """ - self.do_sample = do_sample - - def set_early_stopping(self, early_stopping: bool): - """set early stopping - - Args: - early_stopping 
(bool): _description_ - """ - self.early_stopping = early_stopping - - def set_seed(self, seed: int): - """Set seed - - Args: - seed ([type]): [description] - """ - self.seed = seed - - def set_device(self, device: str): - """Set device - - Args: - device (str): _description_ - """ - self.device = device - - def metrics(self) -> str: - """ - Metrics - - Returns: - str: _description_ - """ - _sec_to_first_token = self._sec_to_first_token() - _tokens_per_second = self._tokens_per_second() - _num_tokens = self._num_tokens(self.history) - _time_for_generation = self._time_for_generation(self.history) - - return f""" - SEC TO FIRST TOKEN: {_sec_to_first_token} - TOKENS/SEC: {_tokens_per_second} - TOKENS: {_num_tokens} - Tokens/SEC: {_time_for_generation} - """ - - def time_to_first_token(self, prompt: str) -> float: - """Time to first token - - Args: - prompt (str): _description_ - - Returns: - float: _description_ - """ - start_time = time.time() - self.track_resource_utilization( - prompt - ) # assuming `generate` is a method that generates tokens - first_token_time = time.time() - return first_token_time - start_time - - def generation_latency(self, prompt: str) -> float: - """generation latency - - Args: - prompt (str): _description_ - - Returns: - float: _description_ - """ - start_time = time.time() - self.run(prompt) - end_time = time.time() - return end_time - start_time - - def throughput(self, prompts: List[str]) -> float: - """throughput - - Args: - prompts (): _description_ - - Returns: - float: _description_ - """ - start_time = time.time() - for prompt in prompts: - self.run(prompt) - end_time = time.time() - return len(prompts) / (end_time - start_time) diff --git a/swarms/models/base_multimodal_model.py b/swarms/models/base_multimodal_model.py deleted file mode 100644 index 96b63002..00000000 --- a/swarms/models/base_multimodal_model.py +++ /dev/null @@ -1,329 +0,0 @@ -import asyncio -import base64 -import concurrent.futures -import time -from abc import 
abstractmethod -from concurrent.futures import ThreadPoolExecutor -from io import BytesIO -from typing import List, Optional, Tuple -from swarms.structs.base_structure import BaseStructure -import requests -from PIL import Image -from termcolor import colored - - -class BaseMultiModalModel(BaseStructure): - """ - Base class for multimodal models - - - Args: - model_name (Optional[str], optional): Model name. Defaults to None. - temperature (Optional[int], optional): Temperature. Defaults to 0.5. - max_tokens (Optional[int], optional): Max tokens. Defaults to 500. - max_workers (Optional[int], optional): Max workers. Defaults to 10. - top_p (Optional[int], optional): Top p. Defaults to 1. - top_k (Optional[int], optional): Top k. Defaults to 50. - beautify (Optional[bool], optional): Beautify. Defaults to False. - device (Optional[str], optional): Device. Defaults to "cuda". - max_new_tokens (Optional[int], optional): Max new tokens. Defaults to 500. - retries (Optional[int], optional): Retries. Defaults to 3. 
- - Examples: - >>> from swarms.models.base_multimodal_model import BaseMultiModalModel - >>> model = BaseMultiModalModel() - >>> model.run("Generate a summary of this text") - >>> model.run("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png") - >>> model.run_batch(["Generate a summary of this text", "Generate a summary of this text"]) - >>> model.run_batch([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")]) - >>> model.run_batch_async(["Generate a summary of this text", "Generate a summary of this text"]) - >>> model.run_batch_async([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")]) - >>> model.run_batch_async_with_retries(["Generate a summary of this text", "Generate a summary of this text"]) - >>> model.run_batch_async_with_retries([("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png"), ("Generate a summary of this text", "https://www.google.com/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png")]) - >>> model.generate_summary("Generate a summary of this text") - >>> model.set_temperature(0.5) - >>> model.set_max_tokens(500) - >>> model.get_generation_time() - >>> model.get_chat_history() - >>> model.get_unique_chat_history() - >>> model.get_chat_history_length() - >>> model.get_unique_chat_history_length() - >>> model.get_chat_history_tokens() - >>> model.print_beautiful("Print this beautifully") - >>> model.stream("Stream this") - >>> model.unique_chat_history() - >>> model.clear_chat_history() - >>> 
model.get_img_from_web("https://www.google.com/images/branding/googlelogo/") - - """ - - def __init__( - self, - model_name: Optional[str] = None, - temperature: Optional[int] = 0.5, - max_tokens: Optional[int] = 500, - max_workers: Optional[int] = 10, - top_p: Optional[int] = 1, - top_k: Optional[int] = 50, - beautify: Optional[bool] = False, - device: Optional[str] = "cuda", - max_new_tokens: Optional[int] = 500, - retries: Optional[int] = 3, - system_prompt: Optional[str] = None, - meta_prompt: Optional[str] = None, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model_name = model_name - self.temperature = temperature - self.max_tokens = max_tokens - self.max_workers = max_workers - self.top_p = top_p - self.top_k = top_k - self.beautify = beautify - self.device = device - self.max_new_tokens = max_new_tokens - self.retries = retries - self.system_prompt = system_prompt - self.meta_prompt = meta_prompt - self.chat_history = [] - - @abstractmethod - def run( - self, - task: Optional[str] = None, - img: Optional[str] = None, - *args, - **kwargs, - ): - """Run the model""" - - def __call__( - self, - task: Optional[str] = None, - img: Optional[str] = None, - *args, - **kwargs, - ): - """Call the model - - Args: - task (str): _description_ - img (str): _description_ - - Returns: - _type_: _description_ - """ - return self.run(task, img, *args, **kwargs) - - async def arun(self, task: str, img: str, *args, **kwargs): - """Run the model asynchronously""" - - def get_img_from_web(self, img: str, *args, **kwargs): - """Get the image from the web""" - try: - response = requests.get(img) - response.raise_for_status() - image_pil = Image.open(BytesIO(response.content)) - return image_pil - except requests.RequestException as error: - print( - f"Error fetching image from {img} and error: {error}" - ) - return None - - def encode_img(self, img: str): - """Encode the image to base64""" - with open(img, "rb") as image_file: - return 
base64.b64encode(image_file.read()).decode("utf-8") - - def get_img(self, img: str): - """Get the image from the path""" - image_pil = Image.open(img) - return image_pil - - def clear_chat_history(self): - """Clear the chat history""" - self.chat_history = [] - - def run_many( - self, tasks: List[str], imgs: List[str], *args, **kwargs - ): - """ - Run the model on multiple tasks and images all at once using concurrent - - Args: - tasks (List[str]): List of tasks - imgs (List[str]): List of image paths - - Returns: - List[str]: List of responses - - - """ - # Instantiate the thread pool executor - with ThreadPoolExecutor( - max_workers=self.max_workers - ) as executor: - results = executor.map(self.run, tasks, imgs) - - # Print the results for debugging - for result in results: - print(result) - - def run_batch( - self, tasks_images: List[Tuple[str, str]] - ) -> List[str]: - """Process a batch of tasks and images""" - with concurrent.futures.ThreadPoolExecutor() as executor: - futures = [ - executor.submit(self.run, task, img) - for task, img in tasks_images - ] - results = [future.result() for future in futures] - return results - - async def run_batch_async( - self, tasks_images: List[Tuple[str, str]] - ) -> List[str]: - """Process a batch of tasks and images asynchronously""" - loop = asyncio.get_event_loop() - futures = [ - loop.run_in_executor(None, self.run, task, img) - for task, img in tasks_images - ] - return await asyncio.gather(*futures) - - async def run_batch_async_with_retries( - self, tasks_images: List[Tuple[str, str]] - ) -> List[str]: - """Process a batch of tasks and images asynchronously with retries""" - loop = asyncio.get_event_loop() - futures = [ - loop.run_in_executor( - None, self.run_with_retries, task, img - ) - for task, img in tasks_images - ] - return await asyncio.gather(*futures) - - def unique_chat_history(self): - """Get the unique chat history""" - return list(set(self.chat_history)) - - def run_with_retries(self, task: str, img: 
str): - """Run the model with retries""" - for i in range(self.retries): - try: - return self.run(task, img) - except Exception as error: - print(f"Error with the request {error}") - continue - - def run_batch_with_retries( - self, tasks_images: List[Tuple[str, str]] - ): - """Run the model with retries""" - for i in range(self.retries): - try: - return self.run_batch(tasks_images) - except Exception as error: - print(f"Error with the request {error}") - continue - - def _tokens_per_second(self) -> float: - """Tokens per second""" - elapsed_time = self.end_time - self.start_time - if elapsed_time == 0: - return float("inf") - return self._num_tokens() / elapsed_time - - def _time_for_generation(self, task: str) -> float: - """Time for Generation""" - self.start_time = time.time() - self.run(task) - self.end_time = time.time() - return self.end_time - self.start_time - - @abstractmethod - def generate_summary(self, text: str) -> str: - """Generate Summary""" - - def set_temperature(self, value: float): - """Set Temperature""" - self.temperature = value - - def set_max_tokens(self, value: int): - """Set new max tokens""" - self.max_tokens = value - - def get_generation_time(self) -> float: - """Get generation time""" - if self.start_time and self.end_time: - return self.end_time - self.start_time - return 0 - - def get_chat_history(self): - """Get the chat history""" - return self.chat_history - - def get_unique_chat_history(self): - """Get the unique chat history""" - return list(set(self.chat_history)) - - def get_chat_history_length(self): - """Get the chat history length""" - return len(self.chat_history) - - def get_unique_chat_history_length(self): - """Get the unique chat history length""" - return len(list(set(self.chat_history))) - - def get_chat_history_tokens(self): - """Get the chat history tokens""" - return self._num_tokens() - - def print_beautiful(self, content: str, color: str = "cyan"): - """Print Beautifully with termcolor""" - content = 
colored(content, color) - print(content) - - def stream_response(self, text: str): - """Stream the output - - Args: - content (str): _description_ - """ - for chunk in text: - print(chunk) - - def meta_prompt(self): - """Meta Prompt - - Returns: - _type_: _description_ - """ - META_PROMPT = """ - For any labels or markings on an image that you reference in your response, please - enclose them in square brackets ([]) and list them explicitly. Do not use ranges; for - example, instead of '1 - 4', list as '[1], [2], [3], [4]'. These labels could be - numbers or letters and typically correspond to specific segments or parts of the image. - """ - return META_PROMPT - - def set_device(self, device): - """ - Changes the device used for inference. - - Parameters - ---------- - device : str - The new device to use for inference. - """ - self.device = device - self.model.to(self.device) - - def set_max_length(self, max_length): - """Set max_length""" - self.max_length = max_length diff --git a/swarms/models/base_tts.py b/swarms/models/base_tts.py deleted file mode 100644 index a92a3bb7..00000000 --- a/swarms/models/base_tts.py +++ /dev/null @@ -1,89 +0,0 @@ -import wave -from abc import abstractmethod -from typing import Optional - -from swarms.models.base_llm import BaseLLM - - -class BaseTTSModel(BaseLLM): - """Base class for all TTS models. - - Args: - BaseLLM (_type_): _description_ - model_name (_type_): _description_ - voice (_type_): _description_ - chunk_size (_type_): _description_ - save_to_file (bool, optional): _description_. Defaults to False. - saved_filepath (Optional[str], optional): _description_. Defaults to None. - - Raises: - NotImplementedError: _description_ - - Methods: - save: save the model to a file. - load: load the model from a file. - run: run the model on the given task. - __call__: call the model on the given task. - save_to_file: save the speech data to a file. 
- - """ - - def __init__( - self, - model_name, - voice, - chunk_size, - save_to_file: bool = False, - saved_filepath: Optional[str] = None, - ): - self.model_name = model_name - self.voice = voice - self.chunk_size = chunk_size - self.save_to_file = save_to_file - self.saved_filepath = saved_filepath - - def save(self, filepath: Optional[str] = None): - """Save the model to a file. - - Args: - filepath (Optional[str], optional): _description_. Defaults to None. - """ - - def load(self, filepath: Optional[str] = None): - """Load the model from a file. - - Args: - filepath (Optional[str], optional): _description_. Defaults to None. - """ - - @abstractmethod - def run(self, task: str, *args, **kwargs): - """Run the model on the given task. - - Args: - task (str): _description_ - """ - - def __call__(self, task: str, *args, **kwargs): - """Call the model on the given task. - - Args: - task (str): _description_ - - Returns: - _type_: _description_ - """ - return self.run(task, *args, **kwargs) - - def save_to_file(self, speech_data, filename): - """Save the speech data to a file. - - Args: - speech_data (bytes): The speech data. - filename (str): The path to the file where the speech will be saved. 
- """ - with wave.open(filename, "wb") as file: - file.setnchannels(1) - file.setsampwidth(2) - file.setframerate(22050) - file.writeframes(speech_data) diff --git a/swarms/models/base_ttv.py b/swarms/models/base_ttv.py deleted file mode 100644 index 00052ba5..00000000 --- a/swarms/models/base_ttv.py +++ /dev/null @@ -1,117 +0,0 @@ -import asyncio -from abc import abstractmethod -from concurrent.futures import ThreadPoolExecutor -from typing import List, Optional - -from diffusers.utils import export_to_video - -from swarms.models.base_llm import BaseLLM - - -class BaseTextToVideo(BaseLLM): - """BaseTextToVideo class represents prebuilt text-to-video models.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - @abstractmethod - def run(self, *args, **kwargs): - pass - - def __call__( - self, - task: Optional[str] = None, - img: Optional[str] = None, - *args, - **kwargs, - ): - """ - Performs forward pass on the input task and returns the path of the generated video. - - Args: - task (str): The task to perform. - - Returns: - str: The path of the generated video. - """ - return self.run(task, img, *args, **kwargs) - - def save_video_path( - self, video_path: Optional[str] = None, *args, **kwargs - ): - """Saves the generated video to the specified path. - - Args: - video_path (Optional[str], optional): _description_. Defaults to None. - - Returns: - str: The path of the generated video. - """ - return export_to_video(video_path, *args, **kwargs) - - def run_batched( - self, - tasks: List[str] = None, - imgs: List[str] = None, - *args, - **kwargs, - ): - # TODO: Implement batched inference - tasks = tasks or [] - imgs = imgs or [] - if len(tasks) != len(imgs): - raise ValueError( - "The number of tasks and images should be the same." 
- ) - return [ - self.run(task, img, *args, **kwargs) - for task, img in zip(tasks, imgs) - ] - - def run_concurrent_batched( - self, - tasks: List[str] = None, - imgs: List[str] = None, - *args, - **kwargs, - ): - tasks = tasks or [] - imgs = imgs or [] - if len(tasks) != len(imgs): - raise ValueError( - "The number of tasks and images should be the same." - ) - with ThreadPoolExecutor(max_workers=4) as executor: - loop = asyncio.get_event_loop() - tasks = [ - loop.run_in_executor( - executor, self.run, task, img, *args, **kwargs - ) - for task, img in zip(tasks, imgs) - ] - return loop.run_until_complete(asyncio.gather(*tasks)) - - # Run the model in async mode - def arun( - self, - task: Optional[str] = None, - img: Optional[str] = None, - *args, - **kwargs, - ): - loop = asyncio.get_event_loop() - return loop.run_until_complete( - self.run(task, img, *args, **kwargs) - ) - - def arun_batched( - self, - tasks: List[str] = None, - imgs: List[str] = None, - *args, - **kwargs, - ): - loop = asyncio.get_event_loop() - return loop.run_until_complete( - self.run_batched(tasks, imgs, *args, **kwargs) - ) diff --git a/swarms/models/cog_vlm.py b/swarms/models/cog_vlm.py deleted file mode 100644 index ff78936c..00000000 --- a/swarms/models/cog_vlm.py +++ /dev/null @@ -1,528 +0,0 @@ -import base64 -import os -import time -from io import BytesIO -from typing import List, Literal, Optional, Tuple, Union - -import torch -from PIL import Image -from pydantic import BaseModel, Field -from transformers import ( - AutoModelForCausalLM, - LlamaTokenizer, - TextIteratorStreamer, -) - -from swarms.models.base_multimodal_model import BaseMultiModalModel -from swarms.utils.loguru_logger import logger - -MODEL_PATH = "THUDM/cogvlm-chat-hf" -TOKENIZER_PATH = "lmsys/vicuna-7b-v1.5" -DEVICE = "cuda" if torch.cuda.is_available() else "cpu" -QUANT_ENABLED = False - - -class ImageUrl(BaseModel): - url: str - - -class TextContent(BaseModel): - type: Literal["text"] - text: str - - -class 
ImageUrlContent(BaseModel): - type: Literal["image_url"] - image_url: ImageUrl - - -ContentItem = Union[TextContent, ImageUrlContent] - - -class ChatMessageInput(BaseModel): - role: Literal["user", "assistant", "system"] - content: Union[str, List[ContentItem]] - name: Optional[str] = None - - -class ChatMessageResponse(BaseModel): - role: Literal["assistant"] - content: str = None - name: Optional[str] = None - - -class DeltaMessage(BaseModel): - role: Optional[Literal["user", "assistant", "system"]] = None - content: Optional[str] = None - - -class ChatCompletionRequest(BaseModel): - model: str - messages: List[ChatMessageInput] - temperature: Optional[float] = 0.8 - top_p: Optional[float] = 0.8 - max_tokens: Optional[int] = None - stream: Optional[bool] = False - # Additional parameters - repetition_penalty: Optional[float] = 1.0 - - -class ChatCompletionResponseChoice(BaseModel): - index: int - message: ChatMessageResponse - - -class ChatCompletionResponseStreamChoice(BaseModel): - index: int - delta: DeltaMessage - - -class UsageInfo(BaseModel): - prompt_tokens: int = 0 - total_tokens: int = 0 - completion_tokens: Optional[int] = 0 - - -class ChatCompletionResponse(BaseModel): - model: str - object: Literal["chat.completion", "chat.completion.chunk"] - choices: List[ - Union[ - ChatCompletionResponseChoice, - ChatCompletionResponseStreamChoice, - ] - ] - created: Optional[int] = Field( - default_factory=lambda: int(time.time()) - ) - usage: Optional[UsageInfo] = None - - -# async def create_chat_completion(request: ChatCompletionRequest): -# global model, tokenizer - -# gen_params = dict( -# messages=request.messages, -# temperature=request.temperature, -# top_p=request.top_p, -# max_tokens=request.max_tokens or 1024, -# echo=False, -# stream=request.stream, -# ) - -# # if request.stream: -# # predict(request.model, gen_params) -# # response = generate_cogvlm(model, tokenizer, gen_params) - -# usage = UsageInfo() - -# message = ChatMessageResponse( -# 
role="assistant", -# content=response["text"], -# ) -# logger.debug(f"==== message ====\n{message}") -# choice_data = ChatCompletionResponseChoice( -# index=0, -# message=message, -# ) -# task_usage = UsageInfo.model_validate(response["usage"]) -# for usage_key, usage_value in task_usage.model_dump().items(): -# setattr( -# usage, usage_key, getattr(usage, usage_key) + usage_value -# ) -# return ChatCompletionResponse( -# model=request.model, -# choices=[choice_data], -# object="chat.completion", -# usage=usage, -# ) - - -class CogVLMMultiModal(BaseMultiModalModel): - """ - Initializes the CogVLM model. - - Args: - model_name (str): The path or name of the pre-trained model. - tokenizer (str): The path or name of the tokenizer. - device (str): The device to run the model on. - quantize (bool): Whether to enable quantization. - torch_type (str): The torch data type to use. - temperature (float): The temperature for sampling. - top_p (float): The top-p value for sampling. - max_tokens (int): The maximum number of tokens to generate. - echo (bool): Whether to echo the input text. - stream (bool): Whether to stream the output. - repetition_penalty (float): The repetition penalty for sampling. - do_sample (bool): Whether to use sampling during generation. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Methods: - run: Generates a response using the CogVLM model. - generate_stream_cogvlm: Generates a stream of responses using the CogVLM model in inference mode. - process_history_and_images: Processes history messages to extract text, identify the last user query, and convert base64 encoded image URLs to PIL images. 
- - Example: - >>> model = CogVLMMultiModal() - >>> response = model("Describe this image with meticlous details.", "https://example.com/image.jpg") - >>> print(response) - """ - - def __init__( - self, - model_name: str = MODEL_PATH, - tokenizer: str = TOKENIZER_PATH, - device: str = DEVICE, - quantize: bool = QUANT_ENABLED, - torch_type: str = "float16", - temperature: float = 0.5, - top_p: float = 0.9, - max_tokens: int = 3500, - echo: bool = False, - stream: bool = False, - repetition_penalty: float = 1.0, - do_sample: bool = True, - *args, - **kwargs, - ): - super().__init__() - self.model_name = model_name - self.device = device - self.tokenizer = tokenizer - self.device = device - self.quantize = quantize - self.torch_type = torch_type - self.temperature = temperature - self.top_p = top_p - self.max_tokens = max_tokens - self.echo = echo - self.stream = stream - self.repetition_penalty = repetition_penalty - self.do_sample = do_sample - - if os.environ.get("QUANT_ENABLED"): - pass - else: - with torch.cuda.device(device): - __, total_bytes = torch.cuda.mem_get_info() - total_gb = total_bytes / (1 << 30) - if total_gb < 40: - pass - - torch.cuda.empty_cache() - - self.tokenizer = LlamaTokenizer.from_pretrained( - tokenizer, trust_remote_code=True - ) - - if ( - torch.cuda.is_available() - and torch.cuda.get_device_capability()[0] >= 8 - ): - torch_type = torch.bfloat16 - else: - torch_type = torch.float16 - - print( - f"========Use torch type as:{torch_type} with" - f" device:{device}========\n\n" - ) - - if "cuda" in device: - if QUANT_ENABLED: - self.model = AutoModelForCausalLM.from_pretrained( - model_name, - load_in_4bit=True, - trust_remote_code=True, - torch_dtype=torch_type, - low_cpu_mem_usage=True, - *args, - **kwargs, - ).eval() - else: - self.model = ( - AutoModelForCausalLM.from_pretrained( - model_name, - load_in_4bit=False, - trust_remote_code=True, - torch_dtype=torch_type, - low_cpu_mem_usage=True, - *args, - **kwargs, - ) - .to(device) - 
.eval() - ) - - else: - self.model = ( - AutoModelForCausalLM.from_pretrained( - model_name, - trust_remote_code=True, - *args, - **kwargs, - ) - .float() - .to(device) - .eval() - ) - - def run(self, task: str, img: str, *args, **kwargs): - """ - Generates a response using the CogVLM model. It processes the chat history and image data, if any, - and then invokes the model to generate a response. - """ - messages = [task] - - params = dict( - messages=messages, - temperature=self.temperature, - repitition_penalty=self.repetition_penalty, - top_p=self.top_p, - max_new_tokens=self.max_tokens, - ) - - for response in self.generate_stream_cogvlm(params): - pass - - return response - - @torch.inference_mode() - def generate_stream_cogvlm( - self, - params: dict, - ): - """ - Generates a stream of responses using the CogVLM model in inference mode. - It's optimized to handle continuous input-output interactions with the model in a streaming manner. - """ - messages = params["messages"] - temperature = float(params.get("temperature", 1.0)) - repetition_penalty = float( - params.get("repetition_penalty", 1.0) - ) - top_p = float(params.get("top_p", 1.0)) - max_new_tokens = int(params.get("max_tokens", 256)) - query, history, image_list = self.process_history_and_images( - messages - ) - - logger.debug(f"==== request ====\n{query}") - - input_by_model = self.model.build_conversation_input_ids( - self.tokenizer, - query=query, - history=history, - images=[image_list[-1]], - ) - inputs = { - "input_ids": ( - input_by_model["input_ids"] - .unsqueeze(0) - .to(self.device) - ), - "token_type_ids": ( - input_by_model["token_type_ids"] - .unsqueeze(0) - .to(self.device) - ), - "attention_mask": ( - input_by_model["attention_mask"] - .unsqueeze(0) - .to(self.device) - ), - "images": [ - [ - input_by_model["images"][0] - .to(self.device) - .to(self.torch_type) - ] - ], - } - if ( - "cross_images" in input_by_model - and input_by_model["cross_images"] - ): - inputs["cross_images"] = 
[ - [ - input_by_model["cross_images"][0] - .to(self.device) - .to(self.torch_type) - ] - ] - - input_echo_len = len(inputs["input_ids"][0]) - streamer = TextIteratorStreamer( - tokenizer=self.tokenizer, - timeout=60.0, - skip_promptb=True, - skip_special_tokens=True, - ) - gen_kwargs = { - "repetition_penalty": repetition_penalty, - "max_new_tokens": max_new_tokens, - "do_sample": True if temperature > 1e-5 else False, - "top_p": top_p if temperature > 1e-5 else 0, - "streamer": streamer, - } - if temperature > 1e-5: - gen_kwargs["temperature"] = temperature - - total_len = 0 - generated_text = "" - with torch.no_grad(): - self.model.generate(**inputs, **gen_kwargs) - for next_text in streamer: - generated_text += next_text - yield { - "text": generated_text, - "usage": { - "prompt_tokens": input_echo_len, - "completion_tokens": ( - total_len - input_echo_len - ), - "total_tokens": total_len, - }, - } - ret = { - "text": generated_text, - "usage": { - "prompt_tokens": input_echo_len, - "completion_tokens": total_len - input_echo_len, - "total_tokens": total_len, - }, - } - yield ret - - def process_history_and_images( - self, - messages: List[ChatMessageInput], - ) -> Tuple[ - Optional[str], - Optional[List[Tuple[str, str]]], - Optional[List[Image.Image]], - ]: - """ - Process history messages to extract text, identify the last user query, - and convert base64 encoded image URLs to PIL images. - - Args: - messages(List[ChatMessageInput]): List of ChatMessageInput objects. - return: A tuple of three elements: - - The last user query as a string. - - Text history formatted as a list of tuples for the model. - - List of PIL Image objects extracted from the messages. 
- """ - formatted_history = [] - image_list = [] - last_user_query = "" - - for i, message in enumerate(messages): - role = message.role - content = message.content - - # Extract text content - if isinstance(content, list): # text - text_content = " ".join( - item.text - for item in content - if isinstance(item, TextContent) - ) - else: - text_content = content - - # Extract image data - if isinstance(content, list): # image - for item in content: - if isinstance(item, ImageUrlContent): - image_url = item.image_url.url - if image_url.startswith( - "data:image/jpeg;base64," - ): - base64_encoded_image = image_url.split( - "data:image/jpeg;base64," - )[1] - image_data = base64.b64decode( - base64_encoded_image - ) - image = Image.open( - BytesIO(image_data) - ).convert("RGB") - image_list.append(image) - - # Format history - if role == "user": - if i == len(messages) - 1: - last_user_query = text_content - else: - formatted_history.append((text_content, "")) - elif role == "assistant": - if formatted_history: - if formatted_history[-1][1] != "": - raise AssertionError( - "the last query is answered. answer" - f" again. {formatted_history[-1][0]}," - f" {formatted_history[-1][1]}," - f" {text_content}" - ) - formatted_history[-1] = ( - formatted_history[-1][0], - text_content, - ) - else: - raise AssertionError( - "assistant reply before user" - ) - else: - raise AssertionError(f"unrecognized role: {role}") - - return last_user_query, formatted_history, image_list - - async def predict(self, params: dict): - """ - Handle streaming predictions. It continuously generates responses for a given input stream. - This is particularly useful for real-time, continuous interactions with the model. 
- """ - - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=DeltaMessage(role="assistant"), - finish_reason=None, - ) - chunk = ChatCompletionResponse( - model=self.model_name, - choices=[choice_data], - object="chat.completion.chunk", - ) - yield f"{chunk.model_dump_json(exclude_unset=True)}" - - previous_text = "" - for new_response in self.generate_stream_cogvlm(params): - decoded_unicode = new_response["text"] - delta_text = decoded_unicode[len(previous_text) :] - previous_text = decoded_unicode - delta = DeltaMessage( - content=delta_text, - role="assistant", - ) - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=delta, - ) - chunk = ChatCompletionResponse( - model=self.model_name, - choices=[choice_data], - object="chat.completion.chunk", - ) - yield f"{chunk.model_dump_json(exclude_unset=True)}" - choice_data = ChatCompletionResponseStreamChoice( - index=0, - delta=DeltaMessage(), - ) - chunk = ChatCompletionResponse( - model=self.model_name, - choices=[choice_data], - object="chat.completion.chunk", - ) - yield f"{chunk.model_dump_json(exclude_unset=True)}" diff --git a/swarms/models/dalle3.py b/swarms/models/dalle3.py deleted file mode 100644 index 0e02c3d6..00000000 --- a/swarms/models/dalle3.py +++ /dev/null @@ -1,367 +0,0 @@ -import concurrent.futures -import logging -import os -import uuid -from dataclasses import dataclass -from io import BytesIO -from typing import List - -import backoff -import openai -import requests -from cachetools import TTLCache -from dotenv import load_dotenv -from openai import OpenAI -from PIL import Image -from pydantic import field_validator -from termcolor import colored - -load_dotenv() - -# Configure Logging -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) - - -def handle_errors(self, function): - def wrapper(*args, **kwargs): - try: - return function(*args, **kwargs) - except Exception as error: - logger.error(error) - raise - - return wrapper - - 
@dataclass
class Dalle3:
    """
    Wrapper around the OpenAI DALL·E 3 image-generation API.

    Generates an image for a text task, downloads it into ``save_path``
    and caches the resulting local path per task for one hour.

    Attributes:
    -----------
    model: API model name.
    img: optional input image path; converted to raw image bytes on init.
    size / quality / n: generation parameters forwarded to the API.
    max_retries: retry budget for ``rate_limited_call``.
    max_time_seconds: backoff time budget for ``__call__``.
    save_path / save_folder / image_format: local persistence settings.
    dashboard: when True, print a settings dashboard before each call.

    Example:
    --------
    >>> dalle3 = Dalle3()
    >>> image_path = dalle3("A painting of a dog")
    >>> print(image_path)
    images/1b0c2a9e-....png
    """

    model: str = "dall-e-3"
    img: str = None
    size: str = "1024x1024"
    max_retries: int = 3
    quality: str = "standard"
    openai_api_key: str = None or os.getenv("OPENAI_API_KEY")
    n: int = 1
    save_path: str = "images"
    max_time_seconds: int = 60
    save_folder: str = "images"
    image_format: str = "png"
    dashboard: bool = False

    def __post_init__(self):
        """Validate configuration and build per-instance client and cache."""
        if self.openai_api_key is None:
            raise ValueError("Please provide an openai api key")
        # Replaces the pydantic ``field_validator`` leftover that referenced a
        # nonexistent ``time_seconds`` field on this plain dataclass.
        if self.max_retries <= 0 or self.max_time_seconds <= 0:
            raise ValueError(
                "max_retries and max_time_seconds must be positive"
            )
        if self.img is not None:
            self.img = self.convert_to_bytesio(self.img)

        os.makedirs(self.save_path, exist_ok=True)

        # Instance-level: the originals were class attributes created at
        # class-definition time with the *default* API key, shared by every
        # instance regardless of the key actually passed in.
        self.client = OpenAI(api_key=self.openai_api_key)
        self.cache = TTLCache(maxsize=100, ttl=3600)

    def read_img(self, img: str):
        """Open the image at path ``img`` with PIL and return it."""
        return Image.open(img)

    def set_width_height(self, img: str, width: int, height: int):
        """Load the image at ``img`` and return it resized to width x height."""
        return self.read_img(img).resize((width, height))

    def convert_to_bytesio(self, img: str, format: str = "PNG"):
        """Load the image at path ``img`` and return its encoded bytes.

        The original passed the raw path string straight to ``Image.save``
        (which always raised); the file is now opened first.
        """
        image = self.read_img(img)
        byte_stream = BytesIO()
        image.save(byte_stream, format=format)
        return byte_stream.getvalue()

    @backoff.on_exception(
        backoff.expo, Exception, max_time=max_time_seconds
    )
    def __call__(self, task: str):
        """
        Generate an image for ``task``, save it locally and return its path.

        Results are cached per task; repeated calls return the cached path.

        Parameters:
        -----------
        task: the text prompt to render.

        Returns:
        --------
        str: local path of the downloaded image.

        Raises:
            openai.OpenAIError: propagated after logging the failure.
        """
        if self.dashboard:
            self.print_dashboard()
        if task in self.cache:
            return self.cache[task]
        try:
            # Call the Dalle3 API
            response = self.client.images.generate(
                model=self.model,
                prompt=task,
                size=self.size,
                quality=self.quality,
                n=self.n,
            )
            # Extract the image url from the response
            img_url = response.data[0].url

            filename = f"{self._generate_uuid()}.{self.image_format}"

            # Download and save the image
            self._download_image(img_url, filename)

            img_path = os.path.join(self.save_path, filename)
            self.cache[task] = img_path
            return img_path
        except openai.OpenAIError as error:
            print(
                colored(
                    (
                        f"Error running Dalle3: {error} try"
                        " optimizing your api key and or try again"
                    ),
                    "red",
                )
            )
            raise error

    def _generate_image_name(self, task: str):
        """Build a filesystem-safe file name from ``task``."""
        sanitized_task = "".join(
            char for char in task if char.isalnum() or char in " _ -"
        ).rstrip()
        return f"{sanitized_task}.{self.image_format}"

    def _download_image(self, img_url: str, filename: str):
        """
        Download ``img_url`` and save it as ``filename`` under ``save_path``.

        Args:
            img_url (str): URL of the image to download.
            filename (str): Filename to save the image.

        Raises:
            ValueError: when the HTTP status is not 200.
        """
        full_path = os.path.join(self.save_path, filename)
        response = requests.get(img_url)
        if response.status_code == 200:
            with open(full_path, "wb") as file:
                file.write(response.content)
        else:
            raise ValueError(
                f"Failed to download image from {img_url}"
            )

    def create_variations(self, img: str):
        """
        Create a variation of the image at path ``img``.

        Returns:
        --------
        str: image url generated by the Dalle3 API.
        """
        try:
            # ``with`` closes the file handle the original leaked.
            with open(img, "rb") as image_file:
                response = self.client.images.create_variation(
                    img=image_file, n=self.n, size=self.size
                )
            return response.data[0].url
        except (Exception, openai.OpenAIError) as error:
            print(
                colored(
                    (
                        f"Error running Dalle3: {error} try"
                        " optimizing your api key and or try again"
                    ),
                    "red",
                )
            )
            # ``http_status`` / ``error`` only exist on some OpenAI error
            # types; the original read them unconditionally and could raise
            # a secondary AttributeError while reporting.
            if hasattr(error, "http_status"):
                print(
                    colored(
                        f"Error running Dalle3: {error.http_status}",
                        "red",
                    )
                )
            if hasattr(error, "error"):
                print(
                    colored(f"Error running Dalle3: {error.error}", "red")
                )
            raise error

    def print_dashboard(self):
        """Print the current Dalle3 settings."""
        print(
            colored(
                f"""Dalle3 Dashboard: 
                    --------------------

                    Model: {self.model}
                    Image: {self.img}
                    Size: {self.size}
                    Max Retries: {self.max_retries}
                    Quality: {self.quality}
                    N: {self.n}
                    Save Path: {self.save_path}
                    Time Seconds: {self.max_time_seconds}
                    Save Folder: {self.save_folder}
                    Image Format: {self.image_format}
                    --------------------
                    """,
                "green",
            )
        )

    def process_batch_concurrently(
        self, tasks: List[str], max_workers: int = 5
    ):
        """
        Generate images for ``tasks`` concurrently.

        Args:
            tasks (List[str]): prompts to render.
            max_workers (int): thread-pool size.

        Returns:
            List[str]: local image paths, in completion order.
            (The original built this list but never returned it.)
        """
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers
        ) as executor:
            future_to_task = {
                executor.submit(self, task): task for task in tasks
            }
            results = []
            for future in concurrent.futures.as_completed(
                future_to_task
            ):
                task = future_to_task[future]
                try:
                    img_path = future.result()
                    results.append(img_path)
                    print(f"Task {task} completed: {img_path}")
                except Exception as error:
                    print(
                        colored(
                            (
                                f"Error running Dalle3: {error} try"
                                " optimizing your api key and or try"
                                " again"
                            ),
                            "red",
                        )
                    )
                    raise error
            return results

    def _generate_uuid(self):
        """Return a fresh uuid4 string."""
        return str(uuid.uuid4())

    def __repr__(self):
        """Repr; the original referenced a nonexistent ``image_url`` attr."""
        return f"Dalle3(model={self.model}, size={self.size})"

    def __str__(self):
        return self.__repr__()

    @backoff.on_exception(
        backoff.expo, Exception, max_tries=max_retries
    )
    def rate_limited_call(self, task: str):
        """Call the API with exponential-backoff retries (``max_retries``)."""
        return self.__call__(task)
class Fuyu(BaseMultiModalModel):
    """
    Fuyu multi-modal (text + image) model by Adept.

    Args:
        model_name (str, optional): HuggingFace model id. Defaults to "adept/fuyu-8b".
        device_map (str, optional): device placement passed to transformers. Defaults to "auto".
        max_new_tokens (int, optional): generation budget. Defaults to 500.
        *args: forwarded to the base class and model loader.
        **kwargs: forwarded to the base class and model loader.

    Examples:
        >>> from swarms.models import Fuyu
        >>> model = Fuyu()
        >>> model.run("Hello, world!", "https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG")
    """

    def __init__(
        self,
        model_name: str = "adept/fuyu-8b",
        device_map: str = "auto",
        max_new_tokens: int = 500,
        *args,
        **kwargs,
    ):
        super().__init__(model_name=model_name, *args, **kwargs)
        self.model_name = model_name
        self.device_map = device_map
        self.max_new_tokens = max_new_tokens

        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.image_processor = FuyuImageProcessor()
        self.processor = FuyuProcessor(
            image_processor=self.image_processor,
            tokenizer=self.tokenizer,
        )
        self.model = FuyuForCausalLM.from_pretrained(
            model_name,
            device_map=device_map,
            *args,
            **kwargs,
        )

    def get_img(self, img: str):
        """Open the image at path ``img`` with PIL and return it."""
        return Image.open(img)

    def run(self, text: str = None, img: str = None, *args, **kwargs):
        """Run text+image generation.

        Args:
            text (str): the textual prompt.
            img (str): path to the image file.

        Returns:
            str: the decoded model output. (The original returned
            ``print(...)``, i.e. always ``None``.)

        Raises:
            Exception: re-raised after logging; the original silently
            swallowed errors and returned ``None``.
        """
        try:
            image = self.get_img(img)
            model_inputs = self.processor(
                text=text,
                images=[image],
                device=self.device_map,
            )

            # Move every tensor input onto the target device.
            for key, value in model_inputs.items():
                model_inputs[key] = value.to(self.device_map)

            output = self.model.generate(
                max_new_tokens=self.max_new_tokens,
                *args,
                **model_inputs,
                **kwargs,
            )
            # NOTE(review): decoding only the last 7 tokens looks arbitrary —
            # presumably tuned for short answers; confirm against callers.
            decoded = self.processor.batch_decode(
                output[:, -7:],
                skip_special_tokens=True,
            )
            result = str(decoded)
            print(result)
            return result
        except Exception as error:
            print(
                colored(
                    (
                        "Error in"
                        f" {self.__class__.__name__} pipeline:"
                        f" {error}"
                    ),
                    "red",
                )
            )
            raise
# Helpers
def get_gemini_api_key_env():
    """Return the Gemini API key from the GEMINI_API_KEY env var.

    Raises:
        ValueError: when the variable is unset.

    Returns:
        str: the API key.
    """
    key = os.getenv("GEMINI_API_KEY")
    if key is None:
        raise ValueError("Please provide a Gemini API key")
    return str(key)


# Main class
class Gemini(BaseMultiModalModel):
    """Google Gemini multi-modal model wrapper.

    Args:
        model_name (str, optional): Defaults to "gemini-pro-vision".
        gemini_api_key (str, optional): API key; falls back to the
            GEMINI_API_KEY environment variable.
        return_safety (bool, optional): Defaults to False.
        candidates (bool, optional): Defaults to False.
        stream (bool, optional): Defaults to False.
        candidate_count (int, optional): Defaults to 1.
        transport (str, optional): Defaults to "rest".
        stop_sequence (list, optional): Defaults to ["x"].
        max_tokens (int, optional): Defaults to 100.
        temperature (float, optional): Defaults to 0.9.
        system_prompt (str, optional): prepended to every task when set.

    Methods:
        run: run the Gemini model
        process_img: build an inline-image payload
        chat: chat with the Gemini model
        list_models: list the Gemini models
        stream_tokens: stream the tokens
        process_img_pil: open an image with PIL

    Examples:
        >>> from swarms.models import Gemini
        >>> gemini = Gemini()
        >>> gemini.run(task="A dog", img="dog.png")
    """

    def __init__(
        self,
        model_name: str = "gemini-pro-vision",
        gemini_api_key: str = None,
        return_safety: bool = False,
        candidates: bool = False,
        stream: bool = False,
        candidate_count: int = 1,
        transport: str = "rest",
        stop_sequence=None,
        max_tokens: int = 100,
        temperature: float = 0.9,
        system_prompt: str = None,
        *args,
        **kwargs,
    ):
        super().__init__(model_name, *args, **kwargs)
        # The original default was the *function object*
        # ``get_gemini_api_key_env`` (never called), so the default path
        # configured the SDK with a function instead of a key string.
        if gemini_api_key is None or callable(gemini_api_key):
            gemini_api_key = get_gemini_api_key_env()

        self.model_name = model_name
        self.gemini_api_key = gemini_api_key
        self.safety = return_safety
        self.candidates = candidates
        self.stream = stream
        self.candidate_count = candidate_count
        # Avoid the shared mutable default the original used (["x"]).
        self.stop_sequence = (
            ["x"] if stop_sequence is None else stop_sequence
        )
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.system_prompt = system_prompt

        # Configure the API key (validated above, before use — the original
        # checked for a missing key only after configuring everything).
        genai.configure(
            api_key=gemini_api_key,
            transport=transport,
            *args,
            **kwargs,
        )

        # Prepare the generation config
        self.generation_config = GenerationConfig(
            candidate_count=candidate_count,
            # stop_sequence=stop_sequence,
            max_output_tokens=max_tokens,
            temperature=temperature,
            *args,
            **kwargs,
        )

        # Initialize the model
        self.model = genai.GenerativeModel(
            model_name, *args, **kwargs
        )

    def system_prompt_prep(
        self,
        task: str = None,
        *args,
        **kwargs,
    ):
        """Prepend the system prompt (if any) to ``task``.

        The original always wrapped the task, interpolating the literal
        string "None" when no system prompt was configured.
        """
        if self.system_prompt is None:
            return task
        return f"""
        
        {self.system_prompt}
        
        ######
        
        {task}
        
        """

    def run(
        self,
        task: str = None,
        img: str = None,
        *args,
        **kwargs,
    ) -> str:
        """Run the Gemini model.

        Args:
            task (str, optional): textual task. Defaults to None.
            img (str, optional): image path. Defaults to None.

        Returns:
            str: output from the model.
        """
        try:
            prepare_prompt = self.system_prompt_prep(task)
            if img:
                process_img = self.process_img_pil(img)
                response = self.model.generate_content(
                    contents=[prepare_prompt, process_img],
                    generation_config=self.generation_config,
                    stream=self.stream,
                    *args,
                    **kwargs,
                )
                return response.text
            else:
                response = self.model.generate_content(
                    prepare_prompt,
                    stream=self.stream,
                    *args,
                    **kwargs,
                )
                return response.text
        except Exception as error:
            print(f"Error running Gemini model: {error}")
            print(f"Please check the task and image: {task}, {img}")
            raise error

    def process_img(
        self,
        img: str = None,
        type: str = "image/png",
        *args,
        **kwargs,
    ):
        """Build the inline-image payload for the API.

        Args:
            img (str, optional): image path. Defaults to None.
            type (str, optional): mime type. Defaults to "image/png".

        Returns:
            list: a single-element list of {"mime_type", "data"} dicts.
            (The original built this payload and then dropped it.)

        Raises:
            ValueError: on missing image, type or API key.
        """
        try:
            if img is None:
                raise ValueError("Please provide an image to process")
            if type is None:
                raise ValueError("Please provide the image type")
            if self.gemini_api_key is None:
                raise ValueError("Please provide a Gemini API key")

            return [
                {"mime_type": type, "data": Path(img).read_bytes()}
            ]
        except Exception as error:
            print(f"Error processing image: {error}")
            raise

    def chat(
        self,
        task: str = None,
        img: str = None,
        *args,
        **kwargs,
    ) -> str:
        """Chat with the Gemini model.

        Args:
            task (str, optional): text message. Defaults to None.
            img (str, optional): follow-up image message. Defaults to None.

        Returns:
            str: the last reply's text. (The original returned ``None`` and
            sent ``img`` even when it was None.)
        """
        chat = self.model.start_chat()
        response = chat.send_message(task, *args, **kwargs)
        print(response.text)
        if img is not None:
            response = chat.send_message(img, *args, **kwargs)
        return response.text

    def list_models(self) -> str:
        """Print the names of models that support generateContent."""
        for m in genai.list_models():
            if "generateContent" in m.supported_generation_methods:
                print(m.name)

    def stream_tokens(self, content: str = None):
        """Print streamed chunks as they arrive.

        Args:
            content: an iterable of chunks with a ``.text`` attribute.
        """
        for chunk in content:
            print(chunk.text)
            print("_" * 80)

    def process_img_pil(self, img: str = None):
        """Open the image at path ``img`` with PIL and return it."""
        return Image.open(img)
class GPT4VisionAPI(BaseMultiModalModel):
    """
    REST wrapper around the OpenAI GPT-4 Vision chat endpoint.

    Parameters
    ----------
    openai_api_key : str
        The OpenAI API key. Defaults to the OPENAI_API_KEY environment variable.
    model_name : str
        Model to call. Defaults to "gpt-4-vision-preview".
    max_tokens : int
        The maximum number of tokens to generate. Defaults to 300.
    openai_proxy : str
        Endpoint URL for chat completions.

    Methods
    -------
    encode_image(img): encode a local image to base64.
    run(task, img): run the model and return the reply text (or raw JSON).
    __call__(task, img): run the model, printing the reply.

    Examples:
    ---------
    >>> from swarms.models import GPT4VisionAPI
    >>> llm = GPT4VisionAPI()
    >>> llm.run("What is the color of the object?", "image.jpeg")
    """

    def __init__(
        self,
        openai_api_key: str = openai_api_key,
        model_name: str = "gpt-4-vision-preview",
        logging_enabled: bool = False,
        max_workers: int = 10,
        max_tokens: int = 300,
        openai_proxy: str = "https://api.openai.com/v1/chat/completions",
        beautify: bool = False,
        streaming_enabled: Optional[bool] = False,
        meta_prompt: Optional[bool] = False,
        system_prompt: Optional[str] = gpt4_vision_system_prompt,
        *args,
        **kwargs,
    ):
        # The original called ``super(GPT4VisionAPI).__init__`` which skips
        # the base-class initializer entirely.
        super().__init__(*args, **kwargs)
        self.openai_api_key = openai_api_key
        self.logging_enabled = logging_enabled
        self.model_name = model_name
        self.max_workers = max_workers
        self.max_tokens = max_tokens
        self.openai_proxy = openai_proxy
        self.beautify = beautify
        self.streaming_enabled = streaming_enabled
        self.meta_prompt = meta_prompt
        self.system_prompt = system_prompt

        if self.logging_enabled:
            logging.basicConfig(level=logging.DEBUG)
        else:
            # Disable debug logs for requests and urllib3
            logging.getLogger("requests").setLevel(logging.WARNING)
            logging.getLogger("urllib3").setLevel(logging.WARNING)

        if self.meta_prompt:
            # The original referenced meta_prompt_init while the method was
            # commented out, raising AttributeError.
            self.system_prompt = self.meta_prompt_init()

    def encode_image(self, img: str):
        """Encode a local image file to base64; None when the file is missing."""
        if not os.path.exists(img):
            print(f"Image file not found: {img}")
            return None

        with open(img, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode("utf-8")

    def download_img_then_encode(self, img: str):
        """Download an image URL with requests and encode it to base64."""
        response = requests.get(img)
        return base64.b64encode(response.content).decode("utf-8")

    def _build_payload(self, task: str, base64_image: str, **kwargs):
        """Build the chat-completions payload for one task + one image."""
        return {
            "model": self.model_name,
            "messages": [
                {
                    # The system message content must be a plain string, not
                    # a list containing a bare string.
                    "role": "system",
                    "content": self.system_prompt,
                },
                {
                    "role": "user",
                    "content": [
                        {"type": "text", "text": task},
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": f"data:image/jpeg;base64,{base64_image}"
                            },
                        },
                    ],
                },
            ],
            "max_tokens": self.max_tokens,
            **kwargs,
        }

    def run(
        self,
        task: str = None,
        img: str = None,
        multi_imgs: list = None,
        return_json: bool = False,
        *args,
        **kwargs,
    ):
        """Run the model on one task + image.

        Returns the reply text, or the full response JSON when
        ``return_json`` is True. (The original posted to ``requests.post``
        with NO URL — every call raised — and returned the raw JSON in
        both branches.)

        NOTE(review): ``multi_imgs`` is accepted but unused, as in the
        original — confirm whether multi-image support was intended.
        """
        try:
            base64_image = self.encode_image(img)
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_api_key}",
            }
            payload = self._build_payload(task, base64_image, **kwargs)
            response = requests.post(
                self.openai_proxy, headers=headers, json=payload
            )

            response_json = response.json()

            if return_json is True:
                print(response_json)
                return response_json
            return response_json["choices"][0]["message"]["content"]

        except Exception as error:
            logger.error(
                f"Error with the request: {error}, make sure you"
                " double check input types and positions"
            )
            raise error

    def video_prompt(self, frames):
        """
        Build a prompt asking for a description of uploaded video frames.

        Parameters
        ----------
        frames : list
            A list of base64-encoded frames.

        Returns
        -------
        str: the prompt text.
        """
        PROMPT = f"""
        These are frames from a video that I want to upload. Generate a compelling description that I can upload along with the video:
        
        {frames}
        """
        return PROMPT

    def stream_response(self, content: str):
        """Print the output chunk by chunk."""
        for chunk in content:
            print(chunk)

    def __call__(
        self,
        task: Optional[str] = None,
        img: Optional[str] = None,
        *args,
        **kwargs,
    ):
        """Call the model, print the reply and return it.

        (The original authorized with the module-level key instead of
        ``self.openai_api_key`` and returned ``None``.)
        """
        try:
            base64_image = self.encode_image(img)
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_api_key}",
            }
            payload = self._build_payload(task, base64_image)
            response = requests.post(
                self.openai_proxy,
                headers=headers,
                json=payload,
            )

            out = response.json()
            content = out["choices"][0]["message"]["content"]

            if self.streaming_enabled:
                content = self.stream_response(content)

            if self.beautify:
                content = colored(content, "cyan")
            print(content)
            return content

        except Exception as error:
            print(f"Error with the request: {error}")
            raise error

    async def arun(
        self,
        task: Optional[str] = None,
        img: Optional[str] = None,
    ):
        """
        Asynchronously run the model on a single task and image.

        Parameters:
        ----------
        task : str
            The task to run the model on.
        img : str
            The image to run the task on.
        """
        try:
            base64_image = self.encode_image(img)
            headers = {
                "Content-Type": "application/json",
                "Authorization": f"Bearer {self.openai_api_key}",
            }
            payload = {
                "model": self.model_name,
                "messages": [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": task},
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:image/jpeg;base64,{base64_image}"
                                },
                            },
                        ],
                    }
                ],
                "max_tokens": self.max_tokens,
            }
            async with aiohttp.ClientSession() as session:
                async with session.post(
                    self.openai_proxy,
                    headers=headers,
                    data=json.dumps(payload),
                ) as response:
                    out = await response.json()
                    content = out["choices"][0]["message"]["content"]
                    print(content)
                    return content
        except Exception as error:
            print(f"Error with the request {error}")
            raise error

    def health_check(self):
        """Return True when the OpenAI engines endpoint is reachable."""
        try:
            response = requests.get(
                "https://api.openai.com/v1/engines"
            )
            return response.status_code == 200
        except requests.RequestException as error:
            print(f"Health check failed: {error}")
            return False

    def print_dashboard(self):
        """Print and return the settings dashboard.

        (The original returned ``print(...)``, i.e. None.)
        """
        dashboard = f"""
            GPT4Vision Dashboard
            -------------------
            Model: {self.model_name}
            Max Workers: {self.max_workers}
            OpenAIProxy: {self.openai_proxy}
            """
        print(colored(dashboard, "green"))
        return dashboard

    def meta_prompt_init(self):
        """Return the meta prompt used when ``meta_prompt`` is enabled."""
        META_PROMPT = """
        For any labels or markings on an image that you reference in your response, please
        enclose them in square brackets ([]) and list them explicitly. Do not use ranges; for
        example, instead of '1 - 4', list as '[1], [2], [3], [4]'. These labels could be
        numbers or letters and typically correspond to specific segments or parts of the image.
        """
        return META_PROMPT
class HuggingfaceLLM(BaseLLM):
    """
    Run causal-LM inference for a HuggingFace model id.

    Attributes:
        model_id (str): The ID of the model.
        device (str): 'cuda' or 'cpu'; auto-detected when None.
        max_length (int): The maximum length of the output sequence.
        quantize (bool): Whether to load the model 4-bit quantized.
        quantization_config (dict): Overrides for BitsAndBytesConfig.
        verbose (bool): Whether to print verbose logs.
        distributed (bool): Requires more than one GPU.
        decoding (bool): Stream tokens one at a time while generating.

    The ``set_*`` methods are plain mutators for the matching attributes.

    Examples:
        >>> llm = HuggingfaceLLM(
        ...     model_id="EleutherAI/gpt-neo-2.7B",
        ...     device="cuda",
        ...     max_length=500,
        ...     quantize=True,
        ... )
        >>> llm("Generate a 10,000 word blog on mental clarity and the benefits of meditation.")
    """

    def __init__(
        self,
        model_id: str,
        device: str = None,
        max_length: int = 500,
        quantize: bool = False,
        quantization_config: dict = None,
        verbose=False,
        distributed=False,
        decoding=False,
        max_workers: int = 5,
        repitition_penalty: float = 1.3,
        no_repeat_ngram_size: int = 5,
        temperature: float = 0.7,
        top_k: int = 40,
        top_p: float = 0.8,
        dtype=torch.bfloat16,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.logger = logging.getLogger(__name__)
        self.device = (
            device
            if device
            else ("cuda" if torch.cuda.is_available() else "cpu")
        )
        self.model_id = model_id
        self.max_length = max_length
        self.verbose = verbose
        self.distributed = distributed
        self.decoding = decoding
        self.quantize = quantize
        self.quantization_config = quantization_config
        self.max_workers = max_workers
        self.repitition_penalty = repitition_penalty
        self.no_repeat_ngram_size = no_repeat_ngram_size
        self.temperature = temperature
        self.top_k = top_k
        self.top_p = top_p
        self.dtype = dtype
        # Initialized here so clear_chat_history() cannot hit AttributeError.
        self.chat_history = []

        if self.distributed:
            assert (
                torch.cuda.device_count() > 1
            ), "You need more than 1 gpu for distributed processing"

        bnb_config = None
        if quantize:
            if not quantization_config:
                quantization_config = {
                    "load_in_4bit": True,
                    "bnb_4bit_use_double_quant": True,
                    "bnb_4bit_quant_type": "nf4",
                    "bnb_4bit_compute_dtype": dtype,
                }
            bnb_config = BitsAndBytesConfig(**quantization_config)

        self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)

        if quantize:
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_id,
                quantization_config=bnb_config,
                *args,
                **kwargs,
            )
        else:
            self.model = AutoModelForCausalLM.from_pretrained(
                self.model_id, *args, **kwargs
            ).to(self.device)

    def print_error(self, error: str):
        """Print an error message in red."""
        print(colored(f"Error: {error}", "red"))

    async def async_run(self, task: str):
        """Asynchronously generate text for a given prompt."""
        return await asyncio.to_thread(self.run, task)

    def concurrent_run(self, tasks: List[str], max_workers: int = 5):
        """Concurrently generate text for a list of prompts."""
        with concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers
        ) as executor:
            return list(executor.map(self.run, tasks))

    def run_batch(
        self, tasks_images: List[Tuple[str, str]]
    ) -> List[str]:
        """Process a batch of (task, image) pairs.

        Only the task is used: this is a text-only LLM, and the original
        forwarded ``img`` into ``model.generate`` via ``*args``, which
        crashed every call.
        """
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = [
                executor.submit(self.run, task)
                for task, _img in tasks_images
            ]
            return [future.result() for future in futures]

    def run(self, task: str, *args, **kwargs):
        """
        Generate a response based on the prompt text.

        Args:
        - task (str): Text to prompt the model.

        Returns:
        - Generated text (str).
        """
        try:
            inputs = self.tokenizer.encode(task, return_tensors="pt")

            if self.decoding:
                # Token-by-token streaming decode.
                with torch.no_grad():
                    # The original reset this list on every iteration,
                    # discarding all but the last token.
                    output_sequence = []
                    for _ in range(self.max_length):
                        outputs = self.model.generate(
                            inputs,
                            # inputs is (1, seq); the original used
                            # len(inputs) + 1 which is the batch size + 1.
                            max_length=inputs.shape[1] + 1,
                            do_sample=True,
                        )
                        output_tokens = outputs[0][-1]
                        output_sequence.append(output_tokens.item())

                        # print token in real-time
                        print(
                            self.tokenizer.decode(
                                [output_tokens],
                                skip_special_tokens=True,
                            ),
                            end="",
                            flush=True,
                        )
                        inputs = outputs
            else:
                with torch.no_grad():
                    outputs = self.model.generate(
                        inputs,
                        max_length=self.max_length,
                        do_sample=True,
                        *args,
                        **kwargs,
                    )

            return self.tokenizer.decode(
                outputs[0], skip_special_tokens=True
            )
        except Exception as e:
            print(
                colored(
                    (
                        "HuggingfaceLLM could not generate text"
                        f" because of error: {e}, try optimizing your"
                        " arguments"
                    ),
                    "red",
                )
            )
            raise

    def __call__(self, task: str, *args, **kwargs):
        return self.run(task, *args, **kwargs)

    async def __call_async__(self, task: str, *args, **kwargs) -> str:
        """Call the model asynchronously.

        (The original called the nonexistent ``self.run_async``.)
        """
        return await self.async_run(task, *args, **kwargs)

    def save_model(self, path: str):
        """Save the model and tokenizer to a given path."""
        self.model.save_pretrained(path)
        self.tokenizer.save_pretrained(path)

    def gpu_available(self) -> bool:
        """Check if GPU is available."""
        return torch.cuda.is_available()

    def memory_consumption(self) -> dict:
        """Get the memory consumption of the GPU."""
        if self.gpu_available():
            torch.cuda.synchronize()
            allocated = torch.cuda.memory_allocated()
            reserved = torch.cuda.memory_reserved()
            return {"allocated": allocated, "reserved": reserved}
        else:
            return {"error": "GPU not available"}

    def print_dashboard(self, task: str):
        """Print a dashboard of model settings and runtime metadata.

        (The original assigned ``print(...)`` — i.e. None — and then
        printed that None a second time.)
        """
        print(
            colored(
                f"""
                HuggingfaceLLM Dashboard
                --------------------------------------------
                Model Name: {self.model_id}
                Tokenizer: {self.tokenizer}
                Model MaxLength: {self.max_length}
                Model Device: {self.device}
                Model Quantization: {self.quantize}
                Model Quantization Config: {self.quantization_config}
                Model Verbose: {self.verbose}
                Model Distributed: {self.distributed}
                Model Decoding: {self.decoding}

                ----------------------------------------
                Metadata:
                Task Memory Consumption: {self.memory_consumption()}
                GPU Available: {self.gpu_available()}
                ----------------------------------------

                Task Environment:
                Task: {task}

                """,
                "red",
            )
        )

    def set_device(self, device):
        """Change the device used for inference and move the model there."""
        self.device = device
        if self.model is not None:
            self.model.to(self.device)

    def set_max_length(self, max_length):
        """Set max_length."""
        self.max_length = max_length

    def clear_chat_history(self):
        """Clear chat history."""
        self.chat_history = []

    def set_verbose(self, verbose):
        """Set verbose."""
        self.verbose = verbose

    def set_distributed(self, distributed):
        """Set distributed."""
        self.distributed = distributed

    def set_decoding(self, decoding):
        """Set decoding."""
        self.decoding = decoding

    def set_max_workers(self, max_workers):
        """Set max_workers."""
        self.max_workers = max_workers

    def set_repitition_penalty(self, repitition_penalty):
        """Set repitition_penalty."""
        self.repitition_penalty = repitition_penalty

    def set_no_repeat_ngram_size(self, no_repeat_ngram_size):
        """Set no_repeat_ngram_size."""
        self.no_repeat_ngram_size = no_repeat_ngram_size

    def set_temperature(self, temperature):
        """Set temperature."""
        self.temperature = temperature

    def set_top_k(self, top_k):
        """Set top_k."""
        self.top_k = top_k

    def set_top_p(self, top_p):
        """Set top_p."""
        self.top_p = top_p

    def set_quantize(self, quantize):
        """Set quantize."""
        self.quantize = quantize

    def set_quantization_config(self, quantization_config):
        """Set quantization_config."""
        self.quantization_config = quantization_config

    def set_model_id(self, model_id):
        """Set model_id."""
        self.model_id = model_id

    def set_model(self, model):
        """Set model."""
        self.model = model

    def set_tokenizer(self, tokenizer):
        """Set tokenizer."""
        self.tokenizer = tokenizer

    def set_logger(self, logger):
        """Set logger."""
        self.logger = logger
set_quantization_config(self, quantization_config): - """Set quantization_config""" - self.quantization_config = quantization_config - - def set_model_id(self, model_id): - """Set model_id""" - self.model_id = model_id - - def set_model(self, model): - """Set model""" - self.model = model - - def set_tokenizer(self, tokenizer): - """Set tokenizer""" - self.tokenizer = tokenizer - - def set_logger(self, logger): - """Set logger""" - self.logger = logger diff --git a/swarms/models/huggingface_pipeline.py b/swarms/models/huggingface_pipeline.py deleted file mode 100644 index 118766a0..00000000 --- a/swarms/models/huggingface_pipeline.py +++ /dev/null @@ -1,72 +0,0 @@ -from abc import abstractmethod - -import torch -from termcolor import colored - -from swarms.models.base_llm import BaseLLM -from transformers.pipelines import pipeline - - -class HuggingfacePipeline(BaseLLM): - """HuggingfacePipeline - - Args: - BaseLLM (BaseLLM): [description] - task (str, optional): [description]. Defaults to "text-generation". - model_name (str, optional): [description]. Defaults to None. - use_fp8 (bool, optional): [description]. Defaults to False. 
- *args: [description] - **kwargs: [description] - - Raises: - - """ - - def __init__( - self, - task_type: str = "text-generation", - model_name: str = None, - use_fp8: bool = False, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.task_type = task_type - self.model_name = model_name - self.use_fp8 = use_fp8 - - if torch.cuda.is_available(): - self.use_fp8 = True - else: - self.use_fp8 = False - - self.pipe = pipeline( - task_type, model_name, use_fp8=use_fp8 * args, **kwargs - ) - - @abstractmethod - def run(self, task: str, *args, **kwargs) -> str: - """Run the pipeline - - Args: - task (str): [description] - *args: [description] - **kwargs: [description] - - Returns: - _type_: _description_ - """ - try: - out = self.pipeline(task, *args, **kwargs) - return out - except Exception as error: - print( - colored( - ( - "Error in" - f" {self.__class__.__name__} pipeline:" - f" {error}" - ), - "red", - ) - ) diff --git a/swarms/models/idefics.py b/swarms/models/idefics.py deleted file mode 100644 index cc654221..00000000 --- a/swarms/models/idefics.py +++ /dev/null @@ -1,189 +0,0 @@ -from typing import Callable, Optional - -import torch -from termcolor import colored -from transformers import AutoProcessor, IdeficsForVisionText2Text - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -def autodetect_device(): - """ - Autodetects the device to use for inference. - - Returns - ------- - str - The device to use for inference. - """ - return "cuda" if torch.cuda.is_available() else "cpu" - - -class Idefics(BaseMultiModalModel): - """ - - A class for multimodal inference using pre-trained models from the Hugging Face Hub. - - Attributes - ---------- - device : str - The device to use for inference. - model_name : str, optional - The name of the pre-trained model model_name (default is "HuggingFaceM4/idefics-9b-instruct"). - processor : transformers.PreTrainedProcessor - The pre-trained processor. 
- max_length : int - The maximum length of the generated text. - chat_history : list - The chat history. - - Methods - ------- - infer(prompts, batched_mode=True) - Generates text based on the provided prompts. - chat(user_input) - Engages in a continuous bidirectional conversation based on the user input. - set_model_name(model_name) - Changes the model model_name. - set_device(device) - Changes the device used for inference. - set_max_length(max_length) - Changes the maximum length of the generated text. - clear_chat_history() - Clears the chat history. - - - # Usage - ``` - from swarms.models import idefics - - model = idefics() - - user_input = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" - response = model.chat(user_input) - print(response) - - user_input = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052" - response = model.chat(user_input) - print(response) - - model.set_model_name("new_model_name") - model.set_device("cpu") - model.set_max_length(200) - model.clear_chat_history() - ``` - - """ - - def __init__( - self, - model_name: Optional[ - str - ] = "HuggingFaceM4/idefics-9b-instruct", - device: Callable = autodetect_device, - torch_dtype=torch.bfloat16, - max_length: int = 100, - batched_mode: bool = True, - *args, - **kwargs, - ): - # Initialize the parent class - super().__init__(*args, **kwargs) - self.model_name = model_name - self.device = device - self.max_length = max_length - self.batched_mode = batched_mode - - self.chat_history = [] - self.device = ( - device - if device - else ("cuda" if torch.cuda.is_available() else "cpu") - ) - self.model = IdeficsForVisionText2Text.from_pretrained( - model_name, torch_dtype=torch_dtype, *args, **kwargs - ).to(self.device) - - self.processor = AutoProcessor.from_pretrained( - model_name, *args, **kwargs - ) - - def run( - self, task: str = None, img: str = None, *args, **kwargs - 
) -> str: - """ - Generates text based on the provided prompts. - - Parameters - ---------- - task : str - the task to perform - batched_mode : bool, optional - Whether to process the prompts in batched mode. If True, all prompts are - processed together. If False, only the first prompt is processed (default is True). - - Returns - ------- - list - A list of generated text strings. - """ - try: - inputs = ( - self.processor( - task, - add_end_of_utterance_token=False, - return_tensors="pt", - *args, - **kwargs, - ).to(self.device) - if self.batched_mode - else self.processor(task, return_tensors="pt").to( - self.device - ) - ) - - exit_condition = self.processor.tokenizer( - "", add_special_tokens=False - ).input_ids - - bad_words_ids = self.processor.tokenizer( - ["", " x4 or y2 < y3 or y1 > y4) - - -class Kosmos(BaseMultiModalModel): - """A class representing the Kosmos model. - - This model is used for multi-modal tasks such as grounding, referring expression comprehension, - referring expression generation, grounded VQA, grounded image captioning, and more. - - Args: - model_name (str): The name or path of the pre-trained model. - max_new_tokens (int): The maximum number of new tokens to generate. - verbose (bool): Whether to print verbose output. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Attributes: - max_new_tokens (int): The maximum number of new tokens to generate. - model (AutoModelForVision2Seq): The pre-trained model for vision-to-sequence tasks. - processor (AutoProcessor): The pre-trained processor for vision-to-sequence tasks. 
- """ - - def __init__( - self, - model_name="ydshieh/kosmos-2-patch14-224", - max_new_tokens: int = 64, - verbose: bool = False, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - - self.max_new_tokens = max_new_tokens - - self.model = AutoModelForVision2Seq.from_pretrained( - model_name, trust_remote_code=True, *args, **kwargs - ) - self.processor = AutoProcessor.from_pretrained( - model_name, trust_remote_code=True, *args, **kwargs - ) - - def get_image(self, url: str): - """Get image from url - - Args: - url (str): The URL of the image. - - Returns: - PIL.Image: The image object. - """ - return Image.open(requests.get(url, stream=True).raw) - - def run(self, task: str, image: str, *args, **kwargs): - """Run the model - - Args: - task (str): The task to run. - image (str): The URL of the image. - """ - inputs = self.processor( - text=task, images=image, return_tensors="pt" - ) - generated_ids = self.model.generate( - pixel_values=inputs["pixel_values"], - input_ids=inputs["input_ids"][:, :-1], - attention_mask=inputs["attention_mask"][:, :-1], - image_embeds=None, - img_attn_mask=inputs["img_attn_mask"][:, :-1], - use_cache=True, - max_new_tokens=self.max_new_tokens, - ) - - generated_texts = self.processor.batch_decode( - generated_ids, - skip_special_tokens=True, - )[0] - - processed_text, entities = ( - self.processor.post_process_generation(generated_texts) - ) - - return processed_text, entities - - # tasks - def multimodal_grounding(self, phrase, image_url): - task = f" {phrase} " - self.run(task, image_url) - - def referring_expression_comprehension(self, phrase, image_url): - task = f" {phrase} " - self.run(task, image_url) - - def referring_expression_generation(self, phrase, image_url): - task = ( - "" - " It is" - ) - self.run(task, image_url) - - def grounded_vqa(self, question, image_url): - task = f" Question: {question} Answer:" - self.run(task, image_url) - - def grounded_image_captioning(self, image_url): - task = " An image of" - 
self.run(task, image_url) - - def grounded_image_captioning_detailed(self, image_url): - task = " Describe this image in detail" - self.run(task, image_url) - - def generate_boxees(self, task, image_url): - image = self.get_image(image_url) - processed_text, entities = self.process_task(task, image) - self.draw_entity_boxes_on_image(image, entities, show=True) diff --git a/swarms/models/layoutlm_document_qa.py b/swarms/models/layoutlm_document_qa.py deleted file mode 100644 index 09aa9a1a..00000000 --- a/swarms/models/layoutlm_document_qa.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -LayoutLMDocumentQA is a multimodal good for -visual question answering on real world docs lik invoice, pdfs, etc -""" - -from transformers import pipeline - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -class LayoutLMDocumentQA(BaseMultiModalModel): - """ - LayoutLMDocumentQA for document question answering: - - Args: - model_name (str, optional): [description]. Defaults to "impira/layoutlm-document-qa". - task (str, optional): [description]. Defaults to "document-question-answering". 
- - Usage: - >>> from swarms.models import LayoutLMDocumentQA - >>> model = LayoutLMDocumentQA() - >>> out = model("What is the total amount?", "path/to/img.png") - >>> print(out) - - """ - - def __init__( - self, - model_name: str = "impira/layoutlm-document-qa", - task_type: str = "document-question-answering", - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model_name = model_name - self.task_type = task_type - self.pipeline = pipeline(task_type, model=model_name) - - def __call__(self, task: str, img_path: str, *args, **kwargs): - """Call the LayoutLMDocumentQA model - - Args: - task (str): _description_ - img_path (str): _description_ - - Returns: - _type_: _description_ - """ - out = self.pipeline(img_path, task) - out = str(out) - return out diff --git a/swarms/models/llama3_hosted.py b/swarms/models/llama3_hosted.py deleted file mode 100644 index 88a9979f..00000000 --- a/swarms/models/llama3_hosted.py +++ /dev/null @@ -1,82 +0,0 @@ -import requests -import json -from swarms.models.base_llm import BaseLLM - - -class llama3Hosted(BaseLLM): - """ - A class representing a hosted version of the Llama3 model. - - Args: - model (str): The name or path of the Llama3 model to use. - temperature (float): The temperature parameter for generating responses. - max_tokens (int): The maximum number of tokens in the generated response. - system_prompt (str): The system prompt to use for generating responses. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Attributes: - model (str): The name or path of the Llama3 model. - temperature (float): The temperature parameter for generating responses. - max_tokens (int): The maximum number of tokens in the generated response. - system_prompt (str): The system prompt for generating responses. - - Methods: - run(task, *args, **kwargs): Generates a response for the given task. 
- - """ - - def __init__( - self, - model: str = "meta-llama/Meta-Llama-3-8B-Instruct", - temperature: float = 0.8, - max_tokens: int = 4000, - system_prompt: str = "You are a helpful assistant.", - base_url: str = "http://34.204.8.31:30001/v1/chat/completions", - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model = model - self.temperature = temperature - self.max_tokens = max_tokens - self.system_prompt = system_prompt - self.base_url = base_url - - def run(self, task: str, *args, **kwargs) -> str: - """ - Generates a response for the given task. - - Args: - task (str): The user's task or input. - - Returns: - str: The generated response from the Llama3 model. - - """ - - payload = json.dumps( - { - "model": self.model, - "messages": [ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": task}, - ], - "stop_token_ids": [128009, 128001], - "temperature": self.temperature, - "max_tokens": self.max_tokens, - } - ) - - headers = {"Content-Type": "application/json"} - - response = requests.request( - "POST", self.base_url, headers=headers, data=payload - ) - - response_json = response.json() - assistant_message = response_json["choices"][0]["message"][ - "content" - ] - - return assistant_message diff --git a/swarms/models/llama_function_caller.py b/swarms/models/llama_function_caller.py deleted file mode 100644 index 0f175edb..00000000 --- a/swarms/models/llama_function_caller.py +++ /dev/null @@ -1,230 +0,0 @@ -# !pip install accelerate -# !pip install torch -# !pip install transformers -# !pip install bitsandbytes - -from typing import Callable, Dict, List - -import torch -from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - BitsAndBytesConfig, - TextStreamer, -) -from swarms.models.base_llm import BaseLLM - - -class LlamaFunctionCaller(BaseLLM): - """ - A class to manage and execute Llama functions. - - Attributes: - ----------- - model: transformers.AutoModelForCausalLM - The loaded Llama model. 
- tokenizer: transformers.AutoTokenizer - The tokenizer for the Llama model. - functions: Dict[str, Callable] - A dictionary of functions available for execution. - - Methods: - -------- - __init__(self, model_id: str, cache_dir: str, runtime: str) - Initializes the LlamaFunctionCaller with the specified model. - add_func(self, name: str, function: Callable, description: str, arguments: List[Dict]) - Adds a new function to the LlamaFunctionCaller. - call_function(self, name: str, **kwargs) - Calls the specified function with given arguments. - stream(self, user_prompt: str) - Streams a user prompt to the model and prints the response. - - - Example: - - # Example usage - model_id = "Your-Model-ID" - cache_dir = "Your-Cache-Directory" - runtime = "cuda" # or 'cpu' - - llama_caller = LlamaFunctionCaller(model_id, cache_dir, runtime) - - - # Add a custom function - def get_weather(location: str, format: str) -> str: - # This is a placeholder for the actual implementation - return f"Weather at {location} in {format} format." 
- - - llama_caller.add_func( - name="get_weather", - function=get_weather, - description="Get the weather at a location", - arguments=[ - { - "name": "location", - "type": "string", - "description": "Location for the weather", - }, - { - "name": "format", - "type": "string", - "description": "Format of the weather data", - }, - ], - ) - - # Call the function - result = llama_caller.call_function("get_weather", location="Paris", format="Celsius") - print(result) - - # Stream a user prompt - llama_caller("Tell me about the tallest mountain in the world.") - - """ - - def __init__( - self, - model_id: str = "Trelis/Llama-2-7b-chat-hf-function-calling-v2", - cache_dir: str = "llama_cache", - runtime: str = "auto", - max_tokens: int = 500, - streaming: bool = False, - *args, - **kwargs, - ): - self.model_id = model_id - self.cache_dir = cache_dir - self.runtime = runtime - self.max_tokens = max_tokens - self.streaming = streaming - - # Load the model and tokenizer - self.model = self._load_model() - self.tokenizer = AutoTokenizer.from_pretrained( - model_id, cache_dir=cache_dir, use_fast=True - ) - self.functions = {} - - def _load_model(self): - # Configuration for loading the model - bnb_config = BitsAndBytesConfig( - load_in_4bit=True, - bnb_4bit_use_double_quant=True, - bnb_4bit_quant_type="nf4", - bnb_4bit_compute_dtype=torch.bfloat16, - ) - return AutoModelForCausalLM.from_pretrained( - self.model_id, - quantization_config=bnb_config, - device_map=self.runtime, - trust_remote_code=True, - cache_dir=self.cache_dir, - ) - - def add_func( - self, - name: str, - function: Callable, - description: str, - arguments: List[Dict], - ): - """ - Adds a new function to the LlamaFunctionCaller. - - Args: - name (str): The name of the function. - function (Callable): The function to execute. - description (str): Description of the function. - arguments (List[Dict]): List of argument specifications. 
- """ - self.functions[name] = { - "function": function, - "description": description, - "arguments": arguments, - } - - def call_function(self, name: str, **kwargs): - """ - Calls the specified function with given arguments. - - Args: - name (str): The name of the function to call. - **kwargs: Keyword arguments for the function call. - - Returns: - The result of the function call. - """ - if name not in self.functions: - raise ValueError(f"Function {name} not found.") - - func_info = self.functions[name] - return func_info["function"](**kwargs) - - def __call__(self, task: str, **kwargs): - """ - Streams a user prompt to the model and prints the response. - - Args: - task (str): The user prompt to stream. - """ - # Format the prompt - prompt = f"{task}\n\n" - - # Encode and send to the model - inputs = self.tokenizer([prompt], return_tensors="pt").to( - self.runtime - ) - - streamer = TextStreamer(self.tokenizer) - - if self.streaming: - out = self.model.generate( - **inputs, - streamer=streamer, - max_new_tokens=self.max_tokens, - **kwargs, - ) - - return out - else: - out = self.model.generate( - **inputs, max_length=self.max_tokens, **kwargs - ) - # return self.tokenizer.decode(out[0], skip_special_tokens=True) - return out - - -# llama_caller = LlamaFunctionCaller() - - -# # Add a custom function -# def get_weather(location: str, format: str) -> str: -# # This is a placeholder for the actual implementation -# return f"Weather at {location} in {format} format." 
- - -# llama_caller.add_func( -# name="get_weather", -# function=get_weather, -# description="Get the weather at a location", -# arguments=[ -# { -# "name": "location", -# "type": "string", -# "description": "Location for the weather", -# }, -# { -# "name": "format", -# "type": "string", -# "description": "Format of the weather data", -# }, -# ], -# ) - -# # Call the function -# result = llama_caller.call_function("get_weather", location="Paris", format="Celsius") -# print(result) - -# # Stream a user prompt -# llama_caller("Tell me about the tallest mountain in the world.") diff --git a/swarms/models/llava.py b/swarms/models/llava.py deleted file mode 100644 index 5aa4681f..00000000 --- a/swarms/models/llava.py +++ /dev/null @@ -1,84 +0,0 @@ -from io import BytesIO -from typing import Tuple, Union - -import requests -from PIL import Image -from transformers import AutoProcessor, LlavaForConditionalGeneration - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -class LavaMultiModal(BaseMultiModalModel): - """ - A class to handle multi-modal inputs (text and image) using the Llava model for conditional generation. - - Attributes: - model_name (str): The name or path of the pre-trained model. - max_length (int): The maximum length of the generated sequence. - - Args: - model_name (str): The name of the pre-trained model. - max_length (int): The maximum length of the generated sequence. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. 
- - Examples: - >>> model = LavaMultiModal() - >>> model.run("A cat", "https://example.com/cat.jpg") - - """ - - def __init__( - self, - model_name: str = "llava-hf/llava-1.5-7b-hf", - max_length: int = 30, - *args, - **kwargs, - ) -> None: - super().__init__(*args, **kwargs) - self.model_name = model_name - self.max_length = max_length - - self.model = LlavaForConditionalGeneration.from_pretrained( - model_name, *args, **kwargs - ) - self.processor = AutoProcessor.from_pretrained(model_name) - - def run( - self, text: str, img: str, *args, **kwargs - ) -> Union[str, Tuple[None, str]]: - """ - Processes the input text and image, and generates a response. - - Args: - text (str): The input text for the model. - img (str): The URL of the image to process. - max_length (int): The maximum length of the generated sequence. - - Returns: - Union[str, Tuple[None, str]]: The generated response string or a tuple (None, error message) in case of an error. - """ - try: - response = requests.get(img, stream=True) - response.raise_for_status() - image = Image.open(BytesIO(response.content)) - - inputs = self.processor( - text=text, images=image, return_tensors="pt" - ) - - # Generate - generate_ids = self.model.generate( - **inputs, max_length=self.max_length, **kwargs - ) - return self.processor.batch_decode( - generate_ids, - skip_special_tokens=True, - clean_up_tokenization_spaces=False, - *args, - )[0] - - except requests.RequestException as e: - return None, f"Error fetching image: {str(e)}" - except Exception as e: - return None, f"Error during model processing: {str(e)}" diff --git a/swarms/models/model_router.py b/swarms/models/model_router.py deleted file mode 100644 index c695c20e..00000000 --- a/swarms/models/model_router.py +++ /dev/null @@ -1,359 +0,0 @@ -from typing import List, Union - -from swarms.models.base_embedding_model import BaseEmbeddingModel -from swarms.models.base_llm import BaseLLM -from swarms.models.base_multimodal_model import BaseMultiModalModel 
-from swarms.models.fuyu import Fuyu # noqa: E402 -from swarms.models.gpt4_vision_api import GPT4VisionAPI # noqa: E402 -from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 -from swarms.models.idefics import Idefics # noqa: E402 -from swarms.models.kosmos_two import Kosmos # noqa: E402 -from swarms.models.layoutlm_document_qa import LayoutLMDocumentQA -from swarms.models.llama3_hosted import llama3Hosted -from swarms.models.llava import LavaMultiModal # noqa: E402 -from swarms.models.nougat import Nougat # noqa: E402 -from swarms.models.openai_embeddings import OpenAIEmbeddings -from swarms.models.openai_function_caller import OpenAIFunctionCaller -from swarms.models.openai_tts import OpenAITTS # noqa: E402 -from swarms.models.palm import GooglePalm as Palm # noqa: E402 -from swarms.models.popular_llms import Anthropic as Anthropic -from swarms.models.popular_llms import ( - AzureOpenAILLM as AzureOpenAI, -) -from swarms.models.popular_llms import ( - CohereChat as Cohere, -) -from swarms.models.popular_llms import FireWorksAI, OctoAIChat -from swarms.models.popular_llms import ( - OpenAIChatLLM as OpenAIChat, -) -from swarms.models.popular_llms import ( - OpenAILLM as OpenAI, -) -from swarms.models.popular_llms import ReplicateChat as Replicate -from swarms.models.qwen import QwenVLMultiModal # noqa: E402 -from swarms.models.sampling_params import SamplingParams -from swarms.models.together import TogetherLLM # noqa: E402 -from swarms.models.vilt import Vilt # noqa: E402 -from swarms.structs.base_structure import BaseStructure -from swarms.utils.loguru_logger import logger - -# New type BaseLLM and BaseEmbeddingModel and BaseMultimodalModel -omni_model_type = Union[ - BaseLLM, BaseEmbeddingModel, BaseMultiModalModel, callable -] -list_of_omni_model_type = List[omni_model_type] - - -models = [ - BaseLLM, - BaseEmbeddingModel, - BaseMultiModalModel, - Fuyu, - GPT4VisionAPI, - HuggingfaceLLM, - Idefics, - Kosmos, - LayoutLMDocumentQA, - llama3Hosted, - 
LavaMultiModal, - Nougat, - OpenAIEmbeddings, - OpenAITTS, - Palm, - Anthropic, - AzureOpenAI, - Cohere, - OctoAIChat, - OpenAIChat, - OpenAI, - Replicate, - QwenVLMultiModal, - SamplingParams, - TogetherLLM, - Vilt, - FireWorksAI, - OpenAIFunctionCaller, -] - - -class ModelRouter(BaseStructure): - """ - A router for managing multiple models. - - Attributes: - model_router_id (str): The ID of the model router. - model_router_description (str): The description of the model router. - model_pool (List[omni_model_type]): The list of models in the model pool. - - Methods: - check_for_models(): Checks if there are any models in the model pool. - add_model(model: omni_model_type): Adds a model to the model pool. - add_models(models: List[omni_model_type]): Adds multiple models to the model pool. - get_model_by_name(model_name: str) -> omni_model_type: Retrieves a model from the model pool by its name. - get_multiple_models_by_name(model_names: List[str]) -> List[omni_model_type]: Retrieves multiple models from the model pool by their names. - get_model_pool() -> List[omni_model_type]: Retrieves the entire model pool. - get_model_by_index(index: int) -> omni_model_type: Retrieves a model from the model pool by its index. - get_model_by_id(model_id: str) -> omni_model_type: Retrieves a model from the model pool by its ID. - dict() -> dict: Returns a dictionary representation of the model router. 
- - """ - - def __init__( - self, - model_router_id: str = "model_router", - model_router_description: str = "A router for managing multiple models.", - model_pool: List[omni_model_type] = models, - verbose: bool = False, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model_router_id = model_router_id - self.model_router_description = model_router_description - self.model_pool = model_pool - self.verbose = verbose - - self.check_for_models() - # self.refactor_model_class_if_invoke() - - def check_for_models(self): - """ - Checks if there are any models in the model pool. - - Returns: - None - - Raises: - ValueError: If no models are found in the model pool. - """ - if len(self.model_pool) == 0: - raise ValueError("No models found in model pool.") - - def add_model(self, model: omni_model_type): - """ - Adds a model to the model pool. - - Args: - model (omni_model_type): The model to be added. - - Returns: - str: A success message indicating that the model has been added to the model pool. - """ - logger.info(f"Adding model {model.name} to model pool.") - self.model_pool.append(model) - return "Model successfully added to model pool." - - def add_models(self, models: List[omni_model_type]): - """ - Adds multiple models to the model pool. - - Args: - models (List[omni_model_type]): The models to be added. - - Returns: - str: A success message indicating that the models have been added to the model pool. - """ - logger.info("Adding models to model pool.") - self.model_pool.extend(models) - return "Models successfully added to model pool." - - # def query_model_from_langchain(self, model_name: str, *args, **kwargs): - # """ - # Query a model from langchain community. - - # Args: - # model_name (str): The name of the model. - # *args: Additional positional arguments to be passed to the model. - # **kwargs: Additional keyword arguments to be passed to the model. - - # Returns: - # omni_model_type: The model object. 
- - # Raises: - # ValueError: If the model with the given name is not found in the model pool. - # """ - # from langchain_community.llms import __getattr__ - - # logger.info( - # f"Querying model {model_name} from langchain community." - # ) - # model = __getattr__(model_name)(*args, **kwargs) - # model = self.refactor_model_class_if_invoke_class(model) - - # return model - - def get_model_by_name(self, model_name: str) -> omni_model_type: - """ - Retrieves a model from the model pool by its name. - - Args: - model_name (str): The name of the model. - - Returns: - omni_model_type: The model object. - - Raises: - ValueError: If the model with the given name is not found in the model pool. - """ - logger.info(f"Retrieving model {model_name} from model pool.") - for model in self.model_pool: - if model_name in [ - model.name, - model.model_id, - model.model_name, - ]: - return model - raise ValueError( - f"Model {model_name} not found in model pool." - ) - - def get_multiple_models_by_name( - self, model_names: List[str] - ) -> List[omni_model_type]: - """ - Retrieves multiple models from the model pool by their names. - - Args: - model_names (List[str]): The names of the models. - - Returns: - List[omni_model_type]: The list of model objects. - - Raises: - ValueError: If any of the models with the given names are not found in the model pool. - """ - logger.info( - f"Retrieving multiple models {model_names} from model pool." - ) - models = [] - for model_name in model_names: - models.append(self.get_model_by_name(model_name)) - return models - - def get_model_pool(self) -> List[omni_model_type]: - """ - Retrieves the entire model pool. - - Returns: - List[omni_model_type]: The list of model objects in the model pool. - """ - return self.model_pool - - def get_model_by_index(self, index: int) -> omni_model_type: - """ - Retrieves a model from the model pool by its index. - - Args: - index (int): The index of the model in the model pool. 
- - Returns: - omni_model_type: The model object. - - Raises: - IndexError: If the index is out of range. - """ - return self.model_pool[index] - - def get_model_by_id(self, model_id: str) -> omni_model_type: - """ - Retrieves a model from the model pool by its ID. - - Args: - model_id (str): The ID of the model. - - Returns: - omni_model_type: The model object. - - Raises: - ValueError: If the model with the given ID is not found in the model pool. - """ - name = model_id - for model in self.model_pool: - if ( - hasattr(model, "model_id") - and name == model.model_id - or hasattr(model, "model_name") - and name == model.model_name - or hasattr(model, "name") - and name == model.name - or hasattr(model, "model") - and name == model.model - ): - return model - raise ValueError(f"Model {model_id} not found in model pool.") - - def refactor_model_class_if_invoke(self): - """ - Refactors the model class if it has an 'invoke' method. - - Checks to see if the model pool has a model with an 'invoke' method and refactors it to have a 'run' method and '__call__' method. - - Returns: - str: A success message indicating that the model classes have been refactored. - """ - for model in self.model_pool: - if hasattr(model, "invoke"): - model.run = model.invoke - model.__call__ = model.invoke - logger.info( - f"Refactored model {model.name} to have run and __call__ methods." - ) - - # Update the model in the model pool - self.model_pool[self.model_pool.index(model)] = model - - return "Model classes successfully refactored." - - def refactor_model_class_if_invoke_class( - self, model: callable, *args, **kwargs - ) -> callable: - """ - Refactors the model class if it has an 'invoke' method. - - Checks to see if the model pool has a model with an 'invoke' method and refactors it to have a 'run' method and '__call__' method. - - Returns: - str: A success message indicating that the model classes have been refactored. 
- """ - if hasattr(model, "invoke"): - model.run = model.invoke - model.__call__ = model.invoke - logger.info( - f"Refactored model {model.name} to have run and __call__ methods." - ) - - return model - - def find_model_by_name_and_run( - self, - model_name: str = None, - task: str = None, - *args, - **kwargs, - ) -> str: - """ - Finds a model by its name and runs a task on it. - - Args: - model_name (str): The name of the model. - task (str): The task to be run on the model. - *args: Additional positional arguments to be passed to the task. - **kwargs: Additional keyword arguments to be passed to the task. - - Returns: - str: The result of running the task on the model. - - Raises: - ValueError: If the model with the given name is not found in the model pool. - """ - model = self.get_model_by_name(model_name) - return model.run(task, *args, **kwargs) - - -# model = ModelRouter() -# print(model.to_dict()) -# print(model.get_model_pool()) -# print(model.get_model_by_index(0)) -# print(model.get_model_by_id("stability-ai/stable-diffusion:")) -# # print(model.get_multiple_models_by_name(["gpt-4o", "gpt-4"])) diff --git a/swarms/models/model_types.py b/swarms/models/model_types.py deleted file mode 100644 index 49b1ed9d..00000000 --- a/swarms/models/model_types.py +++ /dev/null @@ -1,29 +0,0 @@ -from typing import List, Optional - -from pydantic import BaseModel - - -class TextModality(BaseModel): - content: str - - -class ImageModality(BaseModel): - url: str - alt_text: Optional[str] = None - - -class AudioModality(BaseModel): - url: str - transcript: Optional[str] = None - - -class VideoModality(BaseModel): - url: str - transcript: Optional[str] = None - - -class MultimodalData(BaseModel): - text: Optional[List[TextModality]] = None - images: Optional[List[ImageModality]] = None - audio: Optional[List[AudioModality]] = None - video: Optional[List[VideoModality]] = None diff --git a/swarms/models/moondream_mm.py b/swarms/models/moondream_mm.py deleted file mode 100644 
index c1db54fc..00000000 --- a/swarms/models/moondream_mm.py +++ /dev/null @@ -1,63 +0,0 @@ -from PIL import Image -from transformers import AutoModelForCausalLM, AutoTokenizer - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -class MoonDream(BaseMultiModalModel): - """ - MoonDream is a multi-modal model that combines text and image inputs to generate descriptive answers for images. - - Args: - model_name (str): The name or path of the pre-trained model to be used. - revision (str): The specific revision of the pre-trained model to be used. - - Attributes: - model_name (str): The name or path of the pre-trained model. - revision (str): The specific revision of the pre-trained model. - model (AutoModelForCausalLM): The pre-trained model for generating answers. - tokenizer (AutoTokenizer): The tokenizer for processing text inputs. - - """ - - def __init__( - self, - model_name: str = "vikhyatk/moondream2", - revision: str = "2024-03-04", - system_prompt: str = None, - *args, - **kwargs, - ): - super().__init__() - self.model_name = model_name - self.revision = revision - self.system_prompt = system_prompt - - self.model = AutoModelForCausalLM.from_pretrained( - model_name, - trust_remote_code=True, - revision=revision, - *args, - **kwargs, - ) - self.tokenizer = AutoTokenizer.from_pretrained( - model_name, revision=revision - ) - - def run(self, task: str, img: str): - """ - Runs the MoonDream model to generate a descriptive answer for the given image. - - Args: - task (str): The task or question related to the image. - img (str): The path or URL of the image file. - - Returns: - str: The descriptive answer generated by the MoonDream model. 
- - """ - image = Image.open(img) - enc_image = self.model.encode_image(image) - return self.model.answer_question( - enc_image, f"{self.system_propmpt} {task}", self.tokenizer - ) diff --git a/swarms/models/nougat.py b/swarms/models/nougat.py deleted file mode 100644 index 9cba23f2..00000000 --- a/swarms/models/nougat.py +++ /dev/null @@ -1,106 +0,0 @@ -""" -Nougat by Meta - -Good for: -- transcribe Scientific PDFs into an easy to use markdown -format -- Extracting information from PDFs -- Extracting metadata from pdfs - -""" - -import re - -import torch -from PIL import Image -from transformers import NougatProcessor, VisionEncoderDecoderModel - - -class Nougat: - """ - Nougat - - Args: - model_name_or_path: str, default="facebook/nougat-base" - min_length: int, default=1 - max_new_tokens: int, default=30 - - Usage: - >>> from swarms.models.nougat import Nougat - >>> nougat = Nougat() - >>> nougat("path/to/image.png") - - - """ - - def __init__( - self, - model_name_or_path="facebook/nougat-base", - min_length: int = 1, - max_new_tokens: int = 5000, - ): - self.model_name_or_path = model_name_or_path - self.min_length = min_length - self.max_new_tokens = max_new_tokens - - self.processor = NougatProcessor.from_pretrained( - self.model_name_or_path - ) - self.model = VisionEncoderDecoderModel.from_pretrained( - self.model_name_or_path - ) - self.device = "cuda" if torch.cuda.is_available() else "cpu" - self.model.to(self.device) - - def get_image(self, img: str): - """Get an image from a path""" - img = Image.open(img) - - if img.mode == "L": - img = img.convert("RGB") - return img - - def __call__(self, img: str, *args, **kwargs): - """Call the model with an image_path str as an input""" - image = Image.open(img) - pixel_values = self.processor( - image, return_tensors="pt" - ).pixel_values - - # Generate transcriptions, here we only generate 30 tokens - outputs = self.model.generate( - pixel_values.to(self.device), - min_length=self.min_length, - 
max_new_tokens=self.max_new_tokens, - *args, - **kwargs, - ) - - sequence = self.processor.batch_decode( - outputs, skip_special_tokens=True - )[0] - sequence = self.processor.post_process_generation( - sequence, fix_markdown=False - ) - - out = print(sequence) - return out - - def clean_nougat_output(raw_output): - """Clean the output from nougat to be more readable""" - # Define the pattern to extract the relevant data - daily_balance_pattern = ( - r"\*\*(\d{2}/\d{2}/\d{4})\*\*\n\n\*\*([\d,]+\.\d{2})\*\*" - ) - - # Find all matches of the pattern - matches = re.findall(daily_balance_pattern, raw_output) - - # Convert the matches to a readable format - cleaned_data = [ - f"Date: {date}, Amount: {amount.replace(',', '')}" - for date, amount in matches - ] - - # Join the cleaned data with new lines for readability - return "\n".join(cleaned_data) diff --git a/swarms/models/ollama_model 2.py b/swarms/models/ollama_model 2.py deleted file mode 100644 index 1604ee5d..00000000 --- a/swarms/models/ollama_model 2.py +++ /dev/null @@ -1,135 +0,0 @@ -from loguru import logger -import subprocess -from pydantic import BaseModel, Field, ValidationError -from typing import List, Optional, Union - - -try: - import ollama -except ImportError: - logger.error("Failed to import ollama") - subprocess.run(["pip", "install", "ollama"]) - import ollama - - -class Message(BaseModel): - role: str = Field( - ..., - regex="^(user|system|assistant)$", - description="The role of the message sender.", - ) - content: str = Field( - ..., min_length=1, description="The content of the message." - ) - - -class OllamaModel: - def __init__( - self, - model_name: str, - host: Optional[str] = None, - timeout: int = 30, - stream: bool = False, - ): - """ - Initializes the OllamaModel with the model name and optional parameters. - - Args: - model_name (str): The name of the model to interact with (e.g., 'llama3.1'). - host (str, optional): The Ollama host to connect to. Defaults to None. 
- timeout (int, optional): Timeout for the requests. Defaults to 30 seconds. - stream (bool, optional): Enable streaming for responses. Defaults to False. - """ - self.model_name = model_name - self.host = host - self.timeout = timeout - self.stream = stream - - self.client = ollama.Client(host=host) if host else None - - def validate_messages( - self, messages: List[Message] - ) -> List[dict]: - """ - Validates the list of messages using Pydantic schema. - - Args: - messages (List[Message]): List of messages to validate. - - Returns: - List[dict]: Validated messages in dictionary format. - """ - try: - return [message.dict() for message in messages] - except ValidationError as e: - print(f"Validation error: {e}") - return [] - - def chat( - self, messages: List[Message], *args, **kwargs - ) -> Union[str, None]: - """Executes the chat task.""" - validated_messages = self.validate_messages(messages) - if not validated_messages: - return None - - if self.stream: - stream = ollama.chat( - model=self.model_name, - messages=validated_messages, - stream=True, - *args, - **kwargs, - ) - for chunk in stream: - print(chunk["message"]["content"], end="", flush=True) - else: - response = ollama.chat( - model=self.model_name, messages=validated_messages - ) - return response["message"]["content"] - - def generate(self, prompt: str) -> Optional[str]: - """Generates text based on a prompt.""" - if len(prompt) == 0: - print("Prompt cannot be empty.") - return None - - response = ollama.generate( - model=self.model_name, prompt=prompt - ) - return response.get("message", {}).get("content", None) - - def list_models(self) -> List[str]: - """Lists available models.""" - return ollama.list() - - def show_model(self) -> dict: - """Shows details of the current model.""" - return ollama.show(self.model_name) - - def create_model(self, modelfile: str) -> dict: - """Creates a new model from a modelfile.""" - return ollama.create( - model=self.model_name, modelfile=modelfile - ) - - def 
delete_model(self) -> bool: - """Deletes the current model.""" - try: - ollama.delete(self.model_name) - return True - except ollama.ResponseError as e: - print(f"Error deleting model: {e}") - return False - - def run(self, task: str, *args, **kwargs): - """ - Executes the task based on the task string. - - Args: - task (str): The task to execute, such as 'chat', 'generate', etc. - """ - return ollama.generate( - model=self.model_name, prompt=task, *args, **kwargs - ) diff --git a/swarms/models/ollama_model.py b/swarms/models/ollama_model.py deleted file mode 100644 index 1604ee5d..00000000 --- a/swarms/models/ollama_model.py +++ /dev/null @@ -1,135 +0,0 @@ -from loguru import logger -import subprocess -from pydantic import BaseModel, Field, ValidationError -from typing import List, Optional, Union - - -try: - import ollama -except ImportError: - logger.error("Failed to import ollama") - subprocess.run(["pip", "install", "ollama"]) - import ollama - - -class Message(BaseModel): - role: str = Field( - ..., - regex="^(user|system|assistant)$", - description="The role of the message sender.", - ) - content: str = Field( - ..., min_length=1, description="The content of the message." - ) - - -class OllamaModel: - def __init__( - self, - model_name: str, - host: Optional[str] = None, - timeout: int = 30, - stream: bool = False, - ): - """ - Initializes the OllamaModel with the model name and optional parameters. - - Args: - model_name (str): The name of the model to interact with (e.g., 'llama3.1'). - host (str, optional): The Ollama host to connect to. Defaults to None. - timeout (int, optional): Timeout for the requests. Defaults to 30 seconds. - stream (bool, optional): Enable streaming for responses. Defaults to False. 
- """ - self.model_name = model_name - self.host = host - self.timeout = timeout - self.stream = stream - - self.client = ollama.Client(host=host) if host else None - - def validate_messages( - self, messages: List[Message] - ) -> List[dict]: - """ - Validates the list of messages using Pydantic schema. - - Args: - messages (List[Message]): List of messages to validate. - - Returns: - List[dict]: Validated messages in dictionary format. - """ - try: - return [message.dict() for message in messages] - except ValidationError as e: - print(f"Validation error: {e}") - return [] - - def chat( - self, messages: List[Message], *args, **kwargs - ) -> Union[str, None]: - """Executes the chat task.""" - validated_messages = self.validate_messages(messages) - if not validated_messages: - return None - - if self.stream: - stream = ollama.chat( - model=self.model_name, - messages=validated_messages, - stream=True, - *args, - **kwargs, - ) - for chunk in stream: - print(chunk["message"]["content"], end="", flush=True) - else: - response = ollama.chat( - model=self.model_name, messages=validated_messages - ) - return response["message"]["content"] - - def generate(self, prompt: str) -> Optional[str]: - """Generates text based on a prompt.""" - if len(prompt) == 0: - print("Prompt cannot be empty.") - return None - - response = ollama.generate( - model=self.model_name, prompt=prompt - ) - return response.get("message", {}).get("content", None) - - def list_models(self) -> List[str]: - """Lists available models.""" - return ollama.list() - - def show_model(self) -> dict: - """Shows details of the current model.""" - return ollama.show(self.model_name) - - def create_model(self, modelfile: str) -> dict: - """Creates a new model from a modelfile.""" - return ollama.create( - model=self.model_name, modelfile=modelfile - ) - - def delete_model(self) -> bool: - """Deletes the current model.""" - try: - ollama.delete(self.model_name) - return True - except ollama.ResponseError as e: - 
print(f"Error deleting model: {e}") - return False - - def run(self, task: str, *args, **kwargs): - """ - Executes the task based on the task string. - - Args: - task (str): The task to execute, such as 'chat', 'generate', etc. - """ - return ollama.generate( - model=self.model_name, prompt=task, *args, **kwargs - ) diff --git a/swarms/models/open_dalle.py b/swarms/models/open_dalle.py deleted file mode 100644 index 57e8846b..00000000 --- a/swarms/models/open_dalle.py +++ /dev/null @@ -1,67 +0,0 @@ -from typing import Any, Optional - -import torch -from diffusers import AutoPipelineForText2Image - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -class OpenDalle(BaseMultiModalModel): - """OpenDalle model class - - Attributes: - model_name (str): The name or path of the model to be used. Defaults to "dataautogpt3/OpenDalleV1.1". - torch_dtype (torch.dtype): The torch data type to be used. Defaults to torch.float16. - device (str): The device to be used for computation. Defaults to "cuda". - - Examples: - >>> from swarms.models.open_dalle import OpenDalle - >>> od = OpenDalle() - >>> od.run("A picture of a cat") - - """ - - def __init__( - self, - model_name: str = "dataautogpt3/OpenDalleV1.1", - torch_dtype: Any = torch.float16, - device: str = "cuda", - *args, - **kwargs, - ): - """ - Initializes the OpenDalle model. - - Args: - model_name (str, optional): The name or path of the model to be used. Defaults to "dataautogpt3/OpenDalleV1.1". - torch_dtype (torch.dtype, optional): The torch data type to be used. Defaults to torch.float16. - device (str, optional): The device to be used for computation. Defaults to "cuda". - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. 
- """ - self.pipeline = AutoPipelineForText2Image.from_pretrained( - model_name, torch_dtype=torch_dtype, *args, **kwargs - ).to(device) - - def run(self, task: Optional[str] = None, *args, **kwargs): - """Run the OpenDalle model - - Args: - task (str, optional): The task to be performed. Defaults to None. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Returns: - [type]: [description] - """ - try: - if task is None: - raise ValueError("Task cannot be None") - if not isinstance(task, str): - raise TypeError("Task must be a string") - if len(task) < 1: - raise ValueError("Task cannot be empty") - return self.pipeline(task, *args, **kwargs).images[0] - except Exception as error: - print(f"[ERROR][OpenDalle] {error}") - raise error diff --git a/swarms/models/open_router.py b/swarms/models/open_router.py deleted file mode 100644 index 4140b736..00000000 --- a/swarms/models/open_router.py +++ /dev/null @@ -1,75 +0,0 @@ -from swarms.models.base_llm import BaseLLM -from pydantic import BaseModel -from typing import List, Dict -import openai - - -class OpenRouterRequest(BaseModel): - model: str - messages: List[Dict[str, str]] = [] - - -class OpenRouterChat(BaseLLM): - """ - A class representing an OpenRouter chat model. - - Args: - model_name (str): The name of the OpenRouter model. - base_url (str, optional): The base URL for the OpenRouter API. Defaults to "https://openrouter.ai/api/v1/chat/completions". - openrouter_api_key (str, optional): The API key for accessing the OpenRouter API. Defaults to None. - system_prompt (str, optional): The system prompt for the chat model. Defaults to None. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Attributes: - model_name (str): The name of the OpenRouter model. - base_url (str): The base URL for the OpenRouter API. - openrouter_api_key (str): The API key for accessing the OpenRouter API. - system_prompt (str): The system prompt for the chat model. 
- - Methods: - run(task, *args, **kwargs): Runs the chat model with the given task. - - """ - - def __init__( - self, - model_name: str, - base_url: str = "https://openrouter.ai/api/v1/chat/completions", - openrouter_api_key: str = None, - system_prompt: str = None, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model_name = model_name - self.base_url = base_url - self.openrouter_api_key = openrouter_api_key - self.system_prompt = system_prompt - - openai.api_base = "https://openrouter.ai/api/v1" - openai.api_key = openrouter_api_key - - def run(self, task: str, *args, **kwargs) -> str: - """ - Runs the chat model with the given task. - - Args: - task (str): The user's task for the chat model. - *args: Variable length argument list. - **kwargs: Arbitrary keyword arguments. - - Returns: - str: The response generated by the chat model. - - """ - response = openai.ChatCompletion.create( - model=self.model_name, - messages=[ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": task}, - ] - * args, - **kwargs, - ) - return response.choices[0].message.text diff --git a/swarms/models/openai_embeddings.py b/swarms/models/openai_embeddings.py deleted file mode 100644 index 0193f0cc..00000000 --- a/swarms/models/openai_embeddings.py +++ /dev/null @@ -1,5 +0,0 @@ -from langchain_community.embeddings.openai import OpenAIEmbeddings - -__all__ = [ - "OpenAIEmbeddings", -] diff --git a/swarms/models/openai_function_caller.py b/swarms/models/openai_function_caller.py deleted file mode 100644 index ba77829f..00000000 --- a/swarms/models/openai_function_caller.py +++ /dev/null @@ -1,179 +0,0 @@ -import openai -from pydantic import BaseModel -import os -from swarms.utils.loguru_logger import logger -from swarms.models.base_llm import BaseLLM -from typing import List - - -class OpenAIFunctionCaller(BaseLLM): - """ - A class that represents a caller for OpenAI chat completions. 
- - Args: - system_prompt (str): The system prompt to be used in the chat completion. - model_name (str): The name of the OpenAI model to be used. - max_tokens (int): The maximum number of tokens in the generated completion. - temperature (float): The temperature parameter for randomness in the completion. - base_model (BaseModel): The base model to be used for the completion. - openai_api_key (str): The API key for accessing the OpenAI service. - parallel_tool_calls (bool): Whether to make parallel tool calls. - top_p (float): The top-p parameter for nucleus sampling in the completion. - - Attributes: - system_prompt (str): The system prompt to be used in the chat completion. - model_name (str): The name of the OpenAI model to be used. - max_tokens (int): The maximum number of tokens in the generated completion. - temperature (float): The temperature parameter for randomness in the completion. - base_model (BaseModel): The base model to be used for the completion. - parallel_tool_calls (bool): Whether to make parallel tool calls. - top_p (float): The top-p parameter for nucleus sampling in the completion. - client (openai.OpenAI): The OpenAI client for making API calls. - - Methods: - check_api_key: Checks if the API key is provided and retrieves it from the environment if not. - run: Runs the chat completion with the given task and returns the generated completion. 
- - """ - - def __init__( - self, - system_prompt: str = None, - model_name: str = "gpt-4o-2024-08-06", - max_tokens: int = 4000, - temperature: float = 0.4, - base_model: BaseModel = None, - openai_api_key: str = None, - parallel_tool_calls: bool = False, - top_p: float = 0.9, - *args, - **kwargs, - ): - super().__init__() - self.system_prompt = system_prompt - self.model_name = model_name - self.max_tokens = max_tokens - self.temperature = temperature - self.openai_api_key = openai_api_key - self.base_model = base_model - self.parallel_tool_calls = parallel_tool_calls - self.top_p = top_p - self.client = openai.OpenAI(api_key=self.check_api_key()) - - def check_api_key(self) -> str: - """ - Checks if the API key is provided and retrieves it from the environment if not. - - Returns: - str: The API key. - - """ - if self.openai_api_key is None: - self.openai_api_key = os.getenv("OPENAI_API_KEY") - - return self.openai_api_key - - def run(self, task: str, *args, **kwargs) -> dict: - """ - Runs the chat completion with the given task and returns the generated completion. - - Args: - task (str): The user's task for the chat completion. - *args: Additional positional arguments to be passed to the OpenAI API. - **kwargs: Additional keyword arguments to be passed to the OpenAI API. - - Returns: - str: The generated completion. 
- - """ - try: - completion = self.client.beta.chat.completions.parse( - model=self.model_name, - messages=[ - {"role": "system", "content": self.system_prompt}, - {"role": "user", "content": task}, - ], - max_tokens=self.max_tokens, - temperature=self.temperature, - response_format=self.base_model, - parallel_tool_calls=self.parallel_tool_calls, - tools=( - [openai.pydantic_function_tool(self.base_model)] - ), - *args, - **kwargs, - ) - - out = ( - completion.choices[0] - .message.tool_calls[0] - .function.arguments - ) - - # Conver str to dict - # print(out) - out = eval(out) - return out - except Exception as error: - logger.error( - f"Error in running OpenAI chat completion: {error}" - ) - return None - - def convert_to_dict_from_base_model( - self, base_model: BaseModel - ) -> dict: - return openai.pydantic_function_tool(base_model) - - def convert_list_of_base_models( - self, base_models: List[BaseModel] - ): - """ - Converts a list of BaseModels to a list of dictionaries. - - Args: - base_models (List[BaseModel]): A list of BaseModels to be converted. - - Returns: - List[Dict]: A list of dictionaries representing the converted BaseModels. 
- """ - return [ - self.convert_to_dict_from_base_model(base_model) - for base_model in base_models - ] - - -# def agents_list( -# agents: List[Agent] = None, -# ) -> str: -# responses = [] - -# for agent in agents: -# name = agent.agent_name -# description = agent.description -# response = f"Agent Name {name}: Description {description}" -# responses.append(response) - -# return concat_strings(responses) - - -# class HierarchicalOrderCall(BaseModel): -# agent_name: str -# task: str - - -# # Example usage: -# # Initialize the function caller -# function_caller = OpenAIFunctionCaller( -# system_prompt="You are a helpful assistant.", -# openai_api_key=""," -# max_tokens=500, -# temperature=0.5, -# base_model=HierarchicalOrderCall, -# ) - -# # Run the function caller -# response = function_caller.run( -# "Send an order to the financial agent twice" -# ) -# print(response) diff --git a/swarms/models/openai_tts.py b/swarms/models/openai_tts.py deleted file mode 100644 index f3e8b850..00000000 --- a/swarms/models/openai_tts.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -import subprocess -import sys - -import requests -from dotenv import load_dotenv - -from swarms.models.base_llm import BaseLLM - -try: - import wave -except ImportError as error: - print(f"Import Error: {error} - Please install pyaudio") - subprocess.check_call( - [sys.executable, "-m", "pip", "install", "pyaudio"] - ) - - -# Load .env file -load_dotenv() - - -# OpenAI API Key env -def openai_api_key_env(): - openai_api_key = os.getenv("OPENAI_API_KEY") - return openai_api_key - - -class OpenAITTS(BaseLLM): - """OpenAI TTS model - - Attributes: - model_name (str): _description_ - proxy_url (str): _description_ - openai_api_key (str): _description_ - voice (str): _description_ - chunk_size (_type_): _description_ - - Methods: - run: _description_ - - - Examples: - >>> from swarms.models.openai_tts import OpenAITTS - >>> tts = OpenAITTS( - ... model_name = "tts-1-1106", - ... 
proxy_url = "https://api.openai.com/v1/audio/speech", - ... openai_api_key = openai_api_key_env, - ... voice = "onyx", - ... ) - >>> tts.run("Hello world") - - """ - - def __init__( - self, - model_name: str = "tts-1-1106", - proxy_url: str = "https://api.openai.com/v1/audio/speech", - openai_api_key: str = openai_api_key_env, - voice: str = "onyx", - chunk_size=1024 * 1024, - autosave: bool = False, - saved_filepath: str = None, - *args, - **kwargs, - ): - super().__init__() - self.model_name = model_name - self.proxy_url = proxy_url - self.openai_api_key = openai_api_key - self.voice = voice - self.chunk_size = chunk_size - self.autosave = autosave - self.saved_filepath = saved_filepath - - self.saved_filepath = "runs/tts_speech.wav" - - def run(self, task: str, *args, **kwargs): - """Run the tts model - - Args: - task (str): _description_ - - Returns: - _type_: _description_ - """ - response = requests.post( - self.proxy_url, - headers={ - "Authorization": f"Bearer {self.openai_api_key}", - }, - json={ - "model": self.model_name, - "input": task, - "voice": self.voice, - }, - ) - - audio = b"" - for chunk in response.iter_content(chunk_size=1024 * 1024): - audio += chunk - return audio - - def run_and_save(self, task: str = None, *args, **kwargs): - """Run the TTS model and save the output to a file. - - Args: - task (str): The text to be converted to speech. - filename (str): The path to the file where the speech will be saved. - - Returns: - bytes: The speech data. - """ - # Run the TTS model. - speech_data = self.run(task) - - # Save the speech data to a file. 
- with wave.open(self.saved_filepath, "wb") as file: - file.setnchannels(1) - file.setsampwidth(2) - file.setframerate(22050) - file.writeframes(speech_data) - - return speech_data diff --git a/swarms/models/palm.py b/swarms/models/palm.py deleted file mode 100644 index 301ce1bd..00000000 --- a/swarms/models/palm.py +++ /dev/null @@ -1,5 +0,0 @@ -from langchain_community.llms.google_palm import GooglePalm - -__all__ = [ - "GooglePalm", -] diff --git a/swarms/models/popular_llms.py b/swarms/models/popular_llms.py deleted file mode 100644 index 852c56b4..00000000 --- a/swarms/models/popular_llms.py +++ /dev/null @@ -1,92 +0,0 @@ -from langchain_community.chat_models.azure_openai import ( - AzureChatOpenAI, -) -from langchain_community.chat_models.openai import ( - ChatOpenAI as OpenAIChat, -) -from langchain_community.llms.anthropic import Anthropic -from langchain_community.llms.cohere import Cohere -from langchain_community.llms.mosaicml import MosaicML -from langchain_community.llms.openai import ( - OpenAI, -) # , OpenAIChat, AzureOpenAI -from langchain_community.llms.octoai_endpoint import OctoAIEndpoint -from langchain_community.llms.replicate import Replicate -from langchain_community.llms.fireworks import Fireworks # noqa: F401 - - -class Anthropic(Anthropic): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class CohereChat(Cohere): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class MosaicMLChat(MosaicML): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class OpenAILLM(OpenAI): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class 
ReplicateChat(Replicate): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class AzureOpenAILLM(AzureChatOpenAI): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class OpenAIChatLLM(OpenAIChat): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def __call__(self, *args, **kwargs): - out = self.invoke(*args, **kwargs) - return out.content.strip() - - def run(self, *args, **kwargs): - out = self.invoke(*args, **kwargs) - return out.content.strip() - - -class OctoAIChat(OctoAIEndpoint): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - -class FireWorksAI(Fireworks): - def __call__(self, *args, **kwargs): - return self.invoke(*args, **kwargs) - - def run(self, *args, **kwargs): - return self.invoke(*args, **kwargs) diff --git a/swarms/models/qwen.py b/swarms/models/qwen.py deleted file mode 100644 index b5a4ed1a..00000000 --- a/swarms/models/qwen.py +++ /dev/null @@ -1,144 +0,0 @@ -from dataclasses import dataclass, field -from typing import Optional, Tuple - -from PIL import Image -from transformers import AutoModelForCausalLM, AutoTokenizer - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -@dataclass -class QwenVLMultiModal(BaseMultiModalModel): - """ - QwenVLMultiModal is a class that represents a multi-modal model for Qwen chatbot. - It inherits from the BaseMultiModalModel class. - - - Args: - model_name (str): The name of the model to be used. - device (str): The device to run the model on. - args (tuple): Additional positional arguments. - kwargs (dict): Additional keyword arguments. - quantize (bool): A flag to indicate whether to quantize the model. 
- return_bounding_boxes (bool): A flag to indicate whether to return bounding boxes for the image. - - - Examples: - >>> qwen = QwenVLMultiModal() - >>> response = qwen.run("Hello", "https://example.com/image.jpg") - >>> print(response) - """ - - model_name: str = "Qwen/Qwen-VL" - device: str = "cuda" - args: tuple = field(default_factory=tuple) - kwargs: dict = field(default_factory=dict) - quantize: bool = False - return_bounding_boxes: bool = False - - def __post_init__(self): - """ - Initializes the QwenVLMultiModal object. - It initializes the tokenizer and the model for the Qwen chatbot. - """ - - if self.quantize: - self.model_name = "Qwen/Qwen-VL-Chat-Int4" - - self.tokenizer = AutoTokenizer.from_pretrained( - self.model_name, trust_remote_code=True - ) - self.model = AutoModelForCausalLM.from_pretrained( - self.model_name, - device_map=self.device, - trust_remote_code=True, - ).eval() - - def run( - self, text: str, img: str, *args, **kwargs - ) -> Tuple[Optional[str], Optional[Image.Image]]: - """ - Runs the Qwen chatbot model on the given text and image inputs. - - Args: - text (str): The input text for the chatbot. - img (str): The input image for the chatbot. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - Tuple[Optional[str], Optional[Image.Image]]: A tuple containing the response generated by the chatbot - and the image associated with the response (if any). 
- """ - try: - if self.return_bounding_boxes: - query = self.tokenizer.from_list_format( - [ - {"image": img, "text": text}, - ] - ) - - inputs = self.tokenizer(query, return_tensors="pt") - inputs = inputs.to(self.model.device) - pred = self.model.generate(**inputs) - response = self.tokenizer.decode( - pred.cpu()[0], skip_special_tokens=False - ) - - image_bb = self.tokenizer.draw_bbox_on_latest_picture( - response - ) - - if image_bb: - image_bb.save("output.jpg") - else: - print("No bounding boxes found in the image.") - - return response, image_bb - else: - query = self.tokenizer.from_list_format( - [ - {"image": img, "text": text}, - ] - ) - - inputs = self.tokenizer(query, return_tensors="pt") - inputs = inputs.to(self.model.device) - pred = self.model.generate(**inputs) - response = self.tokenizer.decode( - pred.cpu()[0], skip_special_tokens=False - ) - return response - except Exception as error: - print(f"[ERROR]: [QwenVLMultiModal]: {error}") - - def chat( - self, text: str, img: str, *args, **kwargs - ) -> tuple[str, list]: - """ - Chat with the model using text and image inputs. - - Args: - text (str): The text input for the chat. - img (str): The image input for the chat. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - tuple[str, list]: A tuple containing the response and chat history. - - Raises: - Exception: If an error occurs during the chat. - - """ - try: - response, history = self.model.chat( - self.tokenizer, - query=f"{img}这是什么", - history=None, - ) - return response, history - except Exception as e: - raise Exception( - "An error occurred during the chat." 
- ) from e diff --git a/swarms/models/sam.py b/swarms/models/sam.py deleted file mode 100644 index f47d5a89..00000000 --- a/swarms/models/sam.py +++ /dev/null @@ -1,108 +0,0 @@ -from typing import List - -import requests -import torch -from PIL import Image -from transformers import SamModel, SamProcessor - -device = "cuda" if torch.cuda.is_available() else "cpu" - - -class SAM: - """ - Class representing the SAM (Segmentation and Masking) model. - - Args: - model_name (str): The name of the pre-trained SAM model. Default is "facebook/sam-vit-huge". - device (torch.device): The device to run the model on. Default is the current device. - input_points (List[List[int]]): The 2D location of a window in the image to segment. Default is [[450, 600]]. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Attributes: - model_name (str): The name of the pre-trained SAM model. - device (torch.device): The device to run the model on. - input_points (List[List[int]]): The 2D location of a window in the image to segment. - model (SamModel): The pre-trained SAM model. - processor (SamProcessor): The processor for the SAM model. - - Methods: - run(task=None, img=None, *args, **kwargs): Runs the SAM model on the given image and returns the segmentation scores and masks. - process_img(img: str = None, *args, **kwargs): Processes the input image and returns the processed image. - - """ - - def __init__( - self, - model_name: str = "facebook/sam-vit-huge", - device=device, - input_points: List[List[int]] = [[450, 600]], - *args, - **kwargs, - ): - self.model_name = model_name - self.device = device - self.input_points = input_points - - self.model = SamModel.from_pretrained( - model_name, *args, **kwargs - ).to(device) - - self.processor = SamProcessor.from_pretrained(model_name) - - def run(self, task: str = None, img: str = None, *args, **kwargs): - """ - Runs the SAM model on the given image and returns the segmentation scores and masks. 
- - Args: - task: The task to perform. Not used in this method. - img: The input image to segment. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - Tuple: A tuple containing the segmentation scores and masks. - - """ - img = self.process_img(img) - - # Specify the points of the mask to segment - input_points = [ - self.input_points - ] # 2D location of a window in the image - - # Preprocess the image - inputs = self.processor( - img, input_points=input_points, return_tensors="pt" - ).to(device) - - with torch.no_grad(): - outputs = self.model(**inputs) # noqa: E999 - - masks = self.processor.image_processor.post_process_masks( - outputs.pred_masks.cpu(), - inputs["original_sizes"].cpu(), - inputs["reshaped_input_sizes"].cpu(), - ) - scores = outputs.iou_scores - - return scores, masks - - def process_img(self, img: str = None, *args, **kwargs): - """ - Processes the input image and returns the processed image. - - Args: - img (str): The URL or file path of the input image. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - Image: The processed image. 
- - """ - raw_image = Image.open( - requests.get(img, stream=True, *args, **kwargs).raw - ).convert("RGB") - - return raw_image diff --git a/swarms/models/sampling_params.py b/swarms/models/sampling_params.py deleted file mode 100644 index d231c295..00000000 --- a/swarms/models/sampling_params.py +++ /dev/null @@ -1,300 +0,0 @@ -"""Sampling parameters for text generation.""" - -from enum import IntEnum -from functools import cached_property -from typing import Callable, List, Optional, Union - -import torch - -_SAMPLING_EPS = 1e-5 - - -class SamplingType(IntEnum): - GREEDY = 0 - RANDOM = 1 - BEAM = 2 - - -LogitsProcessor = Callable[[List[int], torch.Tensor], torch.Tensor] -"""LogitsProcessor is a function that takes a list of previously generated -tokens and a tensor of the logits for the next token, and returns a modified -tensor of logits to sample from.""" - - -class SamplingParams: - """Sampling parameters for text generation. - - Overall, we follow the sampling parameters from the OpenAI text completion - API (https://platform.openai.com/docs/api-reference/completions/create). - In addition, we support beam search, which is not supported by OpenAI. - - Args: - n: Number of output sequences to return for the given prompt. - best_of: Number of output sequences that are generated from the prompt. - From these `best_of` sequences, the top `n` sequences are returned. - `best_of` must be greater than or equal to `n`. This is treated as - the beam width when `use_beam_search` is True. By default, `best_of` - is set to `n`. - presence_penalty: Float that penalizes new tokens based on whether they - appear in the generated text so far. Values > 0 encourage the model - to use new tokens, while values < 0 encourage the model to repeat - tokens. - frequency_penalty: Float that penalizes new tokens based on their - frequency in the generated text so far. Values > 0 encourage the - model to use new tokens, while values < 0 encourage the model to - repeat tokens. 
- repetition_penalty: Float that penalizes new tokens based on whether - they appear in the prompt and the generated text so far. Values > 1 - encourage the model to use new tokens, while values < 1 encourage - the model to repeat tokens. - temperature: Float that controls the randomness of the sampling. Lower - values make the model more deterministic, while higher values make - the model more random. Zero means greedy sampling. - top_p: Float that controls the cumulative probability of the top tokens - to consider. Must be in (0, 1]. Set to 1 to consider all tokens. - top_k: Integer that controls the number of top tokens to consider. Set - to -1 to consider all tokens. - min_p: Float that represents the minimum probability for a token to be - considered, relative to the probability of the most likely token. - Must be in [0, 1]. Set to 0 to disable this. - use_beam_search: Whether to use beam search instead of sampling. - length_penalty: Float that penalizes sequences based on their length. - Used in beam search. - early_stopping: Controls the stopping condition for beam search. It - accepts the following values: `True`, where the generation stops as - soon as there are `best_of` complete candidates; `False`, where an - heuristic is applied and the generation stops when is it very - unlikely to find better candidates; `"never"`, where the beam search - procedure only stops when there cannot be better candidates - (canonical beam search algorithm). - stop: List of strings that stop the generation when they are generated. - The returned output will not contain the stop strings. - stop_token_ids: List of tokens that stop the generation when they are - generated. The returned output will contain the stop tokens unless - the stop tokens are special tokens. - include_stop_str_in_output: Whether to include the stop strings in output - text. Defaults to False. - ignore_eos: Whether to ignore the EOS token and continue generating - tokens after the EOS token is generated. 
- max_tokens: Maximum number of tokens to generate per output sequence. - logprobs: Number of log probabilities to return per output token. - Note that the implementation follows the OpenAI API: The return - result includes the log probabilities on the `logprobs` most likely - tokens, as well the chosen tokens. The API will always return the - log probability of the sampled token, so there may be up to - `logprobs+1` elements in the response. - prompt_logprobs: Number of log probabilities to return per prompt token. - skip_special_tokens: Whether to skip special tokens in the output. - spaces_between_special_tokens: Whether to add spaces between special - tokens in the output. Defaults to True. - logits_processors: List of functions that modify logits based on - previously generated tokens. - """ - - def __init__( - self, - n: int = 1, - best_of: Optional[int] = None, - presence_penalty: float = 0.0, - frequency_penalty: float = 0.0, - repetition_penalty: float = 1.0, - temperature: float = 1.0, - top_p: float = 1.0, - top_k: int = -1, - min_p: float = 0.0, - use_beam_search: bool = False, - length_penalty: float = 1.0, - early_stopping: Union[bool, str] = False, - stop: Union[str, List[str], None] = None, - stop_token_ids: Optional[List[int]] = None, - include_stop_str_in_output: bool = False, - ignore_eos: bool = False, - max_tokens: Optional[int] = 16, - logprobs: Optional[int] = None, - prompt_logprobs: Optional[int] = None, - skip_special_tokens: bool = True, - spaces_between_special_tokens: bool = True, - logits_processors: Optional[List[LogitsProcessor]] = None, - ) -> None: - self.n = n - self.best_of = best_of if best_of is not None else n - self.presence_penalty = presence_penalty - self.frequency_penalty = frequency_penalty - self.repetition_penalty = repetition_penalty - self.temperature = temperature - self.top_p = top_p - self.top_k = top_k - self.min_p = min_p - self.use_beam_search = use_beam_search - self.length_penalty = length_penalty - 
self.early_stopping = early_stopping - if stop is None: - self.stop = [] - elif isinstance(stop, str): - self.stop = [stop] - else: - self.stop = list(stop) - if stop_token_ids is None: - self.stop_token_ids = [] - else: - self.stop_token_ids = list(stop_token_ids) - self.ignore_eos = ignore_eos - self.max_tokens = max_tokens - self.logprobs = logprobs - self.prompt_logprobs = prompt_logprobs - self.skip_special_tokens = skip_special_tokens - self.spaces_between_special_tokens = ( - spaces_between_special_tokens - ) - self.logits_processors = logits_processors - self.include_stop_str_in_output = include_stop_str_in_output - self._verify_args() - if self.use_beam_search: - self._verify_beam_search() - else: - self._verify_non_beam_search() - if self.temperature < _SAMPLING_EPS: - # Zero temperature means greedy sampling. - self.top_p = 1.0 - self.top_k = -1 - self.min_p = 0.0 - self._verify_greedy_sampling() - - def _verify_args(self) -> None: - if self.n < 1: - raise ValueError(f"n must be at least 1, got {self.n}.") - if self.best_of < self.n: - raise ValueError( - "best_of must be greater than or equal to n, " - f"got n={self.n} and best_of={self.best_of}." - ) - if not -2.0 <= self.presence_penalty <= 2.0: - raise ValueError( - "presence_penalty must be in [-2, 2], got " - f"{self.presence_penalty}." - ) - if not -2.0 <= self.frequency_penalty <= 2.0: - raise ValueError( - "frequency_penalty must be in [-2, 2], got " - f"{self.frequency_penalty}." - ) - if not 0.0 < self.repetition_penalty <= 2.0: - raise ValueError( - "repetition_penalty must be in (0, 2], got " - f"{self.repetition_penalty}." - ) - if self.temperature < 0.0: - raise ValueError( - "temperature must be non-negative, got" - f" {self.temperature}." - ) - if not 0.0 < self.top_p <= 1.0: - raise ValueError( - f"top_p must be in (0, 1], got {self.top_p}." - ) - if self.top_k < -1 or self.top_k == 0: - raise ValueError( - "top_k must be -1 (disable), or at least 1, " - f"got {self.top_k}." 
- ) - if not 0.0 <= self.min_p <= 1.0: - raise ValueError( - f"min_p must be in [0, 1], got {self.min_p}." - ) - if self.max_tokens is not None and self.max_tokens < 1: - raise ValueError( - "max_tokens must be at least 1, got" - f" {self.max_tokens}." - ) - if self.logprobs is not None and self.logprobs < 0: - raise ValueError( - f"logprobs must be non-negative, got {self.logprobs}." - ) - if ( - self.prompt_logprobs is not None - and self.prompt_logprobs < 0 - ): - raise ValueError( - "prompt_logprobs must be non-negative, got " - f"{self.prompt_logprobs}." - ) - - def _verify_beam_search(self) -> None: - if self.best_of == 1: - raise ValueError( - "best_of must be greater than 1 when using beam " - f"search. Got {self.best_of}." - ) - if self.temperature > _SAMPLING_EPS: - raise ValueError( - "temperature must be 0 when using beam search." - ) - if self.top_p < 1.0 - _SAMPLING_EPS: - raise ValueError( - "top_p must be 1 when using beam search." - ) - if self.top_k != -1: - raise ValueError( - "top_k must be -1 when using beam search." - ) - if self.early_stopping not in [True, False, "never"]: - raise ValueError( - "early_stopping must be True, False, or 'never', " - f"got {self.early_stopping}." - ) - - def _verify_non_beam_search(self) -> None: - if self.early_stopping is not False: - raise ValueError( - "early_stopping is not effective and must be " - "False when not using beam search." - ) - if ( - self.length_penalty < 1.0 - _SAMPLING_EPS - or self.length_penalty > 1.0 + _SAMPLING_EPS - ): - raise ValueError( - "length_penalty is not effective and must be the " - "default value of 1.0 when not using beam search." - ) - - def _verify_greedy_sampling(self) -> None: - if self.best_of > 1: - raise ValueError( - "best_of must be 1 when using greedy sampling." - f"Got {self.best_of}." 
- ) - - @cached_property - def sampling_type(self) -> SamplingType: - if self.use_beam_search: - return SamplingType.BEAM - if self.temperature < _SAMPLING_EPS: - return SamplingType.GREEDY - return SamplingType.RANDOM - - def __repr__(self) -> str: - return ( - f"SamplingParams(n={self.n}, " - f"best_of={self.best_of}, " - f"presence_penalty={self.presence_penalty}, " - f"frequency_penalty={self.frequency_penalty}, " - f"repetition_penalty={self.repetition_penalty}, " - f"temperature={self.temperature}, " - f"top_p={self.top_p}, " - f"top_k={self.top_k}, " - f"min_p={self.min_p}, " - f"use_beam_search={self.use_beam_search}, " - f"length_penalty={self.length_penalty}, " - f"early_stopping={self.early_stopping}, " - f"stop={self.stop}, " - f"stop_token_ids={self.stop_token_ids}, " - f"include_stop_str_in_output={self.include_stop_str_in_output}, " - f"ignore_eos={self.ignore_eos}, " - f"max_tokens={self.max_tokens}, " - f"logprobs={self.logprobs}, " - f"prompt_logprobs={self.prompt_logprobs}, " - f"skip_special_tokens={self.skip_special_tokens}, " - "spaces_between_special_tokens=" - f"{self.spaces_between_special_tokens})" - ) diff --git a/swarms/models/ssd_1b.py b/swarms/models/ssd_1b.py deleted file mode 100644 index 3042d1ab..00000000 --- a/swarms/models/ssd_1b.py +++ /dev/null @@ -1,280 +0,0 @@ -import concurrent.futures -import os -import uuid -from dataclasses import dataclass -from io import BytesIO -from typing import List - -import backoff -import torch -from cachetools import TTLCache -from diffusers import StableDiffusionXLPipeline -from PIL import Image -from pydantic import field_validator -from termcolor import colored - - -@dataclass -class SSD1B: - """ - SSD1B model class - - Attributes: - ----------- - image_url: str - The image url generated by the SSD1B API - - Methods: - -------- - __call__(self, task: str) -> SSD1B: - Makes a call to the SSD1B API and returns the image url - - Example: - -------- - model = SSD1B() - task = "A painting of a 
dog" - neg_prompt = "ugly, blurry, poor quality" - image_url = model(task, neg_prompt) - print(image_url) - """ - - model: str = "dall-e-3" - img: str = None - size: str = "1024x1024" - max_retries: int = 3 - quality: str = "standard" - model_name: str = "segment/SSD-1B" - n: int = 1 - save_path: str = "images" - max_time_seconds: int = 60 - save_folder: str = "images" - image_format: str = "png" - device: str = "cuda" - dashboard: bool = False - cache = TTLCache(maxsize=100, ttl=3600) - pipe = StableDiffusionXLPipeline.from_pretrained( - "segmind/SSD-1B", - torch_dtype=torch.float16, - use_safetensors=True, - variant="fp16", - ).to(device) - - def __post_init__(self): - """Post init method""" - - if self.img is not None: - self.img = self.convert_to_bytesio(self.img) - - os.makedirs(self.save_path, exist_ok=True) - - class Config: - """Config class for the SSD1B model""" - - arbitrary_types_allowed = True - - @field_validator("max_retries", "time_seconds") - @classmethod - def must_be_positive(cls, value): - if value <= 0: - raise ValueError("Must be positive") - return value - - def read_img(self, img: str): - """Read the image using pil""" - img = Image.open(img) - return img - - def set_width_height(self, img: str, width: int, height: int): - """Set the width and height of the image""" - img = self.read_img(img) - img = img.resize((width, height)) - return img - - def convert_to_bytesio(self, img: str, format: str = "PNG"): - """Convert the image to an bytes io object""" - byte_stream = BytesIO() - img.save(byte_stream, format=format) - byte_array = byte_stream.getvalue() - return byte_array - - @backoff.on_exception( - backoff.expo, Exception, max_time=max_time_seconds - ) - def __call__(self, task: str, neg_prompt: str): - """ - Text to image conversion using the SSD1B API - - Parameters: - ----------- - task: str - The task to be converted to an image - - Returns: - -------- - SSD1B: - An instance of the SSD1B class with the image url generated by the SSD1B 
API - - Example: - -------- - >>> dalle3 = SSD1B() - >>> task = "A painting of a dog" - >>> image_url = dalle3(task) - >>> print(image_url) - https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png - """ - if self.dashboard: - self.print_dashboard() - if task in self.cache: - return self.cache[task] - try: - img = self.pipe( - prompt=task, neg_prompt=neg_prompt - ).images[0] - - # Generate a unique filename for the image - img_name = f"{uuid.uuid4()}.{self.image_format}" - img_path = os.path.join(self.save_path, img_name) - - # Save the image - img.save(img_path, self.image_format) - self.cache[task] = img_path - - return img_path - - except Exception as error: - # Handling exceptions and printing the errors details - print( - colored( - ( - f"Error running SSD1B: {error} try optimizing" - " your api key and or try again" - ), - "red", - ) - ) - raise error - - def _generate_image_name(self, task: str): - """Generate a sanitized file name based on the task""" - sanitized_task = "".join( - char for char in task if char.isalnum() or char in " _ -" - ).rstrip() - return f"{sanitized_task}.{self.image_format}" - - def _download_image(self, img: Image, filename: str): - """ - Save the PIL Image object to a file. 
- """ - full_path = os.path.join(self.save_path, filename) - img.save(full_path, self.image_format) - - def print_dashboard(self): - """Print the SSD1B dashboard""" - print( - colored( - f"""SSD1B Dashboard: - -------------------- - - Model: {self.model} - Image: {self.img} - Size: {self.size} - Max Retries: {self.max_retries} - Quality: {self.quality} - N: {self.n} - Save Path: {self.save_path} - Time Seconds: {self.time_seconds} - Save Folder: {self.save_folder} - Image Format: {self.image_format} - -------------------- - - - """, - "green", - ) - ) - - def process_batch_concurrently( - self, tasks: List[str], max_workers: int = 5 - ): - """ - - Process a batch of tasks concurrently - - Args: - tasks (List[str]): A list of tasks to be processed - max_workers (int): The maximum number of workers to use for the concurrent processing - - Returns: - -------- - results (List[str]): A list of image urls generated by the SSD1B API - - Example: - -------- - >>> model = SSD1B() - >>> tasks = ["A painting of a dog", "A painting of a cat"] - >>> results = model.process_batch_concurrently(tasks) - >>> print(results) - - """ - with concurrent.futures.ThreadPoolExecutor( - max_workers=max_workers - ) as executor: - future_to_task = { - executor.submit(self, task): task for task in tasks - } - results = [] - for future in concurrent.futures.as_completed( - future_to_task - ): - task = future_to_task[future] - try: - img = future.result() - results.append(img) - - print(f"Task {task} completed: {img}") - except Exception as error: - print( - colored( - ( - f"Error running SSD1B: {error} try" - " optimizing your api key and or try" - " again" - ), - "red", - ) - ) - print( - colored( - ( - "Error running SSD1B:" - f" {error.http_status}" - ), - "red", - ) - ) - print( - colored( - f"Error running SSD1B: {error.error}", - "red", - ) - ) - raise error - - def _generate_uuid(self): - """Generate a uuid""" - return str(uuid.uuid4()) - - def __repr__(self): - """Repr method for the 
SSD1B class""" - return f"SSD1B(image_url={self.image_url})" - - def __str__(self): - """Str method for the SSD1B class""" - return f"SSD1B(image_url={self.image_url})" - - @backoff.on_exception( - backoff.expo, Exception, max_tries=max_retries - ) - def rate_limited_call(self, task: str): - """Rate limited call to the SSD1B API""" - return self.__call__(task) diff --git a/swarms/models/tiktoken_wrapper.py b/swarms/models/tiktoken_wrapper.py deleted file mode 100644 index c114200b..00000000 --- a/swarms/models/tiktoken_wrapper.py +++ /dev/null @@ -1,101 +0,0 @@ -import tiktoken - -import concurrent.futures -from typing import List - - -class TikTokenizer: - def __init__( - self, - model_name: str = "o200k_base", - ): - """ - Initializes a TikTokenizer object. - - Args: - model_name (str, optional): The name of the model to use for tokenization. Defaults to "gpt-4o". - """ - try: - self.model_name = model_name - self.encoding = tiktoken.get_encoding(model_name) - except Exception as e: - raise ValueError( - f"Failed to initialize tokenizer with model '{model_name}': {str(e)}" - ) - - def encode(self, string: str) -> str: - """ - Tokenizes a text string. - - Args: - string (str): The input text string. - - Returns: - str: The tokenized text string. - """ - return self.encoding.encode(string) - - def decode(self, tokens: List[int]) -> str: - """ - Detokenizes a text string. - - Args: - string (str): The input tokenized text string. - - Returns: - str: The detokenized text string. - """ - return self.encoding.decode(tokens) - - def count_tokens(self, string: str) -> int: - """ - Returns the number of tokens in a text string. - - Args: - string (str): The input text string. - - Returns: - int: The number of tokens in the text string. 
- """ - num_tokens = 0 - - def count_tokens_in_chunk(chunk): - nonlocal num_tokens - num_tokens += len(self.encoding.encode(chunk)) - - # Split the string into chunks for parallel processing - chunks = [ - string[i : i + 1000] for i in range(0, len(string), 1000) - ] - - # Create a ThreadPoolExecutor with maximum threads - with concurrent.futures.ThreadPoolExecutor( - max_workers=10 - ) as executor: - # Submit each chunk for processing - futures = [ - executor.submit(count_tokens_in_chunk, chunk) - for chunk in chunks - ] - - # Wait for all futures to complete - concurrent.futures.wait(futures) - - return num_tokens - - -# # Path: swarms/models/tiktoken_wrapper.py -# # Example -# # Initialize the TikTokenizer object with the default model -# tokenizer = TikTokenizer() - -# # Tokenize a text string - -# text = "Hello, how are you doing today?" -# tokens = tokenizer.encode(text) - -# print(f"Tokens: {tokens}") - -# # Count the number of tokens in the text string -# num_tokens = tokenizer.count_tokens(text) -# print(f"Number of tokens: {num_tokens}") diff --git a/swarms/models/together.py b/swarms/models/together.py deleted file mode 100644 index fbb5ae51..00000000 --- a/swarms/models/together.py +++ /dev/null @@ -1,137 +0,0 @@ -import logging -import os -from typing import Optional - -import requests -from dotenv import load_dotenv - -from swarms.models.base_llm import BaseLLM - -# Load environment variables -load_dotenv() - - -def together_api_key_env(): - """Get the API key from the environment.""" - return os.getenv("TOGETHER_API_KEY") - - -class TogetherLLM(BaseLLM): - """ - GPT-4 Vision API - - This class is a wrapper for the OpenAI API. It is used to run the GPT-4 Vision model. - - Parameters - ---------- - together_api_key : str - The OpenAI API key. Defaults to the together_api_key environment variable. - max_tokens : int - The maximum number of tokens to generate. Defaults to 300. - - - Methods - ------- - encode_image(img: str) - Encode image to base64. 
- run(task: str, img: str) - Run the model. - __call__(task: str, img: str) - Run the model. - - Examples: - --------- - >>> from swarms.models import GPT4VisionAPI - >>> llm = GPT4VisionAPI() - >>> task = "What is the color of the object?" - >>> img = "https://i.imgur.com/2M2ZGwC.jpeg" - >>> llm.run(task, img) - - - """ - - def __init__( - self, - together_api_key: str = together_api_key_env, - model_name: str = "mistralai/Mixtral-8x7B-Instruct-v0.1", - logging_enabled: bool = False, - max_workers: int = 10, - max_tokens: str = 300, - api_endpoint: str = "https://api.together.xyz", - beautify: bool = False, - streaming_enabled: Optional[bool] = False, - meta_prompt: Optional[bool] = False, - system_prompt: Optional[str] = None, - *args, - **kwargs, - ): - super(TogetherLLM).__init__(*args, **kwargs) - self.together_api_key = together_api_key - self.logging_enabled = logging_enabled - self.model_name = model_name - self.max_workers = max_workers - self.max_tokens = max_tokens - self.api_endpoint = api_endpoint - self.beautify = beautify - self.streaming_enabled = streaming_enabled - self.meta_prompt = meta_prompt - self.system_prompt = system_prompt - - if self.logging_enabled: - logging.basicConfig(level=logging.DEBUG) - else: - # Disable debug logs for requests and urllib3 - logging.getLogger("requests").setLevel(logging.WARNING) - logging.getLogger("urllib3").setLevel(logging.WARNING) - - if self.meta_prompt: - self.system_prompt = self.meta_prompt_init() - - # Function to handle vision tasks - def run(self, task: str = None, *args, **kwargs): - """Run the model.""" - try: - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {self.together_api_key}", - } - payload = { - "model": self.model_name, - "messages": [ - { - "role": "system", - "content": [self.system_prompt], - }, - { - "role": "user", - "content": task, - }, - ], - "max_tokens": self.max_tokens, - **kwargs, - } - response = requests.post( - self.api_endpoint, - 
headers=headers, - json=payload, - *args, - **kwargs, - ) - - out = response.json() - content = ( - out["choices"][0] - .get("message", {}) - .get("content", None) - ) - if self.streaming_enabled: - content = self.stream_response(content) - - return content - - except Exception as error: - print( - f"Error with the request: {error}, make sure you" - " double check input types and positions" - ) - return None diff --git a/swarms/models/vilt.py b/swarms/models/vilt.py deleted file mode 100644 index 60425e52..00000000 --- a/swarms/models/vilt.py +++ /dev/null @@ -1,57 +0,0 @@ -import requests -from PIL import Image -from transformers import ViltForQuestionAnswering, ViltProcessor - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -class Vilt(BaseMultiModalModel): - """ - Vision-and-Language Transformer (ViLT) model fine-tuned on VQAv2. - It was introduced in the paper ViLT: Vision-and-Language Transformer Without - Convolution or Region Supervision by Kim et al. and first released in this repository. - - Disclaimer: The team releasing ViLT did not write a model card for this model - so this model card has been written by the Hugging Face team. 
- - https://huggingface.co/dandelin/vilt-b32-finetuned-vqa - - - Example: - >>> model = Vilt() - >>> output = model("What is this image", "http://images.cocodataset.org/val2017/000000039769.jpg") - - """ - - def __init__( - self, - model_name: str = "dandelin/vilt-b32-finetuned-vqa", - *args, - **kwargs, - ): - super().__init__(model_name, *args, **kwargs) - self.processor = ViltProcessor.from_pretrained( - model_name, *args, **kwargs - ) - self.model = ViltForQuestionAnswering.from_pretrained( - model_name, *args, **kwargs - ) - - def run(self, task: str = None, img: str = None, *args, **kwargs): - """ - Run the model - - - Args: - - """ - # Download the image - image = Image.open(requests.get(img, stream=True).raw) - - encoding = self.processor(image, task, return_tensors="pt") - - # Forward pass - outputs = self.model(**encoding) - logits = outputs.logits - idx = logits.argmax(-1).item() - print("Predicted Answer:", self.model.config.id2label[idx]) diff --git a/swarms/models/vip_llava.py b/swarms/models/vip_llava.py deleted file mode 100644 index db532913..00000000 --- a/swarms/models/vip_llava.py +++ /dev/null @@ -1,94 +0,0 @@ -from io import BytesIO - -import requests -import torch -from PIL import Image -from transformers import ( - AutoProcessor, - VipLlavaForConditionalGeneration, -) - -from swarms.models.base_multimodal_model import BaseMultiModalModel - - -class VipLlavaMultiModal(BaseMultiModalModel): - """ - A multi-modal model for VIP-LLAVA. - - Args: - model_name (str): The name or path of the pre-trained model. - max_new_tokens (int): The maximum number of new tokens to generate. - device_map (str): The device mapping for the model. - torch_dtype: The torch data type for the model. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. 
- """ - - def __init__( - self, - model_name: str = "llava-hf/vip-llava-7b-hf", - max_new_tokens: int = 500, - device_map: str = "auto", - torch_dtype=torch.float16, - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.model_name = model_name - self.max_new_tokens = max_new_tokens - self.device_map = device_map - self.torch_dtype = torch_dtype - - self.model = VipLlavaForConditionalGeneration.from_pretrained( - model_name, - device_map=device_map, - torch_dtype=torch_dtype, - *args, - **kwargs, - ) - self.processor = AutoProcessor.from_pretrained( - model_name, *args, **kwargs - ) - - def run(self, text: str, img: str, *args, **kwargs): - """ - Run the VIP-LLAVA model. - - Args: - text (str): The input text. - img (str): The URL of the input image. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - str: The generated output text. - tuple: A tuple containing None and the error message if an error occurs. - """ - try: - response = requests.get(img, stream=True) - response.raise_for_status() - image = Image.open(BytesIO(response.content)) - - inputs = self.processor( - text=text, - images=image, - return_tensors="pt", - *args, - **kwargs, - ).to(0, self.torch_dtype) - - # Generate - generate_ids = self.model.generate( - **inputs, max_new_tokens=self.max_new_tokens, **kwargs - ) - - return self.processor.decode( - generate_ids[0][len(inputs["input_ids"][0]) :], - skip_special_tokens=True, - ) - - except requests.RequestException as error: - return None, f"Error fetching image: {error}" - - except Exception as error: - return None, f"Error during model inference: {error}" diff --git a/swarms/models/zeroscope.py b/swarms/models/zeroscope.py deleted file mode 100644 index 01e578fa..00000000 --- a/swarms/models/zeroscope.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler -from diffusers.utils import export_to_video - - -class ZeroscopeTTV: - 
""" - ZeroscopeTTV class represents a zero-shot video generation model. - - Args: - model_name (str): The name of the pre-trained model to use. - torch_dtype (torch.dtype): The torch data type to use for computations. - chunk_size (int): The size of chunks for forward chunking. - dim (int): The dimension along which to split the input for forward chunking. - num_inference_steps (int): The number of inference steps to perform. - height (int): The height of the video frames. - width (int): The width of the video frames. - num_frames (int): The number of frames in the video. - - Attributes: - model_name (str): The name of the pre-trained model. - torch_dtype (torch.dtype): The torch data type used for computations. - chunk_size (int): The size of chunks for forward chunking. - dim (int): The dimension along which the input is split for forward chunking. - num_inference_steps (int): The number of inference steps to perform. - height (int): The height of the video frames. - width (int): The width of the video frames. - num_frames (int): The number of frames in the video. - pipe (DiffusionPipeline): The diffusion pipeline for video generation. - - Methods: - forward(task: str = None, *args, **kwargs) -> str: - Performs forward pass on the input task and returns the path of the generated video. - - Examples: - >>> from swarms.models - >>> zeroscope = ZeroscopeTTV() - >>> task = "A person is walking on the street." 
- >>> video_path = zeroscope(task) - - """ - - def __init__( - self, - model_name: str = "cerspense/zeroscope_v2_576w", - torch_dtype=torch.float16, - chunk_size: int = 1, - dim: int = 1, - num_inference_steps: int = 40, - height: int = 320, - width: int = 576, - num_frames: int = 36, - *args, - **kwargs, - ): - self.model_name = model_name - self.torch_dtype = torch_dtype - self.chunk_size = chunk_size - self.dim = dim - self.num_inference_steps = num_inference_steps - self.height = height - self.width = width - self.num_frames = num_frames - - self.pipe = DiffusionPipeline.from_pretrained( - model_name, torch_dtype=torch_dtype, *args, **kwargs - ) - self.pipe.scheduler = DPMSolverMultistepScheduler( - self.pipe.scheduler.config, - ) - self.pipe_enable_model_cpu_offload() - self.pipe.enable_vae_slicing() - self.pipe.unet.enable_forward_chunking( - chunk_size=chunk_size, dim=dim - ) - - def run(self, task: str = None, *args, **kwargs): - """ - Performs a forward pass on the input task and returns the path of the generated video. - - Args: - task (str): The input task for video generation. - - Returns: - str: The path of the generated video. 
- """ - try: - video_frames = self.pipe( - task, - num_inference_steps=self.num_inference_steps, - height=self.height, - width=self.width, - num_frames=self.num_frames, - *args, - **kwargs, - ).frames - video_path = export_to_video(video_frames) - return video_path - except Exception as error: - print(f"Error in [ZeroscopeTTV.forward]: {error}") - raise error diff --git a/swarms/prompts/autoswarm.py b/swarms/prompts/autoswarm.py index 0d76d020..8ded2027 100644 --- a/swarms/prompts/autoswarm.py +++ b/swarms/prompts/autoswarm.py @@ -49,7 +49,7 @@ Output Format: A complete Python script that is ready for copy/paste to GitHub a Here is an example of a a working swarm script that you can use as a rough template for the logic: import os from dotenv import load_dotenv -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import Agent import swarms.prompts.swarm_daddy as sdsp diff --git a/swarms/structs/agent.py b/swarms/structs/agent.py index 8f07a4ed..3b96d86b 100644 --- a/swarms/structs/agent.py +++ b/swarms/structs/agent.py @@ -26,7 +26,7 @@ from pydantic import BaseModel from swarms_memory import BaseVectorDatabase from termcolor import colored -from swarms.models.tiktoken_wrapper import TikTokenizer +from swarm_models.tiktoken_wrapper import TikTokenizer from swarms.prompts.agent_system_prompts import AGENT_SYSTEM_PROMPT_3 from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1, @@ -164,7 +164,7 @@ class Agent: Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import Agent >>> llm = OpenAIChat() >>> agent = Agent(llm=llm, max_loops=1) @@ -885,22 +885,6 @@ class Agent: ) raise error -<<<<<<< HEAD - async def astream_events( - self, task: str = None, img: str = None, *args, **kwargs - ): - """ - Run the Agent with LangChain's astream_events API. - Only works with LangChain-based models. 
- """ - try: - async for evt in self.llm.astream_events(task, version="v1"): - yield evt - except Exception as e: - print(f"Error streaming events: {e}") - - def __call__(self, task: str = None, img: str = None, *args, **kwargs): -======= # @run_on_cpu # def run( # self, @@ -1104,7 +1088,6 @@ class Agent: def __call__( self, task: str = None, img: str = None, *args, **kwargs ): ->>>>>>> ce359f5e ([5.6.8]) """Call the agent Args: diff --git a/swarms/structs/base_workflow.py b/swarms/structs/base_workflow.py index 053bf2ba..b5deb916 100644 --- a/swarms/structs/base_workflow.py +++ b/swarms/structs/base_workflow.py @@ -180,7 +180,7 @@ class BaseWorkflow(BaseStructure): ValueError: If the task is not found in the workflow. Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import SequentialWorkflow >>> llm = OpenAIChat(openai_api_key="") >>> workflow = SequentialWorkflow(max_loops=1) @@ -218,7 +218,7 @@ class BaseWorkflow(BaseStructure): ValueError: If the task is not found in the workflow. Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import SequentialWorkflow >>> llm = OpenAIChat(openai_api_key="") >>> workflow = SequentialWorkflow(max_loops=1) @@ -257,7 +257,7 @@ class BaseWorkflow(BaseStructure): filepath (str): The path to save the workflow state to. Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import SequentialWorkflow >>> llm = OpenAIChat(openai_api_key="") >>> workflow = SequentialWorkflow(max_loops=1) @@ -329,7 +329,7 @@ class BaseWorkflow(BaseStructure): filepath (str): The path to load the workflow state from. 
Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import SequentialWorkflow >>> llm = OpenAIChat(openai_api_key="") >>> workflow = SequentialWorkflow(max_loops=1) @@ -372,7 +372,7 @@ class BaseWorkflow(BaseStructure): **kwargs: Additional keyword arguments to pass to the dashboard. Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import SequentialWorkflow >>> llm = OpenAIChat(openai_api_key="") >>> workflow = SequentialWorkflow(max_loops=1) diff --git a/swarms/structs/dfs_search_swarm.py b/swarms/structs/dfs_search_swarm.py index 655dc097..a47dca3e 100644 --- a/swarms/structs/dfs_search_swarm.py +++ b/swarms/structs/dfs_search_swarm.py @@ -1,5 +1,6 @@ # import os -# from swarms import Agent, OpenAIChat +# from swarms import Agent +from swarm_models import OpenAIChat # from typing import List # class DepthFirstSearchSwarm: @@ -79,7 +80,8 @@ # #################### # import os -# from swarms import Agent, OpenAIChat +# from swarms import Agent +from swarm_models import OpenAIChat # class DFSSwarm: # def __init__(self, agents): diff --git a/swarms/structs/graph_workflow.py b/swarms/structs/graph_workflow.py index 2e665e33..66cbaf63 100644 --- a/swarms/structs/graph_workflow.py +++ b/swarms/structs/graph_workflow.py @@ -225,7 +225,8 @@ class GraphWorkflow(BaseModel): # # Example usage # if __name__ == "__main__": -# from swarms import Agent, OpenAIChat +# from swarms import Agent +from swarm_models import OpenAIChat # import os # from dotenv import load_dotenv diff --git a/swarms/structs/hiearchical_swarm.py b/swarms/structs/hiearchical_swarm.py index ae44c714..82fa6ba2 100644 --- a/swarms/structs/hiearchical_swarm.py +++ b/swarms/structs/hiearchical_swarm.py @@ -6,7 +6,7 @@ from swarms.structs.base_swarm import BaseSwarm from swarms.structs.agent import Agent from swarms.structs.concat import concat_strings from 
swarms.structs.agent_registry import AgentRegistry -from swarms.models.base_llm import BaseLLM +from swarm_models.base_llm import BaseLLM from swarms.structs.conversation import Conversation diff --git a/swarms/structs/monte_carlo_swarm.py b/swarms/structs/monte_carlo_swarm.py index 2908aa23..a2b558ba 100644 --- a/swarms/structs/monte_carlo_swarm.py +++ b/swarms/structs/monte_carlo_swarm.py @@ -200,7 +200,8 @@ def average_aggregator(results: List[float]) -> float: # import os -# from swarms import Agent, OpenAIChat +# from swarms import Agent +from swarm_models import OpenAIChat # from typing import List, Union, Callable # from collections import Counter diff --git a/swarms/structs/multi_agent_collab.py b/swarms/structs/multi_agent_collab.py index 2914787d..9f99f0f8 100644 --- a/swarms/structs/multi_agent_collab.py +++ b/swarms/structs/multi_agent_collab.py @@ -85,7 +85,7 @@ class MultiAgentCollaboration(BaseSwarm): Usage: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import Agent >>> from swarms.swarms.multi_agent_collab import MultiAgentCollaboration >>> diff --git a/swarms/structs/omni_agent_types.py b/swarms/structs/omni_agent_types.py index e2c310cf..9a0f3f6a 100644 --- a/swarms/structs/omni_agent_types.py +++ b/swarms/structs/omni_agent_types.py @@ -4,8 +4,8 @@ from typing import ( Sequence, Union, ) -from swarms.models.base_llm import BaseLLM -from swarms.models.base_multimodal_model import BaseMultiModalModel +from swarm_models.base_llm import BaseLLM +from swarm_models.base_multimodal_model import BaseMultiModalModel from swarms.structs.agent import Agent # Unified type for agent diff --git a/swarms/structs/pulsar_swarm 2.py b/swarms/structs/pulsar_swarm 2.py index 0cbd7b38..199bdefe 100644 --- a/swarms/structs/pulsar_swarm 2.py +++ b/swarms/structs/pulsar_swarm 2.py @@ -3,7 +3,8 @@ import sys import datetime from typing import List, Dict, Any, Optional -from swarms import Agent, OpenAIChat 
+from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/swarms/structs/pulsar_swarm.py b/swarms/structs/pulsar_swarm.py index 0cbd7b38..199bdefe 100644 --- a/swarms/structs/pulsar_swarm.py +++ b/swarms/structs/pulsar_swarm.py @@ -3,7 +3,8 @@ import sys import datetime from typing import List, Dict, Any, Optional -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/swarms/structs/recursive_workflow.py b/swarms/structs/recursive_workflow.py index 286e8810..455522b5 100644 --- a/swarms/structs/recursive_workflow.py +++ b/swarms/structs/recursive_workflow.py @@ -21,7 +21,7 @@ class RecursiveWorkflow(BaseStructure): stop_token (Any): The token that indicates when to stop the workflow. Examples: - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> from swarms.structs import RecursiveWorkflow, Task >>> llm = OpenAIChat(openai_api_key="") >>> task = Task(llm, "What's the weather in miami") diff --git a/swarms/structs/run_agents_in_parallel_async_multiprocess.py b/swarms/structs/run_agents_in_parallel_async_multiprocess.py index a6630cc0..c3a4d87f 100644 --- a/swarms/structs/run_agents_in_parallel_async_multiprocess.py +++ b/swarms/structs/run_agents_in_parallel_async_multiprocess.py @@ -1,6 +1,7 @@ import os import asyncio -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat import uvloop from multiprocessing import cpu_count from swarms.utils.calculate_func_metrics import profile_func diff --git a/swarms/structs/task.py b/swarms/structs/task.py index 049ee458..70293426 100644 --- a/swarms/structs/task.py +++ b/swarms/structs/task.py @@ -46,7 +46,7 @@ class Task(BaseModel): Examples: >>> from swarms.structs import Task, Agent - >>> from 
swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False) >>> task = Task(description="What's the weather in miami", agent=agent) >>> task.run() @@ -106,7 +106,7 @@ class Task(BaseModel): Examples: >>> from swarms.structs import Task, Agent - >>> from swarms.models import OpenAIChat + >>> from swarm_models import OpenAIChat >>> agent = Agent(llm=OpenAIChat(openai_api_key=""), max_loops=1, dashboard=False) >>> task = Task(description="What's the weather in miami", agent=agent) >>> task.run() diff --git a/swarms/tools/json_former.py b/swarms/tools/json_former.py index 8e5e041b..01d608a5 100644 --- a/swarms/tools/json_former.py +++ b/swarms/tools/json_former.py @@ -9,7 +9,7 @@ from swarms.tools.logits_processor import ( OutputNumbersTokens, StringStoppingCriteria, ) -from swarms.models.base_llm import BaseLLM +from swarm_models.base_llm import BaseLLM GENERATION_MARKER = "|GENERATION|" diff --git a/swarms/tools/prebuilt/code_executor.py b/swarms/tools/prebuilt/code_executor.py index 115f141a..4ea7597e 100644 --- a/swarms/tools/prebuilt/code_executor.py +++ b/swarms/tools/prebuilt/code_executor.py @@ -1,7 +1,7 @@ import os import subprocess from loguru import logger -from swarms.models.tiktoken_wrapper import TikTokenizer +from swarm_models.tiktoken_wrapper import TikTokenizer class CodeExecutor: diff --git a/tests/models/test_anthropic.py b/tests/models/test_anthropic.py index cc48479a..1f583889 100644 --- a/tests/models/test_anthropic.py +++ b/tests/models/test_anthropic.py @@ -3,7 +3,7 @@ from unittest.mock import Mock, patch import pytest -from swarms.models.anthropic import Anthropic +from swarm_models.anthropic import Anthropic # Mock the Anthropic API client for testing diff --git a/tests/models/test_fuyu.py b/tests/models/test_fuyu.py index e76e11bb..60044de2 100644 --- a/tests/models/test_fuyu.py +++ b/tests/models/test_fuyu.py @@ -5,7 +5,7 @@ import torch from 
PIL import Image from transformers import FuyuImageProcessor, FuyuProcessor -from swarms.models.fuyu import Fuyu +from swarm_models.fuyu import Fuyu # Basic test to ensure instantiation of class. diff --git a/tests/models/test_gemini.py b/tests/models/test_gemini.py index a61d1676..91e7c0ac 100644 --- a/tests/models/test_gemini.py +++ b/tests/models/test_gemini.py @@ -2,7 +2,7 @@ from unittest.mock import Mock, patch import pytest -from swarms.models.gemini import Gemini +from swarm_models.gemini import Gemini # Define test fixtures diff --git a/tests/models/test_gpt4_vision_api.py b/tests/models/test_gpt4_vision_api.py index ac797280..3a67f8ee 100644 --- a/tests/models/test_gpt4_vision_api.py +++ b/tests/models/test_gpt4_vision_api.py @@ -7,7 +7,7 @@ from aiohttp import ClientResponseError from dotenv import load_dotenv from requests.exceptions import RequestException -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI load_dotenv() diff --git a/tests/models/test_hf.py b/tests/models/test_hf.py index ad56699c..65e52712 100644 --- a/tests/models/test_hf.py +++ b/tests/models/test_hf.py @@ -4,7 +4,7 @@ from unittest.mock import patch import pytest import torch -from swarms.models.huggingface import HuggingfaceLLM +from swarm_models.huggingface import HuggingfaceLLM # Fixture for the class instance diff --git a/tests/models/test_hf_pipeline.py b/tests/models/test_hf_pipeline.py index 8580dd56..98490623 100644 --- a/tests/models/test_hf_pipeline.py +++ b/tests/models/test_hf_pipeline.py @@ -3,7 +3,7 @@ from unittest.mock import patch import pytest import torch -from swarms.models.huggingface_pipeline import HuggingfacePipeline +from swarm_models.huggingface_pipeline import HuggingfacePipeline @pytest.fixture diff --git a/tests/models/test_idefics.py b/tests/models/test_idefics.py index 3bfee679..f381d41b 100644 --- a/tests/models/test_idefics.py +++ b/tests/models/test_idefics.py @@ -3,7 +3,7 @@ from 
unittest.mock import patch import pytest import torch -from swarms.models.idefics import ( +from swarm_models.idefics import ( AutoProcessor, Idefics, IdeficsForVisionText2Text, diff --git a/tests/models/test_imports.py b/tests/models/test_imports.py index 2da66f21..bdca4350 100644 --- a/tests/models/test_imports.py +++ b/tests/models/test_imports.py @@ -1,4 +1,4 @@ -from swarms.models import __all__ +from swarm_models import __all__ EXPECTED_ALL = [ "Anthropic", diff --git a/tests/models/test_kosmos.py b/tests/models/test_kosmos.py index 1219f895..ce7c36d6 100644 --- a/tests/models/test_kosmos.py +++ b/tests/models/test_kosmos.py @@ -4,7 +4,7 @@ import pytest import requests # This will be your project directory -from swarms.models.kosmos_two import Kosmos, is_overlapping +from swarm_models.kosmos_two import Kosmos, is_overlapping # A placeholder image URL for testing TEST_IMAGE_URL = "https://images.unsplash.com/photo-1673267569891-ca4246caafd7?auto=format&fit=crop&q=60&w=400&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHx0b3BpYy1mZWVkfDM1fEpwZzZLaWRsLUhrfHxlbnwwfHx8fHw%3D" diff --git a/tests/models/test_nougat.py b/tests/models/test_nougat.py index 858845a6..2c7f6361 100644 --- a/tests/models/test_nougat.py +++ b/tests/models/test_nougat.py @@ -6,7 +6,7 @@ import torch from PIL import Image from transformers import NougatProcessor, VisionEncoderDecoderModel -from swarms.models.nougat import Nougat +from swarm_models.nougat import Nougat @pytest.fixture diff --git a/tests/models/test_open_dalle.py b/tests/models/test_open_dalle.py index 4ff14e10..4dfd200c 100644 --- a/tests/models/test_open_dalle.py +++ b/tests/models/test_open_dalle.py @@ -1,7 +1,7 @@ import pytest import torch -from swarms.models.open_dalle import OpenDalle +from swarm_models.open_dalle import OpenDalle def test_init(): diff --git a/tests/models/test_openaitts.py b/tests/models/test_openaitts.py index 42745284..03e1e9c4 100644 --- a/tests/models/test_openaitts.py +++ b/tests/models/test_openaitts.py @@ 
-2,7 +2,7 @@ from unittest.mock import MagicMock, patch import pytest -from swarms.models.openai_tts import OpenAITTS +from swarm_models.openai_tts import OpenAITTS def test_openaitts_initialization(): diff --git a/tests/models/test_qwen.py b/tests/models/test_qwen.py index a920256c..3e5c937e 100644 --- a/tests/models/test_qwen.py +++ b/tests/models/test_qwen.py @@ -1,6 +1,6 @@ from unittest.mock import Mock, patch -from swarms.models.qwen import QwenVLMultiModal +from swarm_models.qwen import QwenVLMultiModal def test_post_init(): diff --git a/tests/models/test_ssd_1b.py b/tests/models/test_ssd_1b.py index f658f853..86a7e94a 100644 --- a/tests/models/test_ssd_1b.py +++ b/tests/models/test_ssd_1b.py @@ -1,7 +1,7 @@ import pytest from PIL import Image -from swarms.models.ssd_1b import SSD1B +from swarm_models.ssd_1b import SSD1B # Create fixtures if needed diff --git a/tests/models/test_timm_model.py b/tests/models/test_timm_model.py index c4e37126..5fdaac5a 100644 --- a/tests/models/test_timm_model.py +++ b/tests/models/test_timm_model.py @@ -3,7 +3,7 @@ from unittest.mock import Mock, patch import pytest import torch -from swarms.models import TimmModel +from swarm_models import TimmModel def test_timm_model_init(): diff --git a/tests/models/test_togther.py b/tests/models/test_togther.py index dd2a2f89..c7a0421c 100644 --- a/tests/models/test_togther.py +++ b/tests/models/test_togther.py @@ -4,7 +4,7 @@ from unittest.mock import Mock, patch import pytest import requests -from swarms.models.together import TogetherLLM +from swarm_models.together import TogetherLLM @pytest.fixture diff --git a/tests/models/test_vilt.py b/tests/models/test_vilt.py index d849f98e..8e222637 100644 --- a/tests/models/test_vilt.py +++ b/tests/models/test_vilt.py @@ -2,7 +2,7 @@ from unittest.mock import Mock, patch import pytest -from swarms.models.vilt import Image, Vilt, requests +from swarm_models.vilt import Image, Vilt, requests # Fixture for Vilt instance diff --git 
a/tests/models/test_zeroscope.py b/tests/models/test_zeroscope.py index 25a4c597..c8809cd1 100644 --- a/tests/models/test_zeroscope.py +++ b/tests/models/test_zeroscope.py @@ -2,7 +2,7 @@ from unittest.mock import MagicMock, patch import pytest -from swarms.models.zeroscope import ZeroscopeTTV +from swarm_models.zeroscope import ZeroscopeTTV @patch("swarms.models.zeroscope.DiffusionPipeline") diff --git "a/tests/profiling_agent 2.py\\" "b/tests/profiling_agent 2.py\\" index 6ef5487f..8f1b0220 100644 --- "a/tests/profiling_agent 2.py\\" +++ "b/tests/profiling_agent 2.py\\" @@ -4,7 +4,8 @@ start_time = time.time() import os import uuid -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/tests/profiling_agent.py b/tests/profiling_agent.py index 6ef5487f..8f1b0220 100644 --- a/tests/profiling_agent.py +++ b/tests/profiling_agent.py @@ -4,7 +4,8 @@ start_time = time.time() import os import uuid -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.prompts.finance_agent_sys_prompt import ( FINANCIAL_AGENT_SYS_PROMPT, ) diff --git a/tests/structs/test_agent.py b/tests/structs/test_agent.py index c3a50a63..4a145029 100644 --- a/tests/structs/test_agent.py +++ b/tests/structs/test_agent.py @@ -6,7 +6,7 @@ from unittest.mock import MagicMock, patch import pytest from dotenv import load_dotenv -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs.agent import Agent, stop_when_repeats from swarms.utils.loguru_logger import logger diff --git a/tests/structs/test_base_workflow.py b/tests/structs/test_base_workflow.py index ccb7a563..fbb8d710 100644 --- a/tests/structs/test_base_workflow.py +++ b/tests/structs/test_base_workflow.py @@ -4,7 +4,7 @@ import os import pytest from dotenv import load_dotenv -from swarms.models import OpenAIChat 
+from swarm_models import OpenAIChat from swarms.structs import BaseWorkflow load_dotenv() diff --git a/tests/structs/test_groupchat.py b/tests/structs/test_groupchat.py index e8096d9c..99222365 100644 --- a/tests/structs/test_groupchat.py +++ b/tests/structs/test_groupchat.py @@ -1,7 +1,7 @@ import pytest -from swarms.models import OpenAIChat -from swarms.models.anthropic import Anthropic +from swarm_models import OpenAIChat +from swarm_models.anthropic import Anthropic from swarms.structs.agent import Agent from swarms.structs.groupchat import GroupChat, GroupChatManager diff --git a/tests/structs/test_multi_agent_collab.py b/tests/structs/test_multi_agent_collab.py index 3f7d27b6..db06c9c0 100644 --- a/tests/structs/test_multi_agent_collab.py +++ b/tests/structs/test_multi_agent_collab.py @@ -4,7 +4,8 @@ from unittest.mock import Mock import pytest -from swarms import Agent, OpenAIChat +from swarms import Agent +from swarm_models import OpenAIChat from swarms.structs.multi_agent_collab import MultiAgentCollaboration # Initialize the director agent diff --git a/tests/structs/test_recursive_workflow.py b/tests/structs/test_recursive_workflow.py index 5b24f921..75cd5145 100644 --- a/tests/structs/test_recursive_workflow.py +++ b/tests/structs/test_recursive_workflow.py @@ -2,7 +2,7 @@ from unittest.mock import Mock, create_autospec import pytest -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import RecursiveWorkflow, Task diff --git a/tests/structs/test_sequential_workflow.py b/tests/structs/test_sequential_workflow.py index d92c4928..1327d0ae 100644 --- a/tests/structs/test_sequential_workflow.py +++ b/tests/structs/test_sequential_workflow.py @@ -4,7 +4,7 @@ from unittest.mock import patch import pytest -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs.agent import Agent from swarms.structs.sequential_workflow import ( SequentialWorkflow, diff --git 
a/tests/structs/test_task.py b/tests/structs/test_task.py index 64724bcb..32fc9803 100644 --- a/tests/structs/test_task.py +++ b/tests/structs/test_task.py @@ -5,7 +5,7 @@ from unittest.mock import Mock import pytest from dotenv import load_dotenv -from swarms.models.gpt4_vision_api import GPT4VisionAPI +from swarm_models.gpt4_vision_api import GPT4VisionAPI from swarms.prompts.multi_modal_autonomous_instruction_prompt import ( MULTI_MODAL_AUTO_AGENT_SYSTEM_PROMPT_1, ) diff --git a/tests/structs/test_team.py b/tests/structs/test_team.py index 2628789b..df92fe95 100644 --- a/tests/structs/test_team.py +++ b/tests/structs/test_team.py @@ -1,7 +1,7 @@ import json import unittest -from swarms.models import OpenAIChat +from swarm_models import OpenAIChat from swarms.structs import Agent, Task from swarms.structs.team import Team