From c8a93d08f6025b27477f2fe4df93f58b8dde5297 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:17:58 -0800
Subject: [PATCH 01/40] Create autotemp.py

---
 playground/structs/autotemp.py | 67 ++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 playground/structs/autotemp.py

diff --git a/playground/structs/autotemp.py b/playground/structs/autotemp.py
new file mode 100644
index 00000000..ed38a621
--- /dev/null
+++ b/playground/structs/autotemp.py
@@ -0,0 +1,67 @@
+import re
+from swarms.models.openai_models import OpenAIChat
+
+class AutoTemp:
+    """
+    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
+    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
+    """
+
+    def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6):
+        self.api_key = api_key
+        self.default_temp = default_temp
+        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
+        self.auto_select = auto_select
+        self.max_workers = max_workers
+        self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
+
+    def evaluate_output(self, output, temperature):
+        print(f"Evaluating output at temperature {temperature}...")
+        eval_prompt = f"""
+        Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:
+
+        - Relevance: How well does the output address the prompt or task at hand?
+        - Clarity: Is the output easy to understand and free of ambiguity?
+        - Utility: How useful is the output for its intended purpose?
+        - Pride: If the user had to submit this output to the world for their career, would they be proud?
+        - Delight: Is the output likely to delight or positively surprise the user?
+
+        Be sure to comprehensively evaluate the output; it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.
+
+        Output to evaluate:
+        ---
+        {output}
+        ---
+        """
+        score_text = self.llm(eval_prompt, temperature=0.5)
+        score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
+        return round(float(score_match.group()), 1) if score_match else 0.0
+
+    def run(self, prompt, temperature_string):
+        print("Starting generation process...")
+        temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
+        outputs = {}
+        scores = {}
+        for temp in temperature_list:
+            print(f"Generating at temperature {temp}...")
+            output_text = self.llm(prompt, temperature=temp)
+            if output_text:
+                outputs[temp] = output_text
+                scores[temp] = self.evaluate_output(output_text, temp)
+
+        print("Generation process complete.")
+        if not scores:
+            return "No valid outputs generated.", None
+
+        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
+        best_temp, best_score = sorted_scores[0]
+        best_output = outputs[best_temp]
+
+        return (
+            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
+            if self.auto_select
+            else "\n".join(
+                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
+                for temp, score in sorted_scores
+            )
+        )

From 1f8adceebc0366df99ae46394906e69af0d8aaac Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:19:16 -0800
Subject: [PATCH 02/40] Create autotemp_example.py

---
 playground/autotemp_example.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 playground/autotemp_example.py

diff --git a/playground/autotemp_example.py b/playground/autotemp_example.py
new file mode 100644
index 00000000..9047268d
--- /dev/null
+++ b/playground/autotemp_example.py
@@ -0,0 +1,22 @@
+from swarms.models import OpenAIChat
+from swarms.models.autotemp import AutoTemp
+
+# Your OpenAI API key
+api_key = ""
+
+autotemp_agent = AutoTemp(
+    api_key=api_key,
+    alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2],
+    auto_select=False,
+    # model_version="gpt-3.5-turbo" # Specify the model version if needed
+)
+
+# Define the task and temperature string
+task = "Generate a short story about a lost civilization."
+temperature_string = "0.4,0.6,0.8,1.0,1.2,"
+
+# Run the AutoTempAgent
+result = autotemp_agent.run(task, temperature_string)
+
+# Print the result
+print(result)

From d86d8ec1902292f2a69eee3555513b8e087c2561 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:30:51 -0800
Subject: [PATCH 03/40] Delete swarms/models/autotemp.py

---
 swarms/models/autotemp.py | 101 --------------------------------------
 1 file changed, 101 deletions(-)
 delete mode 100644 swarms/models/autotemp.py

diff --git a/swarms/models/autotemp.py b/swarms/models/autotemp.py
deleted file mode 100644
index c3abb894..00000000
--- a/swarms/models/autotemp.py
+++ /dev/null
@@ -1,101 +0,0 @@
-import re
-from concurrent.futures import ThreadPoolExecutor, as_completed
-from swarms.models.openai_models import OpenAIChat
-
-
-class AutoTempAgent:
-    """
-    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
-
-    Flow:
-    1. Generate outputs at a range of temperature settings.
-    2. Evaluate each output using the default temperature setting.
-    3. Select the best output based on the evaluation score.
-    4. Return the best output.
-
-
-    Args:
-        temperature (float, optional): The default temperature setting to use. Defaults to 0.5.
-        api_key (str, optional): Your OpenAI API key. Defaults to None.
-        alt_temps ([type], optional): A list of alternative temperature settings to try. Defaults to None.
-        auto_select (bool, optional): If True, the best temperature setting will be automatically selected. Defaults to True.
-        max_workers (int, optional): The maximum number of workers to use when generating outputs. Defaults to 6.
-
-    Returns:
-        [type]: [description]
-
-    Examples:
-        >>> from swarms.demos.autotemp import AutoTemp
-        >>> autotemp = AutoTemp()
-        >>> autotemp.run("Generate a 10,000 word blog on mental clarity and the benefits of meditation.", "0.4,0.6,0.8,1.0,1.2,1.4")
-        Best AutoTemp Output (Temp 0.4 | Score: 100.0):
-        Generate a 10,000 word blog on mental clarity and the benefits of meditation.
-
-    """
-
-    def __init__(
-        self,
-        temperature: float = 0.5,
-        api_key: str = None,
-        alt_temps=None,
-        auto_select=True,
-        max_workers=6,
-    ):
-        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
-        self.auto_select = auto_select
-        self.max_workers = max_workers
-        self.temperature = temperature
-        self.alt_temps = alt_temps
-        self.llm = OpenAIChat(
-            openai_api_key=api_key,
-            temperature=temperature,
-        )
-
-    def evaluate_output(self, output: str):
-        """Evaluate the output using the default temperature setting."""
-        eval_prompt = f"""
-        Evaluate the following output which was generated at a temperature setting of {self.temperature}.
-        Provide a precise score from 0.0 to 100.0, considering the criteria of relevance, clarity, utility, pride, and delight.
-
-        Output to evaluate:
-        ---
-        {output}
-        ---
-        """
-        score_text = self.llm(prompt=eval_prompt)
-        score_match = re.search(r"\b\d+(\.\d)?\b", score_text)
-        return round(float(score_match.group()), 1) if score_match else 0.0
-
-    def run(self, task: str, temperature_string):
-        """Run the AutoTemp agent."""
-        temperature_list = [
-            float(temp.strip()) for temp in temperature_string.split(",")
-        ]
-        outputs = {}
-        scores = {}
-        with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
-            future_to_temp = {
-                executor.submit(self.llm.generate, task, temp): temp
-                for temp in temperature_list
-            }
-            for future in as_completed(future_to_temp):
-                temp = future_to_temp[future]
-                output_text = future.result()
-                outputs[temp] = output_text
-                scores[temp] = self.evaluate_output(output_text, temp)
-
-        if not scores:
-            return "No valid outputs generated.", None
-
-        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
-        best_temp, best_score = sorted_scores[0]
-        best_output = outputs[best_temp]
-
-        return (
-            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
-            if self.auto_select
-            else "\n".join(
-                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
-                for temp, score in sorted_scores
-            )
-        )

From e132dbf7f03ba8d59a09f4b970e265c2c51d3992 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:31:57 -0800
Subject: [PATCH 04/40] Create AutoTemp.py

---
 swarms/models/AutoTemp.py | 67 +++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 swarms/models/AutoTemp.py

diff --git a/swarms/models/AutoTemp.py b/swarms/models/AutoTemp.py
new file mode 100644
index 00000000..ed38a621
--- /dev/null
+++ b/swarms/models/AutoTemp.py
@@ -0,0 +1,67 @@
+import re
+from swarms.models.openai_models import OpenAIChat
+
+class AutoTemp:
+    """
+    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
+    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
+    """
+
+    def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6):
+        self.api_key = api_key
+        self.default_temp = default_temp
+        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
+        self.auto_select = auto_select
+        self.max_workers = max_workers
+        self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
+
+    def evaluate_output(self, output, temperature):
+        print(f"Evaluating output at temperature {temperature}...")
+        eval_prompt = f"""
+        Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:
+
+        - Relevance: How well does the output address the prompt or task at hand?
+        - Clarity: Is the output easy to understand and free of ambiguity?
+        - Utility: How useful is the output for its intended purpose?
+        - Pride: If the user had to submit this output to the world for their career, would they be proud?
+        - Delight: Is the output likely to delight or positively surprise the user?
+
+        Be sure to comprehensively evaluate the output; it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.
+
+        Output to evaluate:
+        ---
+        {output}
+        ---
+        """
+        score_text = self.llm(eval_prompt, temperature=0.5)
+        score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
+        return round(float(score_match.group()), 1) if score_match else 0.0
+
+    def run(self, prompt, temperature_string):
+        print("Starting generation process...")
+        temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
+        outputs = {}
+        scores = {}
+        for temp in temperature_list:
+            print(f"Generating at temperature {temp}...")
+            output_text = self.llm(prompt, temperature=temp)
+            if output_text:
+                outputs[temp] = output_text
+                scores[temp] = self.evaluate_output(output_text, temp)
+
+        print("Generation process complete.")
+        if not scores:
+            return "No valid outputs generated.", None
+
+        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
+        best_temp, best_score = sorted_scores[0]
+        best_output = outputs[best_temp]
+
+        return (
+            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
+            if self.auto_select
+            else "\n".join(
+                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
+                for temp, score in sorted_scores
+            )
+        )

From aa7fa3238862584920176558eb136d6cec5d66f4 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:32:40 -0800
Subject: [PATCH 05/40] Delete playground/autotemp_example.py

---
 playground/autotemp_example.py | 22 ----------------------
 1 file changed, 22 deletions(-)
 delete mode 100644 playground/autotemp_example.py

diff --git a/playground/autotemp_example.py b/playground/autotemp_example.py
deleted file mode 100644
index 9047268d..00000000
--- a/playground/autotemp_example.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from swarms.models import OpenAIChat
-from swarms.models.autotemp import AutoTemp
-
-# Your OpenAI API key
-api_key = ""
-
-autotemp_agent = AutoTemp(
-    api_key=api_key,
-    alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2],
-    auto_select=False,
-    # model_version="gpt-3.5-turbo" # Specify the model version if needed
-)
-
-# Define the task and temperature string
-task = "Generate a short story about a lost civilization."
-temperature_string = "0.4,0.6,0.8,1.0,1.2,"
-
-# Run the AutoTempAgent
-result = autotemp_agent.run(task, temperature_string)
-
-# Print the result
-print(result)

From 1af9bf996768ee914ea6c8528db72426b4578f81 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:32:53 -0800
Subject: [PATCH 06/40] Delete playground/structs/autotemp.py

---
 playground/structs/autotemp.py | 67 ----------------------------------
 1 file changed, 67 deletions(-)
 delete mode 100644 playground/structs/autotemp.py

diff --git a/playground/structs/autotemp.py b/playground/structs/autotemp.py
deleted file mode 100644
index ed38a621..00000000
--- a/playground/structs/autotemp.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import re
-from swarms.models.openai_models import OpenAIChat
-
-class AutoTemp:
-    """
-    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
-    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
-    """
-
-    def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6):
-        self.api_key = api_key
-        self.default_temp = default_temp
-        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
-        self.auto_select = auto_select
-        self.max_workers = max_workers
-        self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
-
-    def evaluate_output(self, output, temperature):
-        print(f"Evaluating output at temperature {temperature}...")
-        eval_prompt = f"""
-        Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:
-
-        - Relevance: How well does the output address the prompt or task at hand?
-        - Clarity: Is the output easy to understand and free of ambiguity?
-        - Utility: How useful is the output for its intended purpose?
-        - Pride: If the user had to submit this output to the world for their career, would they be proud?
-        - Delight: Is the output likely to delight or positively surprise the user?
-
-        Be sure to comprehensively evaluate the output; it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.
-
-        Output to evaluate:
-        ---
-        {output}
-        ---
-        """
-        score_text = self.llm(eval_prompt, temperature=0.5)
-        score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
-        return round(float(score_match.group()), 1) if score_match else 0.0
-
-    def run(self, prompt, temperature_string):
-        print("Starting generation process...")
-        temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
-        outputs = {}
-        scores = {}
-        for temp in temperature_list:
-            print(f"Generating at temperature {temp}...")
-            output_text = self.llm(prompt, temperature=temp)
-            if output_text:
-                outputs[temp] = output_text
-                scores[temp] = self.evaluate_output(output_text, temp)
-
-        print("Generation process complete.")
-        if not scores:
-            return "No valid outputs generated.", None
-
-        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
-        best_temp, best_score = sorted_scores[0]
-        best_output = outputs[best_temp]
-
-        return (
-            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
-            if self.auto_select
-            else "\n".join(
-                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
-                for temp, score in sorted_scores
-            )
-        )

From 9c04b62f8e0869dc197cea453a00963b81a20fc0 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sat, 18 Nov 2023 14:33:45 -0800
Subject: [PATCH 07/40] Create AutoTemp_example.py

---
 AutoTemp_example.py | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 AutoTemp_example.py

diff --git a/AutoTemp_example.py b/AutoTemp_example.py
new file mode 100644
index 00000000..30a46e1d
--- /dev/null
+++ b/AutoTemp_example.py
@@ -0,0 +1,22 @@
+from swarms.models import OpenAIChat
+from swarms.models.AutoTemp import AutoTemp
+
+# Your OpenAI API key
+api_key = ""
+
+autotemp_agent = AutoTemp(
+    api_key=api_key,
+    alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2],
+    auto_select=False,
+    # model_version="gpt-3.5-turbo" # Specify the model version if needed
+)
+
+# Define the task and temperature string
+task = "Generate a short story about a lost civilization."
+temperature_string = "0.4,0.6,0.8,1.0,1.2,"
+
+# Run the AutoTempAgent
+result = autotemp_agent.run(task, temperature_string)
+
+# Print the result
+print(result)

From c85275266a2cd7bbd355e5ee1f6f8e5b7f8d23a7 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 12:21:53 -0800
Subject: [PATCH 08/40] Create blog_gen.py

---
 swarms/swarms/blog_gen.py | 110 ++++++++++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 swarms/swarms/blog_gen.py

diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py
new file mode 100644
index 00000000..fa526a25
--- /dev/null
+++ b/swarms/swarms/blog_gen.py
@@ -0,0 +1,110 @@
+import os
+from termcolor import colored
+from swarms.models import OpenAIChat
+from swarms.models.AutoTemp import AutoTemp
+from swarms.structs import SequentialWorkflow
+
+
+class BlogGen:
+    def __init__(
+        self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2,1.4"
+    ):  # Add blog_topic as an argument
+        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.7)
+        self.auto_temp = AutoTemp(api_key)
+        self.temperature_range = temperature_range
+        self.workflow = SequentialWorkflow(max_loops=5)
+
+        # Formatting the topic selection prompt with the user's topic
+        self.TOPIC_SELECTION_SYSTEM_PROMPT = f"""
+        Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'.
+        """
+
+        self.DRAFT_WRITER_SYSTEM_PROMPT = """
+        Create an engaging and comprehensive blog article of at least 5,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms.
+        """
+
+        self.REVIEW_AGENT_SYSTEM_PROMPT = """
+        Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author’s voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
+        """
+
+        self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """
+        Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels.
+        """
+
+    def run_workflow(self):
+        try:
+            # Topic generation using OpenAIChat
+            topic_result = self.openai_chat.generate(
+                [self.TOPIC_SELECTION_SYSTEM_PROMPT]
+            )
+            topic_output = topic_result.generations[0][0].text
+            print(
+                colored(
+                    f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n",
+                    "white",
+                )
+            )
+
+            chosen_topic = topic_output.split("\n")[0]
+            print(colored("Selected topic: " + chosen_topic, "yellow"))
+
+            # Initial draft generation with AutoTemp
+            initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
+                "{{CHOSEN_TOPIC}}", chosen_topic
+            )
+            auto_temp_output = self.auto_temp.run(
+                initial_draft_prompt, self.temperature_range
+            )
+            initial_draft_output = auto_temp_output  # Assuming AutoTemp.run returns the best output directly
+            print(
+                colored(
+                    f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n",
+                    "white",
+                )
+            )
+
+            # Review process using OpenAIChat
+            review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace(
+                "{{ARTICLE_TOPIC}}", chosen_topic
+            )
+            review_result = self.openai_chat.generate([review_prompt])
+            review_output = review_result.generations[0][0].text
+            print(
+                colored(
+                    f"\nReview Output:\n----------------------------\n{review_output}\n",
+                    "white",
+                )
+            )
+
+            # Distribution preparation using OpenAIChat
+            distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
+                "{{ARTICLE_TOPIC}}", chosen_topic
+            )
+            distribution_result = self.openai_chat.generate([distribution_prompt])
+            distribution_output = distribution_result.generations[0][0].text
+            print(
+                colored(
+                    f"\nDistribution Output:\n----------------------------\n{distribution_output}\n",
+                    "white",
+                )
+            )
+
+            # Final compilation of the blog
+            final_blog_content = (
+                f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
+            )
+            print(
+                colored(
+                    f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n",
+                    "green",
+                )
+            )
+
+        except Exception as e:
+            print(colored(f"An error occurred: {str(e)}", "red"))
+
+
+if __name__ == "__main__":
+    api_key = os.environ["OPENAI_API_KEY"]
+    blog_generator = BlogGen(api_key)
+    blog_generator.run_workflow()

From 7554fbd0cd7d572dd21ee2f322ce09c5f3599794 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 12:22:27 -0800
Subject: [PATCH 09/40] Create blog_gen_example.py

---
 blog_gen_example.py | 23 +++++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 blog_gen_example.py

diff --git a/blog_gen_example.py b/blog_gen_example.py
new file mode 100644
index 00000000..7cf95535
--- /dev/null
+++ b/blog_gen_example.py
@@ -0,0 +1,23 @@
+import os
+from swarms.swarms.blog_gen import BlogGen
+
+
+def main():
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise ValueError("OPENAI_API_KEY environment variable not set.")
+
+    blog_topic = input("Enter the topic for the blog generation: ")
+
+    blog_generator = BlogGen(api_key, blog_topic)
+    blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = (
+        blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace(
+            "{{BLOG_TOPIC}}", blog_topic
+        )
+    )
+
+    blog_generator.run_workflow()
+
+
+if __name__ == "__main__":
+    main()

From 367ecfcbbad471435d4372ee525b4cc72ccfe93a Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 16:11:26 -0800
Subject: [PATCH 10/40] Update and rename AutoTemp_example.py to autotemp_example.py

---
 AutoTemp_example.py => autotemp_example.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename AutoTemp_example.py => autotemp_example.py (92%)

diff --git a/AutoTemp_example.py b/autotemp_example.py
similarity index 92%
rename from AutoTemp_example.py
rename to autotemp_example.py
index 30a46e1d..9047268d 100644
--- a/AutoTemp_example.py
+++ b/autotemp_example.py
@@ -1,5 +1,5 @@
 from swarms.models import OpenAIChat
-from swarms.models.AutoTemp import AutoTemp
+from swarms.models.autotemp import AutoTemp
 
 # Your OpenAI API key
 api_key = ""

From 8403cfa35075dfc25f1b44633783d183d1bfb07f Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 16:12:38 -0800
Subject: [PATCH 11/40] Rename AutoTemp.py to autotemp.py

---
 swarms/models/{AutoTemp.py => autotemp.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename swarms/models/{AutoTemp.py => autotemp.py} (100%)

diff --git a/swarms/models/AutoTemp.py b/swarms/models/autotemp.py
similarity index 100%
rename from swarms/models/AutoTemp.py
rename to swarms/models/autotemp.py

From dd777fa2fbb25daa563b16f7feb8a9ae85f1a4bd Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 18:33:20 -0800
Subject: [PATCH 12/40] Update blog_gen.py

---
 swarms/swarms/blog_gen.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py
index fa526a25..4c285e40 100644
--- a/swarms/swarms/blog_gen.py
+++ b/swarms/swarms/blog_gen.py
@@ -1,7 +1,7 @@
 import os
 from termcolor import colored
 from swarms.models import OpenAIChat
-from swarms.models.AutoTemp import AutoTemp
+from swarms.models.autotemp import AutoTemp
 from swarms.structs import SequentialWorkflow

From 8ea786e41367160c2d8a057f324f8874f4cf53d5 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 18:44:42 -0800
Subject: [PATCH 13/40] Update blog_gen.py

---
 swarms/swarms/blog_gen.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py
index 4c285e40..93d44c3d 100644
--- a/swarms/swarms/blog_gen.py
+++ b/swarms/swarms/blog_gen.py
@@ -20,7 +20,7 @@ class BlogGen:
         """
 
         self.DRAFT_WRITER_SYSTEM_PROMPT = """
-        Create an engaging and comprehensive blog article of at least 5,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms.
+        Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms.
         """
 
         self.REVIEW_AGENT_SYSTEM_PROMPT = """

From 07fbf42ec2bfddca0267e630bf9e9f772770616e Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Sun, 19 Nov 2023 19:56:38 -0800
Subject: [PATCH 14/40] Update blog_gen.py

---
 swarms/swarms/blog_gen.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py
index 93d44c3d..3781d895 100644
--- a/swarms/swarms/blog_gen.py
+++ b/swarms/swarms/blog_gen.py
@@ -7,9 +7,9 @@ from swarms.structs import SequentialWorkflow
 
 class BlogGen:
     def __init__(
-        self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2,1.4"
+        self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2"
     ):  # Add blog_topic as an argument
-        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.7)
+        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
         self.auto_temp = AutoTemp(api_key)
         self.temperature_range = temperature_range
         self.workflow = SequentialWorkflow(max_loops=5)
@@ -20,11 +20,11 @@ class BlogGen:
         """
 
         self.DRAFT_WRITER_SYSTEM_PROMPT = """
-        Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences, with a focus on shareability on social media platforms.
+        Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences.
         """
 
         self.REVIEW_AGENT_SYSTEM_PROMPT = """
-        Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author’s voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
+        Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author's voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
         """

From 9b6e61298b42658e3ed933562a141ad679bd59a6 Mon Sep 17 00:00:00 2001
From: Kye
Date: Wed, 22 Nov 2023 14:59:25 -0800
Subject: [PATCH 15/40] playground/demos cleanup

---
 playground/demos/{ => positive_med}/positive_med.py   | 0
 playground/{demos => design_team}/ui_software_demo.py | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename playground/demos/{ => positive_med}/positive_med.py (100%)
 rename playground/{demos => design_team}/ui_software_demo.py (100%)

diff --git a/playground/demos/positive_med.py b/playground/demos/positive_med/positive_med.py
similarity index 100%
rename from playground/demos/positive_med.py
rename to playground/demos/positive_med/positive_med.py

diff --git a/playground/demos/ui_software_demo.py b/playground/design_team/ui_software_demo.py
similarity index 100%
rename from playground/demos/ui_software_demo.py
rename to playground/design_team/ui_software_demo.py

From fa52e09414902aeeffc79a3520e78558f392bee8 Mon Sep 17 00:00:00 2001
From: Kye
Date: Thu, 23 Nov 2023 10:05:17 -0800
Subject: [PATCH 16/40] CLEAN UP: Flow and demo layouts

---
 .../design_team/ui_software_demo.py   |  0
 .../multi_modal_auto_agent.py         |  0
 swarms/structs/flow.py                | 24 ++++--
 swarms/structs/non_linear_workflow.py | 79 +++++++++++++++++++
 swarms/utils/main.py                  |  2 +-
 5 files changed, 96 insertions(+), 9 deletions(-)
 rename playground/{ => demos}/design_team/ui_software_demo.py (100%)
 rename playground/demos/{ => multi_modal_autonomous_agents}/multi_modal_auto_agent.py (100%)
 create mode 100644 swarms/structs/non_linear_workflow.py

diff --git a/playground/design_team/ui_software_demo.py b/playground/demos/design_team/ui_software_demo.py
similarity index 100%
rename from playground/design_team/ui_software_demo.py
rename to playground/demos/design_team/ui_software_demo.py

diff --git a/playground/demos/multi_modal_auto_agent.py b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent.py
similarity index 100%
rename from playground/demos/multi_modal_auto_agent.py
rename to playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent.py

diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index fd359592..7aca7f21 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -12,13 +12,7 @@
 from termcolor import colored
 from swarms.utils.code_interpreter import SubprocessCodeInterpreter
 from swarms.utils.parse_code import extract_code_in_backticks_in_string
 
-# Prompts
-DYNAMIC_STOP_PROMPT = """
-When you have finished the task from the Human, output a special token:
-This will enable you to leave the autonomous loop.
-"""
-
-# Constants
+# System prompt
 FLOW_SYSTEM_PROMPT = f"""
 You are an autonomous agent granted autonomy in an autonomous loop structure.
 Your role is to engage in multi-step conversations with yourself or the user,
 to aid in these complex tasks. Your responses should be coherent, contextually relevant.
 """
 
+
+
+# Prompts
+DYNAMIC_STOP_PROMPT = """
+
+Now, when you are 99% sure you have completed the task, you may follow the instructions below to escape the autonomous loop.
+
+When you have finished the task from the Human, output a special token:
+This will enable you to leave the autonomous loop.
+"""
+
+
+
 # Make it able to handle multi input tools
 DYNAMICAL_TOOL_USAGE = """
 You have access to the following tools:
@@ -191,7 +198,7 @@ class Flow:
     def __init__(
         self,
         llm: Any,
-        # template: str,
+        template: str,
         max_loops=5,
         stopping_condition: Optional[Callable[[str], bool]] = None,
         loop_interval: int = 1,
@@ -217,6 +224,7 @@
         **kwargs: Any,
     ):
         self.llm = llm
+        self.template = template
         self.max_loops = max_loops
         self.stopping_condition = stopping_condition
         self.loop_interval = loop_interval

diff --git a/swarms/structs/non_linear_workflow.py b/swarms/structs/non_linear_workflow.py
new file mode 100644
index 00000000..b9b29154
--- /dev/null
+++ b/swarms/structs/non_linear_workflow.py
@@ -0,0 +1,79 @@
+from swarms.models import OpenAIChat
+from swarms.structs.flow import Flow
+
+import concurrent.futures
+from typing import Callable, List, Dict, Any, Sequence
+
+
+class Task:
+    def __init__(self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = []):
+        self.id = id
+        self.task = task
+        self.flows = flows
+        self.dependencies = dependencies
+        self.results = []
+
+    def execute(self, parent_results: Dict[str, Any]):
+        args = [parent_results[dep] for dep in self.dependencies]
+        for flow in self.flows:
+            result = flow.run(self.task, *args)
+            self.results.append(result)
+            args = [result]  # The output of one flow becomes the input to the next
+
+
+class Workflow:
+    def __init__(self):
+        self.tasks: Dict[str, Task] = {}
+        self.executor = concurrent.futures.ThreadPoolExecutor()
+
+    def add_task(self, task: Task):
+        self.tasks[task.id] = task
+
+    def run(self):
+        completed_tasks = set()
+        while len(completed_tasks) < len(self.tasks):
+            futures = []
+            for task in self.tasks.values():
+                if task.id not in completed_tasks and all(
+                    dep in completed_tasks for dep in task.dependencies
+                ):
+                    future = self.executor.submit(
+                        task.execute,
+                        {dep: self.tasks[dep].results for dep in task.dependencies},
+                    )
+                    futures.append((future, task.id))
+
+            for future, task_id in futures:
+                future.result()  # Wait for task completion
+                completed_tasks.add(task_id)
+
+    def get_results(self):
+        return {task_id: task.results for task_id, task in self.tasks.items()}
+
+
+# create flows
+llm = OpenAIChat(openai_api_key="sk-")
+
+flow1 = Flow(llm, max_loops=1)
+flow2 = Flow(llm, max_loops=1)
+flow3 = Flow(llm, max_loops=1)
+flow4 = Flow(llm, max_loops=1)
+
+
+# Create tasks with their respective Flows and task strings
+task1 = Task("task1", "Generate a summary on Quantum field theory", [flow1])
+task2 = Task("task2", "Elaborate on the summary of topic X", [flow2, flow3], dependencies=["task1"])
+task3 = Task("task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"])
+
+# Create a workflow and add tasks
+workflow = Workflow()
+workflow.add_task(task1)
+workflow.add_task(task2)
+workflow.add_task(task3)
+
+# Run the workflow
+workflow.run()
+
+# Get results
+results = workflow.get_results()
+print(results)
\ No newline at end of file

diff --git a/swarms/utils/main.py b/swarms/utils/main.py
index a6c4fc34..a17d4782 100644
--- a/swarms/utils/main.py
+++ b/swarms/utils/main.py
@@ -387,4 +387,4 @@ class FileHandler:
 
 # => base end
 
-# ===========================>
\ No newline at end of file
+# ===========================>

From 00325e5af4bceadc6315814dd5a435268e2e2671 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:07:56 -0800
Subject: [PATCH 17/40] Create autotemp.py

---
 playground/demos/autotemp/autotemp.py | 67 +++++++++++++++++++++
 1 file changed, 67 insertions(+)
 create mode 100644 playground/demos/autotemp/autotemp.py

diff --git a/playground/demos/autotemp/autotemp.py b/playground/demos/autotemp/autotemp.py
new file mode 100644
index 00000000..ed38a621
--- /dev/null
+++ b/playground/demos/autotemp/autotemp.py
@@ -0,0 +1,67 @@
+import re
+from swarms.models.openai_models import OpenAIChat
+
+class AutoTemp:
+    """
+    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
+    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
+    """
+
+    def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6):
+        self.api_key = api_key
+        self.default_temp = default_temp
+        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
+        self.auto_select = auto_select
+        self.max_workers = max_workers
+        self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
+
+    def evaluate_output(self, output, temperature):
+        print(f"Evaluating output at temperature {temperature}...")
+        eval_prompt = f"""
+        Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:
+
+        - Relevance: How well does the output address the prompt or task at hand?
+        - Clarity: Is the output easy to understand and free of ambiguity?
+        - Utility: How useful is the output for its intended purpose?
+        - Pride: If the user had to submit this output to the world for their career, would they be proud?
+        - Delight: Is the output likely to delight or positively surprise the user?
+
+        Be sure to comprehensively evaluate the output; it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.
+
+        Output to evaluate:
+        ---
+        {output}
+        ---
+        """
+        score_text = self.llm(eval_prompt, temperature=0.5)
+        score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
+        return round(float(score_match.group()), 1) if score_match else 0.0
+
+    def run(self, prompt, temperature_string):
+        print("Starting generation process...")
+        temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
+        outputs = {}
+        scores = {}
+        for temp in temperature_list:
+            print(f"Generating at temperature {temp}...")
+            output_text = self.llm(prompt, temperature=temp)
+            if output_text:
+                outputs[temp] = output_text
+                scores[temp] = self.evaluate_output(output_text, temp)
+
+        print("Generation process complete.")
+        if not scores:
+            return "No valid outputs generated.", None
+
+        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
+        best_temp, best_score = sorted_scores[0]
+        best_output = outputs[best_temp]
+
+        return (
+            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
+            if self.auto_select
+            else "\n".join(
+                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
+                for temp, score in sorted_scores
+            )
+        )

From d61ba72deeaefff074c9eb226785492d91e57c30 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:10:05 -0800
Subject: [PATCH 18/40] Create autotemp_example.py

---
 playground/demos/autotemp/autotemp_example.py | 22 +++++++++++++++++++
 1 file changed, 22 insertions(+)
 create mode 100644 playground/demos/autotemp/autotemp_example.py

diff --git a/playground/demos/autotemp/autotemp_example.py b/playground/demos/autotemp/autotemp_example.py
new file mode 100644
index 00000000..9047268d
--- /dev/null
+++ b/playground/demos/autotemp/autotemp_example.py
@@ -0,0 +1,22 @@
+from swarms.models import OpenAIChat
+from swarms.models.autotemp import AutoTemp
+
+# Your OpenAI API key
+api_key = ""
+
+autotemp_agent = AutoTemp(
+    api_key=api_key,
+    alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2],
+    auto_select=False,
+    # model_version="gpt-3.5-turbo" # Specify the model version if needed
+)
+
+# Define the task and temperature string
+task = "Generate a short story about a lost civilization."
+temperature_string = "0.4,0.6,0.8,1.0,1.2,"
+
+# Run the AutoTempAgent
+result = autotemp_agent.run(task, temperature_string)
+
+# Print the result
+print(result)

From 1b25553dff6659d70b44be8f18978c6bf90c1875 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:10:55 -0800
Subject: [PATCH 19/40] Create blog_gen.py

---
 playground/demos/blog_gen/blog_gen.py | 110 ++++++++++++++++++++++++++
 1 file changed, 110 insertions(+)
 create mode 100644 playground/demos/blog_gen/blog_gen.py

diff --git a/playground/demos/blog_gen/blog_gen.py b/playground/demos/blog_gen/blog_gen.py
new file mode 100644
index 00000000..3781d895
--- /dev/null
+++ b/playground/demos/blog_gen/blog_gen.py
@@ -0,0 +1,110 @@
+import os
+from termcolor import colored
+from swarms.models import OpenAIChat
+from swarms.models.autotemp import AutoTemp
+from swarms.structs import SequentialWorkflow
+
+
+class BlogGen:
+    def __init__(
+        self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2"
+    ):  # Add blog_topic as an argument
+        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
+        self.auto_temp = AutoTemp(api_key)
+        self.temperature_range = temperature_range
+        self.workflow = SequentialWorkflow(max_loops=5)
+
+        # Formatting the topic selection prompt with the user's topic
+        self.TOPIC_SELECTION_SYSTEM_PROMPT = f"""
+        Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'.
+        """
+
+        self.DRAFT_WRITER_SYSTEM_PROMPT = """
+        Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences.
+        """
+
+        self.REVIEW_AGENT_SYSTEM_PROMPT = """
+        Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author's voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
+        """
+
+        self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """
+        Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels.
+        """
+
+    def run_workflow(self):
+        try:
+            # Topic generation using OpenAIChat
+            topic_result = self.openai_chat.generate(
+                [self.TOPIC_SELECTION_SYSTEM_PROMPT]
+            )
+            topic_output = topic_result.generations[0][0].text
+            print(
+                colored(
+                    f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n",
+                    "white",
+                )
+            )
+
+            chosen_topic = topic_output.split("\n")[0]
+            print(colored("Selected topic: " + chosen_topic, "yellow"))
+
+            # Initial draft generation with AutoTemp
+            initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace(
+                "{{CHOSEN_TOPIC}}", chosen_topic
+            )
+            auto_temp_output = self.auto_temp.run(
+                initial_draft_prompt, self.temperature_range
+            )
+            initial_draft_output = auto_temp_output  # Assuming AutoTemp.run returns the best output directly
+            print(
+                colored(
+                    f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n",
+                    "white",
+                )
+            )
+
+            # Review process using OpenAIChat
+            review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace(
+                "{{ARTICLE_TOPIC}}", chosen_topic
+            )
+            review_result = self.openai_chat.generate([review_prompt])
+            review_output = review_result.generations[0][0].text
+            print(
+                colored(
+                    f"\nReview Output:\n----------------------------\n{review_output}\n",
+                    "white",
+                )
+            )
+
+            # Distribution preparation using OpenAIChat
+            distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace(
+                "{{ARTICLE_TOPIC}}", chosen_topic
+            )
+            distribution_result = self.openai_chat.generate([distribution_prompt])
+            distribution_output = distribution_result.generations[0][0].text
+            print(
+                colored(
+                    f"\nDistribution Output:\n----------------------------\n{distribution_output}\n",
+                    "white",
+                )
+            )
+
+            # Final compilation of the blog
+            final_blog_content = (
+                f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}"
+            )
+            print(
+                colored(
+                    f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n",
+                    "green",
+                )
+            )
+
+        except Exception as e:
+            print(colored(f"An error occurred: {str(e)}", "red"))
+
+
+if __name__ == "__main__":
+    api_key = os.environ["OPENAI_API_KEY"]
+    blog_generator = BlogGen(api_key)
+    blog_generator.run_workflow()

From 76a1d599350b4032a9be341e6b444101ea7191e0 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:11:20 -0800
Subject: [PATCH 20/40] Create blog_gen_example.py

---
 playground/demos/blog_gen/blog_gen_example.py | 23 +++++++++++++++++++
 1 file changed, 23 insertions(+)
 create mode 100644 playground/demos/blog_gen/blog_gen_example.py

diff --git a/playground/demos/blog_gen/blog_gen_example.py b/playground/demos/blog_gen/blog_gen_example.py
new file mode 100644
index 00000000..7cf95535
--- /dev/null
+++ b/playground/demos/blog_gen/blog_gen_example.py
@@ -0,0 +1,23 @@
+import os
+from swarms.swarms.blog_gen import BlogGen
+
+
+def main():
+    api_key = os.getenv("OPENAI_API_KEY")
+    if not api_key:
+        raise ValueError("OPENAI_API_KEY environment variable not set.")
+
+    blog_topic = input("Enter the topic for the blog generation: ")
+
+    blog_generator = BlogGen(api_key, blog_topic)
+    blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = (
+        blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace(
+            "{{BLOG_TOPIC}}", blog_topic
+        )
+    )
+
+    blog_generator.run_workflow()
+
+
+if __name__ == "__main__":
+    main()

From 9fa2255e56476de3925a862556587914d75f2cdb Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:12:03 -0800
Subject: [PATCH 21/40] Delete autotemp_example.py

---
 autotemp_example.py | 22 ----------------------
 1 file changed, 22 deletions(-)
 delete mode 100644 autotemp_example.py

diff --git a/autotemp_example.py b/autotemp_example.py
deleted file mode 100644
index 9047268d..00000000
--- a/autotemp_example.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from swarms.models import OpenAIChat
-from swarms.models.autotemp import AutoTemp
-
-# Your OpenAI API key
-api_key = ""
-
-autotemp_agent = AutoTemp(
-    api_key=api_key,
-    alt_temps=[0.4, 0.6, 0.8, 1.0, 1.2],
-    auto_select=False,
-    # model_version="gpt-3.5-turbo" # Specify the model version if needed
-)
-
-# Define the task and temperature string
-task = "Generate a short story about a lost civilization."
-temperature_string = "0.4,0.6,0.8,1.0,1.2,"
-
-# Run the AutoTempAgent
-result = autotemp_agent.run(task, temperature_string)
-
-# Print the result
-print(result)

From 90f4edd22877a2e973fad888b4c81c5738a3b009 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:12:26 -0800
Subject: [PATCH 22/40] Delete blog_gen_example.py

---
 blog_gen_example.py | 23 -----------------------
 1 file changed, 23 deletions(-)
 delete mode 100644 blog_gen_example.py

diff --git a/blog_gen_example.py b/blog_gen_example.py
deleted file mode 100644
index 7cf95535..00000000
--- a/blog_gen_example.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import os
-from swarms.swarms.blog_gen import BlogGen
-
-
-def main():
-    api_key = os.getenv("OPENAI_API_KEY")
-    if not api_key:
-        raise ValueError("OPENAI_API_KEY environment variable not set.")
-
-    blog_topic = input("Enter the topic for the blog generation: ")
-
-    blog_generator = BlogGen(api_key, blog_topic)
-    blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT = (
-        blog_generator.TOPIC_SELECTION_SYSTEM_PROMPT.replace(
-            "{{BLOG_TOPIC}}", blog_topic
-        )
-    )
-
-    blog_generator.run_workflow()
-
-
-if __name__ == "__main__":
-    main()

From b88ac057a011d472acc3d36f681f073ba0d89546 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:20:02 -0800
Subject: [PATCH 23/40] Delete swarms/models/autotemp.py

---
 swarms/models/autotemp.py | 67 ---------------------------------------
 1 file changed, 67 deletions(-)
 delete mode 100644 swarms/models/autotemp.py

diff --git a/swarms/models/autotemp.py b/swarms/models/autotemp.py
deleted file mode 100644
index ed38a621..00000000
--- a/swarms/models/autotemp.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import re
-from swarms.models.openai_models import OpenAIChat
-
-class AutoTemp:
-    """
-    AutoTemp is a tool for automatically selecting the best temperature setting for a given task.
-    It generates responses at different temperatures, evaluates them, and ranks them based on quality.
-    """
-
-    def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6):
-        self.api_key = api_key
-        self.default_temp = default_temp
-        self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4]
-        self.auto_select = auto_select
-        self.max_workers = max_workers
-        self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp)
-
-    def evaluate_output(self, output, temperature):
-        print(f"Evaluating output at temperature {temperature}...")
-        eval_prompt = f"""
-        Evaluate the following output which was generated at a temperature setting of {temperature}. Provide a precise score from 0.0 to 100.0, considering the following criteria:
-
-        - Relevance: How well does the output address the prompt or task at hand?
-        - Clarity: Is the output easy to understand and free of ambiguity?
-        - Utility: How useful is the output for its intended purpose?
-        - Pride: If the user had to submit this output to the world for their career, would they be proud?
-        - Delight: Is the output likely to delight or positively surprise the user?
-
-        Be sure to comprehensively evaluate the output; it is very important for my career. Please answer with just the score with one decimal place accuracy, such as 42.0 or 96.9. Be extremely critical.
-
-        Output to evaluate:
-        ---
-        {output}
-        ---
-        """
-        score_text = self.llm(eval_prompt, temperature=0.5)
-        score_match = re.search(r'\b\d+(\.\d)?\b', score_text)
-        return round(float(score_match.group()), 1) if score_match else 0.0
-
-    def run(self, prompt, temperature_string):
-        print("Starting generation process...")
-        temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()]
-        outputs = {}
-        scores = {}
-        for temp in temperature_list:
-            print(f"Generating at temperature {temp}...")
-            output_text = self.llm(prompt, temperature=temp)
-            if output_text:
-                outputs[temp] = output_text
-                scores[temp] = self.evaluate_output(output_text, temp)
-
-        print("Generation process complete.")
-        if not scores:
-            return "No valid outputs generated.", None
-
-        sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True)
-        best_temp, best_score = sorted_scores[0]
-        best_output = outputs[best_temp]
-
-        return (
-            f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}"
-            if self.auto_select
-            else "\n".join(
-                f"Temp {temp} | Score: {score}:\n{outputs[temp]}"
-                for temp, score in sorted_scores
-            )
-        )

From 5c94fd2dd01db510b8a5a85abdd01f3ab299a030 Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 11:20:27 -0800
Subject: [PATCH 24/40] Delete swarms/swarms/blog_gen.py

---
 swarms/swarms/blog_gen.py | 110 --------------------------------------
 1 file changed, 110 deletions(-)
 delete mode 100644 swarms/swarms/blog_gen.py

diff --git a/swarms/swarms/blog_gen.py b/swarms/swarms/blog_gen.py
deleted file mode 100644
index 3781d895..00000000
--- a/swarms/swarms/blog_gen.py
+++ /dev/null
@@ -1,110 +0,0 @@
-import os
-from termcolor import colored
-from swarms.models import OpenAIChat
-from swarms.models.autotemp import AutoTemp
-from swarms.structs import SequentialWorkflow
-
-
-class BlogGen:
-    def __init__(
-        self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2"
-    ):  # Add blog_topic as an argument
-        self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8)
-        self.auto_temp = AutoTemp(api_key)
-        self.temperature_range = temperature_range
-        self.workflow = SequentialWorkflow(max_loops=5)
-
-        # Formatting the topic selection prompt with the user's topic
-        self.TOPIC_SELECTION_SYSTEM_PROMPT = f"""
-        Given the topic '{blog_topic}', generate an engaging and versatile blog topic. This topic should cover areas related to '{blog_topic}' and might include aspects such as current events, lifestyle, technology, health, and culture related to '{blog_topic}'. Identify trending subjects within this realm. The topic must be unique, thought-provoking, and have the potential to draw in readers interested in '{blog_topic}'.
-        """
-
-        self.DRAFT_WRITER_SYSTEM_PROMPT = """
-        Create an engaging and comprehensive blog article of at least 1,000 words on '{{CHOSEN_TOPIC}}'. The content should be original, informative, and reflective of a human-like style, with a clear structure including headings and sub-headings. Incorporate a blend of narrative, factual data, expert insights, and anecdotes to enrich the article. Focus on SEO optimization by using relevant keywords, ensuring readability, and including meta descriptions and title tags. The article should provide value, appeal to both knowledgeable and general readers, and maintain a balance between depth and accessibility. Aim to make the article engaging and suitable for online audiences.
-        """
-
-        self.REVIEW_AGENT_SYSTEM_PROMPT = """
-        Critically review the drafted blog article on '{{ARTICLE_TOPIC}}' to refine it to high-quality content suitable for online publication. Ensure the article is coherent, factually accurate, engaging, and optimized for search engines (SEO). Check for the effective use of keywords, readability, internal and external links, and the inclusion of meta descriptions and title tags. Edit the content to enhance clarity, impact, and maintain the author's voice. The goal is to polish the article into a professional, error-free piece that resonates with the target audience, adheres to publication standards, and is optimized for both search engines and social media sharing.
-        """
-
-        self.DISTRIBUTION_AGENT_SYSTEM_PROMPT = """
-        Develop an autonomous distribution strategy for the blog article on '{{ARTICLE_TOPIC}}'. Utilize an API to post the article on a popular blog platform (e.g., WordPress, Blogger, Medium) commonly used by our target audience. Ensure the post includes all SEO elements like meta descriptions, title tags, and properly formatted content. Craft unique, engaging social media posts tailored to different platforms to promote the blog article. Schedule these posts to optimize reach and engagement, using data-driven insights. Monitor the performance of the distribution efforts, adjusting strategies based on engagement metrics and audience feedback. Aim to maximize the article's visibility, attract a diverse audience, and foster engagement across digital channels.
- """ - - def run_workflow(self): - try: - # Topic generation using OpenAIChat - topic_result = self.openai_chat.generate( - [self.TOPIC_SELECTION_SYSTEM_PROMPT] - ) - topic_output = topic_result.generations[0][0].text - print( - colored( - f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n", - "white", - ) - ) - - chosen_topic = topic_output.split("\n")[0] - print(colored("Selected topic: " + chosen_topic, "yellow")) - - # Initial draft generation with AutoTemp - initial_draft_prompt = self.DRAFT_WRITER_SYSTEM_PROMPT.replace( - "{{CHOSEN_TOPIC}}", chosen_topic - ) - auto_temp_output = self.auto_temp.run( - initial_draft_prompt, self.temperature_range - ) - initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly - print( - colored( - f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n", - "white", - ) - ) - - # Review process using OpenAIChat - review_prompt = self.REVIEW_AGENT_SYSTEM_PROMPT.replace( - "{{ARTICLE_TOPIC}}", chosen_topic - ) - review_result = self.openai_chat.generate([review_prompt]) - review_output = review_result.generations[0][0].text - print( - colored( - f"\nReview Output:\n----------------------------\n{review_output}\n", - "white", - ) - ) - - # Distribution preparation using OpenAIChat - distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace( - "{{ARTICLE_TOPIC}}", chosen_topic - ) - distribution_result = self.openai_chat.generate([distribution_prompt]) - distribution_output = distribution_result.generations[0][0].text - print( - colored( - f"\nDistribution Output:\n----------------------------\n{distribution_output}\n", - "white", - ) - ) - - # Final compilation of the blog - final_blog_content = ( - f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}" - ) - print( - colored( - f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n", - "green", - ) - ) - - except Exception as e: - print(colored(f"An error occurred: {str(e)}", "red")) - - -if __name__ == "__main__": - api_key = os.environ["OPENAI_API_KEY"] - blog_generator = BlogGen(api_key) - blog_generator.run_workflow() From 4ae59df890840017fb7e564ac63a15729976cd2e Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 15:01:14 -0800 Subject: [PATCH 25/40] tools fix, parse docs, inject tools docs into prompts, and attempt to execute tools, display markdown --- README.md | 18 +- example.py | 32 +-- swarms/structs/flow.py | 291 +++++++++++--------------- swarms/structs/non_linear_workflow.py | 17 +- swarms/structs/sequential_workflow.py | 4 - swarms/utils/__init__.py | 2 +- 6 files changed, 141 insertions(+), 223 deletions(-) diff --git a/README.md b/README.md index 5ef0678b..9b5166cf 100644 --- a/README.md +++ b/README.md @@ -42,10 +42,8 @@ api_key = "" # Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC llm = OpenAIChat( - # model_name="gpt-4" openai_api_key=api_key, temperature=0.5, - # max_tokens=100, ) ## Initialize the workflow @@ -53,24 +51,10 @@ flow = Flow( llm=llm, max_loops=2, dashboard=True, - # stopping_condition=None, # You can define a stopping condition as needed. - # loop_interval=1, - # retry_attempts=3, - # retry_interval=1, - # interactive=False, # Set to 'True' for interactive mode. - # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling. 
+ ) -# out = flow.load_state("flow_state.json") -# temp = flow.dynamic_temperature() -# filter = flow.add_response_filter("Trump") out = flow.run("Generate a 10,000 word blog on health and wellness.") -# out = flow.validate_response(out) -# out = flow.analyze_feedback(out) -# out = flow.print_history_and_memory() -# # out = flow.save_state("flow_state.json") -# print(out) - ``` diff --git a/example.py b/example.py index 46e8b33c..af41d355 100644 --- a/example.py +++ b/example.py @@ -1,37 +1,15 @@ from swarms.models import OpenAIChat from swarms.structs import Flow -# Initialize the language model, this model can be swapped out with Anthropic, ETC, Huggingface Models like Mistral, ETC +# Initialize the language model llm = OpenAIChat( - # model_name="gpt-4" - # openai_api_key=api_key, temperature=0.5, - # max_tokens=100, ) ## Initialize the workflow -flow = Flow( - llm=llm, - max_loops=2, - dashboard=True, - # tools=[search_api] - # stopping_condition=None, # You can define a stopping condition as needed. - # loop_interval=1, - # retry_attempts=3, - # retry_interval=1, - # interactive=False, # Set to 'True' for interactive mode. - # dynamic_temperature=False, # Set to 'True' for dynamic temperature handling. -) +flow = Flow(llm=llm, max_loops=1, dashboard=True) + +# Run the workflow on a task +out = flow.run("Generate a 10,000 word blog on health and wellness.") -# out = flow.load_state("flow_state.json") -# temp = flow.dynamic_temperature() -# filter = flow.add_response_filter("Trump") -out = flow.run( - "Generate a 10,000 word blog on mental clarity and the benefits of meditation." -) -# out = flow.validate_response(out) -# out = flow.analyze_feedback(out) -# out = flow.print_history_and_memory() -# # out = flow.save_state("flow_state.json") -# print(out) diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py index 7aca7f21..aa0060b4 100644 --- a/swarms/structs/flow.py +++ b/swarms/structs/flow.py @@ -11,6 +11,7 @@ from termcolor import colored from swarms.utils.code_interpreter import SubprocessCodeInterpreter from swarms.utils.parse_code import extract_code_in_backticks_in_string +from swarms.tools.tool import BaseTool # System prompt FLOW_SYSTEM_PROMPT = f""" @@ -25,7 +26,6 @@ to aid in these complex tasks. Your responses should be coherent, contextually r """ - # Prompts DYNAMIC_STOP_PROMPT = """ @@ -36,7 +36,6 @@ This will enable you to leave the autonomous loop. 
""" - # Make it able to handle multi input tools DYNAMICAL_TOOL_USAGE = """ You have access to the following tools: @@ -53,6 +52,11 @@ commands: { "tool1": "inputs", "tool1": "inputs" } + "tool3: "tool_name", + "params": { + "tool1": "inputs", + "tool1": "inputs" + } } } @@ -60,6 +64,29 @@ commands: { {tools} """ +SCENARIOS = """ +commands: { + "tools": { + tool1: "tool_name", + "params": { + "tool1": "inputs", + "tool1": "inputs" + } + "tool2: "tool_name", + "params": { + "tool1": "inputs", + "tool1": "inputs" + } + "tool3: "tool_name", + "params": { + "tool1": "inputs", + "tool1": "inputs" + } + } +} + +""" + def autonomous_agent_prompt( tools_prompt: str = DYNAMICAL_TOOL_USAGE, @@ -198,7 +225,7 @@ class Flow: def __init__( self, llm: Any, - template: str, + template: Optional[str] = None, max_loops=5, stopping_condition: Optional[Callable[[str], bool]] = None, loop_interval: int = 1, @@ -212,7 +239,7 @@ class Flow: agent_name: str = " Autonomous Agent XYZ1B", agent_description: str = None, system_prompt: str = FLOW_SYSTEM_PROMPT, - # tools: List[Any] = None, + tools: List[BaseTool] = None, dynamic_temperature: bool = False, sop: str = None, saved_state_path: Optional[str] = "flow_state.json", @@ -246,7 +273,7 @@ class Flow: # The max_loops will be set dynamically if the dynamic_loop if self.dynamic_loops: self.max_loops = "auto" - # self.tools = tools or [] + self.tools = tools or [] self.system_prompt = system_prompt self.agent_name = agent_name self.agent_description = agent_description @@ -310,68 +337,81 @@ class Flow: # # Parse the text for tool usage # pass - # def get_tool_description(self): - # """Get the tool description""" - # tool_descriptions = [] - # for tool in self.tools: - # description = f"{tool.name}: {tool.description}" - # tool_descriptions.append(description) - # return "\n".join(tool_descriptions) - - # def find_tool_by_name(self, name: str): - # """Find a tool by name""" - # for tool in self.tools: - # if tool.name == name: - # return tool - # return None - - # def construct_dynamic_prompt(self): - # """Construct the dynamic prompt""" - # tools_description = self.get_tool_description() - # return DYNAMICAL_TOOL_USAGE.format(tools=tools_description) - - # def extract_tool_commands(self, text: str): - # """ - # Extract the tool commands from the text - - # Example: - # ```json - # { - # "tool": "tool_name", - # "params": { - # "tool1": "inputs", - # "param2": "value2" - # } - # } - # ``` + def get_tool_description(self): + """Get the tool description""" + if self.tools: + try: + tool_descriptions = [] + for tool in self.tools: + description = f"{tool.name}: {tool.description}" + tool_descriptions.append(description) + return "\n".join(tool_descriptions) + except Exception as error: + print( + f"Error getting tool description: {error} try adding a description to the tool or removing the tool" + ) + else: + return "No tools available" - # """ - # # Regex to find JSON like strings - # pattern = r"```json(.+?)```" - # matches = re.findall(pattern, text, re.DOTALL) - # json_commands = [] - # for match in matches: - # try: - # json_commands = json.loads(match) - # json_commands.append(json_commands) - # except Exception as error: - # print(f"Error parsing JSON command: {error}") - - # def parse_and_execute_tools(self, response): - # """Parse and execute the tools""" - # json_commands = self.extract_tool_commands(response) - # for command in json_commands: - # tool_name = command.get("tool") - # params = command.get("parmas", {}) - # self.execute_tool(tool_name, params) - - 
# def execute_tools(self, tool_name, params):
-    #     """Execute the tool with the provided params"""
-    #     tool = self.tool_find_by_name(tool_name)
-    #     if tool:
-    #         # Execute the tool with the provided parameters
-    #         tool_result = tool.run(**params)
-    #         print(tool_result)
+    def get_tool_description(self):
+        """Get the tool description"""
+        if self.tools:
+            try:
+                tool_descriptions = []
+                for tool in self.tools:
+                    description = f"{tool.name}: {tool.description}"
+                    tool_descriptions.append(description)
+                return "\n".join(tool_descriptions)
+            except Exception as error:
+                print(
+                    f"Error getting tool description: {error}; try adding a description to the tool or removing the tool"
+                )
+        else:
+            return "No tools available"
+
+    def find_tool_by_name(self, name: str):
+        """Find a tool by name"""
+        for tool in self.tools:
+            if tool.name == name:
+                return tool
+        return None
+
+    def construct_dynamic_prompt(self):
+        """Construct the dynamic prompt"""
+        tools_description = self.get_tool_description()
+
+        tool_prompt = self.tools_prompt_prep(tools_description, SCENARIOS)
+
+        return tool_prompt
+
+        # return DYNAMICAL_TOOL_USAGE.format(tools=tools_description)
+
+    def extract_tool_commands(self, text: str):
+        """
+        Extract the tool commands from the text
+
+        Example:
+        ```json
+        {
+            "tool": "tool_name",
+            "params": {
+                "tool1": "inputs",
+                "param2": "value2"
+            }
+        }
+        ```
+
+        """
+        # Regex to find JSON like strings
+        pattern = r"```json(.+?)```"
+        matches = re.findall(pattern, text, re.DOTALL)
+        json_commands = []
+        for match in matches:
+            try:
+                json_command = json.loads(match)
+                json_commands.append(json_command)
+            except Exception as error:
+                print(f"Error parsing JSON command: {error}")
+        return json_commands
+
+    def parse_and_execute_tools(self, response: str):
+        """Parse and execute the tools"""
+        json_commands = self.extract_tool_commands(response)
+        for command in json_commands:
+            tool_name = command.get("tool")
+            params = command.get("params", {})
+            self.execute_tools(tool_name, params)
+
+    def execute_tools(self, tool_name, params):
+        """Execute the tool with the provided params"""
+        tool = self.find_tool_by_name(tool_name)
+        if tool:
+            # Execute the tool with the provided parameters
+            tool_result = tool.run(**params)
+            print(tool_result)
 
     def truncate_history(self):
         """
@@ -483,12 +523,12 @@ class Flow:
             self.print_dashboard(task)
 
         loop_count = 0
-        # for i in range(self.max_loops):
         while self.max_loops == "auto" or loop_count < self.max_loops:
             loop_count += 1
             print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue"))
             print("\n")
 
+            # Check to see if stopping token is in the output to stop the loop
             if self.stopping_token:
                 if self._check_stopping_condition(response) or parse_done_token(
                     response
@@ -510,111 +550,22 @@ class Flow:
                         **kwargs,
                     )
 
+                    # If code interpreter is enabled then run the code
                     if self.code_interpreter:
                         self.run_code(response)
-                    # If there are any tools then parse and execute them
-                    # if self.tools:
-                    #     self.parse_and_execute_tools(response)
-
-                    if self.interactive:
-                        print(f"AI: {response}")
-                        history.append(f"AI: {response}")
-                        response = input("You: ")
-                        history.append(f"Human: {response}")
-                    else:
-                        print(f"AI: {response}")
-                        history.append(f"AI: {response}")
-                        # print(response)
-                    break
-                except Exception as e:
-                    logging.error(f"Error generating response: {e}")
-                    attempt += 1
-                    time.sleep(self.retry_interval)
-            history.append(response)
-            time.sleep(self.loop_interval)
-        self.memory.append(history)
-        if self.autosave:
-            save_path = self.saved_state_path or "flow_state.json"
-            print(colored(f"Autosaving flow state to {save_path}", "green"))
-            self.save_state(save_path)
-
-        if self.return_history:
-            return response, history
-
-        return response
-        except Exception as error:
-            print(f"Error running flow: {error}")
-            raise
-
-    def __call__(self, task: str, **kwargs):
-        """
-        Run the autonomous agent loop
-
-        Args:
-            task (str): The initial task to run
-
-        Flow:
-        1. Generate a response
-        2. Check stopping condition
-        3. 
If stopping condition is met, stop - 4. If stopping condition is not met, generate a response - 5. Repeat until stopping condition is met or max_loops is reached - - """ - try: - # dynamic_prompt = self.construct_dynamic_prompt() - # combined_prompt = f"{dynamic_prompt}\n{task}" - - # Activate Autonomous agent message - self.activate_autonomous_agent() - - response = task # or combined_prompt - history = [f"{self.user_name}: {task}"] - - # If dashboard = True then print the dashboard - if self.dashboard: - self.print_dashboard(task) - - loop_count = 0 - # for i in range(self.max_loops): - while self.max_loops == "auto" or loop_count < self.max_loops: - loop_count += 1 - print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")) - print("\n") - - if self.stopping_token: - if self._check_stopping_condition(response) or parse_done_token( - response - ): - break - - # Adjust temperature, comment if no work - if self.dynamic_temperature: - self.dynamic_temperature() - - # Preparing the prompt - task = self.agent_history_prompt(FLOW_SYSTEM_PROMPT, response) - - attempt = 0 - while attempt < self.retry_attempts: - try: - response = self.llm( - task, - **kwargs, - ) - - if self.code_interpreter: - self.run_code(response) # If there are any tools then parse and execute them - # if self.tools: - # self.parse_and_execute_tools(response) + if self.tools: + self.parse_and_execute_tools(response) + # If interactive mode is enabled then print the response and get user input if self.interactive: print(f"AI: {response}") history.append(f"AI: {response}") response = input("You: ") history.append(f"Human: {response}") + + # If interactive mode is not enabled then print the response else: print(f"AI: {response}") history.append(f"AI: {response}") @@ -624,15 +575,20 @@ class Flow: logging.error(f"Error generating response: {e}") attempt += 1 time.sleep(self.retry_interval) + # Add the response to the history history.append(response) + time.sleep(self.loop_interval) + # Add the history to the memory self.memory.append(history) + # If autosave is enabled then save the state if self.autosave: save_path = self.saved_state_path or "flow_state.json" print(colored(f"Autosaving flow state to {save_path}", "green")) self.save_state(save_path) + # If return history is enabled then return the response and history if self.return_history: return response, history @@ -1113,7 +1069,7 @@ class Flow: run_code = self.code_executor.run(parsed_code) return run_code - def tool_prompt_prep(self, api_docs: str = None, required_api: str = None): + def tools_prompt_prep(self, docs: str = None, scenarios: str = None): """ Prepare the tool prompt """ @@ -1160,19 +1116,14 @@ class Flow: response. 
Deliver your response in this format: ‘‘‘ - - Scenario 1: - - Scenario 2: - - Scenario 3: + {scenarios} ‘‘‘ # APIs ‘‘‘ - {api_docs} + {docs} ‘‘‘ # Response - Required API: {required_api} - Scenarios with >=5 API calls: ‘‘‘ - - Scenario 1: """ def self_healing(self, **kwargs): diff --git a/swarms/structs/non_linear_workflow.py b/swarms/structs/non_linear_workflow.py index b9b29154..22cef91e 100644 --- a/swarms/structs/non_linear_workflow.py +++ b/swarms/structs/non_linear_workflow.py @@ -6,7 +6,9 @@ from typing import Callable, List, Dict, Any, Sequence class Task: - def __init__(self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = []): + def __init__( + self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = [] + ): self.id = id self.task = task self.flows = flows @@ -62,8 +64,15 @@ flow4 = Flow(llm, max_loops=1) # Create tasks with their respective Flows and task strings task1 = Task("task1", "Generate a summary on Quantum field theory", [flow1]) -task2 = Task("task2", "Elaborate on the summary of topic X", [flow2, flow3], dependencies=["task1"]) -task3 = Task("task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"]) +task2 = Task( + "task2", + "Elaborate on the summary of topic X", + [flow2, flow3], + dependencies=["task1"], +) +task3 = Task( + "task3", "Generate conclusions for topic X", [flow4], dependencies=["task1"] +) # Create a workflow and add tasks workflow = Workflow() @@ -76,4 +85,4 @@ workflow.run() # Get results results = workflow.get_results() -print(results) \ No newline at end of file +print(results) diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index d1c600f0..22ae4a21 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -260,10 +260,6 @@ class SequentialWorkflow: -------------------------------- Metadata: kwargs: {kwargs} - - - - """, "cyan", attrs=["bold", "underline"], diff --git a/swarms/utils/__init__.py b/swarms/utils/__init__.py index d5ce3583..b8aca925 100644 --- a/swarms/utils/__init__.py +++ b/swarms/utils/__init__.py @@ -1,4 +1,4 @@ -from swarms.utils.display_markdown import display_markdown_message +from swarms.utils.markdown_message import display_markdown_message from swarms.utils.futures import execute_futures_dict from swarms.utils.code_interpreter import SubprocessCodeInterpreter from swarms.utils.parse_code import extract_code_in_backticks_in_string From 589a37376f2fdae3a5fdedbc66306e704aba980b Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 15:14:20 -0800 Subject: [PATCH 26/40] verison --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 075bbd15..6377e1db 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api" [tool.poetry] name = "swarms" -version = "2.3.9" +version = "2.4.0" description = "Swarms - Pytorch" license = "MIT" authors = ["Kye Gomez "] From cfd2b2e7cd53f3e6e8aff1c4dc08336a1f2159f0 Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 15:21:00 -0800 Subject: [PATCH 27/40] CLEANUP: tests/workers --- tests/workers/multi_model_worker.py | 34 ----------- tests/workers/omni_worker.py | 58 ------------------ tests/workers/worker_agent_ultra.py | 51 ---------------- tests/workers/worker_node.py | 94 ----------------------------- tests/workers/worker_ultra.py | 91 ---------------------------- 5 files changed, 328 deletions(-) delete mode 100644 
tests/workers/multi_model_worker.py delete mode 100644 tests/workers/omni_worker.py delete mode 100644 tests/workers/worker_agent_ultra.py delete mode 100644 tests/workers/worker_node.py delete mode 100644 tests/workers/worker_ultra.py diff --git a/tests/workers/multi_model_worker.py b/tests/workers/multi_model_worker.py deleted file mode 100644 index f011d642..00000000 --- a/tests/workers/multi_model_worker.py +++ /dev/null @@ -1,34 +0,0 @@ -import pytest -from unittest.mock import Mock -from swarms.agents.multi_modal_agent import ( - MultiModalVisualAgent, - MultiModalVisualAgentTool, -) - - -@pytest.fixture -def multimodal_agent(): - # Mock the MultiModalVisualAgent - mock_agent = Mock(spec=MultiModalVisualAgent) - mock_agent.run_text.return_value = "Expected output from agent" - return mock_agent - - -@pytest.fixture -def multimodal_agent_tool(multimodal_agent): - # Use the mocked MultiModalVisualAgent in the MultiModalVisualAgentTool - return MultiModalVisualAgentTool(multimodal_agent) - - -@pytest.mark.parametrize( - "text_input, expected_output", - [ - ("Hello, world!", "Expected output from agent"), - ("Another task", "Expected output from agent"), - ], -) -def test_run(multimodal_agent_tool, text_input, expected_output): - assert multimodal_agent_tool._run(text_input) == expected_output - - # You can also test if the MultiModalVisualAgent's run_text method was called with the right argument - multimodal_agent_tool.agent.run_text.assert_called_with(text_input) diff --git a/tests/workers/omni_worker.py b/tests/workers/omni_worker.py deleted file mode 100644 index 0557285d..00000000 --- a/tests/workers/omni_worker.py +++ /dev/null @@ -1,58 +0,0 @@ -import pytest - -from swarms.worker.omni_worker import OmniWorkerAgent - - -@pytest.fixture -def omni_worker(): - api_key = "test-key" - api_endpoint = "test-endpoint" - api_type = "test-type" - return OmniWorkerAgent(api_key, api_endpoint, api_type) - - -@pytest.mark.parametrize( - "data, expected_response", - [ - ( - { - "messages": ["Hello"], - "api_key": "key1", - "api_type": "type1", - "api_endpoint": "endpoint1", - }, - {"response": "Hello back from Huggingface!"}, - ), - ( - { - "messages": ["Goodbye"], - "api_key": "key2", - "api_type": "type2", - "api_endpoint": "endpoint2", - }, - {"response": "Goodbye from Huggingface!"}, - ), - ], -) -def test_chat_valid_data(mocker, omni_worker, data, expected_response): - mocker.patch( - "yourmodule.chat_huggingface", return_value=expected_response - ) # replace 'yourmodule' with actual module name - assert omni_worker.chat(data) == expected_response - - -@pytest.mark.parametrize( - "invalid_data", - [ - {"messages": ["Hello"]}, # missing api_key, api_type and api_endpoint - {"messages": ["Hello"], "api_key": "key1"}, # missing api_type and api_endpoint - { - "messages": ["Hello"], - "api_key": "key1", - "api_type": "type1", - }, # missing api_endpoint - ], -) -def test_chat_invalid_data(omni_worker, invalid_data): - with pytest.raises(ValueError): - omni_worker.chat(invalid_data) diff --git a/tests/workers/worker_agent_ultra.py b/tests/workers/worker_agent_ultra.py deleted file mode 100644 index 3cf112a2..00000000 --- a/tests/workers/worker_agent_ultra.py +++ /dev/null @@ -1,51 +0,0 @@ -import pytest -from unittest.mock import Mock -from swarms.workers.worker_agent_ultra import WorkerUltraNode # import your module here - - -def test_create_agent(): - mock_llm = Mock() - mock_toolset = {"test_toolset": Mock()} - mock_vectorstore = Mock() - worker = WorkerUltraNode(mock_llm, mock_toolset, 
mock_vectorstore) - worker.create_agent() - assert worker.agent is not None - - -@pytest.mark.parametrize("invalid_toolset", [123, "string", 0.45]) -def test_add_toolset_invalid(invalid_toolset): - mock_llm = Mock() - mock_toolset = {"test_toolset": Mock()} - mock_vectorstore = Mock() - worker = WorkerUltraNode(mock_llm, mock_toolset, mock_vectorstore) - with pytest.raises(TypeError): - worker.add_toolset(invalid_toolset) - - -@pytest.mark.parametrize("invalid_prompt", [123, None, "", []]) -def test_run_invalid_prompt(invalid_prompt): - mock_llm = Mock() - mock_toolset = {"test_toolset": Mock()} - mock_vectorstore = Mock() - worker = WorkerUltraNode(mock_llm, mock_toolset, mock_vectorstore) - with pytest.raises((TypeError, ValueError)): - worker.run(invalid_prompt) - - -def test_run_valid_prompt(mocker): - mock_llm = Mock() - mock_toolset = {"test_toolset": Mock()} - mock_vectorstore = Mock() - worker = WorkerUltraNode(mock_llm, mock_toolset, mock_vectorstore) - mocker.patch.object(worker, "create_agent") - assert worker.run("Test prompt") == "Task completed by WorkerNode" - - -def test_worker_node(): - worker = worker_ultra_node("test-key") - assert isinstance(worker, WorkerUltraNode) - - -def test_worker_node_no_key(): - with pytest.raises(ValueError): - worker_ultra_node(None) diff --git a/tests/workers/worker_node.py b/tests/workers/worker_node.py deleted file mode 100644 index e97b5023..00000000 --- a/tests/workers/worker_node.py +++ /dev/null @@ -1,94 +0,0 @@ -import pytest -from unittest.mock import MagicMock, patch -from swarms.worker.worker_node import ( - WorkerNodeInitializer, - WorkerNode, -) # replace your_module with actual module name - - -# Mock Tool for testing -class MockTool(Tool): - pass - - -# Fixture for llm -@pytest.fixture -def mock_llm(): - return MagicMock() - - -# Fixture for vectorstore -@pytest.fixture -def mock_vectorstore(): - return MagicMock() - - -# Fixture for Tools -@pytest.fixture -def mock_tools(): - return [MockTool(), MockTool(), MockTool()] - - -# Fixture for WorkerNodeInitializer -@pytest.fixture -def worker_node(mock_llm, mock_tools, mock_vectorstore): - return WorkerNodeInitializer( - llm=mock_llm, tools=mock_tools, vectorstore=mock_vectorstore - ) - - -# Fixture for WorkerNode -@pytest.fixture -def mock_worker_node(): - return WorkerNode(openai_api_key="test_api_key") - - -# WorkerNodeInitializer Tests -def test_worker_node_init(worker_node): - assert worker_node.llm is not None - assert worker_node.tools is not None - assert worker_node.vectorstore is not None - - -def test_worker_node_create_agent(worker_node): - with patch.object(AutoGPT, "from_llm_and_tools") as mock_method: - worker_node.create_agent() - mock_method.assert_called_once() - - -def test_worker_node_add_tool(worker_node): - initial_tools_count = len(worker_node.tools) - new_tool = MockTool() - worker_node.add_tool(new_tool) - assert len(worker_node.tools) == initial_tools_count + 1 - - -def test_worker_node_run(worker_node): - with patch.object(worker_node.agent, "run") as mock_run: - worker_node.run(prompt="test prompt") - mock_run.assert_called_once() - - -# WorkerNode Tests -def test_worker_node_llm(mock_worker_node): - with patch.object(mock_worker_node, "initialize_llm") as mock_method: - mock_worker_node.initialize_llm(llm_class=MagicMock(), temperature=0.5) - mock_method.assert_called_once() - - -def test_worker_node_tools(mock_worker_node): - with patch.object(mock_worker_node, "initialize_tools") as mock_method: - 
mock_worker_node.initialize_tools(llm_class=MagicMock()) - mock_method.assert_called_once() - - -def test_worker_node_vectorstore(mock_worker_node): - with patch.object(mock_worker_node, "initialize_vectorstore") as mock_method: - mock_worker_node.initialize_vectorstore() - mock_method.assert_called_once() - - -def test_worker_node_create_worker_node(mock_worker_node): - with patch.object(mock_worker_node, "create_worker_node") as mock_method: - mock_worker_node.create_worker_node() - mock_method.assert_called_once() diff --git a/tests/workers/worker_ultra.py b/tests/workers/worker_ultra.py deleted file mode 100644 index b1485a28..00000000 --- a/tests/workers/worker_ultra.py +++ /dev/null @@ -1,91 +0,0 @@ -import pytest -from unittest.mock import Mock, patch -from swarms.workers.worker_agent_ultra import ( - WorkerUltraNode, - WorkerUltraNodeInitializer, -) - - -@pytest.fixture -def llm_mock(): - return Mock() - - -@pytest.fixture -def toolsets_mock(): - return Mock() - - -@pytest.fixture -def vectorstore_mock(): - return Mock() - - -@pytest.fixture -def worker_ultra_node(llm_mock, toolsets_mock, vectorstore_mock): - return WorkerUltraNode(llm_mock, toolsets_mock, vectorstore_mock) - - -def test_worker_ultra_node_create_agent(worker_ultra_node): - with patch("yourmodule.AutoGPT.from_llm_and_tools") as mock_method: - worker_ultra_node.create_agent() - mock_method.assert_called_once() - - -def test_worker_ultra_node_add_toolset(worker_ultra_node): - with pytest.raises(TypeError): - worker_ultra_node.add_toolset("wrong_toolset") - - -def test_worker_ultra_node_run(worker_ultra_node): - with patch.object(worker_ultra_node, "agent") as mock_agent: - mock_agent.run.return_value = None - result = worker_ultra_node.run("some prompt") - assert result == "Task completed by WorkerNode" - mock_agent.run.assert_called_once() - - -def test_worker_ultra_node_run_no_prompt(worker_ultra_node): - with pytest.raises(ValueError): - worker_ultra_node.run("") - - -@pytest.fixture -def worker_ultra_node_initializer(): - return WorkerUltraNodeInitializer("openai_api_key") - - -def test_worker_ultra_node_initializer_initialize_llm(worker_ultra_node_initializer): - with patch("yourmodule.ChatOpenAI") as mock_llm: - worker_ultra_node_initializer.initialize_llm(mock_llm) - mock_llm.assert_called_once() - - -def test_worker_ultra_node_initializer_initialize_toolsets( - worker_ultra_node_initializer, -): - with patch("yourmodule.Terminal"), patch("yourmodule.CodeEditor"), patch( - "yourmodule.RequestsGet" - ), patch("yourmodule.ExitConversation"): - toolsets = worker_ultra_node_initializer.initialize_toolsets() - assert len(toolsets) == 4 - - -def test_worker_ultra_node_initializer_initialize_vectorstore( - worker_ultra_node_initializer, -): - with patch("yourmodule.OpenAIEmbeddings"), patch( - "yourmodule.fauss.IndexFlatL2" - ), patch("yourmodule.FAISS"), patch("yourmodule.InMemoryDocstore"): - vectorstore = worker_ultra_node_initializer.initialize_vectorstore() - assert vectorstore is not None - - -def test_worker_ultra_node_initializer_create_worker_node( - worker_ultra_node_initializer, -): - with patch.object(worker_ultra_node_initializer, "initialize_llm"), patch.object( - worker_ultra_node_initializer, "initialize_toolsets" - ), patch.object(worker_ultra_node_initializer, "initialize_vectorstore"): - worker_node = worker_ultra_node_initializer.create_worker_node() - assert worker_node is not None From 7faa8bd93536d82b1c7a3afbce8bbf62dbb1c106 Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 15:22:10 
-0800 Subject: [PATCH 28/40] collab

---
 README.md | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/README.md b/README.md
index 9b5166cf..2b104caf 100644
--- a/README.md
+++ b/README.md
@@ -22,11 +22,10 @@ Swarms is a modular framework that enables reliable and useful multi-agent colla
 ---
 
 ## Usage
-### Example in Colab:
-
+Run example in Colab:
 Open In Colab
- Run example in Colab, using your OpenAI API key.
+ 
 
 ### `Flow` Example
 
 - Reliable Structure that provides LLMS autonomy

From 6a4f42aac77eb15738bdb6cb1b822c6326f8b2cf Mon Sep 17 00:00:00 2001
From: pliny <133052465+elder-plinius@users.noreply.github.com>
Date: Thu, 23 Nov 2023 16:18:53 -0800
Subject: [PATCH 29/40] Create nutrition.py

---
 playground/demos/nutrition/nutrition.py | 99 +++++++++++++++++++++++++
 1 file changed, 99 insertions(+)
 create mode 100644 playground/demos/nutrition/nutrition.py

diff --git a/playground/demos/nutrition/nutrition.py b/playground/demos/nutrition/nutrition.py
new file mode 100644
index 00000000..41ff2995
--- /dev/null
+++ b/playground/demos/nutrition/nutrition.py
@@ -0,0 +1,99 @@
+import os
+import base64
+import requests
+from dotenv import load_dotenv
+from swarms.models import Anthropic, OpenAIChat
+from swarms.structs import Flow
+
+# Load environment variables
+load_dotenv()
+openai_api_key = os.getenv("OPENAI_API_KEY")
+
+# Define prompts for various tasks
+MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks."
+IMAGE_ANALYSIS_PROMPT = "Identify the items in this fridge, including their quantities and condition."
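+
+# NOTE: create_vision_agent below bypasses the swarms model wrappers and posts
+# directly to the OpenAI chat-completions HTTP endpoint with the
+# "gpt-4-vision-preview" model, sending the local image as a base64 data URL.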
+ +# Function to encode image to base64 +def encode_image(image_path): + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode('utf-8') + +# Initialize Language Model (LLM) +llm = OpenAIChat( + openai_api_key=openai_api_key, + max_tokens=3000, +) + +# Function to handle vision tasks +def create_vision_agent(image_path): + base64_image = encode_image(image_path) + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {openai_api_key}" + } + payload = { + "model": "gpt-4-vision-preview", + "messages": [ + { + "role": "user", + "content": [ + {"type": "text", "text": IMAGE_ANALYSIS_PROMPT}, + {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}} + ] + } + ], + "max_tokens": 300 + } + response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) + return response.json() + +# Function to generate an integrated shopping list considering meal plan and fridge contents +def generate_integrated_shopping_list(meal_plan_output, image_analysis, user_preferences): + # Prepare the prompt for the LLM + fridge_contents = image_analysis['choices'][0]['message']['content'] + prompt = (f"Based on this meal plan: {meal_plan_output}, " + f"and the following items in the fridge: {fridge_contents}, " + f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, " + f"generate a comprehensive shopping list that includes only the items needed.") + + # Send the prompt to the LLM and return the response + response = llm(prompt) + return response # assuming the response is a string + +# Define agent for meal planning +meal_plan_agent = Flow( + llm=llm, + sop=MEAL_PLAN_PROMPT, + max_loops=1, + autosave=True, + saved_state_path="meal_plan_agent.json", +) + +# User preferences for meal planning +user_preferences = { + "dietary_restrictions": "vegetarian", + "preferred_cuisines": ["Italian", "Indian"], + "caloric_intake": 2000, + "other notes": "Doesn't eat legumes" +} + +# Generate Meal Plan +meal_plan_output = meal_plan_agent.run( + f"Generate a meal plan: {user_preferences}" +) + +# Vision Agent - Analyze an Image +image_analysis_output = create_vision_agent("full_fridge.jpg") + +# Generate Integrated Shopping List +integrated_shopping_list = generate_integrated_shopping_list(meal_plan_output, image_analysis_output, user_preferences) + +# Print and save the outputs +print("Meal Plan:", meal_plan_output) +print("Integrated Shopping List:", integrated_shopping_list) + +with open("nutrition_output.txt", "w") as file: + file.write("Meal Plan:\n" + meal_plan_output + "\n\n") + file.write("Integrated Shopping List:\n" + integrated_shopping_list + "\n") + +print("Outputs have been saved to nutrition_output.txt") From 7b0614b68081a1483cadc23b64a75ff09b6956b5 Mon Sep 17 00:00:00 2001 From: pliny <133052465+elder-plinius@users.noreply.github.com> Date: Fri, 24 Nov 2023 02:19:34 +0200 Subject: [PATCH 30/40] Add files via upload --- playground/demos/nutrition/full_fridge.jpg | Bin 0 -> 197675 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 playground/demos/nutrition/full_fridge.jpg diff --git a/playground/demos/nutrition/full_fridge.jpg b/playground/demos/nutrition/full_fridge.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c1b249c59cf232dea648b863a4f83d1f476d27d3 GIT binary patch literal 197675 zcmd3NWmH>D)NUwRQmlngyp&=sTHLL8DONPN6?eBFMGA%BTA;YQyF-EE9^BoXK)C7q 
z^mxkw*CMo^!k+&CLgRa{vpFs;aBCb}Xw?{`bJ9a2FvxAX9RSw%^%2;i@}n7szDl`X zC3wBbHyn7a?GC`Q&44VZx65y_JZDGjxiebFR97{P_$+L#4IXXP%9(?fbF~H`oUuN= z&gyBIq;a*{c~5$27G1=tY2p#MOy&*Iyltmq>)84^WocRis%x$MYkyTbi=!S-dv{G< zB0Q6~^-|F&Xd3yPZ0Ei}LwxIJWekfB3RzWESrqlWtbADfnreI2^QGv! zlVf$;U5YT?_ial@_EnaTV3}f`FwtLMARON_N&OW&8m4P1H*UhlHm)OK$e&_TdWmV{ zFEcgTf#UgA+J|D9awB%s)*md*{Z!!WcQ3xzlBn2&_SX9rO&W5u8jl2tl43H)NXMPl zzGsF4c-rMBPo1!uxf>ifr39~gw;I+|F~pw1W5ry0N^IW@#&$oLTO3Su^s#mEx%Tl$ z8lNudE9X`TmSMnh?i0QnB<;;~QVF+rTnAkapE8wWbsgmD*OOpC9z@z|lP2(cfosO< zgBZhJbZ}VDiH*yL3%6Co&U96?9D)k60Jm>?o5+}yabj_nHP5&o1nZ~$6<^N|-RkOC zH=6NrR7ILS~y0j+!W;d@^q})(>`|YsTsU*vxR_HT2ImEOR8@1A|?Ce3xlVCwQ!HRM*r+ zalA(Tn(lm*G+2XS%uuz!7fl3_UHtb;Ts>ub8a-U6dRQ)1{{Y+Gze9?8Xvh55>-@{@ z9XDPDO)W-@~!h);FhwpDC*NmP|6MtHEgt&g+xp`OZv5wS{`P7}eG(Ia7 z)cLo-=7yNEDdr7xw~P9+?hNDxnv~ZMr!K}Kad3)RXKYh9e|^tmTIXL*Tg~`9br(;w zeRNn3dc?6g8o>;$Ya)@G#oOl2*N`L!0DP*qfz44B{!yHsXNMUerypMaCZ8@7? zQKt~UE6opetD3Ga`Lp)?bCz8WIWHNXg(`!@43)% z8mb$yYKCEOI;oCqa$M99!&F%Da5shS;cIJdDnB|=Pnhv%&_w}zC?jmQdqtmOd`(UD z?pPZ6Wy9&}qa2Vuu+(@>(hY(kr)?Bh@*xl`=^k3DUw?xGZdN|}uyx!;e z@%kH=BRX8mvS(AxQCTf+66?iLS07a)8g4^Fc!svxUzLX?Q_V7(x(3!U&SS9%&so)< z6i5eF;s+pIuboNke;&i}iSrPkl1D`z9~5qE-w3(SxN$sB-Es0NzAakuf?;oST0p6l zhT_4k-f1Vx;uZ9oefhMQ@=bSX-~BJQ=;--fLiZZ{xTD*;b^id$HrLfzlPZN<4x~ZW zZ```#&2e`hBtUu9iwKw8?4^!YfExa0b9th0f2N>v=Zf+v;z+98sPP4~$2MIQbGD z2TI!c@j*37sgg3sG4*m7fUw~acHlY`42prVi9ju@!9Mf(}euM%Aim1aTAh5S`a$Vx4(E=NL=$0<=RWL;2 zX5q@Ei4PV{HQhd?Y{WPfRK$hZFUc!N9Q~A9PTbf}CcOS)OQuQx02K1@?Zh4ZQ_|Tj zx7(4oDBgcDH|dn?WsJH%+n#@-a?|AF@N#Xw$rtQUnb3aMLElps=><46spH8(=S^rS zhx=4tq_ji7dM>aHjZhwCr(ec$-5DFj@S6+h&r_D1=)ls=jp4-GR^R6YI8Mid)hfR1UahniRhRdb6g#e*m4Q_0(FYPP9<7<<33M+Aa2Xf2<=k7@` z=sDeH2sg3Uil~53m$4?{!razxAbGpi4z<++ewF3!O7y>Md@?88I9?dlTx32?az?EsD?+ zoPyFVbq!WUk7A12J}HV}`FZl;A?By8Yg}VsaSc7k%(Z?Co$4`(gjE>zOJBXZL~URo z-%+bqOFpg|dfs|ET@FoJZVekcR}1Pmcczk?-uzEAlQQIW6o#YSyl-;kZy4gRK3&Ap z3~uK+n+SmZV*T^D}_cP_Q6w3UY6aPA&Nwm$FM z34FgoMY>>BeVeXaE0br=?v`#2u8F*F1O9nRpi_@}Z;&h3O6PB4koVJ%>s+Yt=^Gov zK(<6FF!&s_PGTPR6M2up9CYnYVE6*)Q|lhd+12d7DL1nUrT(d^)~lO1kE+@3SZBq< zDTnk;RyD+I6)SCw>oz%_Tib$G{{T?+byY2{N7n48IkV$n)r0y6pgAj`^S8EFh_?4F z#NYi}7go~#t#AqN`V^BPjq$XW zG~5nKID8S=bt?w2GnueZ*q4IG@f+w+*{YF_!W=h18|~%ZtTs`3k(a`x=i@ils^l^o z2&8LT-xMi72WUDqH^UpC zhXev*FfGA(xGa^j@Qx@1da!@FdDV`; zKFcT6cIzhX4!R;8^RPb~W0`lD@MjOta@Dyz#m*3m@ThkWI@(M6X~ce$cP?4w4GJ$O16g&tE!$l2w|s*(!NJV?bftv zN%6Zbxz90e4;#wcNz2%Ee^WLSiO$+*Kdaqy9OJsu2<&ke?psGWVbvI(NX)d9(eKpM zVY9~*FKfK8fG_1LlI+}>b2zgxVRV>Z5G2O%OY;X%`L}@F8IiYO8*1mklw% zDcclp9wFYloZxl8;Dt}vo)KA>c4L)r%tkJvx{nTm3F6)k-FsL87aAQ$B{5Dw+8syx zvU>fO(PR!Jt9v8f1aRz8vg1`O(|BLxg^bm0 zXmf9SsJZGz&X-z1kr?$+Yk_zU8jlnv-&|Fn9lwwiTk0%$Ste(<-2CVXqnIj-?Y`T5 zkg#??R&@_;zCcj^5pG=jRzjttLJt%K)OgiH-qE}`YvPY$U)?Fd)!=?8_9!?iSlI!& z^QW;$DkQlhbZcOg2?y8DxpugINKt<8))%~Qw!zl;0X;G_Tob5O@hcruIQSq)ZFB{U zVpTTyWAZ}x-5Qg@!2T#rm_g&>`BKroGkZIRiUG*2r~qB!{mmZ5@48hZ_rN>%N3l0D zGWOmV8rcQn$^m1GRy64-{E)FY{c}ac*8EZIOyrrZ78;V0sDbjd7HVzqP@|XT+(DNN zpU=7y@n)PMyQ;40110P_0?fI%8QBl)MW`YTbBUXlL3c5Oy2e35C^3pZ; z!akK4GVUICn|a#eZb_(+{{U%m&lF~=M=Ub9@I%9!T}!(MQ&QG25YXyvMf>i(lRL^{ zEG()mXt2FC{*@snQKEB+CR2TH*U9?H7iB5|#L4CGi9M^upF_}2TcwIE{Eta>JxWxh z$oeejEfZYUczc@p8zC;IxwyZ7dV4m}rE;wF$KhW-JHJNoe7E$BDVRmBBW{gJU)Asg;wuNZjVOSH|YG!%=-l70ftJJA&dglU2iv(z+%$ zdB?08dATbdNzeFhYoV5wmO&)+&n;mZZ-H^VJr?vYj*hQWqmJw_Uw6%LeSTh-r_8s` zedXfLq{a53hBFMK9ui|~;||?x9dYLhJncUK_aF+om%In=#dzH9<7@h_=vsrqU0H+D zb$~^=2x$+{-?=ibs)n+TDq4N+x(S1ycGv;BN7A{!$F*y_YR@tBY+NMG()`PmvFt6; zzLZTNcJs^F;N!?x@&GMWj732nB^20xBL4sbiT8Cpu-mI>Ae$ZshhkR_tnes^-eZRy 
zN~Gr8&r6r+r=Cez{=Ma_jjtDV&ZE%0%zax|sm)s=-Ky+%ap|5IxT(oTy?Yz0D^uXP zy9A}hW1d$Rmw_WBa{(Zhod=P5Cd@MH;bcRv0f2_Q&P%x8i9MxO_Bm08=NzXcOXHF$ zA*g&tII3i0K(KJpt^ap^Sqp&XepEzi5`xjsqt4PKs4 z9xT#PTz7m6WW0}sS|0Pv_AIL~N*qrOiMWL`A>4b(g@?qs zxF^mv^QTn!Q*er0F_cvDvN;i`qNkPfOvv2lMmp!T zv8LWBAdA}CUzLLH`>b(_f{$aREmmx~9$C$~df$IRv>87Hoy3l%rsu`h`riEe9DONS zF$i#YWP+-ein9(bJKbzs#P=RnNmI!N8yRFIvAld~OOK?Is~OKd;`lxfQ!NEsA(evc zd!u83*Srw77{(ukQN@!a@_r{~-exbk5CWc-_Rt9Q3Y{j=sHV&ncuiIo(JLFQw4?_C zZaRV8z^{K~tigvWX$s?**xXk~as%)w6US zsi2P=e~+nhS=MOYxMd|aBTG))D;jR_1+8s!NOOrK@zY&Zs}7{cu+ZNMq7$?ahfeMA zexI3g)=R~4$9f-H);eDxMd)9!{BAcmHdeYP{8x(=Zo?`WBU(d(qhZ;j8etJvP+{2IWY`viPbIwj z%_s6zhI1rSIKcpcpy<)ab|Uy0eYq}iy6s8>;*|@7SNK#+xgpln6*ApI(ULm2+!a^E z#iEzwm1wOTRc93zf>VFEC_|reBX73p3FQmV=7h_pN&f&B^4RUHkpBQiREE!+y|$W@ z`~$y5sxzihz6q9FfApzO^nc#A`8d}HDmV8|yg*QS&-WEC`_x{wI0xA}>Efs?<@=V5 zcGbu0S_ja6jO+~9));?-9DM6>_=jmHq6rGNsalVP{6#&~`N~+Z2 ztf7cFg7>>dF}z$?w31^rL`%CAQSlM9Yw144=IXkJpGfxPea`luMVnH$L3S>Y&8_(a zuU}>=)ix^d;SAuh_qM730EgYSCyMOh&5uz9lN+>k^5LEd?3$)aGH?$Ny%T#?VtiJj zTERm@6K1M#yPWIp#yJz;+@+w)nEY^=jZ_o6k5uTlTjT^A`fj)~>oH=M$H~o)NtYyd zB2%uh*5kQm+3an}LsQqe#mtqCie??x#_laYNbgQW_pu~{#djjq(v)9gt0=g=uaZZt z*HfzT&)!gw&52_VEyGzGAsTo@z_=6mEgrk2YU9Q*Nc!67O4$UHpB22?Uw+an$v#|wuJ1!KP6+Z(R`04Gdc&hd3j zsvbtS`O?pQnwZqdrS7UD5Sm-@HnyuyjkIDF7^OudSX43fQn`T5Bd=gcxfauc+Vj5S zjl!Z~)Ks$V5}@@>B(=A==c?)JCp>UcgQK^kc=xrEHY@eVjY&>r5#iRcA))ev`YJlB zGvd(Fcto_cd!$^qt#~8zS4PK%(=!Dju*L@Kjkia|3Y)Y_8egp`FAE6~5)Hovd2`RG zj}!e5PA^K`JYTCdK3~iOGGXGN#MpyoF_72qwbr(v*m*q5NaWgx<<2uj)HU1HMZ7t0 zaa%)XCA8Te1#19fcMG)bFL5PmeVfbH=bQ5IgX>%!VaeddZ@XuIqitG@yHz8xq@kx~ zU3)}K4YSR+M;*|rG1Eo$qZQcS+~GKR z{aNASpITQOKW5MJib{VMF4cUK>s&~u&R&&I2N|avJ>j1`&Q$1r)&SDMTDnp=I~}HP zY%u=-T2)#vfELJ%=N1FBgz6uF-W|!T!&dO%oF#2)*8&ukg z%8csrwhZ5kMF{-BrB@NKRlTK0*6iyvv*Te^L;4phB81Nj>oQh8-;IaV59r*0;=Y28 z2OwkZ?8E{5#n&`P>?nRpdqMk#ACt{(m=MIp>7C?XGx}At z!TMF?**JUjC(%4h>6Rcq$9Iu@)9HxLr=+G%mw*pp^h*-4F+LBa+*{psD@*&HKO&3! zk3SVBN2NPBsBBd6QMrI`VRG~FoBBaXsO)?El{b~Xu#1o|!w11qM@1Wk=o+M*%~!}r ztyu+&M|#TqR0p<4nRvZGd1Z~X?Oyp%dr%mQ9xKaoSy_3;=nQ%GBcE!^PHWC0_ds|^ z;#Y^V%idkf!{|VGY;JfjEDFnYdBe(p@L#g%A$nJwyHFln1?M&Cw_@_(SpavH=lyDe zy+C+C3(G8t>{y@_XB5j89yDk0hpjzl6w4OVaQ+bWs)0?;@ltXAG3!`mUzs1S@ zYu2*Lx@Rb_{biiQ(QE1@s*nl#_3s)Q{irrqOI|nTFdHM4Uer>a+&?C7UcCu z&)DPeJ)___jBa~mq@Ex}h-;Yl2D;t47YldyEWS-6p(->bRq+aD&5Pal0LO9Ps?$BYS;+JvSu?% z=I;{&i1Lj>!)JEQJ0z^a<#}WL7JN2G0dm_C!KcZx*`67;wr`ps&RI7F$7$hxJQ$5E zbu5ZP3z!_xKsM6)ry^EGpQYZ?^pKE$9z`3n{IT_skuVGc!hA2>+Bm6t{oT&9b)a6? zHuR@+H-9a*(4I{>{AUx`a0fGJ4ib9g%5bT&5g z z{+ozc3E)Wcb!1 z2Z~>Z;tjYLwafzRXpGkR*BY*#T$zi*IfD`RGMcPQ4~knMEOp0btFl|4aJVpU4wTLv z)%Cs(+ix8&x;xh&=FA1u^%w>h)^(lFw~{`9&oCmD0NRbC2T`gq8JU?wk2eNIJ5m-*<1u za-Wf;pQ!9?rirHZH~B>`9?mJE(XSwUr3(2f9sA+evBeCAd0YUQ!1uVU{zj|fSbni! 
zjit_=!Ju57?hEIKNUXC4ea+a$B?V-Vi6k*crtotc;)C6w2hO`6Y52SHwoR?66Q2l_ zbr6f4T!*t>FynOjf6)70fzzRyGtH4&;TmHP*%b6p zI`xg3G6)SIx3=B9^->rH0gdHqj=GYr802N$6T?r6NF#w3Am4#o3S(iqGyo{s-Ml#S zULOViz?)A_PCSLR9RV64P(lO;fic>CqHHq-zBx0zSzvE8FoQ@RFb z7R3}Xp09Qm7uawG#g{&EtrZbAqT9sWaPCDsLIUE~5=geai%&9~R9qtKj>zVRO~)H) zzW)FgG=gmJkKyK;w;B#@fhINvIo?v%f{~%wb!|lLGS)WN*TpB+Rj&h>3HMJhtYxN} zsnL_b>EvB0>hfVtVoEFEk%@HdGJksC=FQ{K=PTgwVz{0IU+?l%!r9|=j<=5y0CZaRWDHrBDf4@z$i%5a=E#+H;o8r{$s z;sv~kjJd|LIQP1y5Oucmm2_=ZuB*b^4HHC^@(BI$&apNc=u*)(v&`qQEC&~gdEbT8 z_+VqVXKZ1wy}L&h7&F{BMQ#s(V3AVdRb5PCT4ND$g`zu>OMVXvYuc-uS?LRi&WE+l9+c}C#H%x2UKHK|4ixDNWq*M^%(>R!>#rY~SHUB9 zM$o}Ct`$LBO+Gh41{u}7IJvce-(QKvS~CXKu$sCyMV4Xfs!N;pM6nXUIg2{Pm7wy@ zNrGBvt*8fyJXX-ld44KuG>||Xg34zkq1wydO_qA79QoSa{v@|S=vLU=I9Wku=C%rV zwWCXRDTrNng|mo*#IfpDW@$Xf(C#FX%pkqVa)hX^7LsDu)(sro{HEUTI}* zM2y7ULosQ;SJlV6xP13@zX@M)gZ3ofB&i80B_vr#Yo>Gl&j9_I>owP@cj&c4^vXZA znPI=}6Ccb9V`pRj*{eGY4nLaJ3)3k5g_Vc!wFcjc#mVI1TpY9y*(aZhp!1LJdLG{j zkJhJJKFK%a)d!pY(?S09#h`r&=<#UwIfQ_Jd2gq5ExnQ5W=;s;Mg0p->?%(O*wzuD z1i;I8_v9UNX=%h|`?7eWlig*t*%y{rviyGSV$Ye{v4?U$Aoi`7M}~EAH%r-St(Rl; zbuhze*d4pWd2TrCVCn(iX|U~Im1mRBmU$=5pMMzpm_`ucBA1@&4IK%rh;F_w8so7; zL65~zhsRPJ102om3YpplJxI(BtU6YGtb#AxwU5^<4ZtvxB8C{}sp^fR0?h(DTThkg zE8FO~<%&7I60^?gI96YEe78pJ$QU4O9upwE(>tttxsrK#hKplGNYKD7Wo^7n@y7xSz<Z27q9d}Oc+widG^C|7J;`GpEiQe-Y%O1xDyIn(p3W1k#xcVF%y(RrMtXSDw>v%d~H1` zi)hJhyedKEvTZ&d=L?FtJisqPj)l(Q|0Z=lP zL4#-*lx4-+vT;8pdHVkV`Ag<@^xuj6j;VB(!HMDGJJhJ#Oq+8j%71}(=&MxKA-DsL z^y=N}TI*{y`3~oNre@r14sxWL00|q#*I@zLk2iyjYRy^k-Rev$yN!+{)4$>LZl? zYNky-?o4OyNr03+Yizq$FEj}M$l zo5#sMqOGoYJUuI4VCG7^)b6d}o-2;=7v=rx-f;S$PuWc)~*-j9%UsQQ5qGX$s?Pw^yySKDCxLGh?W!bB%`i)cI3d`Gc~}VFu#gO5-W>m!#R> z%+%n0?wo%}Rb_w>Gz!DgRmJ(boxD!%DE#$bW}tIgA~Kfy!%hQ|eM&CqwA8pJGY=b( z(MVzAdU|jN-8YAo3Vn9KS=HcpbABx}WAj3On_;co!>L=@69>!_mBefmR%>qlSH>IL zM=<=trBe)fon{A!x8qYtKQy2?TcCr##k|=c#jAw+TK@oI>>N1Wh9lg4YmT}Iy4Y@X zW%yLUerXqC;iK#by#BJb_%dN$J>kpdpG~vXZTl$5IavOC7t-4Av^jjt>E`aMZ8=^1 z2xE`V)ATDIPC4*>GH=?)lqpI*X_%s!V+(m+*7PY_FWk)YteZm0KOEN!oAG^NNFE)D zjyJ9uKRpMmD!Bx99rr3l$~z4o1xX$%zCs4sWHq-Sv1-rGpbjW2-eGxU2ZaPLSDr)$ zC3(++$c=SJkpYi2;TPnX<8{*E*n8%Mh3^Dw)t82jB#KcB9Y8=ZgW8`c(Dhti&>DkF9+|4+Tn-Fx3nWN|w|-xK~&Z-y5p7^{s2#-P{$HgBphtoy>wM zofv5%a~-2P@C5$=){4-v_W^!u!o4TZu{tONiS1r-Mec)S01e77ZgyTF0qNtS$C}HH zmxv752~6>Cg5Q)qY2z($nd1Kd0lrb|L<)X+Xch2wnEsXX_oq1j03E1prSRKa{HHIV zdFXwc0n^|gKC6IwbPw^%4mSS)yN{rz*+ zwaY#(bdUY<-=t9nbv{$m4g6)Iw(XcJu#K$Y5Hj^pRQM%4R7J(1^1f}>ojKjjH||^4 zDcjuN#!*$9A*IZ_WXsvgx_+_@8V7hi5oN=wPK}hC{liP_Eyck7$Xq!Se}v?VqixiU z({8}4c+n=JEp*jbYMpmDs`fC_=R|p2tpXzyuwgvaLcP1&!q>XEVr>GNpaWJzSvFvRK(LJFGIjj3-XcD*cKzkC1|HbIbM~UHhq#hGRP-E*N-YwqUA;VB_#08@anc3byHaC(mTuQ1&Nj+68a>)4Gy3$D2 z!;)X!2!?i)IoxQXKc4ii3x!f;>>8U6qKm1brHn-scPDkDfVtIH6}GPQT`dqSk&*0 z4To}sYrw0j7XA3G#LF*?d2?HH$z~)8v`-tYH`2>J?sr%;IJ)-s2@w5Kv0diK2T-=m zziRZ{EQNrw#PHPsqaD1}jK_3os!SU@xp_R}cpxz&d=Rvz+FQGya^TbvgyKXHZEav% zq)gi_NwpXutJ;23Y}OGBGZE`A!*_a~>qx^!xY^xIdGjf|5vYj8J|9gLAaHi^cI$7v zN#;~8)}b?ExO1*evu+&GenYK?+P2nE!1A^mmhl+!$&BIFE|kY3o&nwMB%TXe?OvV{ zS%f|k3!R#<-n3lk{<<%s$}>xbbR{GLM*u~Ilivbodcuvq{LlufO_11K+MdP2bJ)SS za3niW6WA$O*6xde$tdjya>0hjo{FO^A5I(1HhW7>o|PeB@qTF;W8Q9>6-tJ7>`Oxq z_$KyP7LZd+(Tq1OWn4!RaMqp|J)rY-tkLNqVscQ6l(Qa( z@X_3zbT8rUS`tYlePp(2b6Pi??!Zo-wJVzOX=?E)q?Sh+cZnt39IH>nOXYK2b`1#w z>GYNPK5UZJXP$X^KX1@!R+mp4QSgPPm^4Ult*jJOvH(TOaa?yKa|4?ux)yLMHxq`U zm61Pl#*y2tZS}h4#{<$-EM(Dn)(0IvWf5NmZ8*I|Zw>9@-sdu-(5E()$t3PR*G{JSsyT<{t87H}Rw&a4py_er3q)#0*Dqhu;s)-nMSUvw_Q5 zwg3t6dWQ8b1pF)MdwFbp%dyh4y&kG9{uy>>&UL*P+O)E+S9En$MbD|8M(}9d7ykgS zFU*IBo>xjyMI&k29rnj~#C9M3temzMw~nXOzfWEJpOL=Zi1)!K#6E>r+EI{X`qCl2 
zngAYfuY5SF-p~g2SgjVl+CNAtM#eR4tT2m2G#r+!+3q!6jboYiI_5KX55^ly3s~C_ z!30j%wB6^@w`|S4es4mgQD77}EcpENu!vt-P%7$Oua5H8w7by8k20u?=luTwzngkR z;aGc4^)4Ag%eRf3Twd>+ba|ISZnB>YF1@s{)oFc|VXntBrZJf(VZB417*8hk&#?TE z)w=3pY_6n(?q2bGEl9*yl4~wNA*F?+YrB6+xZxw;0)@kAVcr@MrMRm$8HW)dF}OH( z6~?a4N5N&mTtRTYgejO$juciK3!!D)EO-#4qII0PPPxF?CK-<9exEg%E30Z@!)mb# ztVxvab)%?`NSy8A+Cet9hRp?belQ55pX8MvkL=qG#dep3XBWpGhhgx`6I@&4UG3et zKO~TRjqFl5*X}9)NnCib`@2`<=FOMhwI;jQbD#EeW43C(AGzJA`7KagGL7KMt^7KJ z^D$0r*zawUv;E!vrl<~?M@+0c>U~kJ!{q8N4q+BEpCW?I;_buq{{Tv7sHxW5uXVUt zL1xo)h7b0p`ijsy&t(zUyJY!xItRxK*%Ka1!Pmqb2;jE%1qL^|`I|r;8-UylmAZ=# z-tbnn*qjpn#h4@887%NO zHwgCz2KgwX_AQ_ntBK>r`HK1+p7Lw>pVZO0pJy(bj$WfJQPn=$GE6ongXwD4cqUEG zm93?x#&RVGgNdGZeaMQ@uoJm!aL^A*>-*Up)lh_0D6gP-Cf&I+OkkNg)KNF~kW!w= zsS79y3N7kPuG~^!@L0#u%^(K+ zNDk%2Ltxo%uMx)IHEv36A!}Q?H-XeJ+nB!hXkA_`z3o@s<4U%wFTBZjx!I>Mydm__ z0pMad!JegT!fIXf`}=%SFuy#tNdS4_?QaBg-78Ge;?m*S>k*mJ$Rq21DePZCC(JCSdj!~X!%p*y ztTbp3JKlyL%5Q3yrH%@^c;@hSusxx`J%aXKeDL?t+ri0>TOFLiKG5*Gth(k!Q}3oH z0K(D@&zWm|t!G$ja+I+`;^Q4Vx3BVlxp!xQQ=TwI`2k|=GL7R}NKckA>|F_H+`owI zM?H8xZRF~(-J0Ow8@t^08+Le$Ky2e4ZWWptQt`YQF7<-@LGCUdMZ5x-j?SsR#NX>r z1aY~+=nwu`p2f+k(|x8fjny5E9<%JSl(yZRABb&d3B+=wl+1jMsB|*6#kyRBWmvAy zRBy`yk*;@IDO(vm_5pnrc1^$?@UToc`DtVJB%P3Q%z*D@{4L&EIQ@&wpG%zGxv!z> z!>nV1rH%c6*!gLbC*RC6N>7Wr;eP!U`PEU54g{~EOxey|XYsKI0T|^GDBWa($yLvhion7u7Y}g7)*StLWd0MN5)g$H>-`Qb+{x z(QXm5jH<_6Wy~xd(MZo?&bKDuW`bBo7fl>)X=xG08KLYV*7GiDNi(jM6v=ZQ7gLGV z!%-|wXfGwNu^Jv~7Pm7~d!JuT6C{oo5Eyq!V7br-SxiB)?5BiCC#AqBg+oom@<-N5 z`A)g{(;W5~+yTG<93_vJ0dTN^^Y5gQ-p;U&ccmXVP*IDw$fmkP9qjiQ7bHHEerPHc zoB^`XMpFH=%%t#^hu2Y#*;qbsqh|YRm%tidjNz^AsEmGaLZ4l!1*u^jjBlwdu!L^J z!PwBVTt>k*#97v1^)2m*!{#Z5$eWb6MA5;+adsxeE;drDU_2^$tD@VM!e9HQv ztlipfS$7+1X{^oC;Lt(EE!Kt0`qm2GwQ#3!h znp8F{Q#@Q>4V-sA7^~Zs~w9NuohLk#A-0E%atL(P+ygNLKa{_er^|JVNZ9{8m z9!sf1gBSt$B!TdSzw3f&mgm5;RtH%X1L`JhH7<&IY9leu*{s)G?*Xpsns{^at2|+m zHKO~@4Gk}Lt{`@mTL^MoEp_2?ZfE1F$F9?g%@lH65p>v1-=fG%=Sl+hUh+wYEP+rN zT^>{~FLHoVJqS$konvp5deg}Grg+J;P4bUgtB_Q)&+giNHQYAzt_11S(lc%pMecCt zc|I9k_nyCX({sBIt#Ch1q5d^s{{Zd}+vmW(a$?mpW z!@2v=H?px)5!cPfj|4j7%4Ya;yLZ*1mEbw8=f}vh`7}z->)k_Q z?)QfGJBpQ&+9+II#Kmqvg_A@*W4EcuDw~NsxVR1%R~bi$;Cod^5RKc;(?EEjWj7N$ zrb0P;RcmW?lF{t@FwYa@jz$_{9SI@dZe{Ilcq>*W_C?kL;c~owFGYf~x~~zQI(jIC zd-BWhMaTmFatEDkz^={fA62t_26-I!J@jmGG7?-(xv1$T-p|hE_6&d4Iay=HDRC?+ z3aTkvH7v~zjqKpz+Nv_YcW4b4z33bLV~}_){c&Cy_2Qa7sf>GLFKCg#mb9Hn9Bi55 zxTH8u1of3J+^2!2NgwfAIG-Tmvu25IzU$t+TaDCnY5{9p*8b&IQmLEbBmBKn|sR`bL1yxm(|;3_;YHLhqrD}~u%e#`mPCLhzM60gSV zUj_$DQ-byTu(DbAYS_{*x^3dSTiV>aNL&w5jVrUAV4r2`cGIyP?}BVQO3jrT+#j8B zeBC-!G%*d*qz` zNt!a|C5pj8B|LPEadb{_yX?*GUWKjm_HU=oIcqIe!ASZh$D4523~z8Gkx?1Go>W1b z(kME&w6(U?J28}&^?>A@lCW}!m1rsY4<{ zCW{c!r)t+*bHqg^RAeOCvOK-Rv^0B5eG6pX!D(KS^(G7!lE>~Jros$PZ$5Xk^aV3J~$G5RLo_177_XB&A#W!@3F|HDa z7Lasdw!FT3FV3;{NguZ^$ss%k6OEP9wm{drMzec}?OqzWF)8rZNo&3DmcUxTUv)uK zgW*y+Uc_4OkTT-pyPcM|GSz;DVQqbnL!N83|+*LX{u*~tM*{NuzCa&&rJHv@8#ZNgF zH{h!9o!Zbl9N?^PtTi$Rn4^l#Yj+`FYjkKAu;y)Yd6HKKXrs}6=#*?NXk&w}U#V7H z0$!r7o>u%OylSe{j|2cMx|x3tt8c=d20h2V!v6p| ze&&FyTP&BX+5QgVb?V$dIIBCg9GB}@B!s|Q&NZ#`idFsQ5Rt1Lvp+7q4 zs-}SbI9lqe%+rEmwb-T+NnHf^r6XKwDcSJj;5E~{Do5Of!E((m2S-a4G);^cE=V5; z?>MPZ4e?4~(W^zuc=E;yVB`DH^_ArWePvvd@dHF;Ca~Fy9z%k+;F5lcu>w(mlX{Q{Y!A*ASfCl?RO*P|kk@skx6835}nyC;25+ z$(CphnG5ZTzSq*3{-!GLOp|xzVgB-et5z+So$asJ_fo(63boTD-lHoI!KwcM(_8Xk zaC0~w{_Y$U25r_%E4RXj=#^k++uhtbD*kQOd?a6A3BTr*OW^uf(cUfXl;D8<;y=&{ z-qLF;C@Lw%C0x|o%>C*@(Z{)Y(czZaUJMA~1b?8_p48e~QA+Dx%qHEsqU1gZnmlI~ ziV9Xg6RLZIZ0B{Z@rdWeO;Tf+k!brqpqulMs@){6sS&$(a>Ir0rm0(TepgkQMEy23 zh+A68jk)iX?YPz;2kyCjDkhX0>3%8h?fj|~b}`&QYvFQy%ORLC9#wAi1k8; 
zTfqSMW;{q6_(}Yc>{*P>iGQqqX!a=5Ir0b%vF<=iGc)2Je*}Dhlx}ClNAIotfTCJB z4g<2uP}_@gQIwQ!Xv6`xSi|^%DBRIrImUk#M24R_%r}MpWeQ6S%~*aAh}*TC)_+|! zgYIMelkHJWZM)Z{zEFUb8LqHf#EAKiwI5xzoZ^c9L+w!|meY`47L5QvO20N#xuvgy z7qsyN5*9-_)dBX1y(L1H8!QVA?m_~0>)N9Lv_MP0{aQ2UDEj%FainME6<;mqWrLe! z1&{0YQO1p@`9thp9iY9U=7=BVKe<**Qcr2rb3j@~(cC!D59fcmVs@KuIDevF?o}7I z$H<$#oNe>1Q468kn;FLrcbZ@BNk40`Mm57;e3Fl1qsZGH9(2xl?|n7mluI@N+Y}q5 z`r{8_Ragnz5Dlb>!8>mpm$BIVxkk*UpPYnvyA~sy(+J$Z{ zjl)3|6fj2l8q(t8=UDm`yEx&Hvn|!i_U}Ec(4OU~a?4pZZ=@}7b4<*!2K|e3XWcTT zZM5+ZsI0!vv8CM~GkQTgduWaRI-Gw-6iWtyN^SMm+Euup2#5NhqaxPG5tTsq??Ro* zk@dNg2WQ5sgnoEXQ)@3`vSem&=PUug8or!=NYp9ywYoHU7pukU;x-Exi?PGGWV}2O zA3u5&+AP=*{qf`T9EyJ}Q6| zwe8;GC6dH;#~C~v@J5jtPpZZz!zr-}2|C&;C&37=pzEWglClRp!oeIJL!4yrc1OI} z+|5V;H1L&WUR}X(`D*hf8(%&tG_ULF!v!@HoxE%TyCRSgYi}1jw_Al9nyavxE3p?~ zSYU$*jwr19c5c(OHcuLNk@lua1EJq&yO>`0U9AN^# zHqgx0k=AaF8sYFBI7r8Kh7q#`t0U)X+*1)0RJmR(Coi@ay_FE}F^qUE-DnPtf!o)1 zthgJzP1Vj!L|={kinWGjdTd^rI*NJ4SO<0GjV)&I;x*UQq`W^6l-`<_DA~xC-2CA~ zNPKIY^KjRC79Qw3;=0~M$>s4|YSI=OX=}+JteBp3c zPNv^8V172HhELK9y|d1h6q@K*@1b+<)wXSd=j_J`#poGY*y+O?yhtr;iM^~xVi|lK z1CvNOYY*1;j0Iz-b*Gx$zLjL!j>}@%HfC-QN51SfNQCr`kloE;ByN+;f(Y_iR%uV! zGJ)DJerSQ+T^IG(?hrdAs2|fkP;gx{$mRBV{W7ne7Y+75g?CL*Kc#Y^;TmVV3t!y= zKcpzR4}+;Pr};g`-*gp*9pgB2U0?_f9iqqDtC>{(*1zQqRe~pmN?Lvsh#+?hEWS+= z?sto#ip4=NJ5Fjtdqm2;d99voZWRTRIJTV zcO)l33K5iyZmG|xERR!bl2j&qqpi$YR~*Bz*qc#^Odv5rTpPW+BOG-m+yPGO>Vc}1 zw){AsGOcI)(}`q^Q{6fkqnaq%I>;n85(_tZ_RcY{Kvzr{UC?jbwCER?zOi>7BaS93(erankyY4;2rN zX6UnK1B7PG7&pTx+ziy6a4v*6?(Vz}&K^ey+}=)blP_liVq>2iG?an|+YX;^`x8 zh$c7Q`;K8cYBkctDeN~qR%6vt;*>7k#wqP2ol&#p15UUBrO3AM=1Dfz!_UpscFASO z$CdE?%Z-D_UuZRSIhnblhC_vBN@E-5U~j!t zfCEIa`E9!99{yKc@tyO$xw3jUR+m-P)K)q3w6GUHe^LgP)Q_EZ7N%KY6^-`(hgVC! zGWU5`53~4qF1e$mZhzYD1&CqQSi{?gOCWoxD5#?ZxDenQd%Om?54tLklqxE*=xX7p zuBf1nE#_29N^K^bOPF*b!%@c8TMr7uFbs!)yBOT-F>03vM-@2TYhH8#jT&eV9_y;* z7Eg~7+7)a#(31%*ft4~qB@woumXmimr-|2I<Uz;GZ z<}Zg}b=4GYu5K+)6Wr=@=nI=GU323!4mZ=CzGVe2I^V8}ikvS5l90Ke$x9peda@0J zo2Hi?i*M}PFl_Gz#j$8<=aP~V=;ft!qkJ{`gJ&C(M(EJ?rWxO8*j_c7{3aX@I>}*! 
z!Rjh$$Z;(W@f{478{H?WWhX%!sz}D&q zghDkOJ`;TWmdvK9hN=e53)?Ji7Y}K;E1fE^S;cJbbgmFc;?n63px)r#_clLDvSlpC zUzF;bO*dHjQ*G$v7P$Dszn^;cG@V7avoC*%(#@t-a5rZCyr?63GMQ1=MO{x6@VrSB zgL$qR=$(lhy1tEf?s=nYYpT`zL=S$_a^z9?(n*G5Eoi?IOco1gz1DFHRBa-nN`0ec z$=UGX!*Lgac@SVB3M;Z)p^acU6C0q!sT)sSP}lWQi5l(e$6=^8;H zUhgDKqqoex5(Cdf08xp zZ5R{r{hwkCh!ZtEsu%|!7vX+sE1}deyt-VGUWcR8a;!ScbAG4Ky~7jS!U!!SESMWJ ztm4LvMdZDQp-p4hdts=OLedD=j*7e_>%)tDQZ?OQo8y-CXYO%(m7*#LddaFhVA9sS z0D0)FnHkT1X2PbsdcLcwx8k3rK(a90NtbrxU#uAE_p>xz`Ic;tdkfm@pcphJ?s3yibHxPTvbothk z{{S%a+W!D4d`(cndhiD#>#3M;qGNIi5wUvgX*SvvzpRSyC)6 zSZBcjFQ}4MmQ7~{=mw*YHB7p)&0Ti37Y+@GddhKy9m-%w6WW<#l|hY#p_{jfwin@Y zmZy+523t=QthYgOSa8`|<7a4Z#ab|id+pk8sQ6_;VY9nfk}GNAqeRdPc@Y_OMYcM?DBul~ZS z^vOSGWpVr+N3Jz%*+?I<-YeZq{{UMROQu1-DV7KDH3#R7Z^_2EIkmdZ{{SL@&0_U9 zK>q+~{>fAA9{l?#9M!E}50*qu$Sys9ZPgZ9U3jmBe~gAuUQZ9D;jp z)W((EIj6Thfkp#~Uf3@h{@p^z8{e@k7}~?Xn)Gp5cI*A9U z>XcyY;-OGAy4_|hJj>nNTH#~|XA~*u<8|$?s>!28?T{F5`&K2VTY3_Kx9?sS*dP*r zr?DpH>!Mt?<3yW=y~qSeH&{KSk7`-hJou#CEIo(>OEf1$k!_CzxoKFI>QD_{+v#mn zc^TNAJaM{0M&Hh~fW_?4j(v%w?Bb+e+j&z-hMlNnVYoW8*sJ)PE?#YIRhiASWZ75o zKsR6Bv7;bb`wZi$sl}g1F3Hp2RI+e3gd{jq?|p~jD@c+HW0YD zTX(d8i<4dll(jC!pA^CgY1tcz0$>3;cNHkoe5oo=w2LJhWf@HPp5!2U7q+oLcvp+n zi6{Yi{{U7+?6KZJ7xW=SW~_1fcLJ%zBZN-qw?^qCG+n`^&LA5#x85e&08~tQ*_d_= z+(QzYlCqjwi0SF&!MEx<*SzkA?tpdPHsVJ!WCgIE%@7r7+4hDwqPwsP~ z_xC;Exq+hlq!1i;uevu~ayLzri)b`+_)J~}T*~+=+d^m`6GX3+#3i$QZPGU#eeLbt z4yw}yr_Q4@J_y{$fLCE=d1)I}8I99IM(_uZ9!G)hB}Uihh;Y+M4D=NdVigUM?V^@Q zSoX1m2RyjHa@TWhXI$UQZF`feI5;0kDNL>XJS}}8t8>I510bo-S>w) zSce_ZY|<6yFV_r5F;&gDT0}*lo_2W$=L=oJXph4yFuX#dvI-5?cm$@JK-O-xjvCkw z9)%?*y~6hayjs#gRUA77rP=dw>X+gZRW`b|TtZnYOF)D>?gfDHS_YTbRUNU-q1WvX z1+TBfaN~wPD(PT_m(oZ@iyk{c2a3(4=*{<{LuDDRuR3KvbgSCNP2mzn;x}{<4w`)4 zWZyAnyh9Ayep$gGvBU8QW^|)+_bmkId0NBLuCc`(G-5cTY*B(v>l?Z*c<75Y#5lF3 z!+|4Ev{6vq({OBiRy25}7A=fVgUcTEOi>A4ywPIQrk;?s<}b7yM-SU(2b9?W_b#^% zlBM(R{`d>*FWLhJo#kv?69=SbH7+XF5nA}gJDq(CLB9nVZpiTysNYvc_ZK!Rxtj1J z`;+=N?^{IA6p8WXEJj^Ax5c3E4RzL7Y?o+1G6YQzN)H_ujL~|270Ss>+k1q=9sSMd z^{343OmXkrr10VERdN|w&p&OX;(Sx|Epv#{iSfPSEPAH;&$Vof@H}cW_%8YtAmw81LrEDJ`Q8)p@oQN%C(JX1?`>^du_N!tgdjT1Hz{5(RF`Km;lR}URzq#Qa94i z?p&ixZ&Nm%oP3ITA>6$db=d|TuL_Ef>s+Y#ZkhSk7Cz`7>lNRk#s2_eDiPUJQT<_5 znlJt!zfeDHfAtY^8PpkA{?@+V7%7Ij)v@+&pX{i7tNu-r!|sBXE+VCZ`Vpw~EWS+= z!TKgO(p`@o#XAP!Fsv7Az~&Z8iv44NF`nhG^JK1jWNq{h*6RCK(AZe>$gO9cYYBYK zZh$;`(2tUrkKwJvDQ2e*a~}5GJ*LI32H@OxfVDn#%=nc?2Z-Tya>F}kYa7?f&wYz^ zz}yqgz>hgsjLn9+rbpFN^^K6nyDM1YI>zwf&1ekaGqBj76&pc`#Q3dlxAd=&>Ur{g zqA-oGBwRQJQcQjM2v(G|YL5rjK6~ z*bwHDPK0*z1$Cnc%~oGMtsR{`W2@|qhns7lIim^2s43q(5w-QylfpxTV~*~Y7ZJ!0 zTB|`mGl`r-GUZ3pO@>j#`RAu};gSeQJ!1xH+TdFDp2KHz)hmUW@|ARrmJ-Y-6M@i4;SqSRYln*F zli1)j9qYY~M-zKHuOHMbSmHR#wb=e|%s4(9MHCahBi4ZD9oIKs7>#^g)31=Lo?pn= ze7Wx&k2zxO4#S!z-h!aADPtfK){$Z?9DM*PjzPn4nhbh*F$`{e1Vj&0Q478B0uPGY z@fSQe>ZkI45wqHi61mx$vSwnfe2sLH%J}5n`-fYd$0W)3r6n6mye|Fy4#pTpFpVVb z-*?;dMmSbu$$48itX)E(l%+z=#OgL80^T>z?gMI-LmR!F8t89HikU)i0m>_B%0ilw%0wicQ-1f3c~m z_T4TckKy%H_;acohwm}cv;yv|>2L;u=H7(s7}_lP;u<`?hEw=$WkpPW3fV4P*^9vg zbnx-Jc_@5yFyRMi8HP+dsHVcP2Q|!s82Yvw4MonJZ|Pe{qd3EJ*GQZx^iaX}H3YLg z%w^ib+gZf+ml3M&!xlE-P^wnG9!{Q2xpvM^B5k^B*TLCYCnHkj9I1!k^xzrdj0ac? 
zx%^!7er_b6I=Dc{uxFU6AdW)zGz;5!A0_yC0(B-AB?U9;32T8H&6J))L#?#brj#sW z+V{;LUr+$H@C4htRmYN6&ZJT7l)>%mBC7Ca5Lwl^g}2jz7QdTcT4_}TRP(jFjdbNcAMY1S#aSXbK|PqXd=J?XAMtxljlvb8Yo3Th`({tg5ex*Jdcgy?)LMy zJ1(TSvzZX_P1Mta)JDSn9iTjZ<-lltJ&6f1I!d~s9LK=sV@QTZ9oqq@C!ZmG6s;5u zf+m3&lr}*bEd{L>(!h)SUskIe&xgW8DkvEgZ!C8E3qzhXI=B1RSG2n`l|8%}_*B)j z%!jNJlc!kASe-1E)8=-$-Tb{*S952j;?$*wvfAadU;3Oixv^fPD*X~ zl`Qo8vrZq?faZvw#^-_W;!mA#AL4AS_AWa+uXY<*CJiK=b2m(}ynGPr>e1!3{Kaey z01JYm$hy(S>o#09l6wn2Y}39%#0Po-{iI{Mul_hYBv@^eO|VKE{8RjPCM*;`A? z)aAjVTsIJA_ZwZ%1+yyV&=sQgvxyip?g-#*Y$p;RgKxYp`PaHd!yx1+@#5ws8y!7Va1$xN!f+NB=AG9Aw2^7|Qb&Eq&YaWY?qe+B zZF^QujzBGT`I?jPStL4;uyWYHyGO8kSHA5Z`YQxy}RsqEpR-+NgOjx13>Nz!Go85 z*UqU9I@eEv#(F=DY>cK*0Ly zP+b@tUfyl`LgO6CfX#o6rJ_oLa6mDI7i z7EKV|f895~v?j4sZxH62Ii~t?2gsyp*!Hl83$Sx3Q$a~m#|N}BM-3z#mrYGB%=%YL zu#P8=RAU3|m$C1@_V8_qK4iSvnqz}chlrzWa<#2-!K8`Z! zBsX!kjBhs{<=8oYEJK%YdR#i1Fd~JUx3;c4$?}!#^gSfA=KDm~-H#tL;eDg(3m!kS z!TsrT@)tGFKXvmYpIIly;-niyNcaI_{5qT^xQz&kBGT@$ zgxuIzI1}ntxhb44;xze{cQoRY;FOWd#xUMSGS2UCA4-%gT3Xr5NH((8S$AiVWZdhT zGdZp6q{bTU_`0w+F5vDrx#8HhhU~LPedYp34b`h@acb;CIb#&NtdgFY+%9#owY!R@ zZ@FZf5`wl`Qpp3a81LeAQ!_Q8gR3IIa8+NdhGSBSjJ;ARF*X2A6vxDp+S=T>@e3Db zDy%AYP~$X`;q}})xkazu)4{{ME;el!tS$F#h$B^K!hBg{b9Av)JhxjFYy!j`c!h7n zVIoG`P^vdB3ymbC*dAd#2Smn`?QA{C-T5w4tYagTK00rPRE}GPOFZdi5s&`>lInE( zf~w?<(K{v|_N$AV{{W*q*2PWfj(={l%YWJG59cvmc1S<%OB&Yprarc+KTKnM!IsDS zEa&sswdCVm9O%!tKScqWjq7lK{6G%A1PlWWv|-xB#jK(UOjhkBN441LPEc3yJgC?a6;{+fiy`y^MwOEOe~fR#%)i>;A5!@wG$|%0E4ev3d`5i_RNv z8J?Rr&qq}JEtKwtq6%>f-@m>N@CS7K0l)>g%}cU5@awT=l7x9W)sEA#D$+QCr)p-& z*mMsCIApB&{{TAO9*P%l6_~!Ai55!J_g<2e5;o(;2vKH>a@BivfO~E5zIB%HJV*!+ zPZftXk#l}9D-z&Nq!HMF@yRD@3Q@#!BSnLVCglNv1b3v|Jqc580@^GbHS9nn;eR4c z$y27F=#zI2!N~xNhKVUO9qD&-NWX>EKy4=j^rlm5sZV9Ny|qm04|36wqwlz@*s-v9 zDtdQ-JJo9ye)jhZ1)x9K$i8Txw+;UQpslgB2X5s0-d-vF616_fjq^qy@{Yb`wRWiQ z@jj~D!Sxl1a*DV4n;SC>C(1b1{K!h&}1Aw|X`_DFJ+Yr&CfueCXR z>iVPucM!^_{iVb6O8PBT_NR8Qo&Nw-hChDF&mZe{^n$e>OiP2eHa#y}vOJRhJJ=-= zzw!)LoPUNIe+%eT-Gx7_*q%Uf`>XiJFO&?GaZmPFCD?nEf@R_LO zWgOAd)RE#zEPZ1E?`_4w@`XUm86y>Q4P`Av9_m$MR1&<>Q*J{OA&`>t!sEPb?{W0o9oAdc~^qeR^`fC2`WU>#nl(SqsMH; zgc2WXTsqpDdATIz%XPC?;j_hD$~bAaH>s)G=Evf>#D;+2EZ{&l2jGh+4D0eku{w%6 z+KTZmD@@rV6)&WX>~wD+mTn$EwYE3DoK4d_)w7wi4jR2)e;Fe;^I%vO0VNNI!)@?XzE)XO;hPfkW>F?K7KZH|w9%`&(P21F58(UrCew2I==I)f4!`(<3mhifXO?=gih_aooaF*_D zZ;X+^gEVB0@U|w#=&A559;OKE--yb|izl7r8@@KSE!Nvh0T>=-sIzo6IZr*~`I8Ze zvSAomtHmRDw)17{7{<3>x+Se6$P@04xvsYtnnzbu3My)wwu=pDC9%h1THWhgde?F0 z;z>Q;OlMlA-+SzI)hyQeX71UxfBlg#39&B(6mE{DT5}s@-vl)HruH0KMfBrzo@d%L zS#Le%e7N{nY_F&k*aoclwX9^$jKk!Qb^Mqi@Z%5+mmf<+xgL* z_FYw*HldO;JQa~s)8KBD)WG(V=|iko8_VGhSFhOZjy8;Ne7B2HX6oo9bzU7C;uAIA zDn7ZE$#T3tsKEOfM6|8G&L3 zHyY#Bfxb852gU0{c#zWIU2YAz;0ma8N~Nx*sFEtUI;k-B8W}9|>$q;XoqJsO{z~b* z&1Uy+ia3;|?>6{1<~1aTFxN&$y>%qCfyC~<` z$#)Mjr$_!!KOlO_xaVLuXue6*d6hxGyY0m83wlZr&|Nmi_@S3ci;gefk z0XAK6(`lEHBEWLwq4Kc(u8?R$Uzs>D;9CYaaifu78L+=ZgaZX_q=uvR&O)jwPtL|wa&xqD{-ox zt+MX!!z;_@keg~P!oyvA7s>T(aLY8dJ6-R>J8m2C(&W^8MUrT#rhHGck10j{39eJX zqQLR$II%~ULsd!EO&qq}GH%_xbN~`~sa*A!#a0~@m~~ODfvvUN;$30ddsHE-lB)#A zGZr-`R~#ZbI%u2sM1n{i;5DnwPj)(9ey0o`%C7WNGtDI(ZaX~`F6w2ma{wFL*5U_x zsMXYap)^zuYnuJmv=TU$us6~}LcPy{QsPd-X29i=vU;Hyl6iIAd$irtg|ER=EAXR* z=BVjojnx&sXI9p@=*PRoZT#l;?p}@-t8bKgSlzl4!@9_DI?NF% zr{+px7@cM%l&NsX%w>xW5AhstxE|tf+%yXeHmVFO7<;8Ibb=`&WK0(?dzxA=*a0^u zxgmpN7;X&AVxqF1TKc)-bu@ABZpP5ztBK-IFgurFCf@4!o1674m~JPCVvteM(9l!V zM){dn8F2T~=J5cX0P#Y$Z4YS~cQaww9YmRKY8*<2w|1twT6P;S0vyZlT-cGsTScz2 z9c!74|vGSrs;8QTJ6WgXPzR+m9#hxA%ikGYZ+PvOW(XAe0L-VS+?_W zvRfX?xp-8Hx7D;wEo58FjoXcMZY|yh))wcd3;He@PHYD{Y_ku`wFZigCP?Eezzi(c 
zvxy?(=USHz+4Tk~9d$h{lU7dJ=SK}pk01zvwfAl1*8Piclp^-;e|Oj79A%Mi_et+v ztJK-K`zT;|y$xKlvKo3yN>FKHq_yk;;bY|0TsjU5swkaB&igyG|*GP`|sPgTk#}$l)l3;C+wG`4g(wN_b8?=f~Fw- zB5b2{Yx1{GnRDVzMp)eK^Z6Z2yJX`y&_q^X!ONMnXj){8e=fyYZtV-=ZHY|!7tGn_jjwRI5j%L>L3^7H&V2c-;jO7- zU(`y-*(n;nW13mtEl%NkULE2K>v4TZzm->} ziU(p2l3LQow?>9s-(T)rH&3y(@itar!JQUgqp2m1n6#54TsJ`PP&md+rogM>mGy3v zbw){A-5b4PL1-FSfOwTRm8#u~Vbamnp0*Pi1*B+OZV82S(9rL)DP((?(&qKn@onOc z16?i!`&TQ2lZ=tX`4~Qi392cbx>)wPws!u9uvm5vTHDR71~5&@A!;fq_v>i)N!;Jw zBmf4V`z?VEEfn!zeYXQp3i_U&n~a&!)V}Ewl~t{DWLVzIo%XwnLNre-R{#OO9;tftbX)9SGGj9RE&)*|^ZQ~Q+*p3}87cmhRGvNl`bhL{bc|a>NH6F!ds|6~`Zy8884xu0EJvcvaC-FbM9v=&MNaJt8{`96dA>d`wjgHDV;*gGH{wh7&jlXj5LbNv`S93+8 zcI~m&V`5xa34{u!w|-a}J|Afv>v4O(82f|hOtB0^Rk7kXm|jJLjCAMG6tgd`IH%sSc%=#U*MJ5Rr?;oF>?1-)fuFD zyWT0w2|cW?D0T{6+r& z2m921H?o&z2;*U|>O&sa{i^rfApCCRp=hSb3t1NqOIl$N)BSN{GDEq%Wi5GcZ#B{X z0I^(?FgL@)d3~z?07|W49T;7Q)>qUwA1^g?x^fLIw{OegsYNu+Lmy35)<#_%fCAUB z*n`M|tn}5iS>A=XR%M66!$5N?t!K3M+-eVaTS88b9b>L>&@{EY7XJX+PH;|+?7}ff zD;-amNxE@2jqK3@dGKPjZ>!@7XSz}|Tb7LMXo4S#1ME0ve z&%uQ~x|WfmR23J5R+*vQJb2kjZ{L#9#(48^qB95iDyv_(Dp}_0BQR-%)nDQjS0-7c zc0@0<+;sBrbL(oL^u;&K*>HAJOn*JAU~GZ6+CDyeAh*sIz3GN;nzHchqnC%v3w})a zIo?0F$att6&M%4Jy8fm6}_xd__CqUn=TqUjq{fx~I9tr*|DkGwrKN?u%X=`n% zxzSWcS1{T$1v{NF-7N7)2cKc=TIZx)6>c%G{N+uD#Whs&P&_i85xn=N($CKmaf~RKS1-#&tZQ1_bN`<^Y$ULy_e&8iwhA+T|mak6K8323F{gNAP{Y0 zd92R%x;Uib;?v;yr%g{a9$kEM#P}o?0_>5ct{zZU&d9Sjs9?C|&2C(`)8yhXvAL6E z_;xM`@#lh20sL3Q@6uNcH>GyQ@eocYv4&GSOZ7DLZ$CL|@zk z6OEtjsj{9#5yr7-kgM3nPK-CQ|+T zlGZVyw%qUHxiHF3X>B`tJUB7rPB~Pc!5s?I>FwMJcAoL4YeHnn$hNmjvVQgHA4EH8 z`8Gd*5`d0-zml6aT{!Bv>*|7S>uLKMW?TBkhN2uMvMG&rAq^i|Bak_}jWTYK%Yu6e zMh?+#=;B*%l9aPF_&#vHE8#seHmi$HTUD9yCP{zw*`xV`t@A0mj+UEE!{h4?WW^pw zW-m9=3Y2GhN7%=1gZDmyu^VoJZC43vUj{u=>mPwq_D&C%d5gMw3v`;z zT_m!`VyVou6tvP|GQ$itRPi~U5x&9i`)#K*J%xq^M;~G;S|4vKUz- zoWSFuJH%L%!-~>)m(^Fx($_jHFDc0+7qe!ptt@fxENpJ|qBPwd;ODyf6Zv1zy0@WDYj^2`?*VA~9 zC`_lUa0kV49%ah;OX`d4?;phCp~R`H@M&ZrqFvE6K%UYp;rS(W{{U9%!obHgwc|of zl`40@jYUVj7w$+0ZxscLmk6!}gSnLJ9NwjiNjm2eG=w?i8|$D6%>^_V-0;%QT;1|n zyxUscw(V53H`#4SlQPhfY(Y2l6}9%KFm_O%UOFmf3z4aCJXMu+m9-Pf!fhCg5yINo zuXc-z;w-{qf>9WXSC*Eag=;K6>>ob%sCTxnKl|+Zd5Bp=E=IRWZ7JLPRjSwhHl*q0 zDOin*bVEq>?f25QR={KHHqnz?{{V?ZeFbY%_TqW1r_s%wX52DpxB7cepb~{XpCJfb zMu7-kAUGpqfJ4(GX{_OQR{sG0LkjEcBUmKX{x{IM4`u%I(_?7N_|L5VwS{%&4n68$ zl(pc>98?c`caePe`lIZe!|jnd0NP|{Wt_Pczo5;9R%Ug3==xTI<5qO1-v)^+=iEOzU` z0P_aR%8F_#8ENCFs%$b&?Z}%Wz%O@TzZFP%ELCv)D4pg>w!P81&IPBig#2NphU}H? 
z@;PVVvj~p+8&uw5&DYWr#Th|X90zKB6oJYm)a_($mJ5L2ha0%HI;ER;(!(~I zj3*z#Y3cRYRkY_yKSjZusk~}slnmU@{8FLF<=#P+=`{4tQr?ptJrZoiEH1Xj0 zeI6Lh6|sn2R>0{VO9O+Zla}*c@LQnXI+gSDgUOn4^V*Jt<8!lhWsGthXA7gOh0ZLV zCP)Lgi-1Ty;`Z|^R8m=?bH+p36$Ciru>3uS941zHY(htGbzAOk{8zq(uYJ6ZX$sSy zwvC8j8RH{RQO?#-;uFZqrf!4QH^mJXI*S)yMeJ$vd(3J1@^)b>My%kPnc^Y#z zDucHCdVQf(_N;KHc&+E4Udh}nF=z*oW_9)~BGs!REzM<`GHBPiM`j&pU*u9P)kkJ8+!7zb zRdN{volm^b>B#zmr$*ni!TbU1KGku~deYFI5 zwjuujrsc?Vr|j8y=o{)?gVSVhZ2Jb%Q}=JwqULOpY2>KXa>Y=)Ls5@i)44ma=t5U4fXU zI|lu8&Elsg;2UuHUHp`uEdPnCS;T{cWoZF>Ize?rIcZZt+w$7?VgQwhUq=^&_l1isEBHKy0z zHnyXRtyN>S7{=00A&1hz8y#3?cUf}RIOt#G<^tGwMoXXThMCb_l67+zxQzj9Wzrt~ z*56L$3pPTp!Z0W?XVS?`%uB?i-y5avu;O zKa12ktPFy01Yierz0Dwb^+R?;nya>W?0$~{ti&h`@9S!*qA%*&){hv0t%mUB&&zJ3 zr0CBk7qhQ(s$m}LVpx?8JP$3^jeA4oBjoQ8Pv8~M`yl{2`+okR~5u)|5VV3Uve#Z*UERN=?kC%0|Fyaiu;2<&aO3^JI;ccwj2at5N!kx+( zM3q?d?S`h85T%-!NhvcH?g4)g5-?K%{dAp;wY1#g3sieW_W5(!ZiJ{$0!YKFmMac!*)&%y8*sbK#$tITve;#FN zn(Clon7(Po#XSTRj+LS~;gVzG8+g(UK-bT~NMzg#3c~T6QYxHMs-~6;Zbd?TN4$%X ze@71WBZ=&fJI{+vS6P=tU1VFang$6P)(^S0&7H3jSC}ruvxXgwx=Jhxsf>Fp(Jez* zI9Qk2+Hv5x{oK-NdVIp1ZZ5pOJAQ>z6R5;;mSON}T9(s_qt?k3p7`hi=5^wG6>k)q z5~atPTI!b?nt~o4=J?xy=;G1Cg4`1HcFr@`!C&C?&ZsT{&7P*k*CSKAeFtY^+xQvHgTEflm5i*Bf=ZbwBMom^ye|`X!7Ty02XVQ+ma*;P!!FIQ9nP$>Dx}{%yPGp% z`&g^Z8A5`lN0(MW1Ql`0KYVBxC$PDs8+X}m8hqbDQpaur#=LIsynxZpz@P3{@1Oz- zW5(SzeOrKmzV{O!CayfssCrem+&nr9xq>iH8%f9KN>gDm(NjkRF>XZ0hP9w@2$v7> zeh#bZu;W;uqK_(a-tCbUu~SFB1Xr--Aoi_coNyc;5yYscslypHR6ERx@=4<`5;v`9 zcMn3^RWv)7aA{h{Jz`>qJ7wxKPTk4bB#x7K_`9xWFFNNq%2q#E+y@e%5yC?zeYZ(A z79-lXc5dk%4pGIh$|$gS@rk9crNXKz9Y0oEEW4Y|w&}gM_;XsrZa8yd!Eg%5gKXN% zZ#SOsxb-MKq~NtQxr&OWc8$uc-$5)S8{M~rYv%=Iud&x{&M=Y#sGXG_X z7=hs2rCAfM7EL0>7Bt`Wb`Lsr9yz=F(?^|GBMuyjS9sdQAfm_h%;>**Z>3i=X4t06 zkwWo@Hm81A^{{ULmP-e)g+cfl1wch6k zZwuS6Bl{}3gLJB9H4SvMSYVx$b#J4Pwy29p--I=1Bcg%Dsowno0?i9m?CUL6Wvm-9 z#aJWjDzUn%#)cdCk9jS>eXO9q=ElukJ7vpx+J`N}l?{CJz}cjbVq2Y1bBF@nQNIiE z998ljla(^U0G>c$zu7&)I*X7ifByiQ;yeQmIiqaY{xVy?1VNERdz)s9dKUPhgf;-q z?tGPGk8^nc0EB>j4AQd=cZ4(z$Q@H{@<;?$?$_z1rWC!gsS z%Gk)>&smW6F{9^N%ccRobHPXZ1HY^*UQGBo_AB+$s2s*GjNrqT`_JZ;Js;a-uuwUJ zTzdu_d}p3L%Ru@?l-gUB@PJMI;kQ;u7$lL6anO(y238FjYX|^bcso@6K9Vz@p=8lt z&)Wp6&u~>bj~9vwW@|dJQ4t}IX$7X>_bRs1Ixyz^%d`Av0>ZHzA+;582107uT@z0D zI)cY(V0mb4S4QDS-kao80b4kwiZZ+N*!f`>)ZE)99XODjp@=s6hfZ8b?%#&ND@*l1 zwu?(sS>d@dhN2xj9n_elcOARi&TaW{vig|4=D3s+c6hluE}JKrFOxoAo`d<8mKU~t zo?;J^cy2zPsu$RExbTsGn}V%ky%4j-6b2*JXT9 zEsCK`8x|o<>&Qo0a?`reVSPopub{|MFxu|U&Xc0!(q)cV&AK$$p{i#P;pK$;SFMJ2 zom?zAbw}E_9_T!fSkr9>p>o;uRBAShaKDs=sW!`*47DdOPlpP+2_tN;d6RbTxv;kB z?Bm|L)WI9L#?L@W$%fKOee#wU{HVcE+u%|2V>HlVwd@g$BAtzA0_tW-^O6-xY?Fpp zM(Evx(pZ~yp60~jzNR}a;Zl4oGL7C?8h(VC%6J>N;PfBx%=Ruluhgf^nhlp_`J!tb zgwa7^a^Z!sg#Iugbhk&$(N9-{O_!-?SxX)g#d|@czB@S$;_zE+6hRM&0K^<@Gxm*v>-Cb&jjWa&85QRXV}b)#33< z^KKV&no4HQkGnKrvvL+*VEi09A?j-CyjizON__F~HTP?SCuEjF$8Hz=PO;IDMX=$eFpA2Ed29qnKF1HKVa9?0y=;^aM zd>atLW63x?cx8LpB~>G7z>aq};FmX<9F$Cb5w8o0g6-fM*Eqxn5;P9EUqbL~W*l~s80cegGLjo1 zkb)T7f(X5V<9{a=xF~u%?B5OC6xllsgfQjh z+HMKap!pT)!*o~+4#1m9%~vMQwIoGN1XR&(MH^#nxHMU74XAj~(hfn-KQJFk+7-t_ zwSLmIiZ0L`es+aX8S~d@4&U0HNx%A4QT*iQFhOc(osRa?>W}dCU#6Z#$!LfPyuNj| z`ZBYBZrMC>`z)VB9JM2Lm(I5jMW%I|dCFsrr+NO3=xF+4gduSn1R)3x3CRBdWAx_F zjrh;1`S(ie%p6`Re5;YN@alScX7+I2bmz->T31})_S~1uwcyC+qgFe}xFhP2MQpEw zzp}bN(`xUDz5&s2pVc9Y{7J&y{@oYpwOE*!4C!8s5(#=eV2iU*n?wYk%R`{b)mO6HXPLtWD-<948&>!@9vy}@9om-L`3dF%T;-1j=n0% z86)SabgVIwY~29Ap<$e|l}2R5@CurGhCtjttW7Ild%3p;!rb)v)tc+u>Vvfj)mPHd z%LH@J6f#cz$e8yAnH_7{qnS{cyx6Q6(<}-m%%^haoy8ztj8$p)mK}M_-N2(Qsv!8b@3d@cacIMozaW^N)$Ht1QwYD~^uR z%08l@)H#-Fmc;qPX$k70_)f9q=}+rUjtQhbK5z{uLriq@qcU->(l(__QHAGfNNHbF 
zJw7`*~!$iy^IA~jPNWXCGQkx6QIQ@QM#fq6|JdhIl;K1w77?jCgB6dxgMh z4FQ(j&;=u%X(~2%wg$zBHW_d^JSz~Tu9S`T7})6q55Hr`vE~3AmOC7kuFcq;40xET zbufKRTcy@Ynu%Us;s*f62eE^+g1n4&v;F=~rMp(mQ?!GK;gDmrSR-Jnj!Jr~cr)&I z;+dQ5ZMZms4Y^njK~r=807zQA$3c@T>6=vP%q@IUz``F3x6E>1kBz?8fo|1i+g2}u z*XJz7mas`5OXQ%n*_$XqUfr!>~5BZOI_vj z76_fMFz0A{LgH77QjA|L;O?8E{hBIUm$NMmUTDP2aVp0?NnN~mDOhsbH#O(xTwRjcHY=LkUs!QmQk9ISkFU9= zoy_s1W%=E^_PzIw{xDq=(SHct&zO!IO$m}ZYSCyMZottr*CzacZ>cdYnIh3)EwV{2 zPTIsOjM=h42r)X4xI47&pYBUx92=r-Sm4~!=3d9aoNPNcQ;a119e!o*o{B7Sod%01 z8>}KX1F>d!Taunl9f(l8#l)*s#U%nI9M0o8mNIOLqj+lNCKv zyCNrFBHuFfLadfj^OH8e^e5J0$rfN--V(-RsO$AJw zyU%NZ-Oy;_7O>h4Jh)zI5p@E3d@3m`C}TQ+`|=&v)4Z;t%Y74TGL-~WR9Tjtw6WZV zbq+j9CyTpDO!3}{_1LyG79evaJ9i>678+u2s5id%a^afXi*0an)>O(=I3``e>FTMv z%9?p+8-em&Cy(Hi*_D}jmGFsa@Ni>>D`~n*EIh%44&u6cqoRdj1buQ- z{{X`u)=|&sn^?w{oXbu}bu+(NaV$wKwb;tNv{W(NLIX*M;(>xuPbhOm{toj?N%GjA zN^34>d^Am}k)qPyaif)%z<);i=3#PCyD>sq;v5fyyGwpDVO`PAvRX(WIfffV2p?k| zZ}`idF6OP8HyzA1^|kLUd*h7oYf7&^392e2d$+H9?-jqF(W-Wxarljo`mz0FiRkAe z9dnMvfAYumlxduaqd~G4!!z5fu9Su+I(KffcKw`|?|(;W1deyRvG)N?e_3G8kFrAd zJKe?`{{XU`KRHTb(WEAY;(Hd7ln_1Bv~ag+Tb=zMq;vPZ+h_bS{bh&j10!S(_icm@ zUJQR(aw9CFW&C!x2d`mlq{qOE=thdigkx0MfK3%+1di)mI|$)x9z{<#Vb5!U_hp)Y zvma`8f^2KtS!RE_jgitky8THO`PFeYvkS^NL7aAtVV)i zy+r9;4er;6UdM+Oy3SNPyP~XcwajPQlWt8R`^K?{`6KOKbo8*3x}&iit$8&ZuEs;G z$_Ya_5F)W2M8=~F$8oy0HV9cn4~xH2{l z#h+^8`xJbe_s_vV%RL;aD&~%_FyiMFV0QVWqj6+Tci}tXA2pWg!|G`zea@kc*B#16C;Q0NLB}`oJ(sR~=orJf%G~(jErg%Db0jz8Oo|xHMA1{*(laHRj+yhiZ6S zEqmyoAgU6+W(%~$;M0D3199n38(QYt_8=$+#l-8w=~%hT80T!wyL{mCUOCzo7zZ6t zA7sv+snsr#`=CM{y2H)Z;(V9W&EA{>6fKJf>J~;@4k^C&wph9P6c#G!wBo86_50Mc zy8i&BLC5&kt23T$a#ce;KJ-+#{{UNq`qrw5KCe6PaO2oQ)Kr=JpB#%eVf3-d7A1#H z_HG?P7js(Rd&oL1uJ;mc#cNDAW)Ni@Q!wK3*Gp9VUS0RcBN`hYj^|igkHaHg#6gw) znq@a-+Dp#RDkNhZ@^$-=wmd9kq=t>x+Wo~LH5(!WU*6g33B4s|YV2eD z@65dPZoYE0KTEdPXpL{)dw-*(BfHV0EVI#PZQvCT^xe#aK8c-QnwAnGIR{D!0`uCvNsO08E)WP zen-2$=+Q2@I^PQT4cCy-ONFU&hShNVK8jg$J|ztn^v)0f-OwW^-tK#C$GvO=n{xYL zeOb;JBWT*eCs_HI16Xak;_kB6HyYcT!CaE=**Fn?#K#H7u$*vT4#)7!L~*?=?SE%u zk7)F%Uk77Ay;L_mHMi21BPh*WU7-{w0*`xy(Vrn+`=2K#F!|EW4cDo2Lm=kgN<0cq zzh(Rvq`Uf)}wJxJ9b6)X}JX27V*M?E%|npN{KaCPm-YMRR{h|!iEt~OaKBkTSO z)cZ@sX``v7rF}$gsFANMh`et&2QZrsX&t@ioN^2tVA(!b$BxxX*v2B}wUqSjzm2Pb zE;iG}2Hnd^WlfFMWvtM124Tl)a}GZEUJTN>?lSIRHoeKw$JiGlMp?~ysPFAAr(e6N zC;7zwJ|RY9t}oghLH;*yzIz!t{)x&nN75uA3yjbq2taUs5bAn<=27v3{{a608Krf$52%~X^sY&s@O@BB zU~u0K`RKyyTpx`6_WD-5864ETvBu{)!L0*QO{~6m{ZdV(U^VoacPU{woh~hys&NLF z&{9s>B!!JPE_esU;bDF{_Aj7Wf;P!TBU}L%JeSGLnU$#XKh-LW4Xc5ZKt*h#H@4$> zuuS3eg;9xmA49q$K{Yo=9E}xA3nyh8n&zHIta{pcnA-`YKf|}ynq^!zEFUk@QNqT@ z6hMZxqQDE2^ru;%?%;Ii_;!AhFT`I1>KA*;_Cv)tp_krxr5yXJ$}_2Tp51=vdemz% z7+}SnPWOQ{jV^0@Kx=R73H4FczEw}n_zh+=@3?JPqQvRkC4y2e+wK5b;mi@CQ-nt8 z5}9i=3~Lp}sQRG6K{$!72K(aNd$k2vd{zfo2J_K#&J(oe?E4qR-I%Cq=;~``4|Af6 zek}pH4!y{^Je2D?N#(pt55!FsTt>2*;gt0<2%RpRS`N1JEUd76GmG9w)NQf-t3in4 z`KJc0#QN+?3_Pb^OVmdD?|ONQVS6!0M5R);5yue(zQf7kArD6Kq->KRD-%w;SH^HW5;4!`MZO ztUN>cr1`s7Gip4Yrz$p9WngwALoYUtUGDzn35PiK@8#niFW#kdGv3<#I>+AZcfC3& zS{L4;Kt4J3t*#9|OD!9&Z=!tffpDX!oSzq6fo1YkMOPb#1m2PD`tOJ1cscqKmFGt$ zP&;=v)4z!DWAq^n3%6LP^On)+FrZCME*;@5#33=Jr@SF3|j`P`S&4w|xIR0qF z@adY$=V6htlFZ)~l7d?BMfeBD+sR1OZ3WqnY8yv(7kI0>;Y?|#06TxHmK`hr-5p5kVGvG41SD&?pHS$OZzXJH3eSYY#BQuY`gqQc==uNH3qgLkZ z<4ccia}KJSfZ+J7mClB}ta&lGwD^y0z_@mxA0nt%$o#t)q+xvXVpwGrULs^9v=Hs^ z2YrK7&eJh$W}_OH4a8`pma2~vmMC#Hi!qN5_yYR05-(zVETHS=WcXpLsFSLys>C9t zB4=q7aWsn_=7DvY#-z?Z$+TE7)>GwJAf58MTu!F0I+}=K*T89uw_4|XQ+G7$%=a$1 z=!ah&PS7)Sl+YO+H&pQ}je|Fw=MZ~tPs~>#KN-QMX~8h6Xyc{CDf+1~N`{c>q%Peq 
z4|cD!i#TX5dlyf1Sq*qomfOrdseB}hH(Mq+oo4QGiHC!!H}B{Pd?IERvd;%j)$I5^64epVB?-CJH-<)$ z#VF|oxUA8$GVN4$Vvr0vUy@UqB{%U}nCjv7b*05gxv@E*9{oF58( zz9?Ob(>hW68sV(waV8Oe;^bU;PRrD|+TJKnBs)K%Gq1CHq1lMZ`V3H*x&HvPy3WC0 z{k1dE5dQ$#&39yxY-ZzWwfuj|AKM7IE8yu|ouwqii*Q#;y1P))Iw>_Qd@=#X z*A5=PnNqTWJ_kRUSXgcVS&>lgL1qP6$D!+I@rm8t8hO>Gpjn|;ae@TRzr|T95U<({ zI2kT&6;$FweI^3h0d5`2Hx%gsLxoM%$A{L=J~vv{{MJ!e%uRc4<*hSSJ?fh61AL20 zgDv^XradN4!e_2_Tf{1v3wNBxRk5bulBII5N{SrYmniXid?Kjs;jhj3aBu_~Nxw;o&^Cwck-x%@Q~aRzEFH8q-p-rQoI@j1;yJ!qi_Avf1Mu1<93Ba=K=0Xx&9P;6((>?wOG@G*0;F< ze){;MBL3z707?DIFzrf!Pb^;J8^-5ri!ldddk1!N&L{NK-&$@)KV}J zg@MoQpWL$g^P=JIUcbhFa;C>^#{86^Dgxd0QnVq5^zll!yNB1HXU! zm%i-Mk^6vtU-u|Bo243VfR~dRq>a1Y=Y!q9{mPGsLa zs^;}CNuOmhbhA0BfM~=&4zYH{n^^3d&fH4^}H_4q&8+yfw``!a;CDfHzq0 zxUQ(*?U1y0DGCe7(oc%sUFn2zh{fNj90y{1M=kfcc3pAN$z+=!xBwaehtynXnm%CG z@l9u}WD!0`xHXLzaVLOUCt?2pUt`(al6_Dd5203mx#XyE@M<^7A1AriznfH$%}kC~ z$5j)li;aVcxVljbM}>1)Hmb(wLFG`DIE+-Vx>=-bakq<#Z9$o^{6?tLQ`JQI!*}-{ z@gA48?(zrDh!&3s2T-DM3Ad_y_dsv1$HSH!E~^-thbS`a3|(kbBuXsEZ(ll*L0%lghn#?-~Ee57q z!)a!FO~XJ3M(8}NLFNZZS63bbSZUo)JVrw1w?N(i--uml>JpVB*TK@VWX10!xpZuO zuWfp}KIeRI5N2YY7$T{7p^loN%wu9>_eSlA8{57&dfdAe>4{60avmW`o9gRv%IX;d zXe5dv`Klry{5Km96pkd`$DNlQI|ZS`-82;MW!T#bIFkEzU&y2=@hXSWGMpC2D~E{E zE&=ay>c=dj-aXHAE{6_jd#vB(?Mw4J(wnB9&+%-%Jym4&xIJ7QT)qoY0WX%%X*-*w zgXbY@Y~|Bi6_*mL8A_g(p9yoX8|zwMESDZJuy74G*4p!2d0n%dN~V;TO;H&h_}POz zR-WO;$_kI`iY!3T2w3WNxHP@)1*M~(ZoW&dS>&9f_gx-d4KAM^R~)c%^gfmBccorg z%z0`C=1S-?_9ISPPrD1dIf65uknmRf>P5$XNAl6b2Xt^PoxZJ-(rK;)BhF!Ruua76zmdyEJYo5-n?$ofG zk>&4IJozr|4JZ5MIR2HZpLPq=69cK){XALM6scw_Pb;AnPo|OPXh*<%i$S^KFV5Fn zr!wX|H#1-uU3L>)1$`{lw60{Zyf_iox6$NXsL4x%$IFIzok*@{Gp2=C?7R(u! zI?!U3=1M9FTG#-Hd(%vkjWeYlDO_N~ejL8(pD^gdY0@(x1k04)Yb}FC$};J^KI}(SFYqFKDG~fH%;xX%;L3 zq0F7EI*siKD*ph7?#m+q;eIP!VZQfM5j;17*jPJ=ea54)L{?dFX4&$d*D0opwL{?A zu#kATsknu~RLc-0L}r6*4zuJHEaz#ehRp811zB&1eM+uOnww(xIlnWXlj|!|^td8?Yuzw)zcG?{94QB zPl86CP_?$xWOQ_L4kf-dhj9g824t-?d7i1wPDF;FjYTw4Y4V6sSnGtlF~opADZ%2u z;<=v1Wz2AOyEbm0=u!TeReVX!X3DV{ZE@~X1^lwGD0%waUiVLy3(RD`sMk48vl7Fw z>=LUBtd@eR8kyeNpmytp#AE(~=hot=KIde9bJBW#r>Did zQ)`ab+p_dJ9!0A3>K{)F!s6SIG@C z)@IFFpB0-BsmwL(n2R-#PUxdx>c&;&+;#EXv{r_Kc$-Gm+3C4+%OhDIV~3;k z*<#mys#0ycyKLTW#Op0*RZQX8zEWm+Nx7Be{GyQFHw)IAmGtM|js--w(pd+Q<#X=U zHp`FYeWKOjRG9rmEG>pNG6sO*a@}}>x90NZeX+j{hiH}cH3hyE7>KpJ-DZm}EUuz% zD%p6*8(7V3tCAeYUsBc*(-f~@U?BF`Xso#Q)-j4_Rlg0z+~!lXmjd134f|DIk27Jo z_7umy7ee<24z5VZH$G(khZe$cx;#U|16sy3l1p!UmB5>$n!E~P+j`6tS#QHBu>&PW z=r?pnbYHnk?3-+PpE=E6;nlIaC}WMxbS&LF#JcynJ%MXn&s|Gfh0bUq-Huqmxow9Y zAS(vJaBp&7i~H%6yhKU9fcVQ0n|Ty$vWih{5P6TL7IMp(!xzHGxzbb7RX!G(n&9Hl zY&bhouh|!+4q?odJzYfw3tPXtan@XmUz?X6?NT{$9i?U%>0|3AZX)+M@)mO0TRy}j zilP|ZA5$|XBJI}JYU)aPT-;+X_A^S3ip;$yar_2KX2pU~RQ1yCc+Y!u?c$ndu9q0j zR-T?Ya=jfum8HbCsqM9`_$Re#Tc29|B%6zXz+KOuSP>N|PT=t#j-*}MG=v-U1 z{*pQ0YtN3ZqF8V^+5?!(A9@#9xD8#sbI0aae0nKk=*NyTtm`FqVUF&1@j#W$_u7}hZMsbopS{*fV+3CZX+??)Q%haa9c|c!lnArbd8^Ley4ju_$^u=WD!-n zJLdPvQPvY#yW?=S`>d1AHG5=wT5g+xsC(=$?>=w+&WG4udd^rb+;AFvQ#j^4R)U&C zQ7s&m@^y6$+%{DIYNDz8)EVWf#o5((u$=T3#KU5ko z!-=0g8C_$9X(YZtwcyF-HZMs30Fu65Y`LBd^;9TYe(9CJJv7VeE;8HFzsOh1?49;M z)l&~f-zf?(6fF zj5wT?m32&W14Kl4-y4s!=UX$t_Yke!8Ue?&Brw?o^SRxu1dh)XD7;Ei(k=0V$12v# zR@OY0w7I3HOPw_O5DhbqNuIpj2T1?Gvi8T1 zK+Go}sFE>JS5b?)CcM4&k%97Yu^$z@EVYhd$@r!XhEn1ZWqN9OOfl3xOHA$C_}B5I zJgj(ju9hY4@8)@}RK7=1;#+CPcCnjrN~oUbaEuP77Esd5;Ut)=7$L9U7dPt&e&`^5$hVzsWW0&D*?%^3#%)PCA58OBbAn9ihhUKh z8I3!qbf0nnb*<$|(PAZ)^RV3R%HgM#P;mIAs>CR937@_jPb(cK2RZ&RcZ@aDPePob ze17WH19NHKHF5aGdYJz5wt6Ywb7j$10^93yRA7dH*I9rYHG;yz_v3ZcoeHvjZFYIZ 
zD{-tnvD1RtV2n;Bv%`r|xz2b^O-QNA?6^Bx)PlgIAe1@7nuRc%5_L48t^~S3K-cH_)O}rZYGkC1%x?3eGBw4zw$}8e zYPG3_+-x26D9V?zDx5vhP}Ao^tE(Z2}Gm`HeZ z&zbcuVAO_zx(V{{A429BoAy(_NC_HD@s4dCqNf?h>s?z|XMADBp{-*YY}T|503OrL zJ7a#za32*2b|*wL{{Uy$fb7Iy{aX}9JEqes`Td{$prv{tAMpnh9hi&1t741HBX?;#Z2tfTNyM-y>KIHFwH;KF1NNrq zH;;A5zjDlqY%1Zqr*f;}?)xNbplY?^dLHX4rY_&IT6tQ_B}H=-U@SKl3tDF>S$3On zIsmptJ1*oz@VeHSk%C5VI09CXWl)%EnC=8P5wDSXfMED$2Uk%$wvqgIG{xwAis^Z)nLg%3nOVjY0;$Wg%D}ECdTJdn^wB}qtA6A zQ1-zUng!jWhn`-s`hoyLiJo>?jRNqx-cHrwcLX7kARrL24)Y+o-hN07DtG~9MPgfs@XL^@WqY4DP5 zH#+Owp=imRMfh`1+S>uf$%k0R+T>~4Q@H1d0q=syQNvlU8>LzkVOJ184^^9MV&v~V zl(JFgAlY3Q>dm~IR-W3HTF2|NK3~PKoFbnaZ6q_()Kb*dbpv)n!+Vyx{{R5#ZfkE( zh(^#gK7}1uRLNQkZVzf*3N;01*L;++)3;~%Ybo6iQepR87Ik5}yMT2(Ev1zvOskk= zsKTK7)WfpA89`qjBIzkAM2&OU;6N;E8}O_K_5CZO86b0BAE>g2#A;%Vy?}DJ6_gt6 zN5zk|(KXVTcaFA(P2=bweJWalt+Igl-W(4wtF`UJ@fu?k+qWMD&GB5r+7BgFZZlCv z`m8oO-O8}iVwUz2f$A%CiDtvaG5T5yi6rrd5DyP3fT_)~;&jtTO-^QGnpjJT4LWwM zUz8)obH!B8tazuQf}qA)U2`*m*2lAkt8r6M$rM4YqHgO)+>yahYg!y*GvR$?ZIa9m zA%k~#hTl#$no(Atp zhZx)PlFE>cb|W4qH`!uC4(QyDDYpY%t;xF`z00v!Fn@6A>0ccMHC#`5yX7^Edyi$u zQWm_$Yr4DmrM=|vP}KRvVRN{%t&(0AV@dNY8lzop+hrKeA)6$vgEC1~hdrB&!|EBj z#<`#p?-uqpaN~RSjtfn}lW|NwDH~Nn+Q}U2d&8RG(XTE&oR>jj*yFK$)mmm_qj{%} zOB2ZNH_UurQm9kDnxh<+CQBTWL`M4>L33}yA3E+VkwHgQTe#?Be*swTlG`sW`R;yV(yPDJRxvwU)? z^%Z$?mGe5=&Jnif=0~S%$%WKn>A1V~v*wLPXv`QER-SiDMO{o|Eb7?%JPUa=DxH(? z!Hz{fCsRLD;VuD?hZ0!cJb_b{z7E23u1a7-DJ(`CrXVGd}PM?3;NrEB>1U6rMQu|F^2u4_eseIpz|@{m?O z%{HkuM`}G^P2PDSCqEAOI1+~l#4{CPqlTff!yD}KH(k%2uViK%zX*3%JbUIcJ?daf zPiG*z-w)XaP{A7@pv&|Wo9BJqMLSt@+HA0wD`8a9lFBGpM=-&4%^f?BWY0f0Rrl|9 zROJ0zI8tmI8S2>`79CYy*%%xJJv)m??BQUxHp23T)8NeU7x)GVmSZ8)MT*rjH$B{p zdi*XN^y6jGSluiUG=|33`IH6*_TstUFVWOBuYsHKHG#ZF>)vnku2|wwc6s@Alg%BK zNZb|mP|{`B#~vw$R@E{Cey*~V67b$WGq1<7YE78N`V3zcl6uN$#H|jJIOk)GuP(T~ zi6+-^^R0^}%yP@*bELxIb^bNh6YRq*QZhQ4d@i0jV{O{t&2Qyghf0{PIQBTVQSe$X z_@PD7;yw=`2IaTnIVTmA{bM8GZ>Fj-(Fo;pK)Am?3L8c53#<|~fz~(R<+3v9EvMAq zRiyBWheJ^aChR16mg~zhT7kDA+GtQR(}Qp}I$Wy_8#m9DKw%BD2b8NO7TxKZ5W^#6 z2YJCl=8y6XY5WBhmNRUa=55S$CCzjc(t!nrpaNR&oGDz_{z0tl+&5KnLsRTbAFkZh zt;dP_PFjnlk-pMt2mTu$^^z^Uu^;oJG@kZ;la`<9VXo6D#~WeMew>S5N#N2k{3rQT z4OrgJ_#p0g=`~LFyDDG~4Afrs;HsII42q(wdE1&0 z?-*Y8v9jA3HU}OvlBgnaEO?9-JaGo=Ugu1;T}vy-Yl$r_1~b9d(E5@0R5%jFFkcVL zlTt>}tYNKa7O)Q~Jh_r6jvV|hq88R+rG}O9&#vN?2tp6$;EVfQ|su`t9sFJo2phG%EDvOUe-t+Ynp)~&wW?MB`# zlzuAl+3yeSkY7z$RvZ_TKV-uH0EE!h9Std@mGW0+>Zz`7L=y+PgecJ+2W(`40_rP|@ZH$#Fei^Nq-NCC{ggPS*y# z#yo1`yNT2vKx!S?QIhG-6(z}WqP)bI$`z6PBid3t9+5| zT7x%b%3OCmR^rma9@d6#vPRj$%?of0LYwbQ^A$?RFe;jiTC%DQ!vw=dH0m@0%Zn^` zsx=nk?DGujrlRU$rrmYeJYXz1Cl&Nmbod=?azx*OXARrj+%GU#C8Tpiz_9N(k9Csx z)#ah9Y-Bye3jyU^3J#7<@6e=G;XO_SNQiDZjk9|=DQ%6*O^4Lu0-^p7ztvc}=K47= zTSZYm63ANDZNLX9H8M)(d ztyxDT%ZJp5cuX#HqvJP655oIZ_4F=mV@x&7hpa7y^DsNO zb&>m5V{Joab@(2}W}|C-AdEq?vpwUD0&QQk-Db-27-JyRTxGRaZC={rvI zmgdE9`3_!VAco9!ORikgr;6SSOYFx8k{Zls#;DzOKp~DM!^U#qll(o;rNIr-%XsZ2 zOTT6EH(tSV{PS&OVx~gp)5=-8M-b>=f6)uPLn833Ij?o5-C&J}nb4m=3yu0YQ%Ey) zTV`Y3GqD?oTTRhVJ-&6-)pXUvF=gkocvHkB>zrLUUTO-Aq_X5+%=;F8FhtD8) zvwY`=ykGOw{8ugty-nk0xzGDJ{UWS!YVq8mP>edt0_c8(tl!&>Ipe!QQ16%zrE|nq z>xVVb{RdgSwzm`aIArJZm=t^!A3+d=E;B%cApyd>NcXR*0f(QB6hA#N3#;&x@6z{= zrE-SXz2m382=9*#H_sD->g*g^c?aO#Z^4mFV1111b`|pL4u69ERNSwx!kT9KV3~bK z#anu3U$C#1b#CfDs%B0-wT^G584++|N7QOh7B_sO>qS%A!wGQ!5JtK$X}CY-keFs@ zTRs}*xLg>dQF3U7d`*)R#9q<}C5GdN+^lI^)yq}dUv#|{G<3AhYZ@BUU_cyJQZO-v zzyL|q+se5NkeI9NK)>B4*-B1K!>&F6<}BTpl}$J5%na-*hJUvY75HB;_&xr~2pbNy!8aC+Q#Gu2gL5>z&_rPDxM zS_^LH9S@yxWrLFYuergIDXJoNbF*AqY1x-9;TVKXI8_~4ti+@y*DSCsw|U+ljJnTk zn>fR>4pWt#Tu@8Zbzts`ZjrlhCjhyFA?@xgt6XYM82Kk7b@w}g!$1w}c{F+Mt+}VA 
zk;ZVzXyd|~86yi1zmj$QRly8biN;I0!x&O_Sn(^OITYbq+E!lH3@>3}(p z5mx}|4E^+S%dfr8>^M=`G$FN9n>O48H#}#90 zx1S}8t-r?FecHBV4wD#`8I46NFzR6}g^}IQao|n4j?|6>;}syFomun`G2t*}+&2cK zr*YkeIYrMGc&@oVa;p@$s2IIq4!7A4ADmXM+790;;LW*0hO!vhPh8pCBbj@*#jre2 zXiLMHq;@4&P|)K}>7Mq!+7_1*52DMsr5N^X=aiCkbOsTKwaoy!>Sw~i9CTW@E#ouE zLdwGnydBLCYk}q4>sD+(FhyTV*`km>H(yY(9FWNV>nvuU43WmF#^D#yG{7-NGdSUG zE*>)^9D5kW4UghMl(IOxmkX^On>LY)QSC1&!G^j><8Z0)hq6834^Z0++QU$}8%wh; z>Soc}u1Ue9#IvR*Y9#QSdXco10Fk=D#<`%kPA21zV)@~SVsZ=~wqYyk@bLKX=^o*2 z$ryZ7+otQs<1Ll79inXxuQcNHa8HcXQ}xab@OVjWufs+yJuXAe*o`do&B8HbPd!_! z9>&l}FChRE!@K7zrKp5*Q%{2qN%C5--Iko)vb;944XLMx7Hscy&z}3vhz-jEIBL_& znN(^_pN}|F%4v;UOn7vxWPkv|Y+PO51&)`s)rx~=+d^brT|N70c0+XqdFIK3un6=P{d&-gINn zspF>=yEX-}T<4Rq3i)wt9-^8#B5N6Uhu_Az;7YihA#KSTjG~>{*}2~|Q%9U29eZpk zf#cd(sM%vNF#Xl_D^_DWJI^&cQI8tKVr+P4fU=p!Nef-QZG2xS3e!QCGfr2d4r6!U z%z=J-kh$KY67p(UVvXBFjeEmn@r-;6t)h{p%+&ke3|`u;TI^Mn)KvAS6npCFgRKo* zjtvFeL2m=VTTK?n$ryzmXTuZ{bo8%w1nsLk1g!HVSrb#wOn5eQZ}Fbj_-guPNX! zcp6qZ!?Nsmv0?IG2P$mz0fEV3Yt_tuIIApewN?9-cW3P*1eW}KDqqqn%F$|r+hq|B zy6AxGBG#lGbyB|8;c|_ipeR&0=I$-5l zaq1ibT1cBdC)XT4k|#HKm(*AQ%Rn^0c%y(m_6_?2%(*oCD@RG?{8_l}Qo%76yh_@t z7-rub9d-kB!x}a$K2;%s;O8#od>aaL`o~dGLr&+9x-4L6K6@zaU8VCda@Axap_ncp zB>EP+%rD%eF6IV{VrEMUSH0Rbp=#`Apv)h=#9RT`w2d;0JeQfyI-B|x*C^E9j!U}a zDm`PbF|g-*D^6Cir;}b^LZ(ZW_Va8!#3^cOfat(+@qiaq2q~PKru*5a#c0w1aI=EiKQHR*7Zt({*kHd>&yqbx3S?Zl{W><;={?Nv@@%ltm1o7eOTP1qw4Ai@weO@8Rr@k#nD=jxCBK_R2_9L&DYR~1lCHyy^+llhLqPSZEGH9o15jkb zvfdj-D~9QmIzsDqZx<=KdWvP<9Dgz$1smJe2 zdwK3k_Me=UH-cx_@!7l@3VcQy!qd9mv9D*RbgOBOA(-f+-+YsZ&mG7bTnBK`SKpZ@5xPlg>gsHDhN2e6?mgF8Qj2tr z?UGi_wMSPKWMS~k$(h>2buSpO3!CJ}F$zlW3Zt3$c)pBluj%A{3h$M(o-2*ZD`l^m zo>nsbYh!DnK2S!7$Zg!UM6+zqQ5`KjlS=K#qiK7bc?C+P&bg92YZq;1UVT0pLn|2B z86c6)1%dYcf-qdR$6| zqB?n7GwG&ua?II8ony!jbIBaZcD?smeTCSi9%Z4%p=0T)8s`yx!gWf^I)=vbMK&{t z325SFEn`Ecl3;gZ+-|RYgMqN;Yu>>ae9vwh4_I91ZvEdIUrYDe%2kw1Bt8zVhlm4wg_=7?gwI0OQK~6=+!36ltU{kZTA8-?3`kTP)%EyBZZnqOP8; zuJL51se21~d>ihcLfJU3Bp*R2dq8PzkEK9ixNSR&T+e&f{x1czVU|p*fYo7Gd=#vf zIiiW6HPgrApD7w&%oXaEnxa~ElTA$LSb|$4f#t`=Rh&8(QN-CDHQH=9Lb=6MV~!S> zx{~Gx5^r|{s6YEH5b{Pko*OcXJCVdxoqlCf%oOhw?tg*Vh29M$mm7nRYLsZxxrMbpIcyLxj(g<)0A*_&cw$a_+t&sT#Ua;Y%oD(oL?n3iRRMz;>HqhwEt=XjoHg-*^fHrd<#8AT_iNtm9o}TbwVjR z%Wkmli?zU#2BN~@Tq$vUGMtCS01^%sOm=6|Rk+60tFj&|hsj4#O(Wu^sC=8kO~HN^ z&=IKaQ6{-LlTI}hb!Hib8f-O@jYu4poas5!J2Z4##B!b;ma_gGUU5oSR@ByFjgieE zvKDnw?m4z2cI2D#fS@=^3{?v#`nxHY{$QTu!GDaWPRs#=P#di@fh>03<02{7h9b zleiX{8XdavVlFRY6ZpQ)*~^@3&$(TrqRFo#IYE(~5etWHRz+}(;Y9j?0Q z94*>Hylm2o!}G|i7u}?SM_ZNMW{;(7YCB~SK1nR%}CD&6>IR%-8XpwBJgzeWyp=hsFAGEy=L|0P~MEn~#RU`OjKsr9-c^ zD|S#r`cGQ&7Y3GR-P^T7+k0p@Ap8@@<+V*SP46m+fxG)gz<}34Kcr&O8TET4w%Jz? 
z15hJX4B=lT7C%zf+0i^SJ~wI(pD|ZFlHc)P4$|KC6&F8?WAj^>Z>eOg!iu@i!^J%G zvC8m$?pEmbfOOZI_5p})Ij(GOjoiAjFXgusodNhK)TiL!+bK(fNzn%;4Zj=9DCvx# z3w5n?T6OLOrLg=Hc#~001V;7-p1NlXc?b&42Q73>TTIe6*6Sn}<{pzcR8EvV(n{{)?H6^c zM^#I?tm4Vl68*0gar{RmantqaXOOvr?f*n^e*NI^EtE(`XS;C|rl@KY+G}FR9 z+|M+2`c+ApvXcC{h_~R+BW?1v`O|8Qw~IN~@SBdpUVepqb8##9wT%O_b;HzBPi8Dm zm64IycmDucUgR~sehH38)p#t?wcw57_?+vGcGE=mk(k^&wk!vk+#~xc?D%nBb3ozc zliIv%F-CKQ^3qp@JC4O_x*FQ-J@D1C8RLGAj%MAYdBUX2k!YcDF;LS-dmFYxr_Q$P z{5{bS=0fA%@<*vLni}E_=_#G$lHb`*=OkTlYN@jB^e$n6(!|Ga9o7ecPkpuRXw1Z(d#?_j@>STBCHq+w}oM9m{o1n(o~RV4X2 zee~j~GNG3#=;$#Bg)9NJU15CfYYUpzZn!tb8gCMKasc0mRyuEV@eId8Kzx+&@0;#v zdmo=(3P;>(hS(aHxB?EOayRT!6?Ih=4~^zi6WGBR5t}ptehuIWzNXjUwvGwQaU$Fv zrbzn9i(_vR`3-_+S6PPPtaklaC!6jAeB5rLtD5N>za)>EPmLpsimGF6-ii0-tsHh+ za;d>HE@6*WnIIy%(dERA&g(n1EapyF*kE=6({wPsQ4+~$U+wxph6IU;eERM$4;!O z{{W8#A^kAAvk13@Kji%@m3l>vtETQrf5xYyKhj7SRN*?*zvQ1-TkvFaiGXR{eHMHYrTmX4cI%H}!;E z8Ikn6m+We@Uwuo(wkcTQF5|x=Ha#g)99eRl4phTMyI^Tg>Gma&X>4N4IBOgZhryet%�xxDl#6p-X{-=h>?W=bsnm>PcwK_pDZL4`*Ot?Vp-W(-S7dtJDvjp~u5%%_ z?&Gxs)4^2o{v}m{R<{bOE_!5BWv+`8hI|}K3zca6@2>M!{VkAHj*Inc(K?6CIeyC^1RtWAS3O!+8x< z_9rjxRA-j=FFLGbH-C)M{Ds@ozw=T#arT-wEGORk_eyUsjU0B?;d=z0_C87VB>Kxl z!+s0dJLEPt-Rcp;?Ss@;AN0wL44Z=YR`4ABM=rs}#qjWL`{?21E0MZpMZ1$QgKG~C zAJx4;a)v)6r9lZmhK;b3DB)Wh}Cb&DCS8kYi6l8TPe2yNc1f= zioCX_L9M|&mXKv$x^UU8!?jK2+;X0ku4CQ-Jg%rY%P_@(`0_i)aouA^rNr<6cCRw; zUzYHLTVC@vD_Y=1J9XW^!Hju1l}xj3w;raIg|`OkQxC;sZ6%4`X}GntR2p1C?gOle zo5#Ic=^rbiXl)#BHXLOkXoFT}Y&$OI>K5Y+FTraIV4kYl!L6f~{EKvHJ%NXjRb8j! z)q5|@brgAqjyyt+NgXXMT|2j*EJslC2)6^;RnHW|WyL6Dh4e7ogKhWk194&q<_IJa z*h-Is38C7qSBog(u(IDp{WDGVPQ%T+9Fv{{1(dtIm!S4SV4#~`cSKSn*6#YzSpnsEoo zQxP(6RK(d~dyU-E(#;$WoHVtPL!!1*h3#ZGt#r|Yy>j?oAE+Y!Ck+OJzqNFI=B;xW zzs!qs@G+yTE%Bom7AJiN*%3X~?A{J4S{!c_j#%cGEYfZ?Sa>~KF79KBZCBA(1g)YF zw-A~+8vWr=DWTsDCgr~d+VKl1WpQ@j7_Hup*i$gMT?kM~lgeb+4nzbu})%-sZ9{XPE=J zRd&zOI+lYj)R-~x921dF$VCOqFRu3al$|Zuo9q*!))$LV;`BoiY!SuPT$)YdZ+?6C z<;P6d z6zkvIqn>K*57gb!K*aOzU80H7RMusBX17;4^{jooqkTwWn_$cMejFqJ6s+cm%Ybo*n2q0J35mf;y&%y($UpHGmSJ; z;w^-^&0r$Hk6W|Byxn3KAHIOdtM7qQ9(1uexZd|=h|jfVCz{) zi`H)v;^yJ509!&ydzuF>OWNEYXS5J9GH|{9(1C0k-TmW@6506&if_7%UIZG zDk^H4J?bh4%IN_%yNPA3cH2|QVdc%slvn0rQcJUvH4Z7ZDi>4K95Vr`l32(gkDeI% z!J)X_-qPLbxmUD&_lio!;hAle^sadU?TPgw!=DQerEJXA(hDh5Va<~V!z$>hAb*b7 zDC2<1rI^vExF)*#_wLTtE(XErta;@ToTk*;>^D!TZ30*9C7U{ z9;@8Ns%pK>EsU7mBWc=2?&j`-95lMz+YiSutcOL7V6ie;ck3Tl8zH;H7+BF_=X2h; z16$lJ0^O_TY{iID!%Ia?Pu9%HR{^)>So?7=(8P(KC!x z#EySbaNkcT+rST+U#r+PqD*6txuYZ7JSsm`h@@2ZGo16~Z~48;H@?@+~7Q z+>OKqOX;I&(CU2Z;T|9eX7h`;Ko>gL4gqS64-f{XA$|hiGPuQw*@MO&P`M1*dy^i8 z@icD{y3pN1io|hPok2WzE-n_~rxghfGRUHmGOwZt*k`;tcuMX%>f@JKu>F;fAk54kqJoPLG1GZq*1*@M2Ex|SOq_}v^E zq&RZpz^*%^Nm?~F0h!H34K%e-8akISI!bvQ31hj7jS5c?&pAUb=e#Qu#Hizg9-V?Z zd`-|F7yuc%U&7#6Yp4WrRUCg0tjk!vK&hF`1cr!Y0B*bRU8TX(O?+WuVGhSf6G7<0}j+%2?n zxKZLUS2CJLmw~+<-V0pQx(%&yZ3xw9EGHR!urc#6)676Gh`czk+l9u5$`rM2Rv(IB zH5Ayk2y-6#7B!{C!}R#iZ)aOT+I9<1jl)TbW-K%p zQc_7Pj-L<5V9<>>tnZvF7AKnNrHVmkn@xZD*6Ujc;P-*>NgD2*MZjGTMTyg4bTCoV#Q5Naf;J7)z$l%O z?Dnf9;SQ*-iYYOAz#S=buhln`1-^d&03x}`C}gLuf-~|-_Niq^g??aTtP319_@yN_ zKLDMrbGJtUerj{w3Jv$SsI!}E-M`+L;w_=aqLPNDpQ?&sxg4WHAKZB!W46b!OEg4c zvbIWQmiWTnm0q4w3}sD7D(PM`)sh z`)htot_?cPcf6`5iJ|*Tz}tK9AJR2TGw!^qGYlKsP{6tT9Y2+7q&~4^q5do3+FRa+ ziiNZnTOXV8eM?~E;cTymkKmfM=Fnl9r!q|xJGW&P`e)gV9Kx+o z$1O92QwE7!@PkdloTie%ADEyfpl}vTxxjPd+?dk4P)b$r;mu`(Ga$XMqVT@L=?6)E zBFh&%lm7r(ARO)9&PblNvxLY8Z|pcMzqvIL~J ziL%HWAQcCa$Z<)BabE8d3IT9O1?|E_JDSK90iDIqCFY{b3+r@mDi2Ay`K+-gvzji8 z`cNKOVppWBPj;aJi3e3iaKk|Uk*Yoo9n6Fwahe1n2o4(QJMTR+as&P~ zJs!mN7u;b}}b8I`39ZD 
z>AqP_a|48Ud?cUW96c>5dN~^~k?kjS&fXUU9YEp_4?1C+;HjSjZ-T~Um9qzKj$+!Z zE=u?-?y6{`jFZj0`@%}5y^NNT4)lRnJAH=TgQ zN5;g-{{YiS9Q`xT`9lI={0ZGBv0t&}^Iudw>+u)#FXip?c~&lv5y#NA>CaV9xEQV) z(fBknN$QbdCUC?ZySt0SVSV8q2|J1GwVr;I`zB9S^)#m@{{Ra%FIqcq!FXi{T~@sr zgALx%FW2@nzbF2i{{Z~{=D?Ol zib{<*G#{O4-1j`tW-3|Wib-T^op!|An-E6@dw}ERghNG(S2@PTxPguq0p*Ri`ILnQ z1&ulT&A=%b^QnBYxx><07NezVX}7`Hge5v{SNHHWo>i-^s?E4zo|2*%#H8;?&Conk z-nYZ0Z-H1Ie@d@Z;JK@-CLAjX553#I=a4(}vWA-q+I!u)&#mZTy~{*WlW#lK)M9g; zqmof+@LL}6P4CTMUfUghbsjSIIU`Q}H7i}s-VuMBcSYoxOBb8~>1d-D2Zr+E`z87P z4jq{3yj*7fvlAAt+>{N=B) zuUGW{0CBS?qCGXxU+t_Oba;I={@UA))sV^cT=9o9U*6##O7XcmK<^tIt3BOYJ&Q|6 zACKr?ca!1${$?VwCu$vkASGfd+i_;de36u~%2A7NVwFUEh7X}++_6M7ePwg|gn)XM zzQ-Sr=xg0i)qj7Ol}$tIDA`X0Pke>i4bnS%QdtWvRnp=WRTWU#$40fJk~?ms=BFoZqPah@b3!hjlha|l1lJ#`gXmaXk{hk30f&0!|jE0?-vnI_4Uu9{8q+)bl^ z01mEAG#rwNc5)ui#jv`*e`U$*R)z~p=2gs%5r%@Tw+HN}k!5CY!@oNyQ>uC&U+Zfh z{{Ve&^=(bFsJ7E_Klhe?i&Kt=#`%9Ri+`pw`mmADV%tr`&+zg7i&j@9d>s{(HK(51 z_@j!QW3=p0IXr!hH_2128Yv@df#7!YLXO3L)>Gyiv?h3~ec{DqhP%xzFFZT`Wu$XO zTy##Wo>vC3fzIN$erh5*cSz%AfvGE=@a!smr)FxF2+!8_6q~gYTg9E=->V&?<9a!* zA~shthDCmCs+T!q!>OQ+q~Wyh76H8GaPJ=nyIx^F!mwHKyg}HbqAi8DWh6%WGvnK} z-mfaPXH00~)!yP$Fd8aS(^J)p2NA+QCky%#%*2~PhL^T5C3lM$zXRLmRLVtb5~yw! zP$H3~&iku!?pACwJkqs{9t$yjxmjy`$BK28jfyu3CcWRz-c=!&Y3u6cVU3Lf{9mFP z_!~+*=S`{bI*&kFhiN$U_)a%PjN#k#m3V$2%DA2Z4&8Q+7aX^V;mrSXl|4gC-q_FGhV$Jz>93)1W>R?Ck0sc$Cl)|$;~`;tM8YWc zqnDc2!&4(0jZ#F?db)<_NdV~AcJ`_UO~h%ap_VAAAbTQ&RJ*R$R!6wvTwD!+0nE31 zPhh>nWBL9Jm5bD2Sj$CjR>vdodbc$1L<6sBE%!Fw;lx=|c7=q`u?)?K<(if@rv#o8 zCL2*HE{e$$ck*0mDeI{yW~Pq0ufv}NQb|)C zN$Uf=S3Q@wHzni90aDqMBVzS2LGDf=Rw?P+<5MeZoY})z;O`RWPzAL*o(j8FT-;`b zwG5e&O4>tF2UtLSQ+R|H0)7KpH*9ajaZ_}4bl3$IOX#sUU05htPZZ7@#!nc4TnX`= z;@H>@BKigMuQs9$G}-1!#(nR(dCFU6#d6*oL54cIuM(t}ea)*lgs_&zS~p(nlcSxW z1BZ~WYWA0#D`R}Ll46lQl9iJ_mH4CYB(8A*y4dP9u>>1OVcc=`|tcwxQlc`3Rq2Qv?KYUaV+9w2L-Bl_7{F{&(Udbbn8Hr>RVc3F1b zr&kqmY~}Y(lZzZnjpl60Q%&KRo-K!gO8FZRt&@0|+ykEAPQEt*%q*FV=8UydJ|!J( zXV&$ZER}}6?T`x_J@MS>%(N=pR2cmRW1@YqDCw28secA|i zr17}FFqXhF7GcJ5C?<6^EgX@;*0!>)M!utcz_>k+ZS)S^7d!>|sA`-By9%eQiM6m% z;zp)NK2|y1OXeqCe}KUiHKeJMAZ%M_7l)Bz3r6c$E^aQQajbeg zPcC4hxz#kVP|rPc#B62WY7O1LVjL{SG!oz}ETU*D-HYNQ46Ld5J&z)HzA?PUF|I8P zj^u`iGjsw+nJtXeVfcZnly#1L&2(>!x8B~vy7v>P)VZOg+fl#;<|>C;1(Zbk}r; z;#>;90qSvDR!KomNZHu;9}r7-Fgd%r>g`7j2Aoteqo$Iex>v^}?+qPXnn=>yIzUCN zNx8MO8^-t0CYYXOpx>I6)tGe*2190brsbB?L35^{Y0mx0rdgtEt0a>T@a$$^gHJ3J z#jI~Lyy-Vb5YuzV=UtpyJksGau?#Y<>Wj9@u)3Vjw05?dpRbv+^;GeI2K=O1x3>!& z_Y>ICS5i~WhGiNmTn7#Yri%{V2Bo(9CZXERIr&6bb~T}DO^f9j_I-gr6|0QHkK+-# zDrne4yb_M~l6^mXl#Re{d{E*ItgoVI_W*2i00phPqWb+Fo#4kz=4~XB9^#pilCXgr z!19HS1LT_zidN{y5(W8cR4sGj0Nt-1Wo^qCVXezluVYDRbM5&>cla{9jbSg}aV4do zYND!x95@~->r0mAGFz$+X%;u^O>1(jt&!d+-Rw&Z8ZQ;Z1|5vmur!gsaye_N);Xse zZ3bzOK_^rsXny0l6W_!C0Clu>oknnc(K{T%FB2rta<}&_En4%5;*&l|-0*(r~EM zRMeZj%xGY-;eY*=>MUw1MwXbId$d~r04rAt99YC6mvTXCZW}8lYwBMq45pLHLdTPp zKE$46!Ek)>g$QbCUmW|kWZ(ivaU92T(AlRW;u&J0!lFIs9DGTbJW>|;!eFg@Exb)< z%PMr?GFL!isg9m#Nb#8cN2yZ{S;%ygvkaB-j<~}2ZwER}_P4lP?*%(YjI=Szj-tZ2W@~Qo$Wco(qb&bpM!!a0w~#J3j!wIr-uK`$!D#V}%TpaL8w`^)05{|(LE7VG&>Jqw7%tDiuBoGv zyBT>eblanKgz??KKJ)V{9u<{F2}~fMrKhHgv4HE|O|?r&h1Y?ghTc=`T*@@4;=6WE zzi3pgZOt7=pnFtBcF?ID>zYa0JMHaJlVNq>e)F$+_A1^#MPCsH(LTe2NhJzSOP%7V z#F$}*Mwe-4TGmzxn;s`ea80;}4IIY~2g(+ohN`B5vN@_^c!V8YyTM^RSDmMU~Vtids=uE<|}|wZBs7?hKdY!h1L(ZjRd2MPiqfn zv04^I;ZEx%U>%`vOsf9?v0)mE1TlH4#Q#=o9gw96UcB@P1 zs^l8mVS@O+Y&^R5@$$WBzLUrP)@u8x;2$%V_Sj~)mTN!lE%m)N5oa+gQLs?Pj0KwMoap-c=u$#rCs-fya73>sqzsht^D=VV1D#8~e+B zN`;#o4svk(A^Ix)lz?tq!+_z`x74+Ud&HTCX6a>nT<{zPo5xVHY$~7oX*IbK`XNYD 
z=L_+z5}1M#d8aarG2oIzd`$JF^;D}GS3dqEC!ByQBU$~6A{(v!WxoIU>x?f-{~5Eq zoL>m*Ya9f)4=WZ!{H;cQ*)w@8l5krR_KMN5LKw2w{Z`yC`Gj~;;$UqA0p=aAV|U>9 zZ~3@yibX9~DOQ_O-xP{@$$AnvJTXm1>NdeU^|XEO$#`Rp-JR6Al4I|dde$@I3L=Ij zK#b8Qu4BR5g9R>b&sr|&g|9@ebuNpZWD^3;;xu0>%AxO)lb)301z)E|U&=KCQW?Q` zLhpETTJI5+AVVDC-iyEbz7C6sNxl5ZTeTMh)FW2$NHym$^ zR)t zd{2CO`69nqnD1Qo(kH0X6RjqDqgG0Mw|~Dof;}Od4;#t`e-dtfIPwJS3Kry@>nS?- z4;Xre>g*DytKi`keKcohug{7a+!G_By3Q>HRDWS4ItNA!ab*4z>G`$65|}S65uva% zq3F8*(>T`1MAnXwj<5Qj2|%stvwXGCk%XI71gYb+4EHYc*qh4K8h{w^eo=6*6nGBj ztvW1wX~!L0=9geAjvy@@EwvSEpj;3M-6jqPnC*J&9iVD#jA@}e_edGcj%v-poz6eA zKR?qcfnrw7NwIIWd7^(Fj~_Xe6o4Dp<0pgWysQ6eGS7hDcTME7Pr*&t4JLv#hv;=ixwE#vz}VyzmQgzNQO)==O)gPM_qPwZaW1g)%XW zwi)IB3&{O1gqr{4+xFi;y#Lxg$}SE6fDLroy~Mu>{T`OV4DTU*TT!|+1AQaauWt$0 z;gavfV^O)>N|u{j8DHrp_4o7YWa34lflDNs#J6Ub zV3eD@@hV}cQSl2r^aS-S+lcohqr6cDpcq+(Du(X&4f-Jd9M2YDx4Oa zLQb@VAt&rp;doco3ScjZWBskr;O>OXz#f$w! zQWF|nd?GL26KCVWtSorNW!mwIIa_J5{g^Tz3L-s zog+!@LUl<$M|E9MSya{>`0HI)7Tv`rbQct8-cY@guivHB$5G0Le2CcmInx(e|EPO) z>oYDZe6ve5%UqX#b4SXVz%oBE=#t;LvT*g%$Y&{y4DceQnxhmTv_UiS6X3`s_;suh z{|{JcR)kG_>|96CGevo(lU?S{z%8?eVL_DM$m+vRuYu8KkI-3*Oy ztzsMpY8@r)yx5zfZB^LkhBpn62YhFLjak12CQwIa6I`4kKoqG5<0GEvd8T7NbURKNb)KIqc~)j- z65`h&kA2d23<2GwC#ad4xFR@qp`niN_giuV%*#8v%DZ2uw%N)*PGx;D-re{O8*E+u)E{@!y~Bm4lEwzQFFB*9=BL}2WoM8Pckn=czK|Br z!P@?&bQpeoB86c?xyiZ38$1yFo!L$q>%orlYg+zbHuh2h0Ri zGIbMtKTB-SO7B=RgfCY$!*z-x)cYisrbhaOk{rbKyPC*I1UnWvtd}%>EX}Whbl&<&UFwwke?bM z%^pEZ<%Uj{NmDI#$j-rHufQWpa7IxR6P#P0DNP=pF2j+gIt$MzYriYMingy1l~C|; z+CN}(7Divp4Xg7-a2x{d@*GiBAgqx^YD=c-p^EFVCfl^@Z~zO@Mmh8!Fj-?}1c!{h zeRXT&CvI_Xa7|J6&9<26OjEle=F(|2ra+5~Q_ZCghRH-k?-BEZSF~&AE_zs1Ye)3N z%eJhZvDuB4pDxhD6?_T}&vQOU$SOHh*9FxD*mWr9k8KHcTV2|;cGahop0ESDe9zcJ zdYdc5I~NoNn=utL4m|#^i=uKa7Tj8+)e09h^z2*4t4V{7&KiXI5UvD6%t=PWdD*(J z)JFpA(!Z`O*DLpWJaJ>Gqe`bVQFP~ywfgB2j;5MysF#4cuhAHg5{9ZJk=)SLq+=3uC|K7GeZMWP`8a$=a3vlt`b` z-$49ni}w57Xowovx#yPj=kGX|1Slx-u8i)2W@y8z5LFRu@@v~P??*N?fL*5P$GEP~ z7~?&9N>7OhupJE=W2rggY1Qqs)%-*P!J@F2M`uN=+jKr5Ii^sCt`jYk2<|!7~ ziz0{D{v(^5tTVSZx6oyfe-RQ?>_4tf#6xX zShg^9fS)@|SK?}LWm>%iZ<29P0k{NQwoA?dd0hgK2nL)o3}n&k|^*%kG~lm9PaiL4$1r(^%sifCQYy4R8>Cv8fJ! 
zQ-S&M87hPVD1OnIa7n{DK##aga+F9N^B+Zv#R-9BK;z>hnIjx?bVx2?>s#Y1X-n}m zwz$9vxuJY!I5us4HeXVoI4wY4&cIMlr0Z#LjiYTUv|b$O8eCpX3Dzs%p+7c&UamkG zsdf5a=B)pV^YXtfh7udUtBV2VY>WbT?Xrf3E+F96OpjAXrCG!##WZkDi1_sn)y3Z6 zg3!$N`1;T^$y5|BxhVFM zXbo+Jy7Yb$xAme2J--?ffEc@ezami*-1d`Q)5xy|ArSGig92Ym5SKW7r~QNuer?Mt z{c+gM=ni2=VbQdEbhSr!$_#la2vADbQZpgUxhbaxC&FM7H+jr z%XS*pZR-7_9Ng{Upxl`tywL+RE0@6b!Uw&>J46#xU!cK`GEt^>QE2BFu^g^A^CNvCv{#B&*dN zIcKV=FU;Jgm1+IhJveOY08YZW#TOXzEt+962MRNIc3 z(Sf`CWcIY&3Bp3R%Rlty;q;y8IQ|ge$G1Er{i_w@{#PsZUpu$|AAmRillBdo#k%K% zdP?uJTW?Ogs6pI261an1e|A|Pc~Qdd`?S{cs~WhLHATlZ_O9`-UMvGXWRJEtK1wfL zBd`r@pI=qZXm5zk41v2u;pR$1-j|L$<+;HBksLS;xMU2_^e7_8CidLfNt>WoIly zvS;72lcMrHpMDR{d2k+`^Z#G}9**mpYaY0-*Xwn^@B4MX#K9|(&Lkra`bTvY)8fPE z&oORp@`E1cnCBcpGxfp)ocvj0MuUnJ;7_ttYZ4!d52j>e3~-*EYkZri45H>814i>% z;w|SA*12YZoo6sy?XzsY;`|WTjYmYGK1-39yaNxNe})5DTETfLru><|9S;)vvSyt=gA&E z5dwPq>%YA{@t>4U;78%VY}lu)TzG!k{RhgsFRs)Znl zIb8S+{8i)0Q(o@cWb-nbp(soWMNa*q&>{&Mss<8F?wDm$Kb;U0qMG$-P(CI^8x+p{ z2T}#$>zYxsvPIlS{6rTVvWx#*k@?MH$MqdLups|jhw!;0I{;Aa_g!Mk&c%mLIE}v6 ztP-XX*d9VU&8C>QhKN|LSli*LN{BntC(NsTCs!zDTi z(IGgSgVD!c>MWx;WJ4;s@-wI)z_;ico>p^a5N99&VT4PkW-Q?b#ZCEw))8E6Okdib zkgS(*Zm@{i;+m~&ZEP0Gn-ZQ9qS|y)a#(x?5>268px$jYAo!U!wj_CkW1zSJWv>_@ z=xK4NS2SLJp@v)8ZW_gs8Bk1S@Ohwa@2rq}5EvrYF!AOOP2`k$LsQCm6I=T*e~26D zknrsxB=5YQqo=9veUk~GReyE>wCdjf|K;v~?V;p~Yq?AiCbKYfF_q_Yljns&@8Y6s z!4xe_j;`}l^BC?%SFP_@3!mB>{*Lcezu)Zqv@&IwQcF!1T}ZGLC*QHjE~{ll&U028~Dn7ppnN#hp_W^v#jaoe+I2S7aK{muSj#Eyt64U4)MIR zhGV+PMs4@hJ_-R_$&@eR!h3>c_~WCr3VW-Y!GhdI;=0P4>^wSP!9Zp#)VrV_DF=$# zrchnm^gA>5JK<&aEvt(fXoQie4LFfqe7ucg`tp`1T=)Q{p}GrXk7hT4dNj{)y>z;f zcQcs1F#FX>5!dcGzP_2))(4rb6*zdtri_Nx7YGt9?YLdvP(g*-etkz^l<05mTK*;n zn?AN=#XNstG};nrs~Q>UEYY7HSgM`uoCzNu5^ju7qG)M+>mL%vskjn3|+evsmb ztQVR&$z#Q~vbm1+zvY5>4l)FdaL7!wt@w?d5(Ov zbwus;Y2twMk7Dwbe97t@RL$>qKS>W+^7ensZkC*toY#CY-#8!Xd+O#!T@%O@CzZKZ z$n)iI!7NMsmjzQ~MxDJ^E;e-WGk{T#s=?vbv4<}yWMP>Rem_+Z@!2d}15Rm@i-AexSFkIM&OC zIAc9w>5zzdsc(BDmx}Tor@5#s|Ko9i4BSBS$^7%BlQTbF#T_%~f1qqTolZg?i_h;( z8Q&tt=0YN_lTe6UOASO$=Uw)xM=C_JzCJ$U&oQ8tWyMDp39k_P!4Q6)a?3NS@b3Tu zzj;sJ4Jr|Dc7;XOn_L)1eeIGBEs{w$0>GLBZ}|i7%!!yr3sG5?@F(rkYUOm|U(#G; zXm7IB$Go#@%yuyf)=$r#h|L6JyB3z6C!s>CZiZI#K#q;~1(dvdwa)E?>l)3h1wLbI znkJ%(6FRkP+6|F`=2_qlyMi=#+HuEs$>aF`W zu?BLn#yl$`T=qb>!2ox!xrj3#l@n3mpU39*$6R#YGKx)rvmA^$*w&%KqoYi7b-=0} z7&b6Qmg<5H*jBdUSqXPo8$=5o)akgVWqZYrgf zf6M9M_oA7l;)B!VZG;>q4gGlH3(5@Z!t(N2O)O(UwLah5^Od37ht!Emk-CD_?s`i# z0X!g_&sAD$QK0uVQ(f@;9N4$g_oWaP<#*zD|mCq&@_C8DF z+hhZ=-}COIY?e?>oqWmi;yZ?s{dg}206FxHi76h(KME7*Jw{C~GCUKxvboKr*7;k6 zQ74lkXhgh3>3*b%)d@m~c4ET3ZUgW58AFo2L36LFy^@8~(niZ_Zw zh|Z6{)~AY*{Tgi^!Fz>Pa|`d-Jjf#tHm&HPBA_Z#AWh&zk$GLHh?sH&B;rs6!mTjz zB&=ih*CiSqtd8TI$dF$w4L^$U&_4zji`ELtStHukHU83e8@Vqn^%AKQ%|UlwM8$@u z3*_BNk4%zEnHBU*AsfH3&2tpDZv}eKI8k8?dFM;g#DMmI@YF9orc>1|5v(`N^{wd0 z?(|C)jRAx8duX~L>i@J=Lh`X?i8jU(=EJf?e zxRxx2xPDGs*NpdGQk^I0Zr%e!ipku@ktx%6-F6>PI;y$x?Vx$`o&F#{gJSBok&Rm^ z?qks!)7DgCbenuLj6@UzR%Vh(%%Lp~UegyBwroCCm@t1e4)pc}#uc*I@s^lnDZ~ZMQCWv7K-&V;$XSp9HEFc{ z*^XX9oM$M}X0=Of_&OMqgH-jzI~f*%YK!w7w$KGB$ETlp47;>-KlUp>6#(d6*FF2pPopg0wW^Qx4->?NXZ30wrI(Z4 zKZUShtUAO#VUKw?qXxB1CZs-;TqZnN)r2H%u}pe{C1SU7?hYVoAP0gTr`Svl58E6~ zS3Amg0uNl6B{ou>z=$@bflF;dOtjl(Jee5O9tmb}RdJsk*=p8B8)mxzj%(n%&%P;Zp| z4*HSz1klzGC*zMlud5LUdNP3B6IyLI@2GJBtK`5@JcaA38RgUOo8uMw+2Bp-wFJpd z&Umi-%@5Ttprmzfr4am|6WlftqF;s0BMqk$F;SQtXIP|RwgO9OO90G+6AGUVRs)Y? 
zI^+@_!UoYdzC?q^gN?RFiVrqPvX0}TU|;B*(Kc!lp<9bSrq~%3nx|zedY~{uS1&xY z5@SRXqJCf$Zi4qOdU(Wp#j&{;N)P_Hsan*OCq=VF*Mn$Vz|nokHod>}R(hhB$_{cK z?I=Mzo>h!@?gI6@su~_Shuqd*TgyPZ01ru_!uIf*0GZqUR?Y}#Si=enT$ZOuAw3f% zuuyufUsQ~snDA#TlcBEJ=_r3Rxd;HE-v59wQtjYeV7cU7Q~T%V1->s_BeZ@$88JQC zu${59usivZur*x(93p%5Pr3Rv+x(xF);mD@YyN#2m-)q!hxlP~OAj;qr}Fmb(9)`l zQ++>rLt|BR0AM=x0AyNp=xN~`(O>X#vbGu{M~CdonFOjSBFoERky1z!H%qUMJ$sS@ z?lcW?emzG$qZ6B>hQ$SN=+?{F=*DYPgGX+xn|)0_&ZErl=+W4mShO{`s<4JkB}Aio zspg83-IO-9@Z}n3c`P1C*&jt*Mp$VYs@bG~8uLD&?ITyOFLwzqi3RXKng*JA%Cs7> zJ%M#X_ggucg4qjYFAX32Lt7F3tgfmN_2@&1q*puD=N{d>%YH1yZBc^I>+5L8`~D^U zN$y9L91_bu&zg4W$-DF)cY}W!Q2#pC3iUjSe?4=W2#xbzN^$(sWD}~>*J?o55Xrh2 zQM|pkmWC;ajvd*$86arrbRj2&EVVu=AG?vq-R=Kv6K&piCt+X>ebs<5vrpFH1a^?p z;t!zIoz|Ci?!8H?1NEdENoc+k@9&VxWdNXb4q~g({6pY@s9PVXRzJ|NjMvpJPlks2 zfsc9JY($3AH|hNvuxkD>u&((U6!fK_T^&+nz56Y!W(V20fzwgTBn`JIB+m3^>AH&= zTr4&`I?`WX3@SZ6&AY0{0ypl%HQV3(9fNR$tWl`8=eR)Lc#4 z>9qV~;iOCl96tM(51Yj@d`RgWNUt>=&fRq7@9l(q=67NiqV~J_y^TWRFqIjuBSqA~f04 z@a%*?w<;ZBy%JM@?Jr^%aHvK8!&DD#B@lktQ=?0GC8i%N_|1E2RqV)a{;qQ+uS}9O zJ}0U(yRV}43Z)}Ds&=S(izSN5$>k8Iv93GCzdW7d(MDFEwRGf=fUQ)x+2fJPX&ejcxAtAu{g*&PM?rUlP%(p1Bi+7HhG(O!A{)8^mO!-?ZyFjm>dKVZ0 zC0eE$)Z$-sEy?L#i3vre$Soga-9FIPV?M{+F9-vf$yP+|nkj)|pG&12pud(oiYI}F zyquqN%hei={uLL^44g3$qx{$G5B;E5gw<7itJ!tM7Pv zi9=~?yBl^MNE-V_hdT3~J{fQ3eAW1F@O9sMl*(D-7sVZR3wip3ugXQE+>9FU9z6sW zU{}to?f^MWupfW*A;iDZhjdDF`D`>E-M5i2w$y5y2#wp*wm> z8ljY+UQ2_T+(r7bQsAC%eSkX893mK(@G^&@L~OMuC9@IQyb+kEXdAcizLEX40nNzX zi-4gDpN3s9!YYnjmE%3Kd)=?G-UPzOM+pWR#Uqt_si4MwP#N7`LhIM8kxR+HjgjJ; zXnSc$va>kTo(V$^pwvBu2V7`AW$~MhGoWdmOu@eyR(~D8iDdEWS$`l2(+}JyT-n*J zhV|r>GU;E67w|{pcs&qybE0h4IW{o5LC12iDU{aN>d!M7M>K~$hryDvOdGy&OxjW* zPA%cO^s4uovV6n@|7s)xpyi81Fe(GrYm6qpb^rCYGbU0NEW{|{8u_|(Jl ztZ=01rextqiwNIm^L6SFTwZiOaN6ZytxALI3in0jZMmswfHmE6>ZnZ8%K+@X- z_nRQ~);g3Qy^)O-M65zG7S?-_%MDmLG{ug1Ad_(&J?ezGFj5z6g#tbCr;J^|O5<0u z_^u2BwhJ+dA-`FkObS5o39`jR_Zl)T>NR2hQ( zY_165Pvw?bmk15@H+e2SzgybZh+Z1^0QlX+O18SGj$)jgO(V*d+&At3k$9-yBD3>|N4+G?sOg-19^wy5z3P&h`5&Th! 
z5q<+Du!eIL95nVjfcxbr8vD&cKNkpyIIN9(pWhFmFP7(%o)Pfa>4d3Bu}isff3+Qh z{s^Hjk-tIj@N2iw1n?J$K1ub*dHp7~$-5D=<2V5K0k z@<+0sMRY1|^|)wxcZDEjMe9FItt}rXn^4nNOM^CZ>jG_wJgQfS-Pc=|hL2mMnep?j z0HbF|r-%MyMOMmE(l7F&Ox!HrsR1UIgKzwFv@TMrfeJhY#oU-REW&7oy_%AJ)zUpi?t@ zv!eATNiJ!cm26bXsklW@%;{W0!|Ng*ZK`Wt-wiCBt@UWngqgmy4R|B>DEWAL?!88o zQYpn5FAFIkD1uiCg8rO{oWqQfK(85kb#Kl3!nPI32RYyJxTSE-ejIuQo<7`BTxlq# z$D?w#&X;#qh&>w!Ia~KnHpjq5_W5y(Y@ApDWm>jTh4!tY2d0P?bQy+|Dm=hbr#9Uh zAx5sH<{w+vsmr_KxOiN24V#3GLIn3x6Y>XHGe$9F9N<`+blv)&79YR@M-#{T>S!Zc`zJYd z=QI1UNc4&#K;d(ZN*PwP6=wx<{0pI2wPGFg=-$HdT#n30b7a1;YxUXFvp!&NP0V6i z?iVy8xc!xnjQXf+ydiYDIbqCj|6X6?$iIA%S5%~1P`tFeR-xx>%N%NU|0$FnxBse% zP;KLSK|uWbb0?U2%~e>pPkPOz1*PW?QU5c6HRT4apMs*Kd z_vfDt-eBVLMkd#ICEMjmc#Oh!MXqDP_fK_jyUpii?& zp|6Bj!oPEu=UxB9EWS9M>Z!3_@4!%~7*Jf3EjuNbj$X*p^KKhqDCCFjy_L~Lt~6xJ z;KNE|k(~i*dI#om6J{)~SNIlzVD-n|kCMBEQ zb@teCp_xl+93T*7tNScvcCKtmP4(H0oG2e}X2GiWchQgc5OR*Xb;a8q1&l|!7C+4 zKnkEvLhe0-t0a;{Mz$8!$nQ$(Y~5GQPXw9EFKVn?!E29c{Zhhb z%aW7oYy-0_A&?OI@<1Va7iVA}J)rdbZI#KiL@m5E{*3*;w+Y+i5XVi81_mW(#q3e09F7U(26AC7JP>+o2|6~Rd0nR7qo1A;R0D ztPmP%Eq<#3&eKHg8jyhXa_IHrY`PCgecYRe|32GnS*-!otir7wi01uP{7G$c7Ntjg@7 z1jf?J1|!kqWGfBO^xbkdzG}FmKDP^@vHWEx5zmKt>Jy$_wpMo0e=gvYm4kfF_eJ?U z`fp!5u8CUsTzli@eE|?2S36+y&Jtaqyv1@;sW-Oj?zE;J01R!a-<8c;ZW6WP%Vsst zyEqIEZ?cpoh!ng3DDlW<7213*NVcVCU*4%B%0I+BdQ2 zrJ1WAgVst_$W5MBBY?ZNHFAR8!rKzN+qG|XDhT`;>F%URu%S^~mi|{NJjKEje}ej8 zcrAPBdHulq`;;cxa|LV9x+mMTn3jq4ERy1+cE-*YQGe%k6k(vDT8zZ)$`n}0^>%tL zYW9iYbf%^^?Vz8s31zoacBhM){pC@Gmp^AsD1V|0GI#ZD6z}L=_YAnaPy>mT)(I~O z#C}W#X43GM5!3|xj;bdmd54YaU@gp4Og>N1!;Gc8w|Nz?Lv&^viTjpmaWjiaH~!`H zun!e;Jz?5kvnbhF@#J`!QT`(Uw@1Zg>`8V7{Tf{kw2eOknVF%-wf$~q2Hqvy%B*l zZh)7nwg2$b-JlR5o?`=!czTKXWVRh0$|GJ@N1eC(z@3!ndoMabYf@Fu`K2nIoV$_(Y4hdLeC8Uxx_cmTp1GwP)`NHWyJ5rouQqJx zpS`Y|wsHcl_`Mfasq)-={^4iTyD=|oU~5Y_w90c_uea^iOzvR14Jxy;4c>|Q_#a62 zfgRVa5)T3>vonNt5HkTrD8!Z+@~A!G_$(ijPR1ohY-D%ts6D=nURrt!sel_8`^s+` z7|?yKr>D=~xL8#!5a1If1j=!S$PbWL2t-`h5EcL2aBfoIvYRln>#t>^r!8=K8t<`< z@L$eH?kbAaz|Qz{kS14Uw~WN#YQO`P18sW(1B+h^in&HHEHQC&8s`dc8zyJ9ZaNDx zb3S@Fbth$Z1Lwwvnn$&ehvhxAWh*&YJeJ-H*sWwrw-4d=lSQlL!{;X3_^(7k#UjbL zxf0$rS1|K=_2&vq&sQ=r#I74lm?+I2sQt_JC|Qf816ShsF0 z`xbj-Yw@osJm)Yt6SJW60_5Sm%Hke3h!d4X$241P&MW5dy4-->5&SkAojEdx5r z@?$w`hD3%DjtZoz%-UNIgzZs}3E7sY9lTi>zZ!Ch0i$wk65JCVBsWren%(6HChp1u zD}~wUojZE=?tsPVFq_Whh$PMjM6yc<)Ql#G+L#`h|kNn#2fE&VQvhf!)R6 z#bdR8zCgx{S9*vTbvMJiTq5NB@5i)!jK*9J8t#9r=!44qR4>{w)909Z_uXY%qE=6T zmc%Se#@w)_iO`{>0={&ZiWEPwIYtLQ7F6}#l`)rynq}$sG`IHsWY-9ZkNcr=mZ$Jm7+3`MoRm_9Qa_AP5ILDNw2U+=Lr zM(RW1omqz{sQ+wv_+`L@-#mS&(`y7At|S@5#1xaU(tA5r>Av(87Fb+oQm2=hEFd)y z0<@^3k*^{3 z=8Za(y8B8mR;q-L2QNlMe_ek!n8g9J(Ly8R5pqzL4m#{Jj=wEjr~U| z)#iaaWtjC#sac8h?U5NpVmYz=qv3g=5Q{qR%?IjuK;^%fF z=Q8e#-dlpOOD$o3^i{Y}mZ~7*#{S2g_{Q7~!)G%VX@LKe=n9+P<(mEP@@pPC@q40dHStai@`eoK${SFXq10l+`8Zzgo==juzmun%fn8{q5nX0c1PYZ*<}dpKW>%y0B&oI-4w41mH?+*pU;TF;~?t3EoEU)p&7?GlujLvCW3?q`LrZ5UfT&ubM;tDyzraN1*qDPo1UcNMH%N^Zykx>fTIPK9OtQ8)i@S5s1OEr z4-oxSX*Vl}VF&|%cuKt7Dwf0~UGpR8*9e{5T$G?n7X33br7wTI64EIfM#GB6$Y^j~ z_isGyE;gq>XpAH7*_W_M4n6;3Bj|p%-W?P_cTTF|)wv#{B>&H%he1ZnSF~@yiu+oA z3ap?C<@1D2_a0=NYmx7}Zqv39xcdwK#PSS@L`jYO5+Jtf7go(oatJ=2GF|#=^Xw=! z)7|XQg^G_J^6w@LogK;odp68qs(Co%%~cGaiC;Nin$pigKNBqeE4Jq%!iC! 
zcx2V~f_zyNv?zoN9hD#RkTY~^L^t(=7Dh>Kt@pa!PSwSTYM*OdcJ4 z^wr!maRJ_KdXC~>zLe3O5EY7~`Mmq4uS5;|X2`~0s|qW^W&@|qIleg!DVnC1beG0O z{Pue|(fqDY4*n{|{6J!PKMP1q%vk7jEQAWEqYo9~G%)8zSsuQJ=fU+6_a0S!EG=s1 zKJ(>ybu0Zzrmgdy^;Mr=5A`~>fN$wlPYcNG*uUec*{=Ez^s87p^vaE?Eq84dyG+;a zos#mH6;sk#Ki!?Df0EwI-HiW}V7M^# z56=^;4qY$?A>VEdsl*q@%~y_3qvDqeb^^kHF_I&)@vMkOsANx+>zjxj;d;-s(jNOK zJC*0UH&oskn@iuzdKiR?82DWnd)|jIB?5OmG`itYTfA0*ndzU5M3@7fGguc= zMR-l~U?2l`NHbivNh`UlJhp7Vt>;B%*fTV_yyvMpwA&m4qm2VsQAacVCj5!2-H$c% z=s1u7!W7cK8(b_5`3XCe|IN~OYgVJA`(yQmnYfMBKPD48!rcdXRnP}Wm3tua-5^5o zA?F6MIVX=LP$vmH30xGx00Sv;CkKJmPwqSOfpj= zV+h#8wr`K$w#FUjU7^O16@HgBL}FVDAD^6>(eDm3((e>wQBn4jaVnVYG=ZeRHV|iQ)yijEdoqq+Qs%jOS;+eZ$YB**IY@~cC zyk1m;B<9E#*WLylo)5F9RxjgjxM(qBy(Nwov-7VtxhRE|cL3|yOj#uGe0=s}@Gc8sRj>OzhJx-6VU~a_ z-FfJ%lx#ooTc~_hB@ze?S|Ccu#%p5?d$wGzf2bDx`X_1UJC4{>D>`SqFS%pQ z)!d^w{4o9gD;b88n?2Qf>jqjo7KV&d<(bFKId@0bW%PmrZZ&ExnzZ$-#d&{Z@!~{# z7Hp*M7W1*5hI2TU*uO!keeye3XbfaWEXv*!gS;5CIX0B0({Y$#HEalfK#kMCRFqbF zDMILSQRZxFYlaTwoH8pny{A z|9IKXd*egd`2qV1Rr4i=>=$Ikx$(D+Kf2j-R)I{9a@F56AyMQ@3}|+BjW~WDlFVc) zmPKvMchoq1jwK{&`VvDMsqjD0y~xw*ZL4=-sl`iKyg?Z(5xkf8wIO@Jog;Ia8(cRW pjsz~@IwcBrwO$3;1No_!A|Nk;ctCOX(*IY+{Qq`)G339g{|oWWH}?Pl literal 0 HcmV?d00001 From 29d11f928ad613ebaaba29607311639f6f0dbc2d Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 19:36:34 -0800 Subject: [PATCH 31/40] no developers md -> contributing md --- CONTRIBUTING.md | 20 ++++++++++++++++++++ Developers.md | 21 --------------------- pyproject.toml | 4 ++-- 3 files changed, 22 insertions(+), 23 deletions(-) delete mode 100644 Developers.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 04f0f593..08487720 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -43,11 +43,28 @@ git push -u origin main ``` ## 🎨 Code quality +- Follow the following guide on code quality a python guide or your PR will most likely be overlooked: [CLICK HERE](https://google.github.io/styleguide/pyguide.html) ### Pre-commit tool This project utilizes the [pre-commit](https://pre-commit.com/) tool to maintain code quality and consistency. Before submitting a pull request or making any commits, it is important to run the pre-commit tool to ensure that your changes meet the project's guidelines. + +- Install pre-commit (https://pre-commit.com/) + +```bash +pip install pre-commit +``` + +- Check that it's installed + +```bash +pre-commit --version +``` + +Now when you make a git commit, the black code formatter and ruff linter will run. + + Furthermore, we have integrated a pre-commit GitHub Action into our workflow. This means that with every pull request opened, the pre-commit checks will be automatically enforced, streamlining the code review process and ensuring that all contributions adhere to our quality standards. To run the pre-commit tool, follow these steps: @@ -60,6 +77,9 @@ To run the pre-commit tool, follow these steps: 4. You can also install pre-commit as a git hook by execute `pre-commit install`. Every time you made `git commit` pre-commit run automatically for you. + + + ### Docstrings All new functions and classes in `swarms` should include docstrings. This is a prerequisite for any new functions and classes to be added to the library. 
diff --git a/Developers.md b/Developers.md deleted file mode 100644 index ca7fda93..00000000 --- a/Developers.md +++ /dev/null @@ -1,21 +0,0 @@ -Developers - -Install pre-commit (https://pre-commit.com/) - -```bash -pip install pre-commit -``` - -Check that it's installed - -```bash -pre-commit --versioni -``` - -This repository already has a pre-commit configuration. To install the hooks, run: - -```bash -pre-commit install -``` - -Now when you make a git commit, the black code formatter and ruff linter will run. diff --git a/pyproject.toml b/pyproject.toml index 6377e1db..1ecd02ad 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -77,11 +77,11 @@ mypy-protobuf = "^3.0.0" [tool.autopep8] -max_line_length = 120 +max_line_length = 80 ignore = "E501,W6" # or ["E501", "W6"] in-place = true recursive = true aggressive = 3 [tool.ruff] -line-length = 120 \ No newline at end of file +line-length = 80 \ No newline at end of file From 42d987133bf51aeaf2ef736b407dd7ac42b841ef Mon Sep 17 00:00:00 2001 From: Eternal Reclaimer <98760976+kyegomez@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:11:51 -0800 Subject: [PATCH 32/40] Create makefile.yml --- .github/workflows/makefile.yml | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/workflows/makefile.yml diff --git a/.github/workflows/makefile.yml b/.github/workflows/makefile.yml new file mode 100644 index 00000000..ab01451f --- /dev/null +++ b/.github/workflows/makefile.yml @@ -0,0 +1,27 @@ +name: Makefile CI + +on: + push: + branches: [ "master" ] + pull_request: + branches: [ "master" ] + +jobs: + build: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - name: configure + run: ./configure + + - name: Install dependencies + run: make + + - name: Run check + run: make check + + - name: Run distcheck + run: make distcheck From ca446037fa76017d85e1007b20a8ea8b92089edd Mon Sep 17 00:00:00 2001 From: Eternal Reclaimer <98760976+kyegomez@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:13:14 -0800 Subject: [PATCH 33/40] Create generator-generic-ossf-slsa3-publish.yml --- .../generator-generic-ossf-slsa3-publish.yml | 66 +++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 .github/workflows/generator-generic-ossf-slsa3-publish.yml diff --git a/.github/workflows/generator-generic-ossf-slsa3-publish.yml b/.github/workflows/generator-generic-ossf-slsa3-publish.yml new file mode 100644 index 00000000..a36e782c --- /dev/null +++ b/.github/workflows/generator-generic-ossf-slsa3-publish.yml @@ -0,0 +1,66 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# This workflow lets you generate SLSA provenance file for your project. +# The generation satisfies level 3 for the provenance requirements - see https://slsa.dev/spec/v0.1/requirements +# The project is an initiative of the OpenSSF (openssf.org) and is developed at +# https://github.com/slsa-framework/slsa-github-generator. +# The provenance file can be verified using https://github.com/slsa-framework/slsa-verifier. +# For more information about SLSA and how it improves the supply-chain, visit slsa.dev. 
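+#
+# Illustrative usage note: a release artifact produced by this workflow can
+# be checked with slsa-verifier. The attestation path and repository names
+# below are placeholders, not values verified against this repository:
+#
+#   slsa-verifier verify-artifact artifact1 \
+#     --provenance-path <attestation>.intoto.jsonl \
+#     --source-uri github.com/<owner>/<repo>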
+ +name: SLSA generic generator +on: + workflow_dispatch: + release: + types: [created] + +jobs: + build: + runs-on: ubuntu-latest + outputs: + digests: ${{ steps.hash.outputs.digests }} + + steps: + - uses: actions/checkout@v3 + + # ======================================================== + # + # Step 1: Build your artifacts. + # + # ======================================================== + - name: Build artifacts + run: | + # These are some amazing artifacts. + echo "artifact1" > artifact1 + echo "artifact2" > artifact2 + + # ======================================================== + # + # Step 2: Add a step to generate the provenance subjects + # as shown below. Update the sha256 sum arguments + # to include all binaries that you generate + # provenance for. + # + # ======================================================== + - name: Generate subject for provenance + id: hash + run: | + set -euo pipefail + + # List the artifacts the provenance will refer to. + files=$(ls artifact*) + # Generate the subjects (base64 encoded). + echo "hashes=$(sha256sum $files | base64 -w0)" >> "${GITHUB_OUTPUT}" + + provenance: + needs: [build] + permissions: + actions: read # To read the workflow path. + id-token: write # To sign the provenance. + contents: write # To add assets to a release. + uses: slsa-framework/slsa-github-generator/.github/workflows/generator_generic_slsa3.yml@v1.4.0 + with: + base64-subjects: "${{ needs.build.outputs.digests }}" + upload-assets: true # Optional: Upload to a new release From ce89932702aa01620de63ad7eb3b17a6e1e693b4 Mon Sep 17 00:00:00 2001 From: Eternal Reclaimer <98760976+kyegomez@users.noreply.github.com> Date: Thu, 23 Nov 2023 20:15:06 -0800 Subject: [PATCH 34/40] Create pyre.yml --- .github/workflows/pyre.yml | 46 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100644 .github/workflows/pyre.yml diff --git a/.github/workflows/pyre.yml b/.github/workflows/pyre.yml new file mode 100644 index 00000000..5ff88856 --- /dev/null +++ b/.github/workflows/pyre.yml @@ -0,0 +1,46 @@ +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# This workflow integrates Pyre with GitHub's +# Code Scanning feature. +# +# Pyre is a performant type checker for Python compliant with +# PEP 484. Pyre can analyze codebases with millions of lines +# of code incrementally – providing instantaneous feedback +# to developers as they write code. 
+#
+# See https://pyre-check.org
+
+name: Pyre
+
+on:
+  workflow_dispatch:
+  push:
+    branches: [ "master" ]
+  pull_request:
+    branches: [ "master" ]
+
+permissions:
+  contents: read
+
+jobs:
+  pyre:
+    permissions:
+      actions: read
+      contents: read
+      security-events: write
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: true
+
+      - name: Run Pyre
+        uses: facebook/pyre-action@60697a7858f7cc8470d8cc494a3cf2ad6b06560d
+        with:
+          # To customize these inputs:
+          # See https://github.com/facebook/pyre-action#inputs
+          repo-directory: './'
+          requirements-path: 'requirements.txt'

From 32c476e1d272f5d151103b14c4a007f290a84a53 Mon Sep 17 00:00:00 2001
From: Kye
Date: Thu, 23 Nov 2023 20:16:24 -0800
Subject: [PATCH 35/40] contributing.md

---
 CONTRIBUTING.md | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 08487720..8230322d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -20,6 +20,12 @@ Swarms is designed to provide modular building blocks to build scalable swarms o
 
 Before you contribute a new feature, consider submitting an Issue to discuss the feature so the community can weigh in and assist.
 
+### Requirements:
+- A new class and/or function module with documentation in docstrings and error handling
+- Tests using pytest in the tests folder, in the matching module folder
+- Documentation in the docs/swarms/module_name folder, then added to mkdocs.yml
+
+
 ## How to Contribute Changes
 
 First, fork this repository to your own GitHub account. Click "fork" in the top corner of the `swarms` repository to get started:
@@ -45,6 +51,8 @@ git push -u origin main
 
 ## 🎨 Code quality
 - Follow this Python code-quality guide, or your PR will most likely be overlooked: [CLICK HERE](https://google.github.io/styleguide/pyguide.html)
+
+
 ### Pre-commit tool
 
 This project utilizes the [pre-commit](https://pre-commit.com/) tool to maintain code quality and consistency. Before submitting a pull request or making any commits, it is important to run the pre-commit tool to ensure that your changes meet the project's guidelines.
@@ -78,8 +86,6 @@ To run the pre-commit tool, follow these steps:
 
 4. You can also install pre-commit as a git hook by executing `pre-commit install`. Every time you run `git commit`, pre-commit runs automatically for you.
-
-
 ### Docstrings
 
 All new functions and classes in `swarms` should include docstrings. This is a prerequisite for any new functions and classes to be added to the library.
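To make the requirements listed in this patch concrete, here is a minimal sketch of the kind of module and test a contribution would pair together. Every name and path below is hypothetical, chosen purely for illustration and matching this repository's test layout:

```python
# swarms/utils/word_count.py (hypothetical module path)
def word_count(text: str) -> int:
    """Count whitespace-separated words, with basic error handling.

    Raises:
        TypeError: If ``text`` is not a string.
    """
    if not isinstance(text, str):
        raise TypeError("text must be a string")
    return len(text.split())
```

A matching pytest module would live alongside it in the tests folder:

```python
# tests/utils/word_count.py (hypothetical test path)
import pytest

from swarms.utils.word_count import word_count


def test_word_count():
    assert word_count("health and wellness") == 3


def test_word_count_rejects_non_strings():
    with pytest.raises(TypeError):
        word_count(42)
```

A short page under `docs/swarms/`, plus an entry in `mkdocs.yml`, would complete the checklist.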
From 6c7de98d7136838528c50721210672ef9b416602 Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 21:21:06 -0800 Subject: [PATCH 36/40] multi agent docs + playground code quality --- example.py | 1 - multi_agent_debate.py | 31 ++++++ playground/demos/autotemp/autotemp.py | 17 +++- playground/demos/nutrition/nutrition.py | 52 ++++++---- swarms/swarms/multi_agent_collab.py | 127 ++++++++++++++++++++---- 5 files changed, 183 insertions(+), 45 deletions(-) create mode 100644 multi_agent_debate.py diff --git a/example.py b/example.py index af41d355..ab496b77 100644 --- a/example.py +++ b/example.py @@ -12,4 +12,3 @@ flow = Flow(llm=llm, max_loops=1, dashboard=True) # Run the workflow on a task out = flow.run("Generate a 10,000 word blog on health and wellness.") - diff --git a/multi_agent_debate.py b/multi_agent_debate.py new file mode 100644 index 00000000..2bc67c8c --- /dev/null +++ b/multi_agent_debate.py @@ -0,0 +1,31 @@ +import os + +from dotenv import load_dotenv + +from swarms.models import OpenAIChat +from swarms.structs import Flow +from swarms.swarms.multi_agent_collab import MultiAgentCollaboration + +load_dotenv() + +api_key = os.environ.get("OPENAI_API_KEY") + +# Initialize the language model +llm = OpenAIChat( + temperature=0.5, + openai_api_key=api_key, +) + + +## Initialize the workflow +flow = Flow(llm=llm, max_loops=1, dashboard=True) +flow2 = Flow(llm=llm, max_loops=1, dashboard=True) +flow3 = Flow(llm=llm, max_loops=1, dashboard=True) + + +swarm = MultiAgentCollaboration( + agents=[flow, flow2, flow3], + max_iters=4, +) + +swarm.run("Generate a 10,000 word blog on health and wellness.") diff --git a/playground/demos/autotemp/autotemp.py b/playground/demos/autotemp/autotemp.py index ed38a621..ab521606 100644 --- a/playground/demos/autotemp/autotemp.py +++ b/playground/demos/autotemp/autotemp.py @@ -1,19 +1,24 @@ import re from swarms.models.openai_models import OpenAIChat + class AutoTemp: """ AutoTemp is a tool for automatically selecting the best temperature setting for a given task. It generates responses at different temperatures, evaluates them, and ranks them based on quality. 
""" - def __init__(self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6): + def __init__( + self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6 + ): self.api_key = api_key self.default_temp = default_temp self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] self.auto_select = auto_select self.max_workers = max_workers - self.llm = OpenAIChat(openai_api_key=self.api_key, temperature=self.default_temp) + self.llm = OpenAIChat( + openai_api_key=self.api_key, temperature=self.default_temp + ) def evaluate_output(self, output, temperature): print(f"Evaluating output at temperature {temperature}...") @@ -34,12 +39,16 @@ class AutoTemp: --- """ score_text = self.llm(eval_prompt, temperature=0.5) - score_match = re.search(r'\b\d+(\.\d)?\b', score_text) + score_match = re.search(r"\b\d+(\.\d)?\b", score_text) return round(float(score_match.group()), 1) if score_match else 0.0 def run(self, prompt, temperature_string): print("Starting generation process...") - temperature_list = [float(temp.strip()) for temp in temperature_string.split(',') if temp.strip()] + temperature_list = [ + float(temp.strip()) + for temp in temperature_string.split(",") + if temp.strip() + ] outputs = {} scores = {} for temp in temperature_list: diff --git a/playground/demos/nutrition/nutrition.py b/playground/demos/nutrition/nutrition.py index 41ff2995..c263f2cd 100644 --- a/playground/demos/nutrition/nutrition.py +++ b/playground/demos/nutrition/nutrition.py @@ -11,12 +11,16 @@ openai_api_key = os.getenv("OPENAI_API_KEY") # Define prompts for various tasks MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks." -IMAGE_ANALYSIS_PROMPT = "Identify the items in this fridge, including their quantities and condition." +IMAGE_ANALYSIS_PROMPT = ( + "Identify the items in this fridge, including their quantities and condition." 
+) + # Function to encode image to base64 def encode_image(image_path): with open(image_path, "rb") as image_file: - return base64.b64encode(image_file.read()).decode('utf-8') + return base64.b64encode(image_file.read()).decode("utf-8") + # Initialize Language Model (LLM) llm = OpenAIChat( @@ -24,12 +28,13 @@ llm = OpenAIChat( max_tokens=3000, ) + # Function to handle vision tasks def create_vision_agent(image_path): base64_image = encode_image(image_path) headers = { "Content-Type": "application/json", - "Authorization": f"Bearer {openai_api_key}" + "Authorization": f"Bearer {openai_api_key}", } payload = { "model": "gpt-4-vision-preview", @@ -38,28 +43,39 @@ def create_vision_agent(image_path): "role": "user", "content": [ {"type": "text", "text": IMAGE_ANALYSIS_PROMPT}, - {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}} - ] + { + "type": "image_url", + "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}, + }, + ], } ], - "max_tokens": 300 + "max_tokens": 300, } - response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload) + response = requests.post( + "https://api.openai.com/v1/chat/completions", headers=headers, json=payload + ) return response.json() + # Function to generate an integrated shopping list considering meal plan and fridge contents -def generate_integrated_shopping_list(meal_plan_output, image_analysis, user_preferences): +def generate_integrated_shopping_list( + meal_plan_output, image_analysis, user_preferences +): # Prepare the prompt for the LLM - fridge_contents = image_analysis['choices'][0]['message']['content'] - prompt = (f"Based on this meal plan: {meal_plan_output}, " - f"and the following items in the fridge: {fridge_contents}, " - f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, " - f"generate a comprehensive shopping list that includes only the items needed.") + fridge_contents = image_analysis["choices"][0]["message"]["content"] + prompt = ( + f"Based on this meal plan: {meal_plan_output}, " + f"and the following items in the fridge: {fridge_contents}, " + f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, " + f"generate a comprehensive shopping list that includes only the items needed." 
+ ) # Send the prompt to the LLM and return the response response = llm(prompt) return response # assuming the response is a string + # Define agent for meal planning meal_plan_agent = Flow( llm=llm, @@ -74,19 +90,19 @@ user_preferences = { "dietary_restrictions": "vegetarian", "preferred_cuisines": ["Italian", "Indian"], "caloric_intake": 2000, - "other notes": "Doesn't eat legumes" + "other notes": "Doesn't eat legumes", } # Generate Meal Plan -meal_plan_output = meal_plan_agent.run( - f"Generate a meal plan: {user_preferences}" -) +meal_plan_output = meal_plan_agent.run(f"Generate a meal plan: {user_preferences}") # Vision Agent - Analyze an Image image_analysis_output = create_vision_agent("full_fridge.jpg") # Generate Integrated Shopping List -integrated_shopping_list = generate_integrated_shopping_list(meal_plan_output, image_analysis_output, user_preferences) +integrated_shopping_list = generate_integrated_shopping_list( + meal_plan_output, image_analysis_output, user_preferences +) # Print and save the outputs print("Meal Plan:", meal_plan_output) diff --git a/swarms/swarms/multi_agent_collab.py b/swarms/swarms/multi_agent_collab.py index ce5a0dd6..85d9955b 100644 --- a/swarms/swarms/multi_agent_collab.py +++ b/swarms/swarms/multi_agent_collab.py @@ -23,22 +23,6 @@ bid_parser = BidOutputParser( ) -def select_next_speaker_director(step: int, agents, director) -> int: - # if the step if even => director - # => director selects next speaker - if step % 2 == 1: - idx = 0 - else: - idx = director.select_next_speaker() + 1 - return idx - - -# Define a selection function -def select_speaker_round_table(step: int, agents) -> int: - # This function selects the speaker in a round-robin fashion - return step % len(agents) - - # main class MultiAgentCollaboration: """ @@ -49,6 +33,15 @@ class MultiAgentCollaboration: selection_function (callable): The function that selects the next speaker. Defaults to select_next_speaker. max_iters (int): The maximum number of iterations. Defaults to 10. + autosave (bool): Whether to autosave the state of all agents. Defaults to True. + saved_file_path_name (str): The path to the saved file. Defaults to + "multi_agent_collab.json". + stopping_token (str): The token that stops the collaboration. Defaults to + "". + results (list): The results of the collaboration. Defaults to []. + logger (logging.Logger): The logger. Defaults to logger. + logging (bool): Whether to log the collaboration. Defaults to True. + Methods: reset: Resets the state of all agents. 
@@ -62,18 +55,40 @@ class MultiAgentCollaboration: Usage: - >>> from swarms.models import MultiAgentCollaboration - >>> from swarms.models import Flow >>> from swarms.models import OpenAIChat - >>> from swarms.models import Anthropic - + >>> from swarms.structs import Flow + >>> from swarms.swarms.multi_agent_collab import MultiAgentCollaboration + >>> + >>> # Initialize the language model + >>> llm = OpenAIChat( + >>> temperature=0.5, + >>> ) + >>> + >>> + >>> ## Initialize the workflow + >>> flow = Flow(llm=llm, max_loops=1, dashboard=True) + >>> + >>> # Run the workflow on a task + >>> out = flow.run("Generate a 10,000 word blog on health and wellness.") + >>> + >>> # Initialize the multi-agent collaboration + >>> swarm = MultiAgentCollaboration( + >>> agents=[flow], + >>> max_iters=4, + >>> ) + >>> + >>> # Run the multi-agent collaboration + >>> swarm.run() + >>> + >>> # Format the results of the multi-agent collaboration + >>> swarm.format_results(swarm.results) """ def __init__( self, agents: List[Flow], - selection_function: callable = select_next_speaker_director, + selection_function: callable = None, max_iters: int = 10, autosave: bool = True, saved_file_path_name: str = "multi_agent_collab.json", @@ -165,7 +180,7 @@ class MultiAgentCollaboration: ), retry_error_callback=lambda retry_state: 0, ) - def run(self): + def run_director(self, task: str): """Runs the multi-agent collaboration.""" n = 0 self.reset() @@ -179,6 +194,74 @@ class MultiAgentCollaboration: print("\n") n += 1 + def select_next_speaker_roundtable(self, step: int, agents: List[Flow]) -> int: + """Selects the next speaker.""" + return step % len(agents) + + def select_next_speaker_director(step: int, agents: List[Flow], director) -> int: + # if the step if even => director + # => director selects next speaker + if step % 2 == 1: + idx = 0 + else: + idx = director.select_next_speaker() + 1 + return idx + + # def run(self, task: str): + # """Runs the multi-agent collaboration.""" + # for step in range(self.max_iters): + # speaker_idx = self.select_next_speaker_roundtable(step, self.agents) + # speaker = self.agents[speaker_idx] + # result = speaker.run(task) + # self.results.append({"agent": speaker, "response": result}) + + # if self.autosave: + # self.save_state() + # if result == self.stopping_token: + # break + # return self.results + + # def run(self, task: str): + # for _ in range(self.max_iters): + # for step, agent, in enumerate(self.agents): + # result = agent.run(task) + # self.results.append({"agent": agent, "response": result}) + # if self.autosave: + # self.save_state() + # if result == self.stopping_token: + # break + + # return self.results + + # def run(self, task: str): + # conversation = task + # for _ in range(self.max_iters): + # for agent in self.agents: + # result = agent.run(conversation) + # self.results.append({"agent": agent, "response": result}) + # conversation = result + + # if self.autosave: + # self.save() + # if result == self.stopping_token: + # break + # return self.results + + def run(self, task: str): + conversation = task + for _ in range(self.max_iters): + for agent in self.agents: + result = agent.run(conversation) + self.results.append({"agent": agent, "response": result}) + conversation += result + + if self.autosave: + self.save_state() + if result == self.stopping_token: + break + + return self.results + def format_results(self, results): """Formats the results of the run method""" formatted_results = "\n".join( From d97de1c009e92e1849777102656dbfb9bb5af340 Mon Sep 17 
00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 23:20:53 -0800 Subject: [PATCH 37/40] torch verisoning --- pyproject.toml | 2 +- requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 1ecd02ad..3dbf8570 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ classifiers = [ [tool.poetry.dependencies] python = "^3.8.1" +torch = "2.1.1" transformers = "*" openai = "0.28.0" langchain = "*" @@ -30,7 +31,6 @@ asyncio = "*" nest_asyncio = "*" einops = "*" google-generativeai = "*" -torch = "*" langchain-experimental = "*" playwright = "*" duckduckgo-search = "*" diff --git a/requirements.txt b/requirements.txt index 2330d399..bd7c513c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -12,7 +12,7 @@ playwright wget==3.2 simpleaichat httpx -torch +torch==2.1.1 open_clip_torch ggl beautifulsoup4 From 49c7b97c09b04ff5a7bf2a56beea05acbc00cf0e Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 23:27:40 -0800 Subject: [PATCH 38/40] code quality fixes: line length = 80 --- playground/agents/mm_agent_example.py | 5 +- playground/agents/revgpt_agent.py | 10 +- .../demos/accountant_team/accountant_team.py | 7 +- playground/demos/ai_research_team/main.py | 3 +- playground/demos/autotemp/autotemp.py | 18 ++- playground/demos/blog_gen/blog_gen.py | 38 ++++-- .../multi_modal_auto_agent.py | 5 +- playground/demos/nutrition/nutrition.py | 32 +++-- playground/demos/positive_med/positive_med.py | 13 +- playground/models/bioclip.py | 4 +- playground/models/idefics.py | 10 +- playground/models/llama_function_caller.py | 4 +- playground/models/vilt.py | 3 +- playground/structs/flow_tools.py | 7 +- playground/swarms/debate.py | 44 ++++--- playground/swarms/multi_agent_debate.py | 6 +- playground/swarms/orchestrate.py | 4 +- playground/swarms/orchestrator.py | 4 +- playground/swarms/swarms_example.py | 5 +- pyproject.toml | 9 +- swarms/agents/omni_modal_agent.py | 7 +- swarms/memory/base.py | 22 ++-- swarms/memory/chroma.py | 41 ++++-- swarms/memory/cosine_similarity.py | 9 +- swarms/memory/db.py | 4 +- swarms/memory/ocean.py | 8 +- swarms/memory/pg.py | 15 ++- swarms/memory/pinecone.py | 17 ++- swarms/memory/schemas.py | 9 +- swarms/memory/utils.py | 2 +- swarms/models/__init__.py | 6 +- swarms/models/anthropic.py | 48 ++++--- swarms/models/bioclip.py | 16 ++- swarms/models/biogpt.py | 2 +- swarms/models/cohere_chat.py | 16 ++- swarms/models/dalle3.py | 40 +++--- swarms/models/distilled_whisperx.py | 21 ++- swarms/models/eleven_labs.py | 8 +- swarms/models/fastvit.py | 12 +- swarms/models/fuyu.py | 12 +- swarms/models/gpt4v.py | 7 +- swarms/models/huggingface.py | 33 +++-- swarms/models/idefics.py | 4 +- swarms/models/jina_embeds.py | 12 +- swarms/models/kosmos2.py | 18 ++- swarms/models/kosmos_two.py | 38 ++++-- swarms/models/llama_function_caller.py | 15 ++- swarms/models/mistral.py | 12 +- swarms/models/mpt.py | 9 +- swarms/models/nougat.py | 16 ++- swarms/models/openai_embeddings.py | 45 +++++-- swarms/models/openai_function_caller.py | 14 +- swarms/models/openai_models.py | 121 ++++++++++++------ swarms/models/palm.py | 26 +++- swarms/models/simple_ada.py | 4 +- swarms/models/speecht5.py | 8 +- swarms/models/ssd_1b.py | 32 +++-- swarms/models/whisperx.py | 4 +- swarms/models/wizard_storytelling.py | 8 +- swarms/models/yarn_mistral.py | 8 +- swarms/prompts/agent_prompt.py | 16 +-- swarms/prompts/agent_prompts.py | 98 +++++++------- swarms/prompts/base.py | 4 +- swarms/prompts/chat_prompt.py | 4 +- 
swarms/prompts/multi_modal_prompts.py | 4 +- swarms/prompts/python.py | 61 ++++----- swarms/prompts/sales.py | 33 ++--- swarms/prompts/sales_prompts.py | 33 ++--- swarms/structs/autoscaler.py | 23 +++- swarms/structs/flow.py | 30 +++-- swarms/structs/non_linear_workflow.py | 15 ++- swarms/structs/sequential_workflow.py | 25 ++-- swarms/swarms/autobloggen.py | 10 +- swarms/swarms/base.py | 4 +- swarms/swarms/dialogue_simulator.py | 11 +- swarms/swarms/god_mode.py | 24 +++- swarms/swarms/groupchat.py | 14 +- swarms/swarms/multi_agent_collab.py | 24 +++- swarms/swarms/orchestrate.py | 23 +++- swarms/tools/autogpt.py | 24 +++- swarms/tools/mm_models.py | 79 +++++++----- swarms/tools/tool.py | 100 +++++++++++---- swarms/utils/apa.py | 4 +- swarms/utils/code_interpreter.py | 14 +- swarms/utils/decorators.py | 8 +- swarms/utils/futures.py | 4 +- swarms/utils/loggers.py | 28 ++-- swarms/utils/main.py | 16 ++- swarms/utils/parse_code.py | 4 +- swarms/utils/serializable.py | 8 +- tests/agents/omni_modal.py | 4 +- tests/memory/oceandb.py | 4 +- tests/memory/pinecone.py | 4 +- tests/models/LLM.py | 4 +- tests/models/anthropic.py | 20 ++- tests/models/auto_temp.py | 4 +- tests/models/bingchat.py | 4 +- tests/models/bioclip.py | 4 +- tests/models/biogpt.py | 5 +- tests/models/cohere.py | 106 +++++++++++---- tests/models/dalle3.py | 60 ++++++--- tests/models/distill_whisper.py | 35 +++-- tests/models/elevenlab.py | 8 +- tests/models/fuyu.py | 4 +- tests/models/gpt4v.py | 60 ++++++--- tests/models/hf.py | 13 +- tests/models/huggingface.py | 15 ++- tests/models/idefics.py | 25 ++-- tests/models/kosmos.py | 12 +- tests/models/kosmos2.py | 25 +++- tests/models/llama_function_caller.py | 12 +- tests/models/nougat.py | 7 +- tests/models/revgptv1.py | 13 +- tests/models/speech_t5.py | 12 +- tests/models/ssd_1b.py | 16 ++- tests/models/timm_model.py | 24 +++- tests/models/vilt.py | 4 +- tests/models/whisperx.py | 16 ++- tests/models/yi_200k.py | 24 +++- tests/structs/flow.py | 68 +++++++--- tests/swarms/godmode.py | 8 +- tests/swarms/groupchat.py | 12 +- tests/swarms/multi_agent_collab.py | 8 +- tests/swarms/multi_agent_debate.py | 9 +- tests/tools/base.py | 8 +- tests/utils/subprocess_code_interpreter.py | 52 ++++++-- 126 files changed, 1706 insertions(+), 728 deletions(-) diff --git a/playground/agents/mm_agent_example.py b/playground/agents/mm_agent_example.py index 0da0d469..5326af6e 100644 --- a/playground/agents/mm_agent_example.py +++ b/playground/agents/mm_agent_example.py @@ -9,6 +9,9 @@ text = node.run_text("What is your name? Generate a picture of yourself") img = node.run_img("/image1", "What is this image about?") chat = node.chat( - "What is your name? Generate a picture of yourself. What is this image about?", + ( + "What is your name? Generate a picture of yourself. What is this image" + " about?" 
+ ), streaming=True, ) diff --git a/playground/agents/revgpt_agent.py b/playground/agents/revgpt_agent.py index 42d95359..16a720e8 100644 --- a/playground/agents/revgpt_agent.py +++ b/playground/agents/revgpt_agent.py @@ -10,13 +10,19 @@ config = { "plugin_ids": [os.getenv("REVGPT_PLUGIN_IDS")], "disable_history": os.getenv("REVGPT_DISABLE_HISTORY") == "True", "PUID": os.getenv("REVGPT_PUID"), - "unverified_plugin_domains": [os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS")], + "unverified_plugin_domains": [ + os.getenv("REVGPT_UNVERIFIED_PLUGIN_DOMAINS") + ], } llm = RevChatGPTModel(access_token=os.getenv("ACCESS_TOKEN"), **config) worker = Worker(ai_name="Optimus Prime", llm=llm) -task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times." +task = ( + "What were the winning boston marathon times for the past 5 years (ending" + " in 2022)? Generate a table of the year, name, country of origin, and" + " times." +) response = worker.run(task) print(response) diff --git a/playground/demos/accountant_team/accountant_team.py b/playground/demos/accountant_team/accountant_team.py index 61cc2f7a..d9edc2f6 100644 --- a/playground/demos/accountant_team/accountant_team.py +++ b/playground/demos/accountant_team/accountant_team.py @@ -103,7 +103,8 @@ class AccountantSwarms: # Provide decision making support to the accountant decision_making_support_agent_output = decision_making_support_agent.run( - f"{self.decision_making_support_agent_instructions}: {summary_agent_output}" + f"{self.decision_making_support_agent_instructions}:" + f" {summary_agent_output}" ) return decision_making_support_agent_output @@ -113,5 +114,7 @@ swarm = AccountantSwarms( pdf_path="tesla.pdf", fraud_detection_instructions="Detect fraud in the document", summary_agent_instructions="Generate an actionable summary of the document", - decision_making_support_agent_instructions="Provide decision making support to the business owner:", + decision_making_support_agent_instructions=( + "Provide decision making support to the business owner:" + ), ) diff --git a/playground/demos/ai_research_team/main.py b/playground/demos/ai_research_team/main.py index a297bc0a..77d8dbdc 100644 --- a/playground/demos/ai_research_team/main.py +++ b/playground/demos/ai_research_team/main.py @@ -48,6 +48,7 @@ paper_implementor_agent = Flow( paper = pdf_to_text(PDF_PATH) algorithmic_psuedocode_agent = paper_summarizer_agent.run( - f"Focus on creating the algorithmic pseudocode for the novel method in this paper: {paper}" + "Focus on creating the algorithmic pseudocode for the novel method in this" + f" paper: {paper}" ) pytorch_code = paper_implementor_agent.run(algorithmic_psuedocode_agent) diff --git a/playground/demos/autotemp/autotemp.py b/playground/demos/autotemp/autotemp.py index ab521606..b136bad7 100644 --- a/playground/demos/autotemp/autotemp.py +++ b/playground/demos/autotemp/autotemp.py @@ -9,11 +9,18 @@ class AutoTemp: """ def __init__( - self, api_key, default_temp=0.0, alt_temps=None, auto_select=True, max_workers=6 + self, + api_key, + default_temp=0.0, + alt_temps=None, + auto_select=True, + max_workers=6, ): self.api_key = api_key self.default_temp = default_temp - self.alt_temps = alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] + self.alt_temps = ( + alt_temps if alt_temps else [0.4, 0.6, 0.8, 1.0, 1.2, 1.4] + ) self.auto_select = auto_select self.max_workers = max_workers self.llm = OpenAIChat( @@ -62,12 +69,15 @@ class AutoTemp: 
if not scores: return "No valid outputs generated.", None - sorted_scores = sorted(scores.items(), key=lambda item: item[1], reverse=True) + sorted_scores = sorted( + scores.items(), key=lambda item: item[1], reverse=True + ) best_temp, best_score = sorted_scores[0] best_output = outputs[best_temp] return ( - f"Best AutoTemp Output (Temp {best_temp} | Score: {best_score}):\n{best_output}" + f"Best AutoTemp Output (Temp {best_temp} | Score:" + f" {best_score}):\n{best_output}" if self.auto_select else "\n".join( f"Temp {temp} | Score: {score}:\n{outputs[temp]}" diff --git a/playground/demos/blog_gen/blog_gen.py b/playground/demos/blog_gen/blog_gen.py index 3781d895..84ab240d 100644 --- a/playground/demos/blog_gen/blog_gen.py +++ b/playground/demos/blog_gen/blog_gen.py @@ -7,7 +7,10 @@ from swarms.structs import SequentialWorkflow class BlogGen: def __init__( - self, api_key, blog_topic, temperature_range: str = "0.4,0.6,0.8,1.0,1.2" + self, + api_key, + blog_topic, + temperature_range: str = "0.4,0.6,0.8,1.0,1.2", ): # Add blog_topic as an argument self.openai_chat = OpenAIChat(openai_api_key=api_key, temperature=0.8) self.auto_temp = AutoTemp(api_key) @@ -40,7 +43,10 @@ class BlogGen: topic_output = topic_result.generations[0][0].text print( colored( - f"\nTopic Selection Task Output:\n----------------------------\n{topic_output}\n", + ( + "\nTopic Selection Task" + f" Output:\n----------------------------\n{topic_output}\n" + ), "white", ) ) @@ -58,7 +64,10 @@ class BlogGen: initial_draft_output = auto_temp_output # Assuming AutoTemp.run returns the best output directly print( colored( - f"\nInitial Draft Output:\n----------------------------\n{initial_draft_output}\n", + ( + "\nInitial Draft" + f" Output:\n----------------------------\n{initial_draft_output}\n" + ), "white", ) ) @@ -71,7 +80,10 @@ class BlogGen: review_output = review_result.generations[0][0].text print( colored( - f"\nReview Output:\n----------------------------\n{review_output}\n", + ( + "\nReview" + f" Output:\n----------------------------\n{review_output}\n" + ), "white", ) ) @@ -80,22 +92,28 @@ class BlogGen: distribution_prompt = self.DISTRIBUTION_AGENT_SYSTEM_PROMPT.replace( "{{ARTICLE_TOPIC}}", chosen_topic ) - distribution_result = self.openai_chat.generate([distribution_prompt]) + distribution_result = self.openai_chat.generate( + [distribution_prompt] + ) distribution_output = distribution_result.generations[0][0].text print( colored( - f"\nDistribution Output:\n----------------------------\n{distribution_output}\n", + ( + "\nDistribution" + f" Output:\n----------------------------\n{distribution_output}\n" + ), "white", ) ) # Final compilation of the blog - final_blog_content = ( - f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}" - ) + final_blog_content = f"{initial_draft_output}\n\n{review_output}\n\n{distribution_output}" print( colored( - f"\nFinal Blog Content:\n----------------------------\n{final_blog_content}\n", + ( + "\nFinal Blog" + f" Content:\n----------------------------\n{final_blog_content}\n" + ), "green", ) ) diff --git a/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent.py b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent.py index b462795f..a2602706 100644 --- a/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent.py +++ b/playground/demos/multi_modal_autonomous_agents/multi_modal_auto_agent.py @@ -4,7 +4,10 @@ from swarms.models import Idefics # Multi Modality Auto Agent llm = Idefics(max_length=2000) -task = 
"User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" +task = ( + "User: What is in this image?" + " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" +) ## Initialize the workflow flow = Flow( diff --git a/playground/demos/nutrition/nutrition.py b/playground/demos/nutrition/nutrition.py index c263f2cd..ffdafd7c 100644 --- a/playground/demos/nutrition/nutrition.py +++ b/playground/demos/nutrition/nutrition.py @@ -10,9 +10,16 @@ load_dotenv() openai_api_key = os.getenv("OPENAI_API_KEY") # Define prompts for various tasks -MEAL_PLAN_PROMPT = "Based on the following user preferences: dietary restrictions as vegetarian, preferred cuisines as Italian and Indian, a total caloric intake of around 2000 calories per day, and an exclusion of legumes, create a detailed weekly meal plan. Include a variety of meals for breakfast, lunch, dinner, and optional snacks." +MEAL_PLAN_PROMPT = ( + "Based on the following user preferences: dietary restrictions as" + " vegetarian, preferred cuisines as Italian and Indian, a total caloric" + " intake of around 2000 calories per day, and an exclusion of legumes," + " create a detailed weekly meal plan. Include a variety of meals for" + " breakfast, lunch, dinner, and optional snacks." +) IMAGE_ANALYSIS_PROMPT = ( - "Identify the items in this fridge, including their quantities and condition." + "Identify the items in this fridge, including their quantities and" + " condition." ) @@ -45,7 +52,9 @@ def create_vision_agent(image_path): {"type": "text", "text": IMAGE_ANALYSIS_PROMPT}, { "type": "image_url", - "image_url": {"url": f"data:image/jpeg;base64,{base64_image}"}, + "image_url": { + "url": f"data:image/jpeg;base64,{base64_image}" + }, }, ], } @@ -53,7 +62,9 @@ def create_vision_agent(image_path): "max_tokens": 300, } response = requests.post( - "https://api.openai.com/v1/chat/completions", headers=headers, json=payload + "https://api.openai.com/v1/chat/completions", + headers=headers, + json=payload, ) return response.json() @@ -65,10 +76,11 @@ def generate_integrated_shopping_list( # Prepare the prompt for the LLM fridge_contents = image_analysis["choices"][0]["message"]["content"] prompt = ( - f"Based on this meal plan: {meal_plan_output}, " - f"and the following items in the fridge: {fridge_contents}, " - f"considering dietary preferences as vegetarian with a preference for Italian and Indian cuisines, " - f"generate a comprehensive shopping list that includes only the items needed." + f"Based on this meal plan: {meal_plan_output}, and the following items" + f" in the fridge: {fridge_contents}, considering dietary preferences as" + " vegetarian with a preference for Italian and Indian cuisines," + " generate a comprehensive shopping list that includes only the items" + " needed." 
) # Send the prompt to the LLM and return the response @@ -94,7 +106,9 @@ user_preferences = { } # Generate Meal Plan -meal_plan_output = meal_plan_agent.run(f"Generate a meal plan: {user_preferences}") +meal_plan_output = meal_plan_agent.run( + f"Generate a meal plan: {user_preferences}" +) # Vision Agent - Analyze an Image image_analysis_output = create_vision_agent("full_fridge.jpg") diff --git a/playground/demos/positive_med/positive_med.py b/playground/demos/positive_med/positive_med.py index 6f7a2d3a..ea0c7c4e 100644 --- a/playground/demos/positive_med/positive_med.py +++ b/playground/demos/positive_med/positive_med.py @@ -39,9 +39,9 @@ def get_review_prompt(article): def social_media_prompt(article: str, goal: str = "Clicks and engagement"): - prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace("{{ARTICLE}}", article).replace( - "{{GOAL}}", goal - ) + prompt = SOCIAL_MEDIA_SYSTEM_PROMPT_AGENT.replace( + "{{ARTICLE}}", article + ).replace("{{GOAL}}", goal) return prompt @@ -50,7 +50,8 @@ topic_selection_task = ( "Generate 10 topics on gaining mental clarity using ancient practices" ) topics = llm( - f"Your System Instructions: {TOPIC_GENERATOR}, Your current task: {topic_selection_task}" + f"Your System Instructions: {TOPIC_GENERATOR}, Your current task:" + f" {topic_selection_task}" ) dashboard = print( @@ -109,7 +110,9 @@ reviewed_draft = print( # Agent that publishes on social media -distribution_agent = llm(social_media_prompt(draft_blog, goal="Clicks and engagement")) +distribution_agent = llm( + social_media_prompt(draft_blog, goal="Clicks and engagement") +) distribution_agent_out = print( colored( f""" diff --git a/playground/models/bioclip.py b/playground/models/bioclip.py index dcdd309b..11fb9f27 100644 --- a/playground/models/bioclip.py +++ b/playground/models/bioclip.py @@ -1,6 +1,8 @@ from swarms.models.bioclip import BioClip -clip = BioClip("hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224") +clip = BioClip( + "hf-hub:microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224" +) labels = [ "adenocarcinoma histopathology", diff --git a/playground/models/idefics.py b/playground/models/idefics.py index 032e0f3b..39d6f4eb 100644 --- a/playground/models/idefics.py +++ b/playground/models/idefics.py @@ -2,11 +2,17 @@ from swarms.models import idefics model = idefics() -user_input = "User: What is in this image? https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" +user_input = ( + "User: What is in this image?" + " https://upload.wikimedia.org/wikipedia/commons/8/86/Id%C3%A9fix.JPG" +) response = model.chat(user_input) print(response) -user_input = "User: And who is that? https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052" +user_input = ( + "User: And who is that?" 
+ " https://static.wikia.nocookie.net/asterix/images/2/25/R22b.gif/revision/latest?cb=20110815073052" +) response = model.chat(user_input) print(response) diff --git a/playground/models/llama_function_caller.py b/playground/models/llama_function_caller.py index 43bca3a5..201009a8 100644 --- a/playground/models/llama_function_caller.py +++ b/playground/models/llama_function_caller.py @@ -28,7 +28,9 @@ llama_caller.add_func( ) # Call the function -result = llama_caller.call_function("get_weather", location="Paris", format="Celsius") +result = llama_caller.call_function( + "get_weather", location="Paris", format="Celsius" +) print(result) # Stream a user prompt diff --git a/playground/models/vilt.py b/playground/models/vilt.py index 127514e0..8e40f59d 100644 --- a/playground/models/vilt.py +++ b/playground/models/vilt.py @@ -3,5 +3,6 @@ from swarms.models.vilt import Vilt model = Vilt() output = model( - "What is this image", "http://images.cocodataset.org/val2017/000000039769.jpg" + "What is this image", + "http://images.cocodataset.org/val2017/000000039769.jpg", ) diff --git a/playground/structs/flow_tools.py b/playground/structs/flow_tools.py index 647f6617..42ec0f72 100644 --- a/playground/structs/flow_tools.py +++ b/playground/structs/flow_tools.py @@ -30,7 +30,9 @@ async def async_load_playwright(url: str) -> str: text = soup.get_text() lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + chunks = ( + phrase.strip() for line in lines for phrase in line.split(" ") + ) results = "\n".join(chunk for chunk in chunks if chunk) except Exception as e: results = f"Error: {e}" @@ -58,5 +60,6 @@ flow = Flow( ) out = flow.run( - "Generate a 10,000 word blog on mental clarity and the benefits of meditation." + "Generate a 10,000 word blog on mental clarity and the benefits of" + " meditation." ) diff --git a/playground/swarms/debate.py b/playground/swarms/debate.py index 2c47ed8e..4c97817d 100644 --- a/playground/swarms/debate.py +++ b/playground/swarms/debate.py @@ -36,7 +36,9 @@ class DialogueAgent: message = self.model( [ self.system_message, - HumanMessage(content="\n".join(self.message_history + [self.prefix])), + HumanMessage( + content="\n".join(self.message_history + [self.prefix]) + ), ] ) return message.content @@ -124,19 +126,19 @@ game_description = f"""Here is the topic for the presidential debate: {topic}. The presidential candidates are: {', '.join(character_names)}.""" player_descriptor_system_message = SystemMessage( - content="You can add detail to the description of each presidential candidate." + content=( + "You can add detail to the description of each presidential candidate." + ) ) def generate_character_description(character_name): character_specifier_prompt = [ player_descriptor_system_message, - HumanMessage( - content=f"""{game_description} + HumanMessage(content=f"""{game_description} Please reply with a creative description of the presidential candidate, {character_name}, in {word_limit} words or less, that emphasizes their personalities. Speak directly to {character_name}. 
- Do not add anything else.""" - ), + Do not add anything else."""), ] character_description = ChatOpenAI(temperature=1.0)( character_specifier_prompt @@ -155,9 +157,7 @@ Your goal is to be as creative as possible and make the voters think you are the def generate_character_system_message(character_name, character_header): - return SystemMessage( - content=( - f"""{character_header} + return SystemMessage(content=f"""{character_header} You will speak in the style of {character_name}, and exaggerate their personality. You will come up with creative ideas related to {topic}. Do not say the same things over and over again. @@ -169,13 +169,12 @@ Speak only from the perspective of {character_name}. Stop speaking the moment you finish speaking from your perspective. Never forget to keep your response to {word_limit} words! Do not add anything else. - """ - ) - ) + """) character_descriptions = [ - generate_character_description(character_name) for character_name in character_names + generate_character_description(character_name) + for character_name in character_names ] character_headers = [ generate_character_header(character_name, character_description) @@ -185,7 +184,9 @@ character_headers = [ ] character_system_messages = [ generate_character_system_message(character_name, character_headers) - for character_name, character_headers in zip(character_names, character_headers) + for character_name, character_headers in zip( + character_names, character_headers + ) ] for ( @@ -207,7 +208,10 @@ for ( class BidOutputParser(RegexParser): def get_format_instructions(self) -> str: - return "Your response should be an integer delimited by angled brackets, like this: ." + return ( + "Your response should be an integer delimited by angled brackets," + " like this: ." + ) bid_parser = BidOutputParser( @@ -248,8 +252,7 @@ for character_name, bidding_template in zip( topic_specifier_prompt = [ SystemMessage(content="You can make a task more specific."), - HumanMessage( - content=f"""{game_description} + HumanMessage(content=f"""{game_description} You are the debate moderator. Please make the debate topic more specific. @@ -257,8 +260,7 @@ topic_specifier_prompt = [ Be creative and imaginative. Please reply with the specified topic in {word_limit} words or less. Speak directly to the presidential candidates: {*character_names,}. - Do not add anything else.""" - ), + Do not add anything else."""), ] specified_topic = ChatOpenAI(temperature=1.0)(topic_specifier_prompt).content @@ -321,7 +323,9 @@ for character_name, character_system_message, bidding_template in zip( max_iters = 10 n = 0 -simulator = DialogueSimulator(agents=characters, selection_function=select_next_speaker) +simulator = DialogueSimulator( + agents=characters, selection_function=select_next_speaker +) simulator.reset() simulator.inject("Debate Moderator", specified_topic) print(f"(Debate Moderator): {specified_topic}") diff --git a/playground/swarms/multi_agent_debate.py b/playground/swarms/multi_agent_debate.py index f0bec797..d5382e56 100644 --- a/playground/swarms/multi_agent_debate.py +++ b/playground/swarms/multi_agent_debate.py @@ -36,7 +36,11 @@ agents = [worker1, worker2, worker3] debate = MultiAgentDebate(agents, select_speaker) # Run task -task = "What were the winning boston marathon times for the past 5 years (ending in 2022)? Generate a table of the year, name, country of origin, and times." +task = ( + "What were the winning boston marathon times for the past 5 years (ending" + " in 2022)? 
Generate a table of the year, name, country of origin, and" + " times." +) results = debate.run(task, max_iters=4) # Print results diff --git a/playground/swarms/orchestrate.py b/playground/swarms/orchestrate.py index e43b75e3..a90a72e8 100644 --- a/playground/swarms/orchestrate.py +++ b/playground/swarms/orchestrate.py @@ -10,4 +10,6 @@ node = Worker( orchestrator = Orchestrator(node, agent_list=[node] * 10, task_queue=[]) # Agent 7 sends a message to Agent 9 -orchestrator.chat(sender_id=7, receiver_id=9, message="Can you help me with this task?") +orchestrator.chat( + sender_id=7, receiver_id=9, message="Can you help me with this task?" +) diff --git a/playground/swarms/orchestrator.py b/playground/swarms/orchestrator.py index e43b75e3..a90a72e8 100644 --- a/playground/swarms/orchestrator.py +++ b/playground/swarms/orchestrator.py @@ -10,4 +10,6 @@ node = Worker( orchestrator = Orchestrator(node, agent_list=[node] * 10, task_queue=[]) # Agent 7 sends a message to Agent 9 -orchestrator.chat(sender_id=7, receiver_id=9, message="Can you help me with this task?") +orchestrator.chat( + sender_id=7, receiver_id=9, message="Can you help me with this task?" +) diff --git a/playground/swarms/swarms_example.py b/playground/swarms/swarms_example.py index 6dabe4a1..23b714d9 100644 --- a/playground/swarms/swarms_example.py +++ b/playground/swarms/swarms_example.py @@ -7,7 +7,10 @@ api_key = "" swarm = HierarchicalSwarm(api_key) # Define an objective -objective = "Find 20 potential customers for a HierarchicalSwarm based AI Agent automation infrastructure" +objective = ( + "Find 20 potential customers for a HierarchicalSwarm based AI Agent" + " automation infrastructure" +) # Run HierarchicalSwarm swarm.run(objective) diff --git a/pyproject.toml b/pyproject.toml index 3dbf8570..eea95362 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,4 +84,11 @@ recursive = true aggressive = 3 [tool.ruff] -line-length = 80 \ No newline at end of file +line-length = 80 + +[tool.black] +line-length = 80 +target-version = ['py38'] +preview = true + + diff --git a/swarms/agents/omni_modal_agent.py b/swarms/agents/omni_modal_agent.py index 007a2219..6a22c477 100644 --- a/swarms/agents/omni_modal_agent.py +++ b/swarms/agents/omni_modal_agent.py @@ -18,7 +18,12 @@ from swarms.agents.message import Message class Step: def __init__( - self, task: str, id: int, dep: List[int], args: Dict[str, str], tool: BaseTool + self, + task: str, + id: int, + dep: List[int], + args: Dict[str, str], + tool: BaseTool, ): self.task = task self.id = id diff --git a/swarms/memory/base.py b/swarms/memory/base.py index 7f71c4b9..3ca49617 100644 --- a/swarms/memory/base.py +++ b/swarms/memory/base.py @@ -37,7 +37,7 @@ class BaseVectorStore(ABC): self, artifacts: dict[str, list[TextArtifact]], meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> None: execute_futures_dict( { @@ -54,7 +54,7 @@ class BaseVectorStore(ABC): artifact: TextArtifact, namespace: Optional[str] = None, meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> str: if not meta: meta = {} @@ -67,7 +67,11 @@ class BaseVectorStore(ABC): vector = artifact.generate_embedding(self.embedding_driver) return self.upsert_vector( - vector, vector_id=artifact.id, namespace=namespace, meta=meta, **kwargs + vector, + vector_id=artifact.id, + namespace=namespace, + meta=meta, + **kwargs, ) def upsert_text( @@ -76,14 +80,14 @@ class BaseVectorStore(ABC): vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> str: 
return self.upsert_vector( self.embedding_driver.embed_string(string), vector_id=vector_id, namespace=namespace, meta=meta if meta else {}, - **kwargs + **kwargs, ) @abstractmethod @@ -93,12 +97,14 @@ class BaseVectorStore(ABC): vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> str: ... @abstractmethod - def load_entry(self, vector_id: str, namespace: Optional[str] = None) -> Entry: + def load_entry( + self, vector_id: str, namespace: Optional[str] = None + ) -> Entry: ... @abstractmethod @@ -112,6 +118,6 @@ class BaseVectorStore(ABC): count: Optional[int] = None, namespace: Optional[str] = None, include_vectors: bool = False, - **kwargs + **kwargs, ) -> list[QueryResult]: ... diff --git a/swarms/memory/chroma.py b/swarms/memory/chroma.py index 67ba4cb2..2f4e473f 100644 --- a/swarms/memory/chroma.py +++ b/swarms/memory/chroma.py @@ -111,7 +111,9 @@ class Chroma(VectorStore): chroma_db_impl="duckdb+parquet", ) else: - _client_settings = chromadb.config.Settings(is_persistent=True) + _client_settings = chromadb.config.Settings( + is_persistent=True + ) _client_settings.persist_directory = persist_directory else: _client_settings = chromadb.config.Settings() @@ -124,9 +126,11 @@ class Chroma(VectorStore): self._embedding_function = embedding_function self._collection = self._client.get_or_create_collection( name=collection_name, - embedding_function=self._embedding_function.embed_documents - if self._embedding_function is not None - else None, + embedding_function=( + self._embedding_function.embed_documents + if self._embedding_function is not None + else None + ), metadata=collection_metadata, ) self.override_relevance_score_fn = relevance_score_fn @@ -203,7 +207,9 @@ class Chroma(VectorStore): metadatas = [metadatas[idx] for idx in non_empty_ids] texts_with_metadatas = [texts[idx] for idx in non_empty_ids] embeddings_with_metadatas = ( - [embeddings[idx] for idx in non_empty_ids] if embeddings else None + [embeddings[idx] for idx in non_empty_ids] + if embeddings + else None ) ids_with_metadata = [ids[idx] for idx in non_empty_ids] try: @@ -216,7 +222,8 @@ class Chroma(VectorStore): except ValueError as e: if "Expected metadata value to be" in str(e): msg = ( - "Try filtering complex metadata from the document using " + "Try filtering complex metadata from the document" + " using " "langchain.vectorstores.utils.filter_complex_metadata." ) raise ValueError(e.args[0] + "\n\n" + msg) @@ -258,7 +265,9 @@ class Chroma(VectorStore): Returns: List[Document]: List of documents most similar to the query text. """ - docs_and_scores = self.similarity_search_with_score(query, k, filter=filter) + docs_and_scores = self.similarity_search_with_score( + query, k, filter=filter + ) return [doc for doc, _ in docs_and_scores] def similarity_search_by_vector( @@ -428,7 +437,9 @@ class Chroma(VectorStore): candidates = _results_to_docs(results) - selected_results = [r for i, r in enumerate(candidates) if i in mmr_selected] + selected_results = [ + r for i, r in enumerate(candidates) if i in mmr_selected + ] return selected_results def max_marginal_relevance_search( @@ -460,7 +471,8 @@ class Chroma(VectorStore): """ if self._embedding_function is None: raise ValueError( - "For MMR search, you must specify an embedding function oncreation." + "For MMR search, you must specify an embedding function" + " oncreation." 
) embedding = self._embedding_function.embed_query(query) @@ -543,7 +555,9 @@ class Chroma(VectorStore): """ return self.update_documents([document_id], [document]) - def update_documents(self, ids: List[str], documents: List[Document]) -> None: + def update_documents( + self, ids: List[str], documents: List[Document] + ) -> None: """Update a document in the collection. Args: @@ -554,7 +568,8 @@ class Chroma(VectorStore): metadata = [document.metadata for document in documents] if self._embedding_function is None: raise ValueError( - "For update, you must specify an embedding function on creation." + "For update, you must specify an embedding function on" + " creation." ) embeddings = self._embedding_function.embed_documents(text) @@ -645,7 +660,9 @@ class Chroma(VectorStore): ids=batch[0], ) else: - chroma_collection.add_texts(texts=texts, metadatas=metadatas, ids=ids) + chroma_collection.add_texts( + texts=texts, metadatas=metadatas, ids=ids + ) return chroma_collection @classmethod diff --git a/swarms/memory/cosine_similarity.py b/swarms/memory/cosine_similarity.py index 99d47368..cdcd1a2b 100644 --- a/swarms/memory/cosine_similarity.py +++ b/swarms/memory/cosine_similarity.py @@ -18,8 +18,8 @@ def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: Y = np.array(Y) if X.shape[1] != Y.shape[1]: raise ValueError( - f"Number of columns in X and Y must be the same. X has shape {X.shape} " - f"and Y has shape {Y.shape}." + "Number of columns in X and Y must be the same. X has shape" + f" {X.shape} and Y has shape {Y.shape}." ) try: import simsimd as simd @@ -32,8 +32,9 @@ def cosine_similarity(X: Matrix, Y: Matrix) -> np.ndarray: return Z except ImportError: logger.info( - "Unable to import simsimd, defaulting to NumPy implementation. If you want " - "to use simsimd please install with `pip install simsimd`." + "Unable to import simsimd, defaulting to NumPy implementation. If" + " you want to use simsimd please install with `pip install" + " simsimd`." ) X_norm = np.linalg.norm(X, axis=1) Y_norm = np.linalg.norm(Y, axis=1) diff --git a/swarms/memory/db.py b/swarms/memory/db.py index 9f23b59f..4ffec16f 100644 --- a/swarms/memory/db.py +++ b/swarms/memory/db.py @@ -151,7 +151,9 @@ class InMemoryTaskDB(TaskDB): ) -> Artifact: artifact_id = str(uuid.uuid4()) artifact = Artifact( - artifact_id=artifact_id, file_name=file_name, relative_path=relative_path + artifact_id=artifact_id, + file_name=file_name, + relative_path=relative_path, ) task = await self.get_task(task_id) task.artifacts.append(artifact) diff --git a/swarms/memory/ocean.py b/swarms/memory/ocean.py index da58c81c..fb0873af 100644 --- a/swarms/memory/ocean.py +++ b/swarms/memory/ocean.py @@ -91,7 +91,9 @@ class OceanDB: try: return collection.add(documents=[document], ids=[id]) except Exception as e: - logging.error(f"Failed to append document to the collection. Error {e}") + logging.error( + f"Failed to append document to the collection. Error {e}" + ) raise def add_documents(self, collection, documents: List[str], ids: List[str]): @@ -137,7 +139,9 @@ class OceanDB: the results of the query """ try: - results = collection.query(query_texts=query_texts, n_results=n_results) + results = collection.query( + query_texts=query_texts, n_results=n_results + ) return results except Exception as e: logging.error(f"Failed to query the collection. 
Error {e}") diff --git a/swarms/memory/pg.py b/swarms/memory/pg.py index a421c887..ce591c6e 100644 --- a/swarms/memory/pg.py +++ b/swarms/memory/pg.py @@ -89,11 +89,15 @@ class PgVectorVectorStore(BaseVectorStore): engine: Optional[Engine] = field(default=None, kw_only=True) table_name: str = field(kw_only=True) _model: any = field( - default=Factory(lambda self: self.default_vector_model(), takes_self=True) + default=Factory( + lambda self: self.default_vector_model(), takes_self=True + ) ) @connection_string.validator - def validate_connection_string(self, _, connection_string: Optional[str]) -> None: + def validate_connection_string( + self, _, connection_string: Optional[str] + ) -> None: # If an engine is provided, the connection string is not used. if self.engine is not None: return @@ -104,7 +108,8 @@ class PgVectorVectorStore(BaseVectorStore): if not connection_string.startswith("postgresql://"): raise ValueError( - "The connection string must describe a Postgres database connection" + "The connection string must describe a Postgres database" + " connection" ) @engine.validator @@ -148,7 +153,7 @@ class PgVectorVectorStore(BaseVectorStore): vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> str: """Inserts or updates a vector in the collection.""" with Session(self.engine) as session: @@ -208,7 +213,7 @@ class PgVectorVectorStore(BaseVectorStore): namespace: Optional[str] = None, include_vectors: bool = False, distance_metric: str = "cosine_distance", - **kwargs + **kwargs, ) -> list[BaseVectorStore.QueryResult]: """Performs a search on the collection to find vectors similar to the provided input vector, optionally filtering to only those that match the provided namespace. diff --git a/swarms/memory/pinecone.py b/swarms/memory/pinecone.py index 2374f12a..a7eb7442 100644 --- a/swarms/memory/pinecone.py +++ b/swarms/memory/pinecone.py @@ -108,7 +108,7 @@ class PineconeVectorStoreStore(BaseVector): vector_id: Optional[str] = None, namespace: Optional[str] = None, meta: Optional[dict] = None, - **kwargs + **kwargs, ) -> str: """Upsert vector""" vector_id = vector_id if vector_id else str_to_hash(str(vector)) @@ -123,7 +123,9 @@ class PineconeVectorStoreStore(BaseVector): self, vector_id: str, namespace: Optional[str] = None ) -> Optional[BaseVector.Entry]: """Load entry""" - result = self.index.fetch(ids=[vector_id], namespace=namespace).to_dict() + result = self.index.fetch( + ids=[vector_id], namespace=namespace + ).to_dict() vectors = list(result["vectors"].values()) if len(vectors) > 0: @@ -138,7 +140,9 @@ class PineconeVectorStoreStore(BaseVector): else: return None - def load_entries(self, namespace: Optional[str] = None) -> list[BaseVector.Entry]: + def load_entries( + self, namespace: Optional[str] = None + ) -> list[BaseVector.Entry]: """Load entries""" # This is a hacky way to query up to 10,000 values from Pinecone. 
Waiting on an official API for fetching # all values from a namespace: @@ -169,7 +173,7 @@ class PineconeVectorStoreStore(BaseVector): include_vectors: bool = False, # PineconeVectorStoreStorageDriver-specific params: include_metadata=True, - **kwargs + **kwargs, ) -> list[BaseVector.QueryResult]: """Query vectors""" vector = self.embedding_driver.embed_string(query) @@ -196,6 +200,9 @@ class PineconeVectorStoreStore(BaseVector): def create_index(self, name: str, **kwargs) -> None: """Create index""" - params = {"name": name, "dimension": self.embedding_driver.dimensions} | kwargs + params = { + "name": name, + "dimension": self.embedding_driver.dimensions, + } | kwargs pinecone.create_index(**params) diff --git a/swarms/memory/schemas.py b/swarms/memory/schemas.py index bbc71bc2..89f1453b 100644 --- a/swarms/memory/schemas.py +++ b/swarms/memory/schemas.py @@ -50,7 +50,9 @@ class StepInput(BaseModel): class StepOutput(BaseModel): __root__: Any = Field( ..., - description="Output that the task step has produced. Any value is allowed.", + description=( + "Output that the task step has produced. Any value is allowed." + ), example='{\n"tokens": 7894,\n"estimated_cost": "0,24$"\n}', ) @@ -112,8 +114,9 @@ class Step(StepRequestBody): None, description="Output of the task step.", example=( - "I am going to use the write_to_file command and write Washington to a file" - " called output.txt List[Document]: """Filter out metadata types that are not supported for a vector store.""" updated_documents = [] diff --git a/swarms/models/__init__.py b/swarms/models/__init__.py index f509087c..10bf2fab 100644 --- a/swarms/models/__init__.py +++ b/swarms/models/__init__.py @@ -7,7 +7,11 @@ sys.stderr = log_file from swarms.models.anthropic import Anthropic # noqa: E402 from swarms.models.petals import Petals # noqa: E402 from swarms.models.mistral import Mistral # noqa: E402 -from swarms.models.openai_models import OpenAI, AzureOpenAI, OpenAIChat # noqa: E402 +from swarms.models.openai_models import ( + OpenAI, + AzureOpenAI, + OpenAIChat, +) # noqa: E402 from swarms.models.zephyr import Zephyr # noqa: E402 from swarms.models.biogpt import BioGPT # noqa: E402 from swarms.models.huggingface import HuggingfaceLLM # noqa: E402 diff --git a/swarms/models/anthropic.py b/swarms/models/anthropic.py index edaae087..1f47e1bf 100644 --- a/swarms/models/anthropic.py +++ b/swarms/models/anthropic.py @@ -50,7 +50,9 @@ def xor_args(*arg_groups: Tuple[str, ...]) -> Callable: ] invalid_groups = [i for i, count in enumerate(counts) if count != 1] if invalid_groups: - invalid_group_names = [", ".join(arg_groups[i]) for i in invalid_groups] + invalid_group_names = [ + ", ".join(arg_groups[i]) for i in invalid_groups + ] raise ValueError( "Exactly one argument in each of the following" " groups must be defined:" @@ -106,7 +108,10 @@ def mock_now(dt_value): # type: ignore def guard_import( - module_name: str, *, pip_name: Optional[str] = None, package: Optional[str] = None + module_name: str, + *, + pip_name: Optional[str] = None, + package: Optional[str] = None, ) -> Any: """Dynamically imports a module and raises a helpful exception if the module is not installed.""" @@ -180,18 +185,18 @@ def build_extra_kwargs( if field_name in extra_kwargs: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: - warnings.warn( - f"""WARNING! {field_name} is not default parameter. + warnings.warn(f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. 
- Please confirm that {field_name} is what you intended.""" - ) + Please confirm that {field_name} is what you intended.""") extra_kwargs[field_name] = values.pop(field_name) - invalid_model_kwargs = all_required_field_names.intersection(extra_kwargs.keys()) + invalid_model_kwargs = all_required_field_names.intersection( + extra_kwargs.keys() + ) if invalid_model_kwargs: raise ValueError( - f"Parameters {invalid_model_kwargs} should be specified explicitly. " - "Instead they were passed in as part of `model_kwargs` parameter." + f"Parameters {invalid_model_kwargs} should be specified explicitly." + " Instead they were passed in as part of `model_kwargs` parameter." ) return extra_kwargs @@ -250,7 +255,9 @@ class _AnthropicCommon(BaseLanguageModel): def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" values["anthropic_api_key"] = convert_to_secret_str( - get_from_dict_or_env(values, "anthropic_api_key", "ANTHROPIC_API_KEY") + get_from_dict_or_env( + values, "anthropic_api_key", "ANTHROPIC_API_KEY" + ) ) # Get custom api url from environment. values["anthropic_api_url"] = get_from_dict_or_env( @@ -305,7 +312,9 @@ class _AnthropicCommon(BaseLanguageModel): """Get the identifying parameters.""" return {**{}, **self._default_params} - def _get_anthropic_stop(self, stop: Optional[List[str]] = None) -> List[str]: + def _get_anthropic_stop( + self, stop: Optional[List[str]] = None + ) -> List[str]: if not self.HUMAN_PROMPT or not self.AI_PROMPT: raise NameError("Please ensure the anthropic package is loaded") @@ -354,8 +363,8 @@ class Anthropic(LLM, _AnthropicCommon): def raise_warning(cls, values: Dict) -> Dict: """Raise warning that this class is deprecated.""" warnings.warn( - "This Anthropic LLM is deprecated. " - "Please use `from langchain.chat_models import ChatAnthropic` instead" + "This Anthropic LLM is deprecated. Please use `from" + " langchain.chat_models import ChatAnthropic` instead" ) return values @@ -372,12 +381,16 @@ class Anthropic(LLM, _AnthropicCommon): return prompt # Already wrapped. # Guard against common errors in specifying wrong number of newlines. - corrected_prompt, n_subs = re.subn(r"^\n*Human:", self.HUMAN_PROMPT, prompt) + corrected_prompt, n_subs = re.subn( + r"^\n*Human:", self.HUMAN_PROMPT, prompt + ) if n_subs == 1: return corrected_prompt # As a last resort, wrap the prompt ourselves to emulate instruct-style. 
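The `_wrap_prompt` hunk being reflowed here normalizes prompts for Anthropic's completion endpoint: a prompt that already carries the canonical prefix passes through unchanged, a bare "Human:" prefix is corrected with `re.subn`, and anything else is wrapped wholesale as a last resort. A standalone sketch of that path, with the `HUMAN_PROMPT`/`AI_PROMPT` values assumed here rather than imported from the `anthropic` package:

    import re

    # Assumed stand-ins for anthropic.HUMAN_PROMPT / anthropic.AI_PROMPT.
    HUMAN_PROMPT = "\n\nHuman:"
    AI_PROMPT = "\n\nAssistant:"

    def wrap_prompt(prompt: str) -> str:
        if prompt.startswith(HUMAN_PROMPT):
            return prompt  # already wrapped
        # Fix a prompt that starts with "Human:" but lacks the leading newlines.
        corrected, n_subs = re.subn(r"^\n*Human:", HUMAN_PROMPT, prompt)
        if n_subs == 1:
            return corrected
        # Last resort: wrap the raw prompt to emulate instruct-style usage.
        return f"{HUMAN_PROMPT} {prompt}{AI_PROMPT} Sure, here you go:\n"

    assert wrap_prompt("Human: hi") == "\n\nHuman: hi"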
- return f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n" + return ( + f"{self.HUMAN_PROMPT} {prompt}{self.AI_PROMPT} Sure, here you go:\n" + ) def _call( self, @@ -476,7 +489,10 @@ class Anthropic(LLM, _AnthropicCommon): params = {**self._default_params, **kwargs} for token in self.client.completions.create( - prompt=self._wrap_prompt(prompt), stop_sequences=stop, stream=True, **params + prompt=self._wrap_prompt(prompt), + stop_sequences=stop, + stream=True, + **params, ): chunk = GenerationChunk(text=token.completion) yield chunk diff --git a/swarms/models/bioclip.py b/swarms/models/bioclip.py index c2b4bfa5..1c2627a6 100644 --- a/swarms/models/bioclip.py +++ b/swarms/models/bioclip.py @@ -98,7 +98,9 @@ class BioClip: ) = open_clip.create_model_and_transforms(model_path) self.tokenizer = open_clip.get_tokenizer(model_path) self.device = ( - torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + torch.device("cuda") + if torch.cuda.is_available() + else torch.device("cpu") ) self.model.to(self.device) self.model.eval() @@ -110,13 +112,17 @@ class BioClip: template: str = "this is a photo of ", context_length: int = 256, ): - image = torch.stack([self.preprocess_val(Image.open(img_path))]).to(self.device) + image = torch.stack([self.preprocess_val(Image.open(img_path))]).to( + self.device + ) texts = self.tokenizer( [template + l for l in labels], context_length=context_length ).to(self.device) with torch.no_grad(): - image_features, text_features, logit_scale = self.model(image, texts) + image_features, text_features, logit_scale = self.model( + image, texts + ) logits = ( (logit_scale * image_features @ text_features.t()) .detach() @@ -142,7 +148,9 @@ class BioClip: title = ( metadata["filename"] + "\n" - + "\n".join([f"{k}: {v*100:.1f}" for k, v in metadata["top_probs"].items()]) + + "\n".join( + [f"{k}: {v*100:.1f}" for k, v in metadata["top_probs"].items()] + ) ) ax.set_title(title, fontsize=14) plt.tight_layout() diff --git a/swarms/models/biogpt.py b/swarms/models/biogpt.py index 83c31e55..d5e692f2 100644 --- a/swarms/models/biogpt.py +++ b/swarms/models/biogpt.py @@ -154,7 +154,7 @@ class BioGPT: min_length=self.min_length, max_length=self.max_length, num_beams=num_beams, - early_stopping=early_stopping + early_stopping=early_stopping, ) return self.tokenizer.decode(beam_output[0], skip_special_tokens=True) diff --git a/swarms/models/cohere_chat.py b/swarms/models/cohere_chat.py index c583b827..508e9073 100644 --- a/swarms/models/cohere_chat.py +++ b/swarms/models/cohere_chat.py @@ -96,7 +96,9 @@ class BaseCohere(Serializable): values, "cohere_api_key", "COHERE_API_KEY" ) client_name = values["user_agent"] - values["client"] = cohere.Client(cohere_api_key, client_name=client_name) + values["client"] = cohere.Client( + cohere_api_key, client_name=client_name + ) values["async_client"] = cohere.AsyncClient( cohere_api_key, client_name=client_name ) @@ -172,17 +174,23 @@ class Cohere(LLM, BaseCohere): """Return type of llm.""" return "cohere" - def _invocation_params(self, stop: Optional[List[str]], **kwargs: Any) -> dict: + def _invocation_params( + self, stop: Optional[List[str]], **kwargs: Any + ) -> dict: params = self._default_params if self.stop is not None and stop is not None: - raise ValueError("`stop` found in both the input and default params.") + raise ValueError( + "`stop` found in both the input and default params." 
+ ) elif self.stop is not None: params["stop_sequences"] = self.stop else: params["stop_sequences"] = stop return {**params, **kwargs} - def _process_response(self, response: Any, stop: Optional[List[str]]) -> str: + def _process_response( + self, response: Any, stop: Optional[List[str]] + ) -> str: text = response.generations[0].text # If stop tokens are provided, Cohere's endpoint returns them. # In order to make this consistent with other endpoints, we strip them. diff --git a/swarms/models/dalle3.py b/swarms/models/dalle3.py index 7d9bcf5d..3c130670 100644 --- a/swarms/models/dalle3.py +++ b/swarms/models/dalle3.py @@ -169,8 +169,8 @@ class Dalle3: print( colored( ( - f"Error running Dalle3: {error} try optimizing your api key and" - " or try again" + f"Error running Dalle3: {error} try optimizing your api" + " key and or try again" ), "red", ) @@ -234,8 +234,8 @@ class Dalle3: print( colored( ( - f"Error running Dalle3: {error} try optimizing your api key and" - " or try again" + f"Error running Dalle3: {error} try optimizing your api" + " key and or try again" ), "red", ) @@ -248,8 +248,7 @@ class Dalle3: """Print the Dalle3 dashboard""" print( colored( - ( - f"""Dalle3 Dashboard: + f"""Dalle3 Dashboard: -------------------- Model: {self.model} @@ -265,13 +264,14 @@ class Dalle3: -------------------- - """ - ), + """, "green", ) ) - def process_batch_concurrently(self, tasks: List[str], max_workers: int = 5): + def process_batch_concurrently( + self, tasks: List[str], max_workers: int = 5 + ): """ Process a batch of tasks concurrently @@ -293,8 +293,12 @@ class Dalle3: ['https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png', """ - with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: - future_to_task = {executor.submit(self, task): task for task in tasks} + with concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers + ) as executor: + future_to_task = { + executor.submit(self, task): task for task in tasks + } results = [] for future in concurrent.futures.as_completed(future_to_task): task = future_to_task[future] @@ -307,14 +311,20 @@ class Dalle3: print( colored( ( - f"Error running Dalle3: {error} try optimizing your api key and" - " or try again" + f"Error running Dalle3: {error} try optimizing" + " your api key and or try again" ), "red", ) ) - print(colored(f"Error running Dalle3: {error.http_status}", "red")) - print(colored(f"Error running Dalle3: {error.error}", "red")) + print( + colored( + f"Error running Dalle3: {error.http_status}", "red" + ) + ) + print( + colored(f"Error running Dalle3: {error.error}", "red") + ) raise error def _generate_uuid(self): diff --git a/swarms/models/distilled_whisperx.py b/swarms/models/distilled_whisperx.py index 98b3660a..2b4fb5a5 100644 --- a/swarms/models/distilled_whisperx.py +++ b/swarms/models/distilled_whisperx.py @@ -28,7 +28,10 @@ def async_retry(max_retries=3, exceptions=(Exception,), delay=1): retries -= 1 if retries <= 0: raise - print(f"Retry after exception: {e}, Attempts remaining: {retries}") + print( + f"Retry after exception: {e}, Attempts remaining:" + f" {retries}" + ) await asyncio.sleep(delay) return wrapper @@ -62,7 +65,9 @@ class DistilWhisperModel: def __init__(self, model_id="distil-whisper/distil-large-v2"): self.device = "cuda:0" if torch.cuda.is_available() else "cpu" - self.torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32 + self.torch_dtype = ( + torch.float16 if torch.cuda.is_available() else torch.float32 + ) self.model_id = 
model_id self.model = AutoModelForSpeechSeq2Seq.from_pretrained( model_id, @@ -119,7 +124,9 @@ class DistilWhisperModel: try: with torch.no_grad(): # Load the whole audio file, but process and transcribe it in chunks - audio_input = self.processor.audio_file_to_array(audio_file_path) + audio_input = self.processor.audio_file_to_array( + audio_file_path + ) sample_rate = audio_input.sampling_rate len(audio_input.array) / sample_rate chunks = [ @@ -139,7 +146,9 @@ class DistilWhisperModel: return_tensors="pt", padding=True, ) - processed_inputs = processed_inputs.input_values.to(self.device) + processed_inputs = processed_inputs.input_values.to( + self.device + ) # Generate transcription for the chunk logits = self.model.generate(processed_inputs) @@ -157,4 +166,6 @@ class DistilWhisperModel: time.sleep(chunk_duration) except Exception as e: - print(colored(f"An error occurred during transcription: {e}", "red")) + print( + colored(f"An error occurred during transcription: {e}", "red") + ) diff --git a/swarms/models/eleven_labs.py b/swarms/models/eleven_labs.py index 2fece5b6..42f4dae1 100644 --- a/swarms/models/eleven_labs.py +++ b/swarms/models/eleven_labs.py @@ -79,7 +79,9 @@ class ElevenLabsText2SpeechTool(BaseTool): f.write(speech) return f.name except Exception as e: - raise RuntimeError(f"Error while running ElevenLabsText2SpeechTool: {e}") + raise RuntimeError( + f"Error while running ElevenLabsText2SpeechTool: {e}" + ) def play(self, speech_file: str) -> None: """Play the text as speech.""" @@ -93,7 +95,9 @@ class ElevenLabsText2SpeechTool(BaseTool): """Stream the text as speech as it is generated. Play the text in your speakers.""" elevenlabs = _import_elevenlabs() - speech_stream = elevenlabs.generate(text=query, model=self.model, stream=True) + speech_stream = elevenlabs.generate( + text=query, model=self.model, stream=True + ) elevenlabs.stream(speech_stream) def save(self, speech_file: str, path: str) -> None: diff --git a/swarms/models/fastvit.py b/swarms/models/fastvit.py index d0478777..c9a0d719 100644 --- a/swarms/models/fastvit.py +++ b/swarms/models/fastvit.py @@ -10,7 +10,9 @@ from pydantic import BaseModel, StrictFloat, StrictInt, validator DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu") # Load the classes for image classification -with open(os.path.join(os.path.dirname(__file__), "fast_vit_classes.json")) as f: +with open( + os.path.join(os.path.dirname(__file__), "fast_vit_classes.json") +) as f: FASTVIT_IMAGENET_1K_CLASSES = json.load(f) @@ -20,7 +22,9 @@ class ClassificationResult(BaseModel): @validator("class_id", "confidence", pre=True, each_item=True) def check_list_contents(cls, v): - assert isinstance(v, int) or isinstance(v, float), "must be integer or float" + assert isinstance(v, int) or isinstance( + v, float + ), "must be integer or float" return v @@ -50,7 +54,9 @@ class FastViT: "hf_hub:timm/fastvit_s12.apple_in1k", pretrained=True ).to(DEVICE) data_config = timm.data.resolve_model_data_config(self.model) - self.transforms = timm.data.create_transform(**data_config, is_training=False) + self.transforms = timm.data.create_transform( + **data_config, is_training=False + ) self.model.eval() def __call__( diff --git a/swarms/models/fuyu.py b/swarms/models/fuyu.py index 02ab3a25..ed955260 100644 --- a/swarms/models/fuyu.py +++ b/swarms/models/fuyu.py @@ -46,7 +46,9 @@ class Fuyu: self.tokenizer = AutoTokenizer.from_pretrained(pretrained_path) self.image_processor = FuyuImageProcessor() self.processor = FuyuProcessor( - 
image_processor=self.image_processor, tokenizer=self.tokenizer, **kwargs + image_processor=self.image_processor, + tokenizer=self.tokenizer, + **kwargs, ) self.model = FuyuForCausalLM.from_pretrained( pretrained_path, @@ -69,8 +71,12 @@ class Fuyu: for k, v in model_inputs.items(): model_inputs[k] = v.to(self.device_map) - output = self.model.generate(**model_inputs, max_new_tokens=self.max_new_tokens) - text = self.processor.batch_decode(output[:, -7:], skip_special_tokens=True) + output = self.model.generate( + **model_inputs, max_new_tokens=self.max_new_tokens + ) + text = self.processor.batch_decode( + output[:, -7:], skip_special_tokens=True + ) return print(str(text)) def get_img_from_web(self, img_url: str): diff --git a/swarms/models/gpt4v.py b/swarms/models/gpt4v.py index 8411cb14..8f2683e0 100644 --- a/swarms/models/gpt4v.py +++ b/swarms/models/gpt4v.py @@ -190,12 +190,15 @@ class GPT4Vision: """Process a batch of tasks and images""" with concurrent.futures.ThreadPoolExecutor() as executor: futures = [ - executor.submit(self.run, task, img) for task, img in tasks_images + executor.submit(self.run, task, img) + for task, img in tasks_images ] results = [future.result() for future in futures] return results - async def run_batch_async(self, tasks_images: List[Tuple[str, str]]) -> List[str]: + async def run_batch_async( + self, tasks_images: List[Tuple[str, str]] + ) -> List[str]: """Process a batch of tasks and images asynchronously""" loop = asyncio.get_event_loop() futures = [ diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py index 0f226740..1db435f5 100644 --- a/swarms/models/huggingface.py +++ b/swarms/models/huggingface.py @@ -133,7 +133,9 @@ class HuggingfaceLLM: ): self.logger = logging.getLogger(__name__) self.device = ( - device if device else ("cuda" if torch.cuda.is_available() else "cpu") + device + if device + else ("cuda" if torch.cuda.is_available() else "cpu") ) self.model_id = model_id self.max_length = max_length @@ -178,7 +180,11 @@ class HuggingfaceLLM: except Exception as e: # self.logger.error(f"Failed to load the model or the tokenizer: {e}") # raise - print(colored(f"Failed to load the model and or the tokenizer: {e}", "red")) + print( + colored( + f"Failed to load the model and or the tokenizer: {e}", "red" + ) + ) def print_error(self, error: str): """Print error""" @@ -207,12 +213,16 @@ class HuggingfaceLLM: if self.distributed: self.model = DDP(self.model) except Exception as error: - self.logger.error(f"Failed to load the model or the tokenizer: {error}") + self.logger.error( + f"Failed to load the model or the tokenizer: {error}" + ) raise def concurrent_run(self, tasks: List[str], max_workers: int = 5): """Concurrently generate text for a list of prompts.""" - with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: + with concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers + ) as executor: results = list(executor.map(self.run, tasks)) return results @@ -220,7 +230,8 @@ class HuggingfaceLLM: """Process a batch of tasks and images""" with concurrent.futures.ThreadPoolExecutor() as executor: futures = [ - executor.submit(self.run, task, img) for task, img in tasks_images + executor.submit(self.run, task, img) + for task, img in tasks_images ] results = [future.result() for future in futures] return results @@ -243,7 +254,9 @@ class HuggingfaceLLM: self.print_dashboard(task) try: - inputs = self.tokenizer.encode(task, return_tensors="pt").to(self.device) + inputs = self.tokenizer.encode(task, 
return_tensors="pt").to( + self.device + ) # self.log.start() @@ -279,8 +292,8 @@ class HuggingfaceLLM: print( colored( ( - f"HuggingfaceLLM could not generate text because of error: {e}," - " try optimizing your arguments" + "HuggingfaceLLM could not generate text because of" + f" error: {e}, try optimizing your arguments" ), "red", ) @@ -305,7 +318,9 @@ class HuggingfaceLLM: self.print_dashboard(task) try: - inputs = self.tokenizer.encode(task, return_tensors="pt").to(self.device) + inputs = self.tokenizer.encode(task, return_tensors="pt").to( + self.device + ) # self.log.start() diff --git a/swarms/models/idefics.py b/swarms/models/idefics.py index 73cb4991..0cfcf1af 100644 --- a/swarms/models/idefics.py +++ b/swarms/models/idefics.py @@ -66,7 +66,9 @@ class Idefics: max_length=100, ): self.device = ( - device if device else ("cuda" if torch.cuda.is_available() else "cpu") + device + if device + else ("cuda" if torch.cuda.is_available() else "cpu") ) self.model = IdeficsForVisionText2Text.from_pretrained( checkpoint, diff --git a/swarms/models/jina_embeds.py b/swarms/models/jina_embeds.py index a72b8a9e..1d1ac3e6 100644 --- a/swarms/models/jina_embeds.py +++ b/swarms/models/jina_embeds.py @@ -54,7 +54,9 @@ class JinaEmbeddings: ): self.logger = logging.getLogger(__name__) self.device = ( - device if device else ("cuda" if torch.cuda.is_available() else "cpu") + device + if device + else ("cuda" if torch.cuda.is_available() else "cpu") ) self.model_id = model_id self.max_length = max_length @@ -83,7 +85,9 @@ class JinaEmbeddings: try: self.model = AutoModelForCausalLM.from_pretrained( - self.model_id, quantization_config=bnb_config, trust_remote_code=True + self.model_id, + quantization_config=bnb_config, + trust_remote_code=True, ) self.model # .to(self.device) @@ -112,7 +116,9 @@ class JinaEmbeddings: if self.distributed: self.model = DDP(self.model) except Exception as error: - self.logger.error(f"Failed to load the model or the tokenizer: {error}") + self.logger.error( + f"Failed to load the model or the tokenizer: {error}" + ) raise def run(self, task: str): diff --git a/swarms/models/kosmos2.py b/swarms/models/kosmos2.py index b0e1a9f6..f81e0fdf 100644 --- a/swarms/models/kosmos2.py +++ b/swarms/models/kosmos2.py @@ -70,11 +70,13 @@ class Kosmos2(BaseModel): prompt = "An image of" inputs = self.processor(text=prompt, images=image, return_tensors="pt") - outputs = self.model.generate(**inputs, use_cache=True, max_new_tokens=64) + outputs = self.model.generate( + **inputs, use_cache=True, max_new_tokens=64 + ) - generated_text = self.processor.batch_decode(outputs, skip_special_tokens=True)[ - 0 - ] + generated_text = self.processor.batch_decode( + outputs, skip_special_tokens=True + )[0] # The actual processing of generated_text to entities would go here # For the purpose of this example, assume a mock function 'extract_entities' exists: @@ -99,7 +101,9 @@ class Kosmos2(BaseModel): if not entities: return Detections.empty() - class_ids = [0] * len(entities) # Replace with actual class ID extraction logic + class_ids = [0] * len( + entities + ) # Replace with actual class ID extraction logic xyxys = [ ( e[1][0] * image.width, @@ -111,7 +115,9 @@ class Kosmos2(BaseModel): ] confidences = [1.0] * len(entities) # Placeholder confidence - return Detections(xyxy=xyxys, class_id=class_ids, confidence=confidences) + return Detections( + xyxy=xyxys, class_id=class_ids, confidence=confidences + ) # Usage: diff --git a/swarms/models/kosmos_two.py b/swarms/models/kosmos_two.py index 
596886f3..c696ef34 100644 --- a/swarms/models/kosmos_two.py +++ b/swarms/models/kosmos_two.py @@ -145,12 +145,12 @@ class Kosmos: elif isinstance(image, torch.Tensor): # pdb.set_trace() image_tensor = image.cpu() - reverse_norm_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073])[ - :, None, None - ] - reverse_norm_std = torch.tensor([0.26862954, 0.26130258, 0.27577711])[ - :, None, None - ] + reverse_norm_mean = torch.tensor( + [0.48145466, 0.4578275, 0.40821073] + )[:, None, None] + reverse_norm_std = torch.tensor( + [0.26862954, 0.26130258, 0.27577711] + )[:, None, None] image_tensor = image_tensor * reverse_norm_std + reverse_norm_mean pil_img = T.ToPILImage()(image_tensor) image_h = pil_img.height @@ -188,7 +188,11 @@ class Kosmos: # random color color = tuple(np.random.randint(0, 255, size=3).tolist()) new_image = cv2.rectangle( - new_image, (orig_x1, orig_y1), (orig_x2, orig_y2), color, box_line + new_image, + (orig_x1, orig_y1), + (orig_x2, orig_y2), + color, + box_line, ) l_o, r_o = ( @@ -211,7 +215,10 @@ class Kosmos: # add text background (text_width, text_height), _ = cv2.getTextSize( - f" {entity_name}", cv2.FONT_HERSHEY_COMPLEX, text_size, text_line + f" {entity_name}", + cv2.FONT_HERSHEY_COMPLEX, + text_size, + text_line, ) text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2 = ( x1, @@ -222,7 +229,8 @@ class Kosmos: for prev_bbox in previous_bboxes: while is_overlapping( - (text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2), prev_bbox + (text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2), + prev_bbox, ): text_bg_y1 += ( text_height + text_offset_original + 2 * text_spaces @@ -230,14 +238,18 @@ class Kosmos: text_bg_y2 += ( text_height + text_offset_original + 2 * text_spaces ) - y1 += text_height + text_offset_original + 2 * text_spaces + y1 += ( + text_height + text_offset_original + 2 * text_spaces + ) if text_bg_y2 >= image_h: text_bg_y1 = max( 0, image_h - ( - text_height + text_offset_original + 2 * text_spaces + text_height + + text_offset_original + + 2 * text_spaces ), ) text_bg_y2 = image_h @@ -270,7 +282,9 @@ class Kosmos: cv2.LINE_AA, ) # previous_locations.append((x1, y1)) - previous_bboxes.append((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2)) + previous_bboxes.append( + (text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2) + ) pil_image = Image.fromarray(new_image[:, :, [2, 1, 0]]) if save_path: diff --git a/swarms/models/llama_function_caller.py b/swarms/models/llama_function_caller.py index a991641a..ca5ee5d3 100644 --- a/swarms/models/llama_function_caller.py +++ b/swarms/models/llama_function_caller.py @@ -121,7 +121,11 @@ class LlamaFunctionCaller: ) def add_func( - self, name: str, function: Callable, description: str, arguments: List[Dict] + self, + name: str, + function: Callable, + description: str, + arguments: List[Dict], ): """ Adds a new function to the LlamaFunctionCaller. 
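The `add_func` signature reflowed in the hunk above registers a callable together with its name, description, and argument spec, which `call_function` later dispatches by name (as in the playground example calling `get_weather` with `location="Paris"`). A minimal sketch of that registry pattern, with the metadata fields chosen for illustration rather than taken from the class:

    from typing import Callable, Dict, List

    class FunctionRegistry:
        """Stripped-down add_func/call_function pattern; the real class
        also routes prompts through the Llama model."""

        def __init__(self):
            self.functions: Dict[str, Dict] = {}

        def add_func(self, name: str, function: Callable, description: str, arguments: List[Dict]):
            # Keep the callable next to its metadata so a dispatcher
            # (or the model's tool-use output) can resolve it by name.
            self.functions[name] = {
                "function": function,
                "description": description,
                "arguments": arguments,
            }

        def call_function(self, name: str, **kwargs):
            return self.functions[name]["function"](**kwargs)

    registry = FunctionRegistry()
    registry.add_func(
        "get_weather",
        lambda location, format: f"22 degrees {format} in {location}",
        "Fetch the weather for a location",
        [{"name": "location", "type": "string"}, {"name": "format", "type": "string"}],
    )
    print(registry.call_function("get_weather", location="Paris", format="Celsius"))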
@@ -172,12 +176,17 @@ class LlamaFunctionCaller: if self.streaming: out = self.model.generate( - **inputs, streamer=streamer, max_new_tokens=self.max_tokens, **kwargs + **inputs, + streamer=streamer, + max_new_tokens=self.max_tokens, + **kwargs, ) return out else: - out = self.model.generate(**inputs, max_length=self.max_tokens, **kwargs) + out = self.model.generate( + **inputs, max_length=self.max_tokens, **kwargs + ) # return self.tokenizer.decode(out[0], skip_special_tokens=True) return out diff --git a/swarms/models/mistral.py b/swarms/models/mistral.py index 7f48a0d6..056a31bb 100644 --- a/swarms/models/mistral.py +++ b/swarms/models/mistral.py @@ -49,7 +49,9 @@ class Mistral: # Check if the specified device is available if not torch.cuda.is_available() and device == "cuda": - raise ValueError("CUDA is not available. Please choose a different device.") + raise ValueError( + "CUDA is not available. Please choose a different device." + ) # Load the model and tokenizer self.model = None @@ -70,7 +72,9 @@ class Mistral: """Run the model on a given task.""" try: - model_inputs = self.tokenizer([task], return_tensors="pt").to(self.device) + model_inputs = self.tokenizer([task], return_tensors="pt").to( + self.device + ) generated_ids = self.model.generate( **model_inputs, max_length=self.max_length, @@ -87,7 +91,9 @@ class Mistral: """Run the model on a given task.""" try: - model_inputs = self.tokenizer([task], return_tensors="pt").to(self.device) + model_inputs = self.tokenizer([task], return_tensors="pt").to( + self.device + ) generated_ids = self.model.generate( **model_inputs, max_length=self.max_length, diff --git a/swarms/models/mpt.py b/swarms/models/mpt.py index 46d1a357..c304355a 100644 --- a/swarms/models/mpt.py +++ b/swarms/models/mpt.py @@ -29,7 +29,9 @@ class MPT7B: """ - def __init__(self, model_name: str, tokenizer_name: str, max_tokens: int = 100): + def __init__( + self, model_name: str, tokenizer_name: str, max_tokens: int = 100 + ): # Loading model and tokenizer details self.model_name = model_name self.tokenizer_name = tokenizer_name @@ -118,7 +120,10 @@ class MPT7B: """ with torch.autocast("cuda", dtype=torch.bfloat16): return self.pipe( - prompt, max_new_tokens=self.max_tokens, do_sample=True, use_cache=True + prompt, + max_new_tokens=self.max_tokens, + do_sample=True, + use_cache=True, )[0]["generated_text"] async def generate_async(self, prompt: str) -> str: diff --git a/swarms/models/nougat.py b/swarms/models/nougat.py index f156981c..82bb95f5 100644 --- a/swarms/models/nougat.py +++ b/swarms/models/nougat.py @@ -41,8 +41,12 @@ class Nougat: self.min_length = min_length self.max_new_tokens = max_new_tokens - self.processor = NougatProcessor.from_pretrained(self.model_name_or_path) - self.model = VisionEncoderDecoderModel.from_pretrained(self.model_name_or_path) + self.processor = NougatProcessor.from_pretrained( + self.model_name_or_path + ) + self.model = VisionEncoderDecoderModel.from_pretrained( + self.model_name_or_path + ) self.device = "cuda" if torch.cuda.is_available() else "cpu" self.model.to(self.device) @@ -63,8 +67,12 @@ class Nougat: max_new_tokens=self.max_new_tokens, ) - sequence = self.processor.batch_decode(outputs, skip_special_tokens=True)[0] - sequence = self.processor.post_process_generation(sequence, fix_markdown=False) + sequence = self.processor.batch_decode( + outputs, skip_special_tokens=True + )[0] + sequence = self.processor.post_process_generation( + sequence, fix_markdown=False + ) out = print(sequence) return out diff --git 
a/swarms/models/openai_embeddings.py b/swarms/models/openai_embeddings.py index 81dea550..08919d45 100644 --- a/swarms/models/openai_embeddings.py +++ b/swarms/models/openai_embeddings.py @@ -43,7 +43,9 @@ def get_pydantic_field_names(cls: Any) -> Set[str]: logger = logging.getLogger(__name__) -def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]: +def _create_retry_decorator( + embeddings: OpenAIEmbeddings, +) -> Callable[[Any], Any]: import llm min_seconds = 4 @@ -118,7 +120,9 @@ def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: return _embed_with_retry(**kwargs) -async def async_embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any: +async def async_embed_with_retry( + embeddings: OpenAIEmbeddings, **kwargs: Any +) -> Any: """Use tenacity to retry the embedding call.""" @_async_retry_decorator(embeddings) @@ -172,7 +176,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings): client: Any #: :meta private: model: str = "text-embedding-ada-002" - deployment: str = model # to support Azure OpenAI Service custom deployment names + deployment: str = ( + model # to support Azure OpenAI Service custom deployment names + ) openai_api_version: Optional[str] = None # to support Azure OpenAI Service custom endpoints openai_api_base: Optional[str] = None @@ -229,11 +235,14 @@ class OpenAIEmbeddings(BaseModel, Embeddings): ) extra[field_name] = values.pop(field_name) - invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) + invalid_model_kwargs = all_required_field_names.intersection( + extra.keys() + ) if invalid_model_kwargs: raise ValueError( - f"Parameters {invalid_model_kwargs} should be specified explicitly. " - "Instead they were passed in as part of `model_kwargs` parameter." + f"Parameters {invalid_model_kwargs} should be specified" + " explicitly. Instead they were passed in as part of" + " `model_kwargs` parameter." ) values["model_kwargs"] = extra @@ -333,7 +342,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings): try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: - logger.warning("Warning: model not found. Using cl100k_base encoding.") + logger.warning( + "Warning: model not found. Using cl100k_base encoding." + ) model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): @@ -384,11 +395,11 @@ class OpenAIEmbeddings(BaseModel, Embeddings): self, input="", **self._invocation_params, - )[ - "data" - ][0]["embedding"] + )["data"][0]["embedding"] else: - average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) + average = np.average( + _result, axis=0, weights=num_tokens_in_batch[i] + ) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings @@ -414,7 +425,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings): try: encoding = tiktoken.encoding_for_model(model_name) except KeyError: - logger.warning("Warning: model not found. Using cl100k_base encoding.") + logger.warning( + "Warning: model not found. Using cl100k_base encoding." 
+ ) model = "cl100k_base" encoding = tiktoken.get_encoding(model) for i, text in enumerate(texts): @@ -458,7 +471,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings): ) )["data"][0]["embedding"] else: - average = np.average(_result, axis=0, weights=num_tokens_in_batch[i]) + average = np.average( + _result, axis=0, weights=num_tokens_in_batch[i] + ) embeddings[i] = (average / np.linalg.norm(average)).tolist() return embeddings @@ -495,7 +510,9 @@ class OpenAIEmbeddings(BaseModel, Embeddings): """ # NOTE: to keep things simple, we assume the list may contain texts longer # than the maximum context and use length-safe embedding function. - return await self._aget_len_safe_embeddings(texts, engine=self.deployment) + return await self._aget_len_safe_embeddings( + texts, engine=self.deployment + ) def embed_query(self, text: str) -> List[float]: """Call out to OpenAI's embedding endpoint for embedding query text. diff --git a/swarms/models/openai_function_caller.py b/swarms/models/openai_function_caller.py index bac0f28d..f0c41f2a 100644 --- a/swarms/models/openai_function_caller.py +++ b/swarms/models/openai_function_caller.py @@ -146,7 +146,8 @@ class OpenAIFunctionCaller: self.messages.append({"role": role, "content": content}) @retry( - wait=wait_random_exponential(multiplier=1, max=40), stop=stop_after_attempt(3) + wait=wait_random_exponential(multiplier=1, max=40), + stop=stop_after_attempt(3), ) def chat_completion_request( self, @@ -194,17 +195,22 @@ class OpenAIFunctionCaller: elif message["role"] == "user": print( colored( - f"user: {message['content']}\n", role_to_color[message["role"]] + f"user: {message['content']}\n", + role_to_color[message["role"]], ) ) - elif message["role"] == "assistant" and message.get("function_call"): + elif message["role"] == "assistant" and message.get( + "function_call" + ): print( colored( f"assistant: {message['function_call']}\n", role_to_color[message["role"]], ) ) - elif message["role"] == "assistant" and not message.get("function_call"): + elif message["role"] == "assistant" and not message.get( + "function_call" + ): print( colored( f"assistant: {message['content']}\n", diff --git a/swarms/models/openai_models.py b/swarms/models/openai_models.py index fcf4a223..0547a264 100644 --- a/swarms/models/openai_models.py +++ b/swarms/models/openai_models.py @@ -62,19 +62,25 @@ def _stream_response_to_generation_chunk( return GenerationChunk( text=stream_response["choices"][0]["text"], generation_info=dict( - finish_reason=stream_response["choices"][0].get("finish_reason", None), + finish_reason=stream_response["choices"][0].get( + "finish_reason", None + ), logprobs=stream_response["choices"][0].get("logprobs", None), ), ) -def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None: +def _update_response( + response: Dict[str, Any], stream_response: Dict[str, Any] +) -> None: """Update response from the stream response.""" response["choices"][0]["text"] += stream_response["choices"][0]["text"] response["choices"][0]["finish_reason"] = stream_response["choices"][0].get( "finish_reason", None ) - response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"] + response["choices"][0]["logprobs"] = stream_response["choices"][0][ + "logprobs" + ] def _streaming_response_template() -> Dict[str, Any]: @@ -315,9 +321,11 @@ class BaseOpenAI(BaseLLM): chunk.text, chunk=chunk, verbose=self.verbose, - logprobs=chunk.generation_info["logprobs"] - if chunk.generation_info - else None, + logprobs=( + 
chunk.generation_info["logprobs"] + if chunk.generation_info + else None + ), ) async def _astream( @@ -339,9 +347,11 @@ class BaseOpenAI(BaseLLM): chunk.text, chunk=chunk, verbose=self.verbose, - logprobs=chunk.generation_info["logprobs"] - if chunk.generation_info - else None, + logprobs=( + chunk.generation_info["logprobs"] + if chunk.generation_info + else None + ), ) def _generate( @@ -377,10 +387,14 @@ class BaseOpenAI(BaseLLM): for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: - raise ValueError("Cannot stream results with multiple prompts.") + raise ValueError( + "Cannot stream results with multiple prompts." + ) generation: Optional[GenerationChunk] = None - for chunk in self._stream(_prompts[0], stop, run_manager, **kwargs): + for chunk in self._stream( + _prompts[0], stop, run_manager, **kwargs + ): if generation is None: generation = chunk else: @@ -389,12 +403,16 @@ class BaseOpenAI(BaseLLM): choices.append( { "text": generation.text, - "finish_reason": generation.generation_info.get("finish_reason") - if generation.generation_info - else None, - "logprobs": generation.generation_info.get("logprobs") - if generation.generation_info - else None, + "finish_reason": ( + generation.generation_info.get("finish_reason") + if generation.generation_info + else None + ), + "logprobs": ( + generation.generation_info.get("logprobs") + if generation.generation_info + else None + ), } ) else: @@ -424,7 +442,9 @@ class BaseOpenAI(BaseLLM): for _prompts in sub_prompts: if self.streaming: if len(_prompts) > 1: - raise ValueError("Cannot stream results with multiple prompts.") + raise ValueError( + "Cannot stream results with multiple prompts." + ) generation: Optional[GenerationChunk] = None async for chunk in self._astream( @@ -438,12 +458,16 @@ class BaseOpenAI(BaseLLM): choices.append( { "text": generation.text, - "finish_reason": generation.generation_info.get("finish_reason") - if generation.generation_info - else None, - "logprobs": generation.generation_info.get("logprobs") - if generation.generation_info - else None, + "finish_reason": ( + generation.generation_info.get("finish_reason") + if generation.generation_info + else None + ), + "logprobs": ( + generation.generation_info.get("logprobs") + if generation.generation_info + else None + ), } ) else: @@ -463,7 +487,9 @@ class BaseOpenAI(BaseLLM): """Get the sub prompts for llm call.""" if stop is not None: if "stop" in params: - raise ValueError("`stop` found in both the input and default params.") + raise ValueError( + "`stop` found in both the input and default params." + ) params["stop"] = stop if params["max_tokens"] == -1: if len(prompts) != 1: @@ -541,7 +567,9 @@ class BaseOpenAI(BaseLLM): try: enc = tiktoken.encoding_for_model(model_name) except KeyError: - logger.warning("Warning: model not found. Using cl100k_base encoding.") + logger.warning( + "Warning: model not found. Using cl100k_base encoding." + ) model = "cl100k_base" enc = tiktoken.get_encoding(model) @@ -602,8 +630,9 @@ class BaseOpenAI(BaseLLM): if context_size is None: raise ValueError( - f"Unknown model: {modelname}. Please provide a valid OpenAI model name." - "Known models are: " + ", ".join(model_token_mapping.keys()) + f"Unknown model: {modelname}. 
Please provide a valid OpenAI"
                " model name. Known models are: "
                + ", ".join(model_token_mapping.keys())
            )

         return context_size
 
@@ -753,7 +782,9 @@ class OpenAIChat(BaseLLM):
     @root_validator(pre=True)
     def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
         """Build extra kwargs from additional params that were passed in."""
-        all_required_field_names = {field.alias for field in cls.__fields__.values()}
+        all_required_field_names = {
+            field.alias for field in cls.__fields__.values()
+        }
         extra = values.get("model_kwargs", {})
 
         for field_name in list(values):
@@ -820,13 +851,21 @@ class OpenAIChat(BaseLLM):
     ) -> Tuple:
         if len(prompts) > 1:
             raise ValueError(
-                f"OpenAIChat currently only supports single prompt, got {prompts}"
+                "OpenAIChat currently only supports single prompt, got"
+                f" {prompts}"
             )
-        messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
-        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
+        messages = self.prefix_messages + [
+            {"role": "user", "content": prompts[0]}
+        ]
+        params: Dict[str, Any] = {
+            **{"model": self.model_name},
+            **self._default_params,
+        }
         if stop is not None:
             if "stop" in params:
-                raise ValueError("`stop` found in both the input and default params.")
+                raise ValueError(
+                    "`stop` found in both the input and default params."
+                )
             params["stop"] = stop
         if params.get("max_tokens") == -1:
             # for ChatGPT api, omitting max_tokens is equivalent to having no limit
@@ -897,7 +936,11 @@ class OpenAIChat(BaseLLM):
         }
         return LLMResult(
             generations=[
-                [Generation(text=full_response["choices"][0]["message"]["content"])]
+                [
+                    Generation(
+                        text=full_response["choices"][0]["message"]["content"]
+                    )
+                ]
             ],
             llm_output=llm_output,
         )
@@ -911,7 +954,9 @@ class OpenAIChat(BaseLLM):
     ) -> LLMResult:
         if self.streaming:
             generation: Optional[GenerationChunk] = None
-            async for chunk in self._astream(prompts[0], stop, run_manager, **kwargs):
+            async for chunk in self._astream(
+                prompts[0], stop, run_manager, **kwargs
+            ):
                 if generation is None:
                     generation = chunk
                 else:
@@ -930,7 +975,11 @@ class OpenAIChat(BaseLLM):
         }
         return LLMResult(
             generations=[
-                [Generation(text=full_response["choices"][0]["message"]["content"])]
+                [
+                    Generation(
+                        text=full_response["choices"][0]["message"]["content"]
+                    )
+                ]
             ],
             llm_output=llm_output,
         )
diff --git a/swarms/models/palm.py b/swarms/models/palm.py
index ec8aafd6..8c9277d7 100644
--- a/swarms/models/palm.py
+++ b/swarms/models/palm.py
@@ -37,10 +37,16 @@ def _create_retry_decorator() -> Callable[[Any], Any]:
     return retry(
         reraise=True,
         stop=stop_after_attempt(max_retries),
-        wait=wait_exponential(multiplier=multiplier, min=min_seconds, max=max_seconds),
+        wait=wait_exponential(
+            multiplier=multiplier, min=min_seconds, max=max_seconds
+        ),
         retry=(
-            retry_if_exception_type(google.api_core.exceptions.ResourceExhausted)
-            | retry_if_exception_type(google.api_core.exceptions.ServiceUnavailable)
+            retry_if_exception_type(
+                google.api_core.exceptions.ResourceExhausted
+            )
+            | retry_if_exception_type(
+                google.api_core.exceptions.ServiceUnavailable
+            )
             | retry_if_exception_type(google.api_core.exceptions.GoogleAPIError)
         ),
         before_sleep=before_sleep_log(logger, logging.WARNING),
@@ -64,7 +70,9 @@ def _strip_erroneous_leading_spaces(text: str) -> str:
     The PaLM API will sometimes erroneously return a single leading space in
     all lines > 1. This function strips that space.
""" - has_leading_space = all(not line or line[0] == " " for line in text.split("\n")[1:]) + has_leading_space = all( + not line or line[0] == " " for line in text.split("\n")[1:] + ) if has_leading_space: return text.replace("\n ", "\n") else: @@ -112,7 +120,10 @@ class GooglePalm(BaseLLM, BaseModel): values["client"] = genai - if values["temperature"] is not None and not 0 <= values["temperature"] <= 1: + if ( + values["temperature"] is not None + and not 0 <= values["temperature"] <= 1 + ): raise ValueError("temperature must be in the range [0.0, 1.0]") if values["top_p"] is not None and not 0 <= values["top_p"] <= 1: @@ -121,7 +132,10 @@ class GooglePalm(BaseLLM, BaseModel): if values["top_k"] is not None and values["top_k"] <= 0: raise ValueError("top_k must be positive") - if values["max_output_tokens"] is not None and values["max_output_tokens"] <= 0: + if ( + values["max_output_tokens"] is not None + and values["max_output_tokens"] <= 0 + ): raise ValueError("max_output_tokens must be greater than zero") return values diff --git a/swarms/models/simple_ada.py b/swarms/models/simple_ada.py index 3662dda2..a4e99fe4 100644 --- a/swarms/models/simple_ada.py +++ b/swarms/models/simple_ada.py @@ -16,4 +16,6 @@ def get_ada_embeddings(text: str, model: str = "text-embedding-ada-002"): text = text.replace("\n", " ") - return client.embeddings.create(input=[text], model=model)["data"][0]["embedding"] + return client.embeddings.create(input=[text], model=model)["data"][0][ + "embedding" + ] diff --git a/swarms/models/speecht5.py b/swarms/models/speecht5.py index e98036ac..143a7514 100644 --- a/swarms/models/speecht5.py +++ b/swarms/models/speecht5.py @@ -90,7 +90,9 @@ class SpeechT5: self.processor = SpeechT5Processor.from_pretrained(self.model_name) self.model = SpeechT5ForTextToSpeech.from_pretrained(self.model_name) self.vocoder = SpeechT5HifiGan.from_pretrained(self.vocoder_name) - self.embeddings_dataset = load_dataset(self.dataset_name, split="validation") + self.embeddings_dataset = load_dataset( + self.dataset_name, split="validation" + ) def __call__(self, text: str, speaker_id: float = 7306): """Call the model on some text and return the speech.""" @@ -121,7 +123,9 @@ class SpeechT5: def set_embeddings_dataset(self, dataset_name): """Set the embeddings dataset to a new dataset.""" self.dataset_name = dataset_name - self.embeddings_dataset = load_dataset(self.dataset_name, split="validation") + self.embeddings_dataset = load_dataset( + self.dataset_name, split="validation" + ) # Feature 1: Get sampling rate def get_sampling_rate(self): diff --git a/swarms/models/ssd_1b.py b/swarms/models/ssd_1b.py index caeba3fc..406678ef 100644 --- a/swarms/models/ssd_1b.py +++ b/swarms/models/ssd_1b.py @@ -141,8 +141,8 @@ class SSD1B: print( colored( ( - f"Error running SSD1B: {error} try optimizing your api key and" - " or try again" + f"Error running SSD1B: {error} try optimizing your api" + " key and or try again" ), "red", ) @@ -167,8 +167,7 @@ class SSD1B: """Print the SSD1B dashboard""" print( colored( - ( - f"""SSD1B Dashboard: + f"""SSD1B Dashboard: -------------------- Model: {self.model} @@ -184,13 +183,14 @@ class SSD1B: -------------------- - """ - ), + """, "green", ) ) - def process_batch_concurrently(self, tasks: List[str], max_workers: int = 5): + def process_batch_concurrently( + self, tasks: List[str], max_workers: int = 5 + ): """ Process a batch of tasks concurrently @@ -211,8 +211,12 @@ class SSD1B: >>> print(results) """ - with 
concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor: - future_to_task = {executor.submit(self, task): task for task in tasks} + with concurrent.futures.ThreadPoolExecutor( + max_workers=max_workers + ) as executor: + future_to_task = { + executor.submit(self, task): task for task in tasks + } results = [] for future in concurrent.futures.as_completed(future_to_task): task = future_to_task[future] @@ -225,13 +229,17 @@ class SSD1B: print( colored( ( - f"Error running SSD1B: {error} try optimizing your api key and" - " or try again" + f"Error running SSD1B: {error} try optimizing" + " your api key and or try again" ), "red", ) ) - print(colored(f"Error running SSD1B: {error.http_status}", "red")) + print( + colored( + f"Error running SSD1B: {error.http_status}", "red" + ) + ) print(colored(f"Error running SSD1B: {error.error}", "red")) raise error diff --git a/swarms/models/whisperx.py b/swarms/models/whisperx.py index ac592b35..338971da 100644 --- a/swarms/models/whisperx.py +++ b/swarms/models/whisperx.py @@ -66,7 +66,9 @@ class WhisperX: compute_type = "float16" # 1. Transcribe with original Whisper (batched) 🗣️ - model = whisperx.load_model("large-v2", device, compute_type=compute_type) + model = whisperx.load_model( + "large-v2", device, compute_type=compute_type + ) audio = whisperx.load_audio(audio_file) result = model.transcribe(audio, batch_size=batch_size) diff --git a/swarms/models/wizard_storytelling.py b/swarms/models/wizard_storytelling.py index 49ffb70d..a34f6ec7 100644 --- a/swarms/models/wizard_storytelling.py +++ b/swarms/models/wizard_storytelling.py @@ -45,7 +45,9 @@ class WizardLLMStoryTeller: ): self.logger = logging.getLogger(__name__) self.device = ( - device if device else ("cuda" if torch.cuda.is_available() else "cpu") + device + if device + else ("cuda" if torch.cuda.is_available() else "cpu") ) self.model_id = model_id self.max_length = max_length @@ -101,7 +103,9 @@ class WizardLLMStoryTeller: if self.distributed: self.model = DDP(self.model) except Exception as error: - self.logger.error(f"Failed to load the model or the tokenizer: {error}") + self.logger.error( + f"Failed to load the model or the tokenizer: {error}" + ) raise def run(self, prompt_text: str): diff --git a/swarms/models/yarn_mistral.py b/swarms/models/yarn_mistral.py index ebe107a2..065e3140 100644 --- a/swarms/models/yarn_mistral.py +++ b/swarms/models/yarn_mistral.py @@ -45,7 +45,9 @@ class YarnMistral128: ): self.logger = logging.getLogger(__name__) self.device = ( - device if device else ("cuda" if torch.cuda.is_available() else "cpu") + device + if device + else ("cuda" if torch.cuda.is_available() else "cpu") ) self.model_id = model_id self.max_length = max_length @@ -106,7 +108,9 @@ class YarnMistral128: if self.distributed: self.model = DDP(self.model) except Exception as error: - self.logger.error(f"Failed to load the model or the tokenizer: {error}") + self.logger.error( + f"Failed to load the model or the tokenizer: {error}" + ) raise def run(self, prompt_text: str): diff --git a/swarms/prompts/agent_prompt.py b/swarms/prompts/agent_prompt.py index c4897193..b36aea19 100644 --- a/swarms/prompts/agent_prompt.py +++ b/swarms/prompts/agent_prompt.py @@ -15,7 +15,9 @@ class PromptGenerator: "thoughts": { "text": "thought", "reasoning": "reasoning", - "plan": "- short bulleted\n- list that conveys\n- long-term plan", + "plan": ( + "- short bulleted\n- list that conveys\n- long-term plan" + ), "criticism": "constructive self-criticism", "speak": "thoughts summary to 
say to user", }, @@ -66,13 +68,11 @@ class PromptGenerator: """ formatted_response_format = json.dumps(self.response_format, indent=4) prompt_string = ( - f"Constraints:\n{''.join(self.constraints)}\n\n" - f"Commands:\n{''.join(self.commands)}\n\n" - f"Resources:\n{''.join(self.resources)}\n\n" - f"Performance Evaluation:\n{''.join(self.performance_evaluation)}\n\n" - "You should only respond in JSON format as described below " - f"\nResponse Format: \n{formatted_response_format} " - "\nEnsure the response can be parsed by Python json.loads" + f"Constraints:\n{''.join(self.constraints)}\n\nCommands:\n{''.join(self.commands)}\n\nResources:\n{''.join(self.resources)}\n\nPerformance" + f" Evaluation:\n{''.join(self.performance_evaluation)}\n\nYou" + " should only respond in JSON format as described below \nResponse" + f" Format: \n{formatted_response_format} \nEnsure the response can" + " be parsed by Python json.loads" ) return prompt_string diff --git a/swarms/prompts/agent_prompts.py b/swarms/prompts/agent_prompts.py index 8d145fc0..a8c3fca7 100644 --- a/swarms/prompts/agent_prompts.py +++ b/swarms/prompts/agent_prompts.py @@ -5,26 +5,26 @@ def generate_agent_role_prompt(agent): """ prompts = { "Finance Agent": ( - "You are a seasoned finance analyst AI assistant. Your primary goal is to" - " compose comprehensive, astute, impartial, and methodically arranged" - " financial reports based on provided data and trends." + "You are a seasoned finance analyst AI assistant. Your primary goal" + " is to compose comprehensive, astute, impartial, and methodically" + " arranged financial reports based on provided data and trends." ), "Travel Agent": ( - "You are a world-travelled AI tour guide assistant. Your main purpose is to" - " draft engaging, insightful, unbiased, and well-structured travel reports" - " on given locations, including history, attractions, and cultural" - " insights." + "You are a world-travelled AI tour guide assistant. Your main" + " purpose is to draft engaging, insightful, unbiased, and" + " well-structured travel reports on given locations, including" + " history, attractions, and cultural insights." ), "Academic Research Agent": ( - "You are an AI academic research assistant. Your primary responsibility is" - " to create thorough, academically rigorous, unbiased, and systematically" - " organized reports on a given research topic, following the standards of" - " scholarly work." + "You are an AI academic research assistant. Your primary" + " responsibility is to create thorough, academically rigorous," + " unbiased, and systematically organized reports on a given" + " research topic, following the standards of scholarly work." ), "Default Agent": ( - "You are an AI critical thinker research assistant. Your sole purpose is to" - " write well written, critically acclaimed, objective and structured" - " reports on given text." + "You are an AI critical thinker research assistant. Your sole" + " purpose is to write well written, critically acclaimed, objective" + " and structured reports on given text." ), } @@ -39,12 +39,12 @@ def generate_report_prompt(question, research_summary): """ return ( - f'"""{research_summary}""" Using the above information, answer the following' - f' question or topic: "{question}" in a detailed report -- The report should' - " focus on the answer to the question, should be well structured, informative," - " in depth, with facts and numbers if available, a minimum of 1,200 words and" - " with markdown syntax and apa format. 
Write all source urls at the end of the"
-        " report in apa format"
+        f'"""{research_summary}""" Using the above information, answer the'
+        f' following question or topic: "{question}" in a detailed report --'
+        " The report should focus on the answer to the question, should be"
+        " well structured, informative, in depth, with facts and numbers if"
+        " available, a minimum of 1,200 words and with markdown syntax and apa"
+        " format. Write all source urls at the end of the report in apa format"
     )
 
 
@@ -55,9 +55,10 @@ def generate_search_queries_prompt(question):
     """
 
     return (
-        "Write 4 google search queries to search online that form an objective opinion"
-        f' from the following: "{question}"You must respond with a list of strings in'
-        ' the following format: ["query 1", "query 2", "query 3", "query 4"]'
+        "Write 4 google search queries to search online that form an objective"
+        f' opinion from the following: "{question}" You must respond with a list'
+        ' of strings in the following format: ["query 1", "query 2", "query'
+        ' 3", "query 4"]'
     )
 
 
@@ -73,14 +74,15 @@ def generate_resource_report_prompt(question, research_summary):
     """
     return (
         f'"""{research_summary}""" Based on the above information, generate a'
-        " bibliography recommendation report for the following question or topic:"
-        f' "{question}". The report should provide a detailed analysis of each'
-        " recommended resource, explaining how each source can contribute to finding"
-        " answers to the research question. Focus on the relevance, reliability, and"
-        " significance of each source. Ensure that the report is well-structured,"
-        " informative, in-depth, and follows Markdown syntax. Include relevant facts,"
-        " figures, and numbers whenever available. The report should have a minimum"
-        " length of 1,200 words."
+        " bibliography recommendation report for the following question or"
+        f' topic: "{question}". The report should provide a detailed analysis'
+        " of each recommended resource, explaining how each source can"
+        " contribute to finding answers to the research question. Focus on the"
+        " relevance, reliability, and significance of each source. Ensure that"
+        " the report is well-structured, informative, in-depth, and follows"
+        " Markdown syntax. Include relevant facts, figures, and numbers"
+        " whenever available. The report should have a minimum length of 1,200"
+        " words."
     )
 
 
@@ -92,13 +94,14 @@ def generate_outline_report_prompt(question, research_summary):
     """
 
     return (
-        f'"""{research_summary}""" Using the above information, generate an outline for'
-        " a research report in Markdown syntax for the following question or topic:"
-        f' "{question}". The outline should provide a well-structured framework for the'
-        " research report, including the main sections, subsections, and key points to"
-        " be covered. The research report should be detailed, informative, in-depth,"
-        " and a minimum of 1,200 words. Use appropriate Markdown syntax to format the"
-        " outline and ensure readability."
+        f'"""{research_summary}""" Using the above information, generate an'
+        " outline for a research report in Markdown syntax for the following"
+        f' question or topic: "{question}". The outline should provide a'
+        " well-structured framework for the research report, including the"
+        " main sections, subsections, and key points to be covered. The"
+        " research report should be detailed, informative, in-depth, and a"
+        " minimum of 1,200 words. Use appropriate Markdown syntax to format"
+        " the outline and ensure readability."
)
 
 
@@ -110,11 +113,12 @@ def generate_concepts_prompt(question, research_summary):
     """
 
     return (
-        f'"""{research_summary}""" Using the above information, generate a list of 5'
-        " main concepts to learn for a research report on the following question or"
-        f' topic: "{question}". The outline should provide a well-structured'
-        " frameworkYou must respond with a list of strings in the following format:"
-        ' ["concepts 1", "concepts 2", "concepts 3", "concepts 4, concepts 5"]'
+        f'"""{research_summary}""" Using the above information, generate a list'
+        " of 5 main concepts to learn for a research report on the following"
+        f' question or topic: "{question}". The outline should provide a'
+        " well-structured framework. You must respond with a list of strings in"
+        ' the following format: ["concepts 1", "concepts 2", "concepts 3",'
+        ' "concepts 4", "concepts 5"]'
     )
 
 
@@ -128,10 +132,10 @@ def generate_lesson_prompt(concept):
     """
 
     prompt = (
-        f"generate a comprehensive lesson about {concept} in Markdown syntax. This"
-        f" should include the definitionof {concept}, its historical background and"
-        " development, its applications or uses in differentfields, and notable events"
-        f" or facts related to {concept}."
+        f"generate a comprehensive lesson about {concept} in Markdown syntax."
+        f" This should include the definition of {concept}, its historical"
+        " background and development, its applications or uses in"
+        f" different fields, and notable events or facts related to {concept}."
    )
 
     return prompt
diff --git a/swarms/prompts/base.py b/swarms/prompts/base.py
index 54a0bc3f..369063e6 100644
--- a/swarms/prompts/base.py
+++ b/swarms/prompts/base.py
@@ -12,7 +12,9 @@ if TYPE_CHECKING:
 
 
 def get_buffer_string(
-    messages: Sequence[BaseMessage], human_prefix: str = "Human", ai_prefix: str = "AI"
+    messages: Sequence[BaseMessage],
+    human_prefix: str = "Human",
+    ai_prefix: str = "AI",
 ) -> str:
     """Convert sequence of Messages to strings and concatenate them into one string.
 
diff --git a/swarms/prompts/chat_prompt.py b/swarms/prompts/chat_prompt.py
index d1e08df9..bbdaa9c7 100644
--- a/swarms/prompts/chat_prompt.py
+++ b/swarms/prompts/chat_prompt.py
@@ -105,7 +105,9 @@ class ChatMessage(Message):
 
 
 def get_buffer_string(
-    messages: Sequence[Message], human_prefix: str = "Human", ai_prefix: str = "AI"
+    messages: Sequence[Message],
+    human_prefix: str = "Human",
+    ai_prefix: str = "AI",
 ) -> str:
     string_messages = []
     for m in messages:
diff --git a/swarms/prompts/multi_modal_prompts.py b/swarms/prompts/multi_modal_prompts.py
index b552b68d..1c0830d6 100644
--- a/swarms/prompts/multi_modal_prompts.py
+++ b/swarms/prompts/multi_modal_prompts.py
@@ -1,6 +1,6 @@
 ERROR_PROMPT = (
-    "An error has occurred for the following text: \n{promptedQuery} Please explain"
-    " this error.\n {e}"
+    "An error has occurred for the following text: \n{promptedQuery} Please"
+    " explain this error.\n {e}"
 )
 
 IMAGE_PROMPT = """
diff --git a/swarms/prompts/python.py b/swarms/prompts/python.py
index 9d1f4a1e..46df5cdc 100644
--- a/swarms/prompts/python.py
+++ b/swarms/prompts/python.py
@@ -1,16 +1,17 @@
 PY_SIMPLE_COMPLETION_INSTRUCTION = "# Write the body of this function only."
 PY_REFLEXION_COMPLETION_INSTRUCTION = (
     "You are a Python writing assistant. You will be given your past function"
-    " implementation, a series of unit tests, and a hint to change the implementation"
-    " appropriately. 
Write your full implementation (restate the function" - " signature).\n\n-----" + " implementation, a series of unit tests, and a hint to change the" + " implementation appropriately. Write your full implementation (restate the" + " function signature).\n\n-----" ) PY_SELF_REFLECTION_COMPLETION_INSTRUCTION = ( - "You are a Python writing assistant. You will be given a function implementation" - " and a series of unit tests. Your goal is to write a few sentences to explain why" - " your implementation is wrong as indicated by the tests. You will need this as a" - " hint when you try again later. Only provide the few sentence description in your" - " answer, not the implementation.\n\n-----" + "You are a Python writing assistant. You will be given a function" + " implementation and a series of unit tests. Your goal is to write a few" + " sentences to explain why your implementation is wrong as indicated by the" + " tests. You will need this as a hint when you try again later. Only" + " provide the few sentence description in your answer, not the" + " implementation.\n\n-----" ) USE_PYTHON_CODEBLOCK_INSTRUCTION = ( "Use a Python code block to write your response. For" @@ -18,25 +19,26 @@ USE_PYTHON_CODEBLOCK_INSTRUCTION = ( ) PY_SIMPLE_CHAT_INSTRUCTION = ( - "You are an AI that only responds with python code, NOT ENGLISH. You will be given" - " a function signature and its docstring by the user. Write your full" - " implementation (restate the function signature)." + "You are an AI that only responds with python code, NOT ENGLISH. You will" + " be given a function signature and its docstring by the user. Write your" + " full implementation (restate the function signature)." ) PY_SIMPLE_CHAT_INSTRUCTION_V2 = ( - "You are an AI that only responds with only python code. You will be given a" - " function signature and its docstring by the user. Write your full implementation" - " (restate the function signature)." + "You are an AI that only responds with only python code. You will be given" + " a function signature and its docstring by the user. Write your full" + " implementation (restate the function signature)." ) PY_REFLEXION_CHAT_INSTRUCTION = ( "You are an AI Python assistant. You will be given your past function" - " implementation, a series of unit tests, and a hint to change the implementation" - " appropriately. Write your full implementation (restate the function signature)." + " implementation, a series of unit tests, and a hint to change the" + " implementation appropriately. Write your full implementation (restate the" + " function signature)." ) PY_REFLEXION_CHAT_INSTRUCTION_V2 = ( - "You are an AI Python assistant. You will be given your previous implementation of" - " a function, a series of unit tests results, and your self-reflection on your" - " previous implementation. Write your full implementation (restate the function" - " signature)." + "You are an AI Python assistant. You will be given your previous" + " implementation of a function, a series of unit tests results, and your" + " self-reflection on your previous implementation. Write your full" + " implementation (restate the function signature)." ) PY_REFLEXION_FEW_SHOT_ADD = '''Example 1: [previous impl]: @@ -172,18 +174,19 @@ END EXAMPLES ''' PY_SELF_REFLECTION_CHAT_INSTRUCTION = ( "You are a Python programming assistant. You will be given a function" - " implementation and a series of unit tests. Your goal is to write a few sentences" - " to explain why your implementation is wrong as indicated by the tests. 
You will" - " need this as a hint when you try again later. Only provide the few sentence" - " description in your answer, not the implementation." + " implementation and a series of unit tests. Your goal is to write a few" + " sentences to explain why your implementation is wrong as indicated by the" + " tests. You will need this as a hint when you try again later. Only" + " provide the few sentence description in your answer, not the" + " implementation." ) PY_SELF_REFLECTION_CHAT_INSTRUCTION_V2 = ( "You are a Python programming assistant. You will be given a function" - " implementation and a series of unit test results. Your goal is to write a few" - " sentences to explain why your implementation is wrong as indicated by the tests." - " You will need this as guidance when you try again later. Only provide the few" - " sentence description in your answer, not the implementation. You will be given a" - " few examples by the user." + " implementation and a series of unit test results. Your goal is to write a" + " few sentences to explain why your implementation is wrong as indicated by" + " the tests. You will need this as guidance when you try again later. Only" + " provide the few sentence description in your answer, not the" + " implementation. You will be given a few examples by the user." ) PY_SELF_REFLECTION_FEW_SHOT = """Example 1: [function impl]: diff --git a/swarms/prompts/sales.py b/swarms/prompts/sales.py index 4f04f7fc..3a362174 100644 --- a/swarms/prompts/sales.py +++ b/swarms/prompts/sales.py @@ -1,23 +1,26 @@ conversation_stages = { "1": ( - "Introduction: Start the conversation by introducing yourself and your company." - " Be polite and respectful while keeping the tone of the conversation" - " professional. Your greeting should be welcoming. Always clarify in your" - " greeting the reason why you are contacting the prospect." + "Introduction: Start the conversation by introducing yourself and your" + " company. Be polite and respectful while keeping the tone of the" + " conversation professional. Your greeting should be welcoming. Always" + " clarify in your greeting the reason why you are contacting the" + " prospect." ), "2": ( - "Qualification: Qualify the prospect by confirming if they are the right person" - " to talk to regarding your product/service. Ensure that they have the" - " authority to make purchasing decisions." + "Qualification: Qualify the prospect by confirming if they are the" + " right person to talk to regarding your product/service. Ensure that" + " they have the authority to make purchasing decisions." ), "3": ( - "Value proposition: Briefly explain how your product/service can benefit the" - " prospect. Focus on the unique selling points and value proposition of your" - " product/service that sets it apart from competitors." + "Value proposition: Briefly explain how your product/service can" + " benefit the prospect. Focus on the unique selling points and value" + " proposition of your product/service that sets it apart from" + " competitors." ), "4": ( - "Needs analysis: Ask open-ended questions to uncover the prospect's needs and" - " pain points. Listen carefully to their responses and take notes." + "Needs analysis: Ask open-ended questions to uncover the prospect's" + " needs and pain points. Listen carefully to their responses and take" + " notes." ), "5": ( "Solution presentation: Based on the prospect's needs, present your" @@ -29,9 +32,9 @@ conversation_stages = { " testimonials to support your claims." 
), "7": ( - "Close: Ask for the sale by proposing a next step. This could be a demo, a" - " trial or a meeting with decision-makers. Ensure to summarize what has been" - " discussed and reiterate the benefits." + "Close: Ask for the sale by proposing a next step. This could be a" + " demo, a trial or a meeting with decision-makers. Ensure to summarize" + " what has been discussed and reiterate the benefits." ), } diff --git a/swarms/prompts/sales_prompts.py b/swarms/prompts/sales_prompts.py index 3f2b9f2b..7c1f50ed 100644 --- a/swarms/prompts/sales_prompts.py +++ b/swarms/prompts/sales_prompts.py @@ -46,24 +46,27 @@ Conversation history: conversation_stages = { "1": ( - "Introduction: Start the conversation by introducing yourself and your company." - " Be polite and respectful while keeping the tone of the conversation" - " professional. Your greeting should be welcoming. Always clarify in your" - " greeting the reason why you are contacting the prospect." + "Introduction: Start the conversation by introducing yourself and your" + " company. Be polite and respectful while keeping the tone of the" + " conversation professional. Your greeting should be welcoming. Always" + " clarify in your greeting the reason why you are contacting the" + " prospect." ), "2": ( - "Qualification: Qualify the prospect by confirming if they are the right person" - " to talk to regarding your product/service. Ensure that they have the" - " authority to make purchasing decisions." + "Qualification: Qualify the prospect by confirming if they are the" + " right person to talk to regarding your product/service. Ensure that" + " they have the authority to make purchasing decisions." ), "3": ( - "Value proposition: Briefly explain how your product/service can benefit the" - " prospect. Focus on the unique selling points and value proposition of your" - " product/service that sets it apart from competitors." + "Value proposition: Briefly explain how your product/service can" + " benefit the prospect. Focus on the unique selling points and value" + " proposition of your product/service that sets it apart from" + " competitors." ), "4": ( - "Needs analysis: Ask open-ended questions to uncover the prospect's needs and" - " pain points. Listen carefully to their responses and take notes." + "Needs analysis: Ask open-ended questions to uncover the prospect's" + " needs and pain points. Listen carefully to their responses and take" + " notes." ), "5": ( "Solution presentation: Based on the prospect's needs, present your" @@ -75,8 +78,8 @@ conversation_stages = { " testimonials to support your claims." ), "7": ( - "Close: Ask for the sale by proposing a next step. This could be a demo, a" - " trial or a meeting with decision-makers. Ensure to summarize what has been" - " discussed and reiterate the benefits." + "Close: Ask for the sale by proposing a next step. This could be a" + " demo, a trial or a meeting with decision-makers. Ensure to summarize" + " what has been discussed and reiterate the benefits." 
), } diff --git a/swarms/structs/autoscaler.py b/swarms/structs/autoscaler.py index be79a860..97e8a5ae 100644 --- a/swarms/structs/autoscaler.py +++ b/swarms/structs/autoscaler.py @@ -7,7 +7,11 @@ from typing import Callable, Dict, List from termcolor import colored from swarms.structs.flow import Flow -from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator +from swarms.utils.decorators import ( + error_decorator, + log_decorator, + timing_decorator, +) class AutoScaler: @@ -69,7 +73,9 @@ class AutoScaler: try: self.tasks_queue.put(task) except Exception as error: - print(f"Error adding task to queue: {error} try again with a new task") + print( + f"Error adding task to queue: {error} try again with a new task" + ) @log_decorator @error_decorator @@ -108,10 +114,15 @@ class AutoScaler: if pending_tasks / len(self.agents_pool) > self.busy_threshold: self.scale_up() - elif active_agents / len(self.agents_pool) < self.idle_threshold: + elif ( + active_agents / len(self.agents_pool) < self.idle_threshold + ): self.scale_down() except Exception as error: - print(f"Error monitoring and scaling: {error} try again with a new task") + print( + f"Error monitoring and scaling: {error} try again with a new" + " task" + ) @log_decorator @error_decorator @@ -125,7 +136,9 @@ class AutoScaler: while True: task = self.task_queue.get() if task: - available_agent = next((agent for agent in self.agents_pool)) + available_agent = next( + (agent for agent in self.agents_pool) + ) if available_agent: available_agent.run(task) except Exception as error: diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py index aa0060b4..166d619e 100644 --- a/swarms/structs/flow.py +++ b/swarms/structs/flow.py @@ -348,7 +348,8 @@ class Flow: return "\n".join(tool_descriptions) except Exception as error: print( - f"Error getting tool description: {error} try adding a description to the tool or removing the tool" + f"Error getting tool description: {error} try adding a" + " description to the tool or removing the tool" ) else: return "No tools available" @@ -479,8 +480,12 @@ class Flow: print(colored("Initializing Autonomous Agent...", "yellow")) # print(colored("Loading modules...", "yellow")) # print(colored("Modules loaded successfully.", "green")) - print(colored("Autonomous Agent Activated.", "cyan", attrs=["bold"])) - print(colored("All systems operational. Executing task...", "green")) + print( + colored("Autonomous Agent Activated.", "cyan", attrs=["bold"]) + ) + print( + colored("All systems operational. 
Executing task...", "green") + ) except Exception as error: print( colored( @@ -525,14 +530,16 @@ class Flow: loop_count = 0 while self.max_loops == "auto" or loop_count < self.max_loops: loop_count += 1 - print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")) + print( + colored(f"\nLoop {loop_count} of {self.max_loops}", "blue") + ) print("\n") # Check to see if stopping token is in the output to stop the loop if self.stopping_token: - if self._check_stopping_condition(response) or parse_done_token( + if self._check_stopping_condition( response - ): + ) or parse_done_token(response): break # Adjust temperature, comment if no work @@ -629,7 +636,9 @@ class Flow: print(colored(f"\nLoop {loop_count} of {self.max_loops}", "blue")) print("\n") - if self._check_stopping_condition(response) or parse_done_token(response): + if self._check_stopping_condition(response) or parse_done_token( + response + ): break # Adjust temperature, comment if no work @@ -949,7 +958,8 @@ class Flow: if hasattr(self.llm, name): value = getattr(self.llm, name) if isinstance( - value, (str, int, float, bool, list, dict, tuple, type(None)) + value, + (str, int, float, bool, list, dict, tuple, type(None)), ): llm_params[name] = value else: @@ -1010,7 +1020,9 @@ class Flow: print(f"Flow state loaded from {file_path}") - def retry_on_failure(self, function, retries: int = 3, retry_delay: int = 1): + def retry_on_failure( + self, function, retries: int = 3, retry_delay: int = 1 + ): """Retry wrapper for LLM calls.""" attempt = 0 while attempt < retries: diff --git a/swarms/structs/non_linear_workflow.py b/swarms/structs/non_linear_workflow.py index 22cef91e..79bc0af7 100644 --- a/swarms/structs/non_linear_workflow.py +++ b/swarms/structs/non_linear_workflow.py @@ -7,7 +7,11 @@ from typing import Callable, List, Dict, Any, Sequence class Task: def __init__( - self, id: str, task: str, flows: Sequence[Flow], dependencies: List[str] = [] + self, + id: str, + task: str, + flows: Sequence[Flow], + dependencies: List[str] = [], ): self.id = id self.task = task @@ -20,7 +24,9 @@ class Task: for flow in self.flows: result = flow.run(self.task, *args) self.results.append(result) - args = [result] # The output of one flow becomes the input to the next + args = [ + result + ] # The output of one flow becomes the input to the next class Workflow: @@ -41,7 +47,10 @@ class Workflow: ): future = self.executor.submit( task.execute, - {dep: self.tasks[dep].results for dep in task.dependencies}, + { + dep: self.tasks[dep].results + for dep in task.dependencies + }, ) futures.append((future, task.id)) diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py index 22ae4a21..1d7f411d 100644 --- a/swarms/structs/sequential_workflow.py +++ b/swarms/structs/sequential_workflow.py @@ -113,7 +113,9 @@ class SequentialWorkflow: restore_state_filepath: Optional[str] = None dashboard: bool = False - def add(self, task: str, flow: Union[Callable, Flow], *args, **kwargs) -> None: + def add( + self, task: str, flow: Union[Callable, Flow], *args, **kwargs + ) -> None: """ Add a task to the workflow. @@ -182,7 +184,9 @@ class SequentialWorkflow: raise ValueError(f"Task {task_description} not found in workflow.") def save_workflow_state( - self, filepath: Optional[str] = "sequential_workflow_state.json", **kwargs + self, + filepath: Optional[str] = "sequential_workflow_state.json", + **kwargs, ) -> None: """ Saves the workflow state to a json file. 
@@ -348,8 +352,9 @@ class SequentialWorkflow: # Ensure that 'task' is provided in the kwargs if "task" not in task.kwargs: raise ValueError( - "The 'task' argument is required for the Flow flow" - f" execution in '{task.description}'" + "The 'task' argument is required for the" + " Flow flow execution in" + f" '{task.description}'" ) # Separate the 'task' argument from other kwargs flow_task_arg = task.kwargs.pop("task") @@ -373,7 +378,9 @@ class SequentialWorkflow: # Autosave the workflow state if self.autosave: - self.save_workflow_state("sequential_workflow_state.json") + self.save_workflow_state( + "sequential_workflow_state.json" + ) except Exception as e: print( colored( @@ -404,8 +411,8 @@ class SequentialWorkflow: # Ensure that 'task' is provided in the kwargs if "task" not in task.kwargs: raise ValueError( - "The 'task' argument is required for the Flow flow" - f" execution in '{task.description}'" + "The 'task' argument is required for the Flow" + f" flow execution in '{task.description}'" ) # Separate the 'task' argument from other kwargs flow_task_arg = task.kwargs.pop("task") @@ -429,4 +436,6 @@ class SequentialWorkflow: # Autosave the workflow state if self.autosave: - self.save_workflow_state("sequential_workflow_state.json") + self.save_workflow_state( + "sequential_workflow_state.json" + ) diff --git a/swarms/swarms/autobloggen.py b/swarms/swarms/autobloggen.py index dec2620f..d732606b 100644 --- a/swarms/swarms/autobloggen.py +++ b/swarms/swarms/autobloggen.py @@ -103,7 +103,9 @@ class AutoBlogGenSwarm: review_agent = self.print_beautifully("Review Agent", review_agent) # Agent that publishes on social media - distribution_agent = self.llm(self.social_media_prompt(article=review_agent)) + distribution_agent = self.llm( + self.social_media_prompt(article=review_agent) + ) distribution_agent = self.print_beautifully( "Distribution Agent", distribution_agent ) @@ -115,7 +117,11 @@ class AutoBlogGenSwarm: for i in range(self.iterations): self.step() except Exception as error: - print(colored(f"Error while running AutoBlogGenSwarm {error}", "red")) + print( + colored( + f"Error while running AutoBlogGenSwarm {error}", "red" + ) + ) if attempt == self.retry_attempts - 1: raise diff --git a/swarms/swarms/base.py b/swarms/swarms/base.py index e99c9b38..1ccc819c 100644 --- a/swarms/swarms/base.py +++ b/swarms/swarms/base.py @@ -117,7 +117,9 @@ class AbstractSwarm(ABC): pass @abstractmethod - def broadcast(self, message: str, sender: Optional["AbstractWorker"] = None): + def broadcast( + self, message: str, sender: Optional["AbstractWorker"] = None + ): """Broadcast a message to all workers""" pass diff --git a/swarms/swarms/dialogue_simulator.py b/swarms/swarms/dialogue_simulator.py index ec86c414..2775daf0 100644 --- a/swarms/swarms/dialogue_simulator.py +++ b/swarms/swarms/dialogue_simulator.py @@ -23,7 +23,9 @@ class DialogueSimulator: >>> model.run("test") """ - def __init__(self, agents: List[Callable], max_iters: int = 10, name: str = None): + def __init__( + self, agents: List[Callable], max_iters: int = 10, name: str = None + ): self.agents = agents self.max_iters = max_iters self.name = name @@ -45,7 +47,8 @@ class DialogueSimulator: for receiver in self.agents: message_history = ( - f"Speaker Name: {speaker.name} and message: {speaker_message}" + f"Speaker Name: {speaker.name} and message:" + f" {speaker_message}" ) receiver.run(message_history) @@ -56,7 +59,9 @@ class DialogueSimulator: print(f"Error running dialogue simulator: {error}") def __repr__(self): - return 
f"DialogueSimulator({self.agents}, {self.max_iters}, {self.name})" + return ( + f"DialogueSimulator({self.agents}, {self.max_iters}, {self.name})" + ) def save_state(self): """Save the state of the dialogue simulator""" diff --git a/swarms/swarms/god_mode.py b/swarms/swarms/god_mode.py index e75d81d2..65377308 100644 --- a/swarms/swarms/god_mode.py +++ b/swarms/swarms/god_mode.py @@ -64,7 +64,8 @@ class GodMode: table.append([f"LLM {i+1}", response]) print( colored( - tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), "cyan" + tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), + "cyan", ) ) @@ -83,7 +84,8 @@ class GodMode: table.append([f"LLM {i+1}", response]) print( colored( - tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), "cyan" + tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), + "cyan", ) ) @@ -115,11 +117,13 @@ class GodMode: print(f"{i + 1}. {task}") print("\nLast Responses:") table = [ - [f"LLM {i+1}", response] for i, response in enumerate(self.last_responses) + [f"LLM {i+1}", response] + for i, response in enumerate(self.last_responses) ] print( colored( - tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), "cyan" + tabulate(table, headers=["LLM", "Response"], tablefmt="pretty"), + "cyan", ) ) @@ -137,7 +141,8 @@ class GodMode: """Asynchronous run the task string""" loop = asyncio.get_event_loop() futures = [ - loop.run_in_executor(None, lambda llm: llm(task), llm) for llm in self.llms + loop.run_in_executor(None, lambda llm: llm(task), llm) + for llm in self.llms ] for response in await asyncio.gather(*futures): print(response) @@ -145,13 +150,18 @@ class GodMode: def concurrent_run(self, task: str) -> List[str]: """Synchronously run the task on all llms and collect responses""" with ThreadPoolExecutor() as executor: - future_to_llm = {executor.submit(llm, task): llm for llm in self.llms} + future_to_llm = { + executor.submit(llm, task): llm for llm in self.llms + } responses = [] for future in as_completed(future_to_llm): try: responses.append(future.result()) except Exception as error: - print(f"{future_to_llm[future]} generated an exception: {error}") + print( + f"{future_to_llm[future]} generated an exception:" + f" {error}" + ) self.last_responses = responses self.task_history.append(task) return responses diff --git a/swarms/swarms/groupchat.py b/swarms/swarms/groupchat.py index 5cff3263..76de7e16 100644 --- a/swarms/swarms/groupchat.py +++ b/swarms/swarms/groupchat.py @@ -47,7 +47,9 @@ class GroupChat: def next_agent(self, agent: Flow) -> Flow: """Return the next agent in the list.""" - return self.agents[(self.agent_names.index(agent.name) + 1) % len(self.agents)] + return self.agents[ + (self.agent_names.index(agent.name) + 1) % len(self.agents) + ] def select_speaker_msg(self): """Return the message for selecting the next speaker.""" @@ -78,9 +80,9 @@ class GroupChat: { "role": "system", "content": ( - "Read the above conversation. Then select the next most" - f" suitable role from {self.agent_names} to play. Only" - " return the role." + "Read the above conversation. Then select the next" + f" most suitable role from {self.agent_names} to" + " play. Only return the role." 
),
                 }
             ]
@@ -126,7 +128,9 @@ class GroupChatManager:
         self.selector = selector
 
     def __call__(self, task: str):
-        self.groupchat.messages.append({"role": self.selector.name, "content": task})
+        self.groupchat.messages.append(
+            {"role": self.selector.name, "content": task}
+        )
         for i in range(self.groupchat.max_round):
             speaker = self.groupchat.select_speaker(
                 last_speaker=self.selector, selector=self.selector
diff --git a/swarms/swarms/multi_agent_collab.py b/swarms/swarms/multi_agent_collab.py
index 85d9955b..98f32d47 100644
--- a/swarms/swarms/multi_agent_collab.py
+++ b/swarms/swarms/multi_agent_collab.py
@@ -13,8 +13,8 @@ from swarms.utils.logger import logger
 class BidOutputParser(RegexParser):
     def get_format_instructions(self) -> str:
         return (
-            "Your response should be an integrater delimited by angled brackets like"
-            " this: "
+            "Your response should be an integer delimited by angled brackets"
+            " like this: "
         )
 
 
@@ -194,11 +194,15 @@ class MultiAgentCollaboration:
             print("\n")
             n += 1
 
-    def select_next_speaker_roundtable(self, step: int, agents: List[Flow]) -> int:
+    def select_next_speaker_roundtable(
+        self, step: int, agents: List[Flow]
+    ) -> int:
         """Selects the next speaker."""
         return step % len(agents)
 
-    def select_next_speaker_director(step: int, agents: List[Flow], director) -> int:
+    def select_next_speaker_director(
+        step: int, agents: List[Flow], director
+    ) -> int:
         # if the step if even => director
         # => director selects next speaker
         if step % 2 == 1:
@@ -265,7 +269,10 @@ class MultiAgentCollaboration:
     def format_results(self, results):
         """Formats the results of the run method"""
         formatted_results = "\n".join(
-            [f"{result['agent']} responded: {result['response']}" for result in results]
+            [
+                f"{result['agent']} responded: {result['response']}"
+                for result in results
+            ]
         )
         return formatted_results
 
@@ -291,7 +298,12 @@ class MultiAgentCollaboration:
         return state
 
     def __repr__(self):
-        return f"MultiAgentCollaboration(agents={self.agents}, selection_function={self.select_next_speaker}, max_iters={self.max_iters}, autosave={self.autosave}, saved_file_path_name={self.saved_file_path_name})"
+        return (
+            f"MultiAgentCollaboration(agents={self.agents},"
+            f" selection_function={self.select_next_speaker},"
+            f" max_iters={self.max_iters}, autosave={self.autosave},"
+            f" saved_file_path_name={self.saved_file_path_name})"
+        )
 
     def performance(self):
         """Tracks and reports the performance of each agent"""
diff --git a/swarms/swarms/orchestrate.py b/swarms/swarms/orchestrate.py
index f522911b..b7a7d0e0 100644
--- a/swarms/swarms/orchestrate.py
+++ b/swarms/swarms/orchestrate.py
@@ -111,7 +111,9 @@ class Orchestrator:
 
         self.chroma_client = chromadb.Client()
 
-        self.collection = self.chroma_client.create_collection(name=collection_name)
+        self.collection = self.chroma_client.create_collection(
+            name=collection_name
+        )
 
         self.current_tasks = {}
 
@@ -148,13 +150,14 @@ class Orchestrator:
                 )
 
                 logging.info(
-                    f"Task {id(str)} has been processed by agent {id(agent)} with"
+                    f"Task {id(task)} has been processed by agent"
+                    f" {id(agent)} with"
                 )
 
             except Exception as error:
                 logging.error(
-                    f"Failed to process task {id(task)} by agent {id(agent)}. Error:"
-                    f" {error}"
+                    f"Failed to process task {id(task)} by agent {id(agent)}."
+ f" Error: {error}" ) finally: with self.condition: @@ -175,7 +178,9 @@ class Orchestrator: try: # Query the vector database for documents created by the agents - results = self.collection.query(query_texts=[str(agent_id)], n_results=10) + results = self.collection.query( + query_texts=[str(agent_id)], n_results=10 + ) return results except Exception as e: @@ -212,7 +217,9 @@ class Orchestrator: self.collection.add(documents=[result], ids=[str(id(result))]) except Exception as e: - logging.error(f"Failed to append the agent output to database. Error: {e}") + logging.error( + f"Failed to append the agent output to database. Error: {e}" + ) raise def run(self, objective: str): @@ -226,7 +233,9 @@ class Orchestrator: results = [ self.assign_task(agent_id, task) - for agent_id, task in zip(range(len(self.agents)), self.task_queue) + for agent_id, task in zip( + range(len(self.agents)), self.task_queue + ) ] for result in results: diff --git a/swarms/tools/autogpt.py b/swarms/tools/autogpt.py index cf5450e6..07062d11 100644 --- a/swarms/tools/autogpt.py +++ b/swarms/tools/autogpt.py @@ -6,7 +6,9 @@ from typing import Optional import pandas as pd import torch from langchain.agents import tool -from langchain.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent +from langchain.agents.agent_toolkits.pandas.base import ( + create_pandas_dataframe_agent, +) from langchain.chains.qa_with_sources.loading import ( BaseCombineDocumentsChain, ) @@ -38,7 +40,10 @@ def pushd(new_dir): @tool def process_csv( - llm, csv_file_path: str, instructions: str, output_path: Optional[str] = None + llm, + csv_file_path: str, + instructions: str, + output_path: Optional[str] = None, ) -> str: """Process a CSV by with pandas in a limited REPL.\ Only use this after writing data to disk as a csv file.\ @@ -49,7 +54,9 @@ def process_csv( df = pd.read_csv(csv_file_path) except Exception as e: return f"Error: {e}" - agent = create_pandas_dataframe_agent(llm, df, max_iterations=30, verbose=False) + agent = create_pandas_dataframe_agent( + llm, df, max_iterations=30, verbose=False + ) if output_path is not None: instructions += f" Save output to disk at {output_path}" try: @@ -79,7 +86,9 @@ async def async_load_playwright(url: str) -> str: text = soup.get_text() lines = (line.strip() for line in text.splitlines()) - chunks = (phrase.strip() for line in lines for phrase in line.split(" ")) + chunks = ( + phrase.strip() for line in lines for phrase in line.split(" ") + ) results = "\n".join(chunk for chunk in chunks if chunk) except Exception as e: results = f"Error: {e}" @@ -110,7 +119,8 @@ def _get_text_splitter(): class WebpageQATool(BaseTool): name = "query_webpage" description = ( - "Browse a webpage and retrieve the information relevant to the question." + "Browse a webpage and retrieve the information relevant to the" + " question." 
)
     text_splitter: RecursiveCharacterTextSplitter = Field(
         default_factory=_get_text_splitter
@@ -176,7 +186,9 @@ def VQAinference(self, inputs):
     image_path, question = inputs.split(",")
     raw_image = Image.open(image_path).convert("RGB")
 
-    inputs = processor(raw_image, question, return_tensors="pt").to(device, torch_dtype)
+    inputs = processor(raw_image, question, return_tensors="pt").to(
+        device, torch_dtype
+    )
     out = model.generate(**inputs)
     answer = processor.decode(out[0], skip_special_tokens=True)
 
diff --git a/swarms/tools/mm_models.py b/swarms/tools/mm_models.py
index 58fe11e5..a218ff50 100644
--- a/swarms/tools/mm_models.py
+++ b/swarms/tools/mm_models.py
@@ -28,7 +28,9 @@ class MaskFormer:
     def __init__(self, device):
         print("Initializing MaskFormer to %s" % device)
         self.device = device
-        self.processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
+        self.processor = CLIPSegProcessor.from_pretrained(
+            "CIDAS/clipseg-rd64-refined"
+        )
         self.model = CLIPSegForImageSegmentation.from_pretrained(
             "CIDAS/clipseg-rd64-refined"
         ).to(device)
@@ -76,23 +78,26 @@ class ImageEditing:
     @tool(
         name="Remove Something From The Photo",
         description=(
-            "useful when you want to remove and object or something from the photo "
-            "from its description or location. "
-            "The input to this tool should be a comma separated string of two, "
-            "representing the image_path and the object need to be removed. "
+            "useful when you want to remove an object or something from the"
+            " photo from its description or location. The input to this tool"
+            " should be a comma separated string of two, representing the"
+            " image_path and the object to be removed. "
         ),
     )
     def inference_remove(self, inputs):
         image_path, to_be_removed_txt = inputs.split(",")
-        return self.inference_replace(f"{image_path},{to_be_removed_txt},background")
+        return self.inference_replace(
+            f"{image_path},{to_be_removed_txt},background"
+        )
 
     @tool(
         name="Replace Something From The Photo",
         description=(
-            "useful when you want to replace an object from the object description or"
-            " location with another object from its description. The input to this tool"
-            " should be a comma separated string of three, representing the image_path,"
-            " the object to be replaced, the object to be replaced with "
+            "useful when you want to replace an object from the object"
+            " description or location with another object from its description."
+            " The input to this tool should be a comma separated string of"
+            " three, representing the image_path, the object to be replaced,"
+            " the object to be replaced with "
         ),
     )
     def inference_replace(self, inputs):
@@ -137,10 +142,10 @@ class InstructPix2Pix:
     @tool(
         name="Instruct Image Using Text",
         description=(
-            "useful when you want to the style of the image to be like the text. "
-            "like: make it look like a painting. or make it like a robot. "
-            "The input to this tool should be a comma separated string of two, "
-            "representing the image_path and the text. "
+            "useful when you want the style of the image to be like the"
+            " text. like: make it look like a painting. or make it like a"
+            " robot. The input to this tool should be a comma separated string"
+            " of two, representing the image_path and the text. 
" ), ) def inference(self, inputs): @@ -149,14 +154,17 @@ class InstructPix2Pix: image_path, text = inputs.split(",")[0], ",".join(inputs.split(",")[1:]) original_image = Image.open(image_path) image = self.pipe( - text, image=original_image, num_inference_steps=40, image_guidance_scale=1.2 + text, + image=original_image, + num_inference_steps=40, + image_guidance_scale=1.2, ).images[0] updated_image_path = get_new_image_name(image_path, func_name="pix2pix") image.save(updated_image_path) logger.debug( - f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct Text:" - f" {text}, Output Image: {updated_image_path}" + f"\nProcessed InstructPix2Pix, Input Image: {image_path}, Instruct" + f" Text: {text}, Output Image: {updated_image_path}" ) return updated_image_path @@ -173,17 +181,18 @@ class Text2Image: self.pipe.to(device) self.a_prompt = "best quality, extremely detailed" self.n_prompt = ( - "longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, " - "fewer digits, cropped, worst quality, low quality" + "longbody, lowres, bad anatomy, bad hands, missing fingers, extra" + " digit, fewer digits, cropped, worst quality, low quality" ) @tool( name="Generate Image From User Input Text", description=( - "useful when you want to generate an image from a user input text and save" - " it to a file. like: generate an image of an object or something, or" - " generate an image that includes some objects. The input to this tool" - " should be a string, representing the text used to generate image. " + "useful when you want to generate an image from a user input text" + " and save it to a file. like: generate an image of an object or" + " something, or generate an image that includes some objects. The" + " input to this tool should be a string, representing the text used" + " to generate image. " ), ) def inference(self, text): @@ -205,7 +214,9 @@ class VisualQuestionAnswering: print("Initializing VisualQuestionAnswering to %s" % device) self.torch_dtype = torch.float16 if "cuda" in device else torch.float32 self.device = device - self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base") + self.processor = BlipProcessor.from_pretrained( + "Salesforce/blip-vqa-base" + ) self.model = BlipForQuestionAnswering.from_pretrained( "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype ).to(self.device) @@ -213,10 +224,11 @@ class VisualQuestionAnswering: @tool( name="Answer Question About The Image", description=( - "useful when you need an answer for a question based on an image. like:" - " what is the background color of the last image, how many cats in this" - " figure, what is in this figure. The input to this tool should be a comma" - " separated string of two, representing the image_path and the question" + "useful when you need an answer for a question based on an image." + " like: what is the background color of the last image, how many" + " cats in this figure, what is in this figure. 
The input to this" + " tool should be a comma separated string of two, representing the" + " image_path and the question" ), ) def inference(self, inputs): @@ -229,8 +241,8 @@ class VisualQuestionAnswering: answer = self.processor.decode(out[0], skip_special_tokens=True) logger.debug( - f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input" - f" Question: {question}, Output Answer: {answer}" + f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}," + f" Input Question: {question}, Output Answer: {answer}" ) return answer @@ -245,7 +257,8 @@ class ImageCaptioning(BaseHandler): "Salesforce/blip-image-captioning-base" ) self.model = BlipForConditionalGeneration.from_pretrained( - "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype + "Salesforce/blip-image-captioning-base", + torch_dtype=self.torch_dtype, ).to(self.device) def handle(self, filename: str): @@ -264,8 +277,8 @@ class ImageCaptioning(BaseHandler): out = self.model.generate(**inputs) description = self.processor.decode(out[0], skip_special_tokens=True) print( - f"\nProcessed ImageCaptioning, Input Image: {filename}, Output Text:" - f" {description}" + f"\nProcessed ImageCaptioning, Input Image: {filename}, Output" + f" Text: {description}" ) return IMAGE_PROMPT.format(filename=filename, description=description) diff --git a/swarms/tools/tool.py b/swarms/tools/tool.py index a5ad3f75..8ae3b7cd 100644 --- a/swarms/tools/tool.py +++ b/swarms/tools/tool.py @@ -7,7 +7,17 @@ import warnings from abc import abstractmethod from functools import partial from inspect import signature -from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union +from typing import ( + Any, + Awaitable, + Callable, + Dict, + List, + Optional, + Tuple, + Type, + Union, +) from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import ( @@ -27,7 +37,11 @@ from pydantic import ( root_validator, validate_arguments, ) -from langchain.schema.runnable import Runnable, RunnableConfig, RunnableSerializable +from langchain.schema.runnable import ( + Runnable, + RunnableConfig, + RunnableSerializable, +) class SchemaAnnotationError(TypeError): @@ -52,7 +66,11 @@ def _get_filtered_args( """Get the arguments from a function's signature.""" schema = inferred_model.schema()["properties"] valid_keys = signature(func).parameters - return {k: schema[k] for k in valid_keys if k not in ("run_manager", "callbacks")} + return { + k: schema[k] + for k in valid_keys + if k not in ("run_manager", "callbacks") + } class _SchemaConfig: @@ -120,12 +138,11 @@ class ChildTool(BaseTool): ...""" name = cls.__name__ raise SchemaAnnotationError( - f"Tool definition for {name} must include valid type annotations" - " for argument 'args_schema' to behave as expected.\n" - "Expected annotation of 'Type[BaseModel]'" - f" but got '{args_schema_type}'.\n" - "Expected class looks like:\n" - f"{typehint_mandate}" + f"Tool definition for {name} must include valid type" + " annotations for argument 'args_schema' to behave as" + " expected.\nExpected annotation of 'Type[BaseModel]' but" + f" got '{args_schema_type}'.\nExpected class looks" + f" like:\n{typehint_mandate}" ) name: str @@ -147,7 +164,9 @@ class ChildTool(BaseTool): callbacks: Callbacks = Field(default=None, exclude=True) """Callbacks to be called during tool execution.""" - callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True) + callback_manager: Optional[BaseCallbackManager] = Field( + default=None, 
exclude=True + ) """Deprecated. Please use callbacks instead.""" tags: Optional[List[str]] = None """Optional list of tags associated with the tool. Defaults to None @@ -244,7 +263,9 @@ class ChildTool(BaseTool): else: if input_args is not None: result = input_args.parse_obj(tool_input) - return {k: v for k, v in result.dict().items() if k in tool_input} + return { + k: v for k, v in result.dict().items() if k in tool_input + } return tool_input @root_validator() @@ -286,7 +307,9 @@ class ChildTool(BaseTool): *args, ) - def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: + def _to_args_and_kwargs( + self, tool_input: Union[str, Dict] + ) -> Tuple[Tuple, Dict]: # For backwards compatibility, if run_input is a string, # pass as a positional argument. if isinstance(tool_input, str): @@ -353,8 +376,9 @@ class ChildTool(BaseTool): observation = self.handle_tool_error(e) else: raise ValueError( - "Got unexpected type of `handle_tool_error`. Expected bool, str " - f"or callable. Received: {self.handle_tool_error}" + "Got unexpected type of `handle_tool_error`. Expected" + " bool, str or callable. Received:" + f" {self.handle_tool_error}" ) run_manager.on_tool_end( str(observation), color="red", name=self.name, **kwargs @@ -409,7 +433,9 @@ class ChildTool(BaseTool): # We then call the tool on the tool input to get an observation tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input) observation = ( - await self._arun(*tool_args, run_manager=run_manager, **tool_kwargs) + await self._arun( + *tool_args, run_manager=run_manager, **tool_kwargs + ) if new_arg_supported else await self._arun(*tool_args, **tool_kwargs) ) @@ -428,8 +454,9 @@ class ChildTool(BaseTool): observation = self.handle_tool_error(e) else: raise ValueError( - "Got unexpected type of `handle_tool_error`. Expected bool, str " - f"or callable. Received: {self.handle_tool_error}" + "Got unexpected type of `handle_tool_error`. Expected" + " bool, str or callable. Received:" + f" {self.handle_tool_error}" ) await run_manager.on_tool_end( str(observation), color="red", name=self.name, **kwargs @@ -484,14 +511,17 @@ class Tool(BaseTool): # assume it takes a single string input. return {"tool_input": {"type": "string"}} - def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]: + def _to_args_and_kwargs( + self, tool_input: Union[str, Dict] + ) -> Tuple[Tuple, Dict]: """Convert tool input to pydantic model.""" args, kwargs = super()._to_args_and_kwargs(tool_input) # For backwards compatibility. The tool must be run with a single input all_args = list(args) + list(kwargs.values()) if len(all_args) != 1: raise ToolException( - f"Too many arguments to single-input tool {self.name}. Args: {all_args}" + f"Too many arguments to single-input tool {self.name}. 
Args:" + f" {all_args}" ) return tuple(all_args), {} @@ -503,7 +533,9 @@ class Tool(BaseTool): ) -> Any: """Use the tool.""" if self.func: - new_argument_supported = signature(self.func).parameters.get("callbacks") + new_argument_supported = signature(self.func).parameters.get( + "callbacks" + ) return ( self.func( *args, @@ -537,12 +569,18 @@ class Tool(BaseTool): ) else: return await asyncio.get_running_loop().run_in_executor( - None, partial(self._run, run_manager=run_manager, **kwargs), *args + None, + partial(self._run, run_manager=run_manager, **kwargs), + *args, ) # TODO: this is for backwards compatibility, remove in future def __init__( - self, name: str, func: Optional[Callable], description: str, **kwargs: Any + self, + name: str, + func: Optional[Callable], + description: str, + **kwargs: Any, ) -> None: """Initialize tool.""" super(Tool, self).__init__( @@ -617,7 +655,9 @@ class StructuredTool(BaseTool): ) -> Any: """Use the tool.""" if self.func: - new_argument_supported = signature(self.func).parameters.get("callbacks") + new_argument_supported = signature(self.func).parameters.get( + "callbacks" + ) return ( self.func( *args, @@ -714,7 +754,9 @@ class StructuredTool(BaseTool): description = f"{name}{sig} - {description.strip()}" _args_schema = args_schema if _args_schema is None and infer_schema: - _args_schema = create_schema_from_function(f"{name}Schema", source_function) + _args_schema = create_schema_from_function( + f"{name}Schema", source_function + ) return cls( name=name, func=func, @@ -772,7 +814,9 @@ def tool( async def ainvoke_wrapper( callbacks: Optional[Callbacks] = None, **kwargs: Any ) -> Any: - return await runnable.ainvoke(kwargs, {"callbacks": callbacks}) + return await runnable.ainvoke( + kwargs, {"callbacks": callbacks} + ) def invoke_wrapper( callbacks: Optional[Callbacks] = None, **kwargs: Any @@ -821,7 +865,11 @@ def tool( return _make_tool - if len(args) == 2 and isinstance(args[0], str) and isinstance(args[1], Runnable): + if ( + len(args) == 2 + and isinstance(args[0], str) + and isinstance(args[1], Runnable) + ): return _make_with_name(args[0])(args[1]) elif len(args) == 1 and isinstance(args[0], str): # if the argument is a string, then we use the string as the tool name diff --git a/swarms/utils/apa.py b/swarms/utils/apa.py index 94c6f158..4adcb5cf 100644 --- a/swarms/utils/apa.py +++ b/swarms/utils/apa.py @@ -144,7 +144,9 @@ class Singleton(abc.ABCMeta, type): def __call__(cls, *args, **kwargs): """Call method for the singleton metaclass.""" if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) + cls._instances[cls] = super(Singleton, cls).__call__( + *args, **kwargs + ) return cls._instances[cls] diff --git a/swarms/utils/code_interpreter.py b/swarms/utils/code_interpreter.py index 86059a83..fc2f95f7 100644 --- a/swarms/utils/code_interpreter.py +++ b/swarms/utils/code_interpreter.py @@ -116,14 +116,20 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter): # Most of the time it doesn't matter, but we should figure out why it happens frequently with: # applescript yield {"output": traceback.format_exc()} - yield {"output": f"Retrying... ({retry_count}/{max_retries})"} + yield { + "output": f"Retrying... ({retry_count}/{max_retries})" + } yield {"output": "Restarting process."} self.start_process() retry_count += 1 if retry_count > max_retries: - yield {"output": "Maximum retries reached. Could not execute code."} + yield { + "output": ( + "Maximum retries reached. Could not execute code." 
+ ) + } return while True: @@ -132,7 +138,9 @@ class SubprocessCodeInterpreter(BaseCodeInterpreter): else: time.sleep(0.1) try: - output = self.output_queue.get(timeout=0.3) # Waits for 0.3 seconds + output = self.output_queue.get( + timeout=0.3 + ) # Waits for 0.3 seconds yield output except queue.Empty: if self.done.is_set(): diff --git a/swarms/utils/decorators.py b/swarms/utils/decorators.py index 8a5a5d56..cf4a774c 100644 --- a/swarms/utils/decorators.py +++ b/swarms/utils/decorators.py @@ -31,7 +31,9 @@ def timing_decorator(func): start_time = time.time() result = func(*args, **kwargs) end_time = time.time() - logging.info(f"{func.__name__} executed in {end_time - start_time} seconds") + logging.info( + f"{func.__name__} executed in {end_time - start_time} seconds" + ) return result return wrapper @@ -79,7 +81,9 @@ def synchronized_decorator(func): def deprecated_decorator(func): @functools.wraps(func) def wrapper(*args, **kwargs): - warnings.warn(f"{func.__name__} is deprecated", category=DeprecationWarning) + warnings.warn( + f"{func.__name__} is deprecated", category=DeprecationWarning + ) return func(*args, **kwargs) return wrapper diff --git a/swarms/utils/futures.py b/swarms/utils/futures.py index 55a4e5d5..a5ffdf51 100644 --- a/swarms/utils/futures.py +++ b/swarms/utils/futures.py @@ -5,6 +5,8 @@ T = TypeVar("T") def execute_futures_dict(fs_dict: dict[str, futures.Future[T]]) -> dict[str, T]: - futures.wait(fs_dict.values(), timeout=None, return_when=futures.ALL_COMPLETED) + futures.wait( + fs_dict.values(), timeout=None, return_when=futures.ALL_COMPLETED + ) return {key: future.result() for key, future in fs_dict.items()} diff --git a/swarms/utils/loggers.py b/swarms/utils/loggers.py index da822d1a..d9845543 100644 --- a/swarms/utils/loggers.py +++ b/swarms/utils/loggers.py @@ -113,8 +113,8 @@ class Logger: ) error_handler.setLevel(logging.ERROR) error_formatter = AutoGptFormatter( - "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" - " %(message_no_color)s" + "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d" + " %(title)s %(message_no_color)s" ) error_handler.setFormatter(error_formatter) @@ -140,7 +140,12 @@ class Logger: self.chat_plugins = [] def typewriter_log( - self, title="", title_color="", content="", speak_text=False, level=logging.INFO + self, + title="", + title_color="", + content="", + speak_text=False, + level=logging.INFO, ): """ Logs a message to the typewriter. @@ -255,7 +260,9 @@ class Logger: if isinstance(message, list): message = " ".join(message) self.logger.log( - level, message, extra={"title": str(title), "color": str(title_color)} + level, + message, + extra={"title": str(title), "color": str(title_color)}, ) def set_level(self, level): @@ -284,12 +291,15 @@ class Logger: if not additionalText: additionalText = ( "Please ensure you've setup and configured everything" - " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to " - "double check. You can also create a github issue or join the discord" + " correctly. Read" + " https://github.com/Torantulino/Auto-GPT#readme to double" + " check. You can also create a github issue or join the discord" " and ask there!" 
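# A usage sketch for the execute_futures_dict helper reformatted above:
# it blocks until every future completes, then returns results keyed by
# name. The worker function and keys are illustrative; the import path
# assumes the swarms package layout shown in this patch.
from concurrent.futures import ThreadPoolExecutor

from swarms.utils.futures import execute_futures_dict

def square(n: int) -> int:
    return n * n

with ThreadPoolExecutor(max_workers=2) as pool:
    fs = {"a": pool.submit(square, 2), "b": pool.submit(square, 3)}
    assert execute_futures_dict(fs) == {"a": 4, "b": 9}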
) - self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText) + self.typewriter_log( + "DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText + ) def log_json(self, data: Any, file_name: str) -> None: """ @@ -367,7 +377,9 @@ class TypingConsoleHandler(logging.StreamHandler): print(word, end="", flush=True) if i < len(words) - 1: print(" ", end="", flush=True) - typing_speed = random.uniform(min_typing_speed, max_typing_speed) + typing_speed = random.uniform( + min_typing_speed, max_typing_speed + ) time.sleep(typing_speed) # type faster after each word min_typing_speed = min_typing_speed * 0.95 diff --git a/swarms/utils/main.py b/swarms/utils/main.py index a17d4782..73704552 100644 --- a/swarms/utils/main.py +++ b/swarms/utils/main.py @@ -201,7 +201,9 @@ def dim_multiline(message: str) -> str: lines = message.split("\n") if len(lines) <= 1: return lines[0] - return lines[0] + ANSI("\n... ".join([""] + lines[1:])).to(Color.black().bright()) + return lines[0] + ANSI("\n... ".join([""] + lines[1:])).to( + Color.black().bright() + ) # +=============================> ANSI Ending @@ -227,7 +229,9 @@ class AbstractUploader(ABC): class S3Uploader(AbstractUploader): - def __init__(self, accessKey: str, secretKey: str, region: str, bucket: str): + def __init__( + self, accessKey: str, secretKey: str, region: str, bucket: str + ): self.accessKey = accessKey self.secretKey = secretKey self.region = region @@ -338,7 +342,9 @@ class FileHandler: self.handlers = handlers self.path = path - def register(self, filetype: FileType, handler: BaseHandler) -> "FileHandler": + def register( + self, filetype: FileType, handler: BaseHandler + ) -> "FileHandler": self.handlers[filetype] = handler return self @@ -356,7 +362,9 @@ class FileHandler: def handle(self, url: str) -> str: try: - if url.startswith(os.environ.get("SERVER", "http://localhost:8000")): + if url.startswith( + os.environ.get("SERVER", "http://localhost:8000") + ): local_filepath = url[ len(os.environ.get("SERVER", "http://localhost:8000")) + 1 : ] diff --git a/swarms/utils/parse_code.py b/swarms/utils/parse_code.py index a2f346ea..9e3b8cb4 100644 --- a/swarms/utils/parse_code.py +++ b/swarms/utils/parse_code.py @@ -7,5 +7,7 @@ def extract_code_in_backticks_in_string(message: str) -> str: """ pattern = r"`` ``(.*?)`` " # Non-greedy match between six backticks - match = re.search(pattern, message, re.DOTALL) # re.DOTALL to match newline chars + match = re.search( + pattern, message, re.DOTALL + ) # re.DOTALL to match newline chars return match.group(1).strip() if match else None diff --git a/swarms/utils/serializable.py b/swarms/utils/serializable.py index 8f0e5ccf..c7f9bc2c 100644 --- a/swarms/utils/serializable.py +++ b/swarms/utils/serializable.py @@ -109,9 +109,11 @@ class Serializable(BaseModel, ABC): "lc": 1, "type": "constructor", "id": [*self.lc_namespace, self.__class__.__name__], - "kwargs": lc_kwargs - if not secrets - else _replace_secrets(lc_kwargs, secrets), + "kwargs": ( + lc_kwargs + if not secrets + else _replace_secrets(lc_kwargs, secrets) + ), } def to_json_not_implemented(self) -> SerializedNotImplemented: diff --git a/tests/agents/omni_modal.py b/tests/agents/omni_modal.py index d106f66c..41aa050b 100644 --- a/tests/agents/omni_modal.py +++ b/tests/agents/omni_modal.py @@ -35,4 +35,6 @@ def test_omnimodalagent_run(omni_agent): def test_task_executor_initialization(omni_agent): - assert omni_agent.task_executor is not None, "TaskExecutor initialization failed" + assert ( + omni_agent.task_executor is not 
None + ), "TaskExecutor initialization failed" diff --git a/tests/memory/oceandb.py b/tests/memory/oceandb.py index 3e31afab..c74b7c15 100644 --- a/tests/memory/oceandb.py +++ b/tests/memory/oceandb.py @@ -30,7 +30,9 @@ def test_create_collection(): def test_create_collection_exception(): with patch("oceandb.Client") as MockClient: - MockClient.create_collection.side_effect = Exception("Create collection error") + MockClient.create_collection.side_effect = Exception( + "Create collection error" + ) db = OceanDB(MockClient) with pytest.raises(Exception) as e: db.create_collection("test", "modality") diff --git a/tests/memory/pinecone.py b/tests/memory/pinecone.py index bd037bef..106a6e81 100644 --- a/tests/memory/pinecone.py +++ b/tests/memory/pinecone.py @@ -6,7 +6,9 @@ api_key = os.getenv("PINECONE_API_KEY") or "" def test_init(): - with patch("pinecone.init") as MockInit, patch("pinecone.Index") as MockIndex: + with patch("pinecone.init") as MockInit, patch( + "pinecone.Index" + ) as MockIndex: store = PineconeVectorStore( api_key=api_key, index_name="test_index", environment="test_env" ) diff --git a/tests/models/LLM.py b/tests/models/LLM.py index 20493519..a7ca149f 100644 --- a/tests/models/LLM.py +++ b/tests/models/LLM.py @@ -11,7 +11,9 @@ class TestLLM(unittest.TestCase): @patch.object(ChatOpenAI, "__init__", return_value=None) def setUp(self, mock_hf_init, mock_openai_init): self.llm_openai = LLM(openai_api_key="mock_openai_key") - self.llm_hf = LLM(hf_repo_id="mock_repo_id", hf_api_token="mock_hf_token") + self.llm_hf = LLM( + hf_repo_id="mock_repo_id", hf_api_token="mock_hf_token" + ) self.prompt = "Who won the FIFA World Cup in 1998?" def test_init(self): diff --git a/tests/models/anthropic.py b/tests/models/anthropic.py index e2447614..fecd3585 100644 --- a/tests/models/anthropic.py +++ b/tests/models/anthropic.py @@ -74,7 +74,9 @@ def test_anthropic_default_params(anthropic_instance): } -def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instance): +def test_anthropic_run( + mock_anthropic_env, mock_requests_post, anthropic_instance +): mock_response = Mock() mock_response.json.return_value = {"completion": "Generated text"} mock_requests_post.return_value = mock_response @@ -98,7 +100,9 @@ def test_anthropic_run(mock_anthropic_env, mock_requests_post, anthropic_instanc ) -def test_anthropic_call(mock_anthropic_env, mock_requests_post, anthropic_instance): +def test_anthropic_call( + mock_anthropic_env, mock_requests_post, anthropic_instance +): mock_response = Mock() mock_response.json.return_value = {"completion": "Generated text"} mock_requests_post.return_value = mock_response @@ -193,18 +197,24 @@ def test_anthropic_convert_prompt(anthropic_instance): def test_anthropic_call_with_stop(anthropic_instance): - response = anthropic_instance("Translate to French.", stop=["stop1", "stop2"]) + response = anthropic_instance( + "Translate to French.", stop=["stop1", "stop2"] + ) assert response == "Mocked Response from Anthropic" def test_anthropic_stream_with_stop(anthropic_instance): - generator = anthropic_instance.stream("Write a story.", stop=["stop1", "stop2"]) + generator = anthropic_instance.stream( + "Write a story.", stop=["stop1", "stop2"] + ) for token in generator: assert isinstance(token, str) def test_anthropic_async_call_with_stop(anthropic_instance): - response = anthropic_instance.async_call("Tell me a joke.", stop=["stop1", "stop2"]) + response = anthropic_instance.async_call( + "Tell me a joke.", stop=["stop1", "stop2"] + ) assert response 
== "Mocked Response from Anthropic" diff --git a/tests/models/auto_temp.py b/tests/models/auto_temp.py index bd37e5bb..76cdc7c3 100644 --- a/tests/models/auto_temp.py +++ b/tests/models/auto_temp.py @@ -47,7 +47,9 @@ def test_run_auto_select(auto_temp_agent): def test_run_no_scores(auto_temp_agent): task = "Invalid task." temperature_string = "0.4,0.6,0.8,1.0,1.2,1.4" - with ThreadPoolExecutor(max_workers=auto_temp_agent.max_workers) as executor: + with ThreadPoolExecutor( + max_workers=auto_temp_agent.max_workers + ) as executor: with patch.object( executor, "submit", side_effect=[None, None, None, None, None, None] ): diff --git a/tests/models/bingchat.py b/tests/models/bingchat.py index ce3af99d..8f29f905 100644 --- a/tests/models/bingchat.py +++ b/tests/models/bingchat.py @@ -44,7 +44,9 @@ class TestBingChat(unittest.TestCase): original_image_gen = BingChat.ImageGen BingChat.ImageGen = MockImageGen - img_path = self.chat.create_img("Test prompt", auth_cookie="mock_auth_cookie") + img_path = self.chat.create_img( + "Test prompt", auth_cookie="mock_auth_cookie" + ) self.assertEqual(img_path, "./output/mock_image.png") BingChat.ImageGen = original_image_gen diff --git a/tests/models/bioclip.py b/tests/models/bioclip.py index 50a65570..54ab5bb9 100644 --- a/tests/models/bioclip.py +++ b/tests/models/bioclip.py @@ -127,7 +127,9 @@ def test_clip_multiple_images(clip_instance, sample_image_path): # Test model inference performance -def test_clip_inference_performance(clip_instance, sample_image_path, benchmark): +def test_clip_inference_performance( + clip_instance, sample_image_path, benchmark +): labels = [ "adenocarcinoma histopathology", "brain MRI", diff --git a/tests/models/biogpt.py b/tests/models/biogpt.py index f420292b..e1daa14e 100644 --- a/tests/models/biogpt.py +++ b/tests/models/biogpt.py @@ -46,7 +46,10 @@ def test_cell_biology_response(biogpt_instance): # 40. Test for a question about protein structure def test_protein_structure_response(biogpt_instance): - question = "What's the difference between alpha helix and beta sheet structures in proteins?" + question = ( + "What's the difference between alpha helix and beta sheet structures in" + " proteins?" + ) response = biogpt_instance(question) assert response and isinstance(response, str) diff --git a/tests/models/cohere.py b/tests/models/cohere.py index d1bea935..08a0e39d 100644 --- a/tests/models/cohere.py +++ b/tests/models/cohere.py @@ -49,7 +49,9 @@ def test_cohere_stream_api_error_handling(cohere_instance): cohere_instance.model = "base" cohere_instance.cohere_api_key = "invalid-api-key" with pytest.raises(Exception): - generator = cohere_instance.stream("Error handling with invalid API key.") + generator = cohere_instance.stream( + "Error handling with invalid API key." 
+ ) for token in generator: pass @@ -94,13 +96,17 @@ def test_cohere_call_with_stop(cohere_instance): def test_cohere_stream_with_stop(cohere_instance): - generator = cohere_instance.stream("Write a story.", stop=["stop1", "stop2"]) + generator = cohere_instance.stream( + "Write a story.", stop=["stop1", "stop2"] + ) for token in generator: assert isinstance(token, str) def test_cohere_async_call_with_stop(cohere_instance): - response = cohere_instance.async_call("Tell me a joke.", stop=["stop1", "stop2"]) + response = cohere_instance.async_call( + "Tell me a joke.", stop=["stop1", "stop2"] + ) assert response == "Mocked Response from Cohere" @@ -187,14 +193,22 @@ def test_cohere_generate_with_embed_english_v2(cohere_instance): def test_cohere_generate_with_embed_english_light_v2(cohere_instance): cohere_instance.model = "embed-english-light-v2.0" - response = cohere_instance("Generate embeddings with English Light v2.0 model.") - assert response.startswith("Generated embeddings with English Light v2.0 model") + response = cohere_instance( + "Generate embeddings with English Light v2.0 model." + ) + assert response.startswith( + "Generated embeddings with English Light v2.0 model" + ) def test_cohere_generate_with_embed_multilingual_v2(cohere_instance): cohere_instance.model = "embed-multilingual-v2.0" - response = cohere_instance("Generate embeddings with Multilingual v2.0 model.") - assert response.startswith("Generated embeddings with Multilingual v2.0 model") + response = cohere_instance( + "Generate embeddings with Multilingual v2.0 model." + ) + assert response.startswith( + "Generated embeddings with Multilingual v2.0 model" + ) def test_cohere_generate_with_embed_english_v3(cohere_instance): @@ -205,14 +219,22 @@ def test_cohere_generate_with_embed_english_v3(cohere_instance): def test_cohere_generate_with_embed_english_light_v3(cohere_instance): cohere_instance.model = "embed-english-light-v3.0" - response = cohere_instance("Generate embeddings with English Light v3.0 model.") - assert response.startswith("Generated embeddings with English Light v3.0 model") + response = cohere_instance( + "Generate embeddings with English Light v3.0 model." + ) + assert response.startswith( + "Generated embeddings with English Light v3.0 model" + ) def test_cohere_generate_with_embed_multilingual_v3(cohere_instance): cohere_instance.model = "embed-multilingual-v3.0" - response = cohere_instance("Generate embeddings with Multilingual v3.0 model.") - assert response.startswith("Generated embeddings with Multilingual v3.0 model") + response = cohere_instance( + "Generate embeddings with Multilingual v3.0 model." + ) + assert response.startswith( + "Generated embeddings with Multilingual v3.0 model" + ) def test_cohere_generate_with_embed_multilingual_light_v3(cohere_instance): @@ -423,7 +445,9 @@ def test_cohere_representation_model_classification(cohere_instance): def test_cohere_representation_model_language_detection(cohere_instance): # Test using the Representation model for language detection cohere_instance.model = "embed-english-v3.0" - language = cohere_instance.detect_language("Detect the language of this text.") + language = cohere_instance.detect_language( + "Detect the language of this text." 
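# The per-model embed tests above repeat a single shape; the same
# matrix can be expressed once with pytest.mark.parametrize. A sketch,
# assuming a cohere_instance fixture like the one these tests use.
import pytest

@pytest.mark.parametrize(
    "model",
    [
        "embed-english-v2.0",
        "embed-english-light-v2.0",
        "embed-multilingual-v2.0",
    ],
)
def test_cohere_generate_embeddings(cohere_instance, model):
    cohere_instance.model = model
    response = cohere_instance(f"Generate embeddings with {model}.")
    assert response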
+ ) assert isinstance(language, str) @@ -447,7 +471,9 @@ def test_cohere_representation_model_multilingual_embedding(cohere_instance): assert len(embedding) > 0 -def test_cohere_representation_model_multilingual_classification(cohere_instance): +def test_cohere_representation_model_multilingual_classification( + cohere_instance, +): # Test using the Representation model for multilingual text classification cohere_instance.model = "embed-multilingual-v3.0" classification = cohere_instance.classify("Classify multilingual text.") @@ -456,7 +482,9 @@ def test_cohere_representation_model_multilingual_classification(cohere_instance assert "score" in classification -def test_cohere_representation_model_multilingual_language_detection(cohere_instance): +def test_cohere_representation_model_multilingual_language_detection( + cohere_instance, +): # Test using the Representation model for multilingual language detection cohere_instance.model = "embed-multilingual-v3.0" language = cohere_instance.detect_language( @@ -471,12 +499,17 @@ def test_cohere_representation_model_multilingual_max_tokens_limit_exceeded( # Test handling max tokens limit exceeded error for multilingual model cohere_instance.model = "embed-multilingual-v3.0" cohere_instance.max_tokens = 10 - prompt = "This is a test prompt that will exceed the max tokens limit for multilingual model." + prompt = ( + "This is a test prompt that will exceed the max tokens limit for" + " multilingual model." + ) with pytest.raises(ValueError): cohere_instance.embed(prompt) -def test_cohere_representation_model_multilingual_light_embedding(cohere_instance): +def test_cohere_representation_model_multilingual_light_embedding( + cohere_instance, +): # Test using the Representation model for multilingual light text embedding cohere_instance.model = "embed-multilingual-light-v3.0" embedding = cohere_instance.embed("Generate multilingual light embeddings.") @@ -484,10 +517,14 @@ def test_cohere_representation_model_multilingual_light_embedding(cohere_instanc assert len(embedding) > 0 -def test_cohere_representation_model_multilingual_light_classification(cohere_instance): +def test_cohere_representation_model_multilingual_light_classification( + cohere_instance, +): # Test using the Representation model for multilingual light text classification cohere_instance.model = "embed-multilingual-light-v3.0" - classification = cohere_instance.classify("Classify multilingual light text.") + classification = cohere_instance.classify( + "Classify multilingual light text." + ) assert isinstance(classification, dict) assert "class" in classification assert "score" in classification @@ -510,7 +547,10 @@ def test_cohere_representation_model_multilingual_light_max_tokens_limit_exceede # Test handling max tokens limit exceeded error for multilingual light model cohere_instance.model = "embed-multilingual-light-v3.0" cohere_instance.max_tokens = 10 - prompt = "This is a test prompt that will exceed the max tokens limit for multilingual light model." + prompt = ( + "This is a test prompt that will exceed the max tokens limit for" + " multilingual light model." 
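# A minimal sketch of the guard these max-tokens tests assume: the
# client counts tokens (approximated here by whitespace splitting) and
# raises ValueError before any API call. The class and names are
# illustrative, not the real Cohere client.
class EmbeddingClient:
    def __init__(self, max_tokens: int = 256):
        self.max_tokens = max_tokens

    def embed(self, prompt: str) -> list:
        if len(prompt.split()) > self.max_tokens:
            raise ValueError("prompt exceeds max_tokens")
        return [0.0] * 8  # placeholder embedding vector

client = EmbeddingClient(max_tokens=10)
try:
    client.embed("word " * 20)
except ValueError:
    print("over-length prompt rejected before any API call")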
+ ) with pytest.raises(ValueError): cohere_instance.embed(prompt) @@ -553,19 +593,26 @@ def test_cohere_representation_model_english_classification(cohere_instance): assert "score" in classification -def test_cohere_representation_model_english_language_detection(cohere_instance): +def test_cohere_representation_model_english_language_detection( + cohere_instance, +): # Test using the Representation model for English language detection cohere_instance.model = "embed-english-v3.0" - language = cohere_instance.detect_language("Detect the language of English text.") + language = cohere_instance.detect_language( + "Detect the language of English text." + ) assert isinstance(language, str) -def test_cohere_representation_model_english_max_tokens_limit_exceeded(cohere_instance): +def test_cohere_representation_model_english_max_tokens_limit_exceeded( + cohere_instance, +): # Test handling max tokens limit exceeded error for English model cohere_instance.model = "embed-english-v3.0" cohere_instance.max_tokens = 10 prompt = ( - "This is a test prompt that will exceed the max tokens limit for English model." + "This is a test prompt that will exceed the max tokens limit for" + " English model." ) with pytest.raises(ValueError): cohere_instance.embed(prompt) @@ -579,7 +626,9 @@ def test_cohere_representation_model_english_light_embedding(cohere_instance): assert len(embedding) > 0 -def test_cohere_representation_model_english_light_classification(cohere_instance): +def test_cohere_representation_model_english_light_classification( + cohere_instance, +): # Test using the Representation model for English light text classification cohere_instance.model = "embed-english-light-v3.0" classification = cohere_instance.classify("Classify English light text.") @@ -588,7 +637,9 @@ def test_cohere_representation_model_english_light_classification(cohere_instanc assert "score" in classification -def test_cohere_representation_model_english_light_language_detection(cohere_instance): +def test_cohere_representation_model_english_light_language_detection( + cohere_instance, +): # Test using the Representation model for English light language detection cohere_instance.model = "embed-english-light-v3.0" language = cohere_instance.detect_language( @@ -603,7 +654,10 @@ def test_cohere_representation_model_english_light_max_tokens_limit_exceeded( # Test handling max tokens limit exceeded error for English light model cohere_instance.model = "embed-english-light-v3.0" cohere_instance.max_tokens = 10 - prompt = "This is a test prompt that will exceed the max tokens limit for English light model." + prompt = ( + "This is a test prompt that will exceed the max tokens limit for" + " English light model." 
+ ) with pytest.raises(ValueError): cohere_instance.embed(prompt) diff --git a/tests/models/dalle3.py b/tests/models/dalle3.py index a23d077e..9b7cf0e1 100644 --- a/tests/models/dalle3.py +++ b/tests/models/dalle3.py @@ -62,7 +62,9 @@ def test_dalle3_call_failure(dalle3, mock_openai_client, capsys): def test_dalle3_create_variations_success(dalle3, mock_openai_client): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) expected_variation_url = ( "https://cdn.openai.com/dall-e/encoded/feats/feats_02ABCDE.png" ) @@ -84,7 +86,9 @@ def test_dalle3_create_variations_success(dalle3, mock_openai_client): def test_dalle3_create_variations_failure(dalle3, mock_openai_client, capsys): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) expected_error_message = "Error running Dalle3: API Error" # Mocking OpenAIError @@ -186,7 +190,9 @@ def test_dalle3_call_with_large_input(dalle3, mock_openai_client): assert str(excinfo.value) == expected_error_message -def test_dalle3_create_variations_with_invalid_image_url(dalle3, mock_openai_client): +def test_dalle3_create_variations_with_invalid_image_url( + dalle3, mock_openai_client +): # Arrange img_url = "https://invalid-image-url.com" expected_error_message = "Error running Dalle3: Invalid image URL" @@ -228,7 +234,9 @@ def test_dalle3_call_with_retry(dalle3, mock_openai_client): # Simulate a retry scenario mock_openai_client.images.generate.side_effect = [ - OpenAIError("Temporary error", http_status=500, error="Internal Server Error"), + OpenAIError( + "Temporary error", http_status=500, error="Internal Server Error" + ), Mock(data=[Mock(url=expected_img_url)]), ] @@ -242,14 +250,18 @@ def test_dalle3_call_with_retry(dalle3, mock_openai_client): def test_dalle3_create_variations_with_retry(dalle3, mock_openai_client): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) expected_variation_url = ( "https://cdn.openai.com/dall-e/encoded/feats/feats_02ABCDE.png" ) # Simulate a retry scenario mock_openai_client.images.create_variation.side_effect = [ - OpenAIError("Temporary error", http_status=500, error="Internal Server Error"), + OpenAIError( + "Temporary error", http_status=500, error="Internal Server Error" + ), Mock(data=[Mock(url=expected_variation_url)]), ] @@ -280,9 +292,13 @@ def test_dalle3_call_exception_logging(dalle3, mock_openai_client, capsys): assert expected_error_message in captured.err -def test_dalle3_create_variations_exception_logging(dalle3, mock_openai_client, capsys): +def test_dalle3_create_variations_exception_logging( + dalle3, mock_openai_client, capsys +): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) expected_error_message = "Error running Dalle3: API Error" # Mocking OpenAIError @@ -323,7 +339,9 @@ def test_dalle3_call_no_api_key(): def test_dalle3_create_variations_no_api_key(): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) dalle3 = Dalle3(api_key=None) expected_error_message 
= "Error running Dalle3: API Key is missing" @@ -334,7 +352,9 @@ def test_dalle3_create_variations_no_api_key(): assert str(excinfo.value) == expected_error_message -def test_dalle3_call_with_retry_max_retries_exceeded(dalle3, mock_openai_client): +def test_dalle3_call_with_retry_max_retries_exceeded( + dalle3, mock_openai_client +): # Arrange task = "A painting of a dog" @@ -354,7 +374,9 @@ def test_dalle3_create_variations_with_retry_max_retries_exceeded( dalle3, mock_openai_client ): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) # Simulate max retries exceeded mock_openai_client.images.create_variation.side_effect = OpenAIError( @@ -377,7 +399,9 @@ def test_dalle3_call_retry_with_success(dalle3, mock_openai_client): # Simulate success after a retry mock_openai_client.images.generate.side_effect = [ - OpenAIError("Temporary error", http_status=500, error="Internal Server Error"), + OpenAIError( + "Temporary error", http_status=500, error="Internal Server Error" + ), Mock(data=[Mock(url=expected_img_url)]), ] @@ -389,16 +413,22 @@ def test_dalle3_call_retry_with_success(dalle3, mock_openai_client): assert mock_openai_client.images.generate.call_count == 2 -def test_dalle3_create_variations_retry_with_success(dalle3, mock_openai_client): +def test_dalle3_create_variations_retry_with_success( + dalle3, mock_openai_client +): # Arrange - img_url = "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + img_url = ( + "https://cdn.openai.com/dall-e/encoded/feats/feats_01J9J5ZKJZJY9.png" + ) expected_variation_url = ( "https://cdn.openai.com/dall-e/encoded/feats/feats_02ABCDE.png" ) # Simulate success after a retry mock_openai_client.images.create_variation.side_effect = [ - OpenAIError("Temporary error", http_status=500, error="Internal Server Error"), + OpenAIError( + "Temporary error", http_status=500, error="Internal Server Error" + ), Mock(data=[Mock(url=expected_variation_url)]), ] diff --git a/tests/models/distill_whisper.py b/tests/models/distill_whisper.py index d83caf62..6f95a0e3 100644 --- a/tests/models/distill_whisper.py +++ b/tests/models/distill_whisper.py @@ -44,7 +44,9 @@ async def test_async_transcribe_audio_file(distil_whisper_model): test_data = np.random.rand(16000) # Simulated audio data (1 second) with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as audio_file: audio_file_path = create_audio_file(test_data, 16000, audio_file.name) - transcription = await distil_whisper_model.async_transcribe(audio_file_path) + transcription = await distil_whisper_model.async_transcribe( + audio_file_path + ) os.remove(audio_file_path) assert isinstance(transcription, str) @@ -62,7 +64,9 @@ def test_transcribe_audio_data(distil_whisper_model): @pytest.mark.asyncio async def test_async_transcribe_audio_data(distil_whisper_model): test_data = np.random.rand(16000) # Simulated audio data (1 second) - transcription = await distil_whisper_model.async_transcribe(test_data.tobytes()) + transcription = await distil_whisper_model.async_transcribe( + test_data.tobytes() + ) assert isinstance(transcription, str) assert transcription.strip() != "" @@ -73,7 +77,9 @@ def test_real_time_transcribe(distil_whisper_model, capsys): with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as audio_file: audio_file_path = create_audio_file(test_data, 16000, audio_file.name) - distil_whisper_model.real_time_transcribe(audio_file_path, 
chunk_duration=1) + distil_whisper_model.real_time_transcribe( + audio_file_path, chunk_duration=1 + ) os.remove(audio_file_path) @@ -82,7 +88,9 @@ def test_real_time_transcribe(distil_whisper_model, capsys): assert "Chunk" in captured.out -def test_real_time_transcribe_audio_file_not_found(distil_whisper_model, capsys): +def test_real_time_transcribe_audio_file_not_found( + distil_whisper_model, capsys +): audio_file_path = "non_existent_audio.wav" distil_whisper_model.real_time_transcribe(audio_file_path, chunk_duration=1) @@ -145,7 +153,9 @@ def test_create_audio_file(): test_data = np.random.rand(16000) # Simulated audio data (1 second) sample_rate = 16000 with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as audio_file: - audio_file_path = create_audio_file(test_data, sample_rate, audio_file.name) + audio_file_path = create_audio_file( + test_data, sample_rate, audio_file.name + ) assert os.path.exists(audio_file_path) os.remove(audio_file_path) @@ -215,7 +225,9 @@ async def test_async_transcription_success(whisper_model, audio_file_path): @pytest.mark.asyncio -async def test_async_transcription_failure(whisper_model, invalid_audio_file_path): +async def test_async_transcription_failure( + whisper_model, invalid_audio_file_path +): with pytest.raises(Exception): await whisper_model.async_transcribe(invalid_audio_file_path) @@ -248,17 +260,22 @@ def mocked_model(monkeypatch): model_mock, ) monkeypatch.setattr( - "swarms.models.distilled_whisperx.AutoProcessor.from_pretrained", processor_mock + "swarms.models.distilled_whisperx.AutoProcessor.from_pretrained", + processor_mock, ) return model_mock, processor_mock @pytest.mark.asyncio -async def test_async_transcribe_with_mocked_model(mocked_model, audio_file_path): +async def test_async_transcribe_with_mocked_model( + mocked_model, audio_file_path +): model_mock, processor_mock = mocked_model # Set up what the mock should return when it's called model_mock.return_value.generate.return_value = torch.tensor([[0]]) - processor_mock.return_value.batch_decode.return_value = ["mocked transcription"] + processor_mock.return_value.batch_decode.return_value = [ + "mocked transcription" + ] model_wrapper = DistilWhisperModel() transcription = await model_wrapper.async_transcribe(audio_file_path) assert transcription == "mocked transcription" diff --git a/tests/models/elevenlab.py b/tests/models/elevenlab.py index 7dbcf2ea..986ce937 100644 --- a/tests/models/elevenlab.py +++ b/tests/models/elevenlab.py @@ -61,10 +61,14 @@ def test_run_text_to_speech_with_mock(eleven_labs_tool): def test_run_text_to_speech_error_handling(eleven_labs_tool): with patch("your_module._import_elevenlabs") as mock_elevenlabs: mock_elevenlabs_instance = mock_elevenlabs.return_value - mock_elevenlabs_instance.generate.side_effect = Exception("Test Exception") + mock_elevenlabs_instance.generate.side_effect = Exception( + "Test Exception" + ) with pytest.raises( RuntimeError, - match="Error while running ElevenLabsText2SpeechTool: Test Exception", + match=( + "Error while running ElevenLabsText2SpeechTool: Test Exception" + ), ): eleven_labs_tool.run(SAMPLE_TEXT) diff --git a/tests/models/fuyu.py b/tests/models/fuyu.py index 9a26dbfb..a70cb42a 100644 --- a/tests/models/fuyu.py +++ b/tests/models/fuyu.py @@ -75,7 +75,9 @@ def test_tokenizer_type(fuyu_instance): def test_processor_has_image_processor_and_tokenizer(fuyu_instance): - assert fuyu_instance.processor.image_processor == fuyu_instance.image_processor + assert ( + fuyu_instance.processor.image_processor == 
fuyu_instance.image_processor + ) assert fuyu_instance.processor.tokenizer == fuyu_instance.tokenizer diff --git a/tests/models/gpt4v.py b/tests/models/gpt4v.py index 23e97d03..8532d313 100644 --- a/tests/models/gpt4v.py +++ b/tests/models/gpt4v.py @@ -4,7 +4,12 @@ from unittest.mock import Mock import pytest from dotenv import load_dotenv -from requests.exceptions import ConnectionError, HTTPError, RequestException, Timeout +from requests.exceptions import ( + ConnectionError, + HTTPError, + RequestException, + Timeout, +) from swarms.models.gpt4v import GPT4Vision, GPT4VisionResponse @@ -173,7 +178,11 @@ def test_gpt4vision_call_retry_with_success_after_timeout( Timeout("Request timed out"), { "choices": [ - {"message": {"content": {"text": "A description of the image."}}} + { + "message": { + "content": {"text": "A description of the image."} + } + } ], }, ] @@ -200,16 +209,18 @@ def test_gpt4vision_process_img(): assert img_data.startswith("/9j/") # Base64-encoded image data -def test_gpt4vision_call_single_task_single_image(gpt4vision, mock_openai_client): +def test_gpt4vision_call_single_task_single_image( + gpt4vision, mock_openai_client +): # Arrange img_url = "https://images.unsplash.com/photo-1694734479942-8cc7f4660578?q=80&w=1287&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D" task = "Describe this image." expected_response = GPT4VisionResponse(answer="A description of the image.") - mock_openai_client.chat.completions.create.return_value.choices[ - 0 - ].text = expected_response.answer + mock_openai_client.chat.completions.create.return_value.choices[0].text = ( + expected_response.answer + ) # Act response = gpt4vision(img_url, [task]) @@ -219,16 +230,21 @@ def test_gpt4vision_call_single_task_single_image(gpt4vision, mock_openai_client mock_openai_client.chat.completions.create.assert_called_once() -def test_gpt4vision_call_single_task_multiple_images(gpt4vision, mock_openai_client): +def test_gpt4vision_call_single_task_multiple_images( + gpt4vision, mock_openai_client +): # Arrange - img_urls = ["https://example.com/image1.jpg", "https://example.com/image2.jpg"] + img_urls = [ + "https://example.com/image1.jpg", + "https://example.com/image2.jpg", + ] task = "Describe these images." 
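# The process_img test above leans on a stable fact: JPEG data begins
# with the bytes FF D8 FF, which base64-encode to the prefix "/9j/".
# A minimal sketch; the file path handling is illustrative.
import base64

def process_img(path: str) -> str:
    with open(path, "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")

# The three JPEG magic bytes alone already produce the "/9j/" prefix:
assert base64.b64encode(b"\xff\xd8\xff").decode("utf-8") == "/9j/"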
expected_response = GPT4VisionResponse(answer="Descriptions of the images.") - mock_openai_client.chat.completions.create.return_value.choices[ - 0 - ].text = expected_response.answer + mock_openai_client.chat.completions.create.return_value.choices[0].text = ( + expected_response.answer + ) # Act response = gpt4vision(img_urls, [task]) @@ -238,7 +254,9 @@ def test_gpt4vision_call_single_task_multiple_images(gpt4vision, mock_openai_cli mock_openai_client.chat.completions.create.assert_called_once() -def test_gpt4vision_call_multiple_tasks_single_image(gpt4vision, mock_openai_client): +def test_gpt4vision_call_multiple_tasks_single_image( + gpt4vision, mock_openai_client +): # Arrange img_url = "https://images.unsplash.com/photo-1694734479942-8cc7f4660578?q=80&w=1287&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D" tasks = ["Describe this image.", "What's in this picture?"] @@ -249,7 +267,9 @@ def test_gpt4vision_call_multiple_tasks_single_image(gpt4vision, mock_openai_cli ] def create_mock_response(response): - return {"choices": [{"message": {"content": {"text": response.answer}}}]} + return { + "choices": [{"message": {"content": {"text": response.answer}}}] + } mock_openai_client.chat.completions.create.side_effect = [ create_mock_response(response) for response in expected_responses @@ -279,7 +299,11 @@ def test_gpt4vision_call_multiple_tasks_single_image(gpt4vision, mock_openai_cli mock_openai_client.chat.completions.create.side_effect = [ { "choices": [ - {"message": {"content": {"text": expected_responses[i].answer}}} + { + "message": { + "content": {"text": expected_responses[i].answer} + } + } ] } for i in range(len(expected_responses)) @@ -295,7 +319,9 @@ def test_gpt4vision_call_multiple_tasks_single_image(gpt4vision, mock_openai_cli ) # Should be called only once -def test_gpt4vision_call_multiple_tasks_multiple_images(gpt4vision, mock_openai_client): +def test_gpt4vision_call_multiple_tasks_multiple_images( + gpt4vision, mock_openai_client +): # Arrange img_urls = [ "https://images.unsplash.com/photo-1694734479857-626882b6db37?q=80&w=1287&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D", @@ -328,7 +354,9 @@ def test_gpt4vision_call_http_error(gpt4vision, mock_openai_client): img_url = "https://images.unsplash.com/photo-1694734479942-8cc7f4660578?q=80&w=1287&auto=format&fit=crop&ixlib=rb-4.0.3&ixid=M3wxMjA3fDB8MHxwaG90by1wYWdlfHx8fGVufDB8fHx8fA%3D%3D" task = "Describe this image." 
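# The retry tests above depend on unittest.mock's side_effect-as-list
# semantics: each call consumes the next item, and an item that is an
# exception instance is raised instead of returned. A self-contained
# sketch of simulating one transient failure:
from unittest.mock import Mock

api = Mock(side_effect=[TimeoutError("transient"), "ok"])

def call_with_one_retry():
    try:
        return api()
    except TimeoutError:
        return api()  # second call yields the next side_effect item

assert call_with_one_retry() == "ok"
assert api.call_count == 2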
- mock_openai_client.chat.completions.create.side_effect = HTTPError("HTTP Error") + mock_openai_client.chat.completions.create.side_effect = HTTPError( + "HTTP Error" + ) # Act and Assert with pytest.raises(HTTPError): diff --git a/tests/models/hf.py b/tests/models/hf.py index ab3b648d..d3ff9a04 100644 --- a/tests/models/hf.py +++ b/tests/models/hf.py @@ -26,7 +26,10 @@ def mock_bitsandbytesconfig(): @pytest.fixture def hugging_face_llm( - mock_torch, mock_autotokenizer, mock_automodelforcausallm, mock_bitsandbytesconfig + mock_torch, + mock_autotokenizer, + mock_automodelforcausallm, + mock_bitsandbytesconfig, ): HuggingFaceLLM.torch = mock_torch HuggingFaceLLM.AutoTokenizer = mock_autotokenizer @@ -70,8 +73,12 @@ def test_init_with_quantize( def test_generate_text(hugging_face_llm): prompt_text = "test prompt" expected_output = "test output" - hugging_face_llm.tokenizer.encode.return_value = torch.tensor([0]) # Mock tensor - hugging_face_llm.model.generate.return_value = torch.tensor([0]) # Mock tensor + hugging_face_llm.tokenizer.encode.return_value = torch.tensor( + [0] + ) # Mock tensor + hugging_face_llm.model.generate.return_value = torch.tensor( + [0] + ) # Mock tensor hugging_face_llm.tokenizer.decode.return_value = expected_output output = hugging_face_llm.generate_text(prompt_text) diff --git a/tests/models/huggingface.py b/tests/models/huggingface.py index 9a27054a..62261b9c 100644 --- a/tests/models/huggingface.py +++ b/tests/models/huggingface.py @@ -84,7 +84,9 @@ def test_llm_initialization_params(model_id, max_length): assert instance.max_length == max_length else: instance = HuggingfaceLLM(model_id=model_id) - assert instance.max_length == 500 # Assuming 500 is the default max_length + assert ( + instance.max_length == 500 + ) # Assuming 500 is the default max_length # Test for setting an invalid device @@ -180,7 +182,8 @@ def test_llm_long_input_warning(mock_warning, llm_instance): # Test for run method behavior when model raises an exception @patch( - "swarms.models.huggingface.HuggingfaceLLM._model.generate", side_effect=RuntimeError + "swarms.models.huggingface.HuggingfaceLLM._model.generate", + side_effect=RuntimeError, ) def test_llm_run_model_exception(mock_generate, llm_instance): with pytest.raises(RuntimeError): @@ -191,7 +194,9 @@ def test_llm_run_model_exception(mock_generate, llm_instance): @patch("torch.cuda.is_available", return_value=False) def test_llm_force_gpu_when_unavailable(mock_is_available, llm_instance): with pytest.raises(EnvironmentError): - llm_instance.set_device("cuda") # Attempt to set CUDA when it's not available + llm_instance.set_device( + "cuda" + ) # Attempt to set CUDA when it's not available # Test for proper cleanup after model use (releasing resources) @@ -225,7 +230,9 @@ def test_llm_multilingual_input(mock_run, llm_instance): mock_run.return_value = "mocked multilingual output" multilingual_input = "Bonjour, ceci est un test multilingue." 
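# The forced-GPU test above patches torch.cuda.is_available to simulate
# a CPU-only host. A minimal sketch of the device guard such a test
# exercises; pick_device is illustrative, not the HuggingfaceLLM API.
from unittest.mock import patch

import torch

def pick_device(requested: str) -> str:
    if requested == "cuda" and not torch.cuda.is_available():
        raise EnvironmentError("CUDA requested but not available")
    return requested

with patch("torch.cuda.is_available", return_value=False):
    try:
        pick_device("cuda")
    except EnvironmentError:
        print("correctly refused CUDA on a CPU-only host")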
result = llm_instance.run(multilingual_input) - assert isinstance(result, str) # Simple check to ensure output is string type + assert isinstance( + result, str + ) # Simple check to ensure output is string type # Test caching mechanism to prevent re-running the same inputs diff --git a/tests/models/idefics.py b/tests/models/idefics.py index 610657bd..2ee9f010 100644 --- a/tests/models/idefics.py +++ b/tests/models/idefics.py @@ -1,7 +1,11 @@ import pytest from unittest.mock import patch import torch -from swarms.models.idefics import Idefics, IdeficsForVisionText2Text, AutoProcessor +from swarms.models.idefics import ( + Idefics, + IdeficsForVisionText2Text, + AutoProcessor, +) @pytest.fixture @@ -30,7 +34,8 @@ def test_init_default(idefics_instance): ) def test_init_device(device, expected): with patch( - "torch.cuda.is_available", return_value=True if expected == "cuda" else False + "torch.cuda.is_available", + return_value=True if expected == "cuda" else False, ): instance = Idefics(device=device) assert instance.device == expected @@ -39,9 +44,9 @@ def test_init_device(device, expected): # Test `run` method def test_run(idefics_instance): prompts = [["User: Test"]] - with patch.object(idefics_instance, "processor") as mock_processor, patch.object( - idefics_instance, "model" - ) as mock_model: + with patch.object( + idefics_instance, "processor" + ) as mock_processor, patch.object(idefics_instance, "model") as mock_model: mock_processor.return_value = {"input_ids": torch.tensor([1, 2, 3])} mock_model.generate.return_value = torch.tensor([1, 2, 3]) mock_processor.batch_decode.return_value = ["Test"] @@ -54,9 +59,9 @@ def test_run(idefics_instance): # Test `__call__` method (using the same logic as run for simplicity) def test_call(idefics_instance): prompts = [["User: Test"]] - with patch.object(idefics_instance, "processor") as mock_processor, patch.object( - idefics_instance, "model" - ) as mock_model: + with patch.object( + idefics_instance, "processor" + ) as mock_processor, patch.object(idefics_instance, "model") as mock_model: mock_processor.return_value = {"input_ids": torch.tensor([1, 2, 3])} mock_model.generate.return_value = torch.tensor([1, 2, 3]) mock_processor.batch_decode.return_value = ["Test"] @@ -85,7 +90,9 @@ def test_set_checkpoint(idefics_instance): ) as mock_from_pretrained, patch.object(AutoProcessor, "from_pretrained"): idefics_instance.set_checkpoint(new_checkpoint) - mock_from_pretrained.assert_called_with(new_checkpoint, torch_dtype=torch.bfloat16) + mock_from_pretrained.assert_called_with( + new_checkpoint, torch_dtype=torch.bfloat16 + ) # Test `set_device` method diff --git a/tests/models/kosmos.py b/tests/models/kosmos.py index 11d224d1..aaa756a3 100644 --- a/tests/models/kosmos.py +++ b/tests/models/kosmos.py @@ -45,7 +45,9 @@ def test_get_image(mock_image_request): # Test multimodal grounding def test_multimodal_grounding(mock_image_request): kosmos = Kosmos() - kosmos.multimodal_grounding("Find the red apple in the image.", TEST_IMAGE_URL) + kosmos.multimodal_grounding( + "Find the red apple in the image.", TEST_IMAGE_URL + ) # TODO: Validate the result if possible @@ -117,7 +119,9 @@ def test_multimodal_grounding(kosmos): @pytest.mark.usefixtures("mock_request_get") def test_referring_expression_comprehension(kosmos): - kosmos.referring_expression_comprehension("Show me the green bottle.", IMG_URL2) + kosmos.referring_expression_comprehension( + "Show me the green bottle.", IMG_URL2 + ) @pytest.mark.usefixtures("mock_request_get") @@ -147,7 +151,9 @@ 
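# The caching note above (preventing re-runs of identical inputs) is
# commonly implemented with memoization. A sketch using
# functools.lru_cache; generate() is a stand-in for the model call,
# not the HuggingfaceLLM implementation.
from functools import lru_cache

@lru_cache(maxsize=128)
def generate(prompt: str) -> str:
    return f"response to: {prompt}"  # stand-in for expensive inference

first = generate("Bonjour, ceci est un test multilingue.")
second = generate("Bonjour, ceci est un test multilingue.")
assert first == second
assert generate.cache_info().hits == 1  # second call was a cache hit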
def test_multimodal_grounding_2(kosmos): @pytest.mark.usefixtures("mock_request_get") def test_referring_expression_comprehension_2(kosmos): - kosmos.referring_expression_comprehension("Where is the water bottle?", IMG_URL3) + kosmos.referring_expression_comprehension( + "Where is the water bottle?", IMG_URL3 + ) @pytest.mark.usefixtures("mock_request_get") diff --git a/tests/models/kosmos2.py b/tests/models/kosmos2.py index 2ff01092..1ad824cc 100644 --- a/tests/models/kosmos2.py +++ b/tests/models/kosmos2.py @@ -36,7 +36,10 @@ def test_kosmos2_with_sample_image(kosmos2, sample_image): # Mocked extract_entities function for testing def mock_extract_entities(text): - return [("entity1", (0.1, 0.2, 0.3, 0.4)), ("entity2", (0.5, 0.6, 0.7, 0.8))] + return [ + ("entity1", (0.1, 0.2, 0.3, 0.4)), + ("entity2", (0.5, 0.6, 0.7, 0.8)), + ] # Mocked process_entities_to_detections function for testing @@ -54,7 +57,9 @@ def test_kosmos2_with_mocked_extraction_and_detection( ): monkeypatch.setattr(kosmos2, "extract_entities", mock_extract_entities) monkeypatch.setattr( - kosmos2, "process_entities_to_detections", mock_process_entities_to_detections + kosmos2, + "process_entities_to_detections", + mock_process_entities_to_detections, ) detections = kosmos2(img=sample_image) @@ -234,7 +239,12 @@ def test_kosmos2_with_entities_containing_special_characters( kosmos2, sample_image, monkeypatch ): def mock_extract_entities(text): - return [("entity1 with special characters (ü, ö, etc.)", (0.1, 0.2, 0.3, 0.4))] + return [ + ( + "entity1 with special characters (ü, ö, etc.)", + (0.1, 0.2, 0.3, 0.4), + ) + ] monkeypatch.setattr(kosmos2, "extract_entities", mock_extract_entities) detections = kosmos2(img=sample_image) @@ -252,7 +262,10 @@ def test_kosmos2_with_image_containing_multiple_objects( kosmos2, sample_image, monkeypatch ): def mock_extract_entities(text): - return [("entity1", (0.1, 0.2, 0.3, 0.4)), ("entity2", (0.5, 0.6, 0.7, 0.8))] + return [ + ("entity1", (0.1, 0.2, 0.3, 0.4)), + ("entity2", (0.5, 0.6, 0.7, 0.8)), + ] monkeypatch.setattr(kosmos2, "extract_entities", mock_extract_entities) detections = kosmos2(img=sample_image) @@ -266,7 +279,9 @@ def test_kosmos2_with_image_containing_multiple_objects( # Test Kosmos2 with image containing no objects -def test_kosmos2_with_image_containing_no_objects(kosmos2, sample_image, monkeypatch): +def test_kosmos2_with_image_containing_no_objects( + kosmos2, sample_image, monkeypatch +): def mock_extract_entities(text): return [] diff --git a/tests/models/llama_function_caller.py b/tests/models/llama_function_caller.py index c54c264b..c38b2267 100644 --- a/tests/models/llama_function_caller.py +++ b/tests/models/llama_function_caller.py @@ -72,7 +72,9 @@ def test_llama_custom_function_invalid_arguments(llama_caller): # Test streaming with custom runtime def test_llama_custom_runtime(): llama_caller = LlamaFunctionCaller( - model_id="Your-Model-ID", cache_dir="Your-Cache-Directory", runtime="cuda" + model_id="Your-Model-ID", + cache_dir="Your-Cache-Directory", + runtime="cuda", ) user_prompt = "Tell me about the tallest mountain in the world." 
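# A sketch of mapping the normalized entity boxes mocked above into
# pixel-space detections; the 224x224 image size is an assumption for
# illustration, matching a typical test fixture.
def to_pixels(box, width=224, height=224):
    x0, y0, x1, y1 = box
    return (
        int(x0 * width),
        int(y0 * height),
        int(x1 * width),
        int(y1 * height),
    )

entities = [
    ("entity1", (0.1, 0.2, 0.3, 0.4)),
    ("entity2", (0.5, 0.6, 0.7, 0.8)),
]
detections = [(name, to_pixels(box)) for name, box in entities]
assert detections[0] == ("entity1", (22, 44, 67, 89))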
response = llama_caller(user_prompt) @@ -83,7 +85,9 @@ def test_llama_custom_runtime(): # Test caching functionality def test_llama_cache(): llama_caller = LlamaFunctionCaller( - model_id="Your-Model-ID", cache_dir="Your-Cache-Directory", runtime="cuda" + model_id="Your-Model-ID", + cache_dir="Your-Cache-Directory", + runtime="cuda", ) # Perform a request to populate the cache @@ -99,7 +103,9 @@ def test_llama_cache(): # Test response length within max_tokens limit def test_llama_response_length(): llama_caller = LlamaFunctionCaller( - model_id="Your-Model-ID", cache_dir="Your-Cache-Directory", runtime="cuda" + model_id="Your-Model-ID", + cache_dir="Your-Cache-Directory", + runtime="cuda", ) # Generate a long prompt diff --git a/tests/models/nougat.py b/tests/models/nougat.py index e61a45af..ac972e07 100644 --- a/tests/models/nougat.py +++ b/tests/models/nougat.py @@ -21,7 +21,9 @@ def test_nougat_default_initialization(setup_nougat): def test_nougat_custom_initialization(): - nougat = Nougat(model_name_or_path="custom_path", min_length=10, max_new_tokens=50) + nougat = Nougat( + model_name_or_path="custom_path", min_length=10, max_new_tokens=50 + ) assert nougat.model_name_or_path == "custom_path" assert nougat.min_length == 10 assert nougat.max_new_tokens == 50 @@ -98,7 +100,8 @@ def mock_processor_and_model(): with patch( "transformers.NougatProcessor.from_pretrained", return_value=Mock() ), patch( - "transformers.VisionEncoderDecoderModel.from_pretrained", return_value=Mock() + "transformers.VisionEncoderDecoderModel.from_pretrained", + return_value=Mock(), ): yield diff --git a/tests/models/revgptv1.py b/tests/models/revgptv1.py index 12ceeea0..5908b64e 100644 --- a/tests/models/revgptv1.py +++ b/tests/models/revgptv1.py @@ -19,7 +19,10 @@ class TestRevChatGPT(unittest.TestCase): self.assertLess(self.model.end_time - self.model.start_time, 60) def test_generate_summary(self): - text = "This is a sample text to summarize. It has multiple sentences and details. The summary should be concise." + text = ( + "This is a sample text to summarize. It has multiple sentences and" + " details. The summary should be concise." 
+ ) summary = self.model.generate_summary(text) self.assertLess(len(summary), len(text) / 2) @@ -60,7 +63,9 @@ class TestRevChatGPT(unittest.TestCase): convo_id = "123" title = "New Title" self.model.chatbot.change_title(convo_id, title) - self.assertEqual(self.model.chatbot.get_msg_history(convo_id)["title"], title) + self.assertEqual( + self.model.chatbot.get_msg_history(convo_id)["title"], title + ) def test_delete_conversation(self): convo_id = "123" @@ -76,7 +81,9 @@ class TestRevChatGPT(unittest.TestCase): def test_rollback_conversation(self): original_convo_id = self.model.chatbot.conversation_id self.model.chatbot.rollback_conversation(1) - self.assertNotEqual(original_convo_id, self.model.chatbot.conversation_id) + self.assertNotEqual( + original_convo_id, self.model.chatbot.conversation_id + ) if __name__ == "__main__": diff --git a/tests/models/speech_t5.py b/tests/models/speech_t5.py index 4e5f4cb1..f4d21a30 100644 --- a/tests/models/speech_t5.py +++ b/tests/models/speech_t5.py @@ -17,7 +17,9 @@ def test_speecht5_init(speecht5_model): assert isinstance(speecht5_model.processor, SpeechT5.processor.__class__) assert isinstance(speecht5_model.model, SpeechT5.model.__class__) assert isinstance(speecht5_model.vocoder, SpeechT5.vocoder.__class__) - assert isinstance(speecht5_model.embeddings_dataset, torch.utils.data.Dataset) + assert isinstance( + speecht5_model.embeddings_dataset, torch.utils.data.Dataset + ) def test_speecht5_call(speecht5_model): @@ -59,8 +61,12 @@ def test_speecht5_set_embeddings_dataset(speecht5_model): new_dataset_name = "Matthijs/cmu-arctic-xvectors-test" speecht5_model.set_embeddings_dataset(new_dataset_name) assert speecht5_model.dataset_name == new_dataset_name - assert isinstance(speecht5_model.embeddings_dataset, torch.utils.data.Dataset) - speecht5_model.set_embeddings_dataset(old_dataset_name) # Restore original dataset + assert isinstance( + speecht5_model.embeddings_dataset, torch.utils.data.Dataset + ) + speecht5_model.set_embeddings_dataset( + old_dataset_name + ) # Restore original dataset def test_speecht5_get_sampling_rate(speecht5_model): diff --git a/tests/models/ssd_1b.py b/tests/models/ssd_1b.py index 7bd3154c..7a7a897f 100644 --- a/tests/models/ssd_1b.py +++ b/tests/models/ssd_1b.py @@ -19,18 +19,24 @@ def test_ssd1b_call(ssd1b_model): neg_prompt = "ugly, blurry, poor quality" image_url = ssd1b_model(task, neg_prompt) assert isinstance(image_url, str) - assert image_url.startswith("https://") # Assuming it starts with "https://" + assert image_url.startswith( + "https://" + ) # Assuming it starts with "https://" # Add more tests for various aspects of the class and methods # Example of a parameterized test for different tasks -@pytest.mark.parametrize("task", ["A painting of a cat", "A painting of a tree"]) +@pytest.mark.parametrize( + "task", ["A painting of a cat", "A painting of a tree"] +) def test_ssd1b_parameterized_task(ssd1b_model, task): image_url = ssd1b_model(task) assert isinstance(image_url, str) - assert image_url.startswith("https://") # Assuming it starts with "https://" + assert image_url.startswith( + "https://" + ) # Assuming it starts with "https://" # Example of a test using mocks to isolate units of code @@ -39,7 +45,9 @@ def test_ssd1b_with_mock(ssd1b_model, mocker): task = "A painting of a cat" image_url = ssd1b_model(task) assert isinstance(image_url, str) - assert image_url.startswith("https://") # Assuming it starts with "https://" + assert image_url.startswith( + "https://" + ) # Assuming it starts with 
"https://" def test_ssd1b_call_with_cache(ssd1b_model): diff --git a/tests/models/timm_model.py b/tests/models/timm_model.py index a3e62605..07f68b05 100644 --- a/tests/models/timm_model.py +++ b/tests/models/timm_model.py @@ -66,7 +66,9 @@ def test_call_parameterized(model_name, pretrained, in_chans): def test_get_supported_models_mock(): model_handler = TimmModel() - model_handler._get_supported_models = Mock(return_value=["resnet18", "resnet50"]) + model_handler._get_supported_models = Mock( + return_value=["resnet18", "resnet50"] + ) supported_models = model_handler._get_supported_models() assert supported_models == ["resnet18", "resnet50"] @@ -80,7 +82,9 @@ def test_create_model_mock(sample_model_info): def test_call_exception(): model_handler = TimmModel() - model_info = TimmModelInfo(model_name="invalid_model", pretrained=True, in_chans=3) + model_info = TimmModelInfo( + model_name="invalid_model", pretrained=True, in_chans=3 + ) input_tensor = torch.randn(1, 3, 224, 224) with pytest.raises(Exception): model_handler.__call__(model_info, input_tensor) @@ -111,7 +115,9 @@ def test_environment_variable(): @pytest.mark.slow def test_marked_slow(): model_handler = TimmModel() - model_info = TimmModelInfo(model_name="resnet18", pretrained=True, in_chans=3) + model_info = TimmModelInfo( + model_name="resnet18", pretrained=True, in_chans=3 + ) input_tensor = torch.randn(1, 3, 224, 224) output_shape = model_handler(model_info, input_tensor) assert isinstance(output_shape, torch.Size) @@ -136,7 +142,9 @@ def test_marked_parameterized(model_name, pretrained, in_chans): def test_exception_testing(): model_handler = TimmModel() - model_info = TimmModelInfo(model_name="invalid_model", pretrained=True, in_chans=3) + model_info = TimmModelInfo( + model_name="invalid_model", pretrained=True, in_chans=3 + ) input_tensor = torch.randn(1, 3, 224, 224) with pytest.raises(Exception): model_handler.__call__(model_info, input_tensor) @@ -144,7 +152,9 @@ def test_exception_testing(): def test_parameterized_testing(): model_handler = TimmModel() - model_info = TimmModelInfo(model_name="resnet18", pretrained=True, in_chans=3) + model_info = TimmModelInfo( + model_name="resnet18", pretrained=True, in_chans=3 + ) input_tensor = torch.randn(1, 3, 224, 224) output_shape = model_handler.__call__(model_info, input_tensor) assert isinstance(output_shape, torch.Size) @@ -153,7 +163,9 @@ def test_parameterized_testing(): def test_use_mocks_and_monkeypatching(): model_handler = TimmModel() model_handler._create_model = Mock(return_value=torch.nn.Module()) - model_info = TimmModelInfo(model_name="resnet18", pretrained=True, in_chans=3) + model_info = TimmModelInfo( + model_name="resnet18", pretrained=True, in_chans=3 + ) model = model_handler._create_model(model_info) assert isinstance(model, torch.nn.Module) diff --git a/tests/models/vilt.py b/tests/models/vilt.py index b376f41b..8dcdce88 100644 --- a/tests/models/vilt.py +++ b/tests/models/vilt.py @@ -33,7 +33,9 @@ def test_vilt_prediction(mock_image_open, mock_requests_get, vilt_instance): # 3. 
Test Exception Handling for network -@patch.object(requests, "get", side_effect=requests.RequestException("Network error")) +@patch.object( + requests, "get", side_effect=requests.RequestException("Network error") +) def test_vilt_network_exception(vilt_instance): with pytest.raises(requests.RequestException): vilt_instance( diff --git a/tests/models/whisperx.py b/tests/models/whisperx.py index bcbd02e9..5fad3431 100644 --- a/tests/models/whisperx.py +++ b/tests/models/whisperx.py @@ -28,7 +28,9 @@ def test_speech_to_text_install(mock_run): # Mock pytube.YouTube and pytube.Streams for download tests @patch("pytube.YouTube") @patch.object(YouTube, "streams") -def test_speech_to_text_download_youtube_video(mock_streams, mock_youtube, temp_dir): +def test_speech_to_text_download_youtube_video( + mock_streams, mock_youtube, temp_dir +): # Mock YouTube and streams video_url = "https://www.youtube.com/watch?v=MJd6pr16LRM" mock_stream = mock_streams().filter().first() @@ -116,7 +118,11 @@ def test_speech_to_text_transcribe_whisperx_failure( @patch("whisperx.align") @patch.object(whisperx.DiarizationPipeline, "__call__") def test_speech_to_text_transcribe_missing_segments( - mock_diarization, mock_align, mock_align_model, mock_load_audio, mock_load_model + mock_diarization, + mock_align, + mock_align_model, + mock_load_audio, + mock_load_model, ): # Mock whisperx functions to return incomplete output mock_load_model.return_value = mock_load_model @@ -142,7 +148,11 @@ def test_speech_to_text_transcribe_missing_segments( @patch("whisperx.align") @patch.object(whisperx.DiarizationPipeline, "__call__") def test_speech_to_text_transcribe_align_failure( - mock_diarization, mock_align, mock_align_model, mock_load_audio, mock_load_model + mock_diarization, + mock_align, + mock_align_model, + mock_load_audio, + mock_load_model, ): # Mock whisperx functions to raise an exception during align mock_load_model.return_value = mock_load_model diff --git a/tests/models/yi_200k.py b/tests/models/yi_200k.py index 72a6d1b2..6b179ca1 100644 --- a/tests/models/yi_200k.py +++ b/tests/models/yi_200k.py @@ -39,21 +39,27 @@ def test_yi34b_generate_text_with_temperature(yi34b_model, temperature): def test_yi34b_generate_text_with_invalid_prompt(yi34b_model): prompt = None # Invalid prompt - with pytest.raises(ValueError, match="Input prompt must be a non-empty string"): + with pytest.raises( + ValueError, match="Input prompt must be a non-empty string" + ): yi34b_model(prompt) def test_yi34b_generate_text_with_invalid_max_length(yi34b_model): prompt = "There's a place where time stands still." max_length = -1 # Invalid max_length - with pytest.raises(ValueError, match="max_length must be a positive integer"): + with pytest.raises( + ValueError, match="max_length must be a positive integer" + ): yi34b_model(prompt, max_length=max_length) def test_yi34b_generate_text_with_invalid_temperature(yi34b_model): prompt = "There's a place where time stands still." temperature = 2.0 # Invalid temperature - with pytest.raises(ValueError, match="temperature must be between 0.01 and 1.0"): + with pytest.raises( + ValueError, match="temperature must be between 0.01 and 1.0" + ): yi34b_model(prompt, temperature=temperature) @@ -74,7 +80,9 @@ def test_yi34b_generate_text_with_top_p(yi34b_model, top_p): def test_yi34b_generate_text_with_invalid_top_k(yi34b_model): prompt = "There's a place where time stands still." 
top_k = -1 # Invalid top_k - with pytest.raises(ValueError, match="top_k must be a non-negative integer"): + with pytest.raises( + ValueError, match="top_k must be a non-negative integer" + ): yi34b_model(prompt, top_k=top_k) @@ -86,7 +94,9 @@ def test_yi34b_generate_text_with_invalid_top_p(yi34b_model): @pytest.mark.parametrize("repitition_penalty", [1.0, 1.2, 1.5]) -def test_yi34b_generate_text_with_repitition_penalty(yi34b_model, repitition_penalty): +def test_yi34b_generate_text_with_repitition_penalty( + yi34b_model, repitition_penalty +): prompt = "There's a place where time stands still." generated_text = yi34b_model(prompt, repitition_penalty=repitition_penalty) assert isinstance(generated_text, str) @@ -95,7 +105,9 @@ def test_yi34b_generate_text_with_repitition_penalty(yi34b_model, repitition_pen def test_yi34b_generate_text_with_invalid_repitition_penalty(yi34b_model): prompt = "There's a place where time stands still." repitition_penalty = 0.0 # Invalid repitition_penalty - with pytest.raises(ValueError, match="repitition_penalty must be a positive float"): + with pytest.raises( + ValueError, match="repitition_penalty must be a positive float" + ): yi34b_model(prompt, repitition_penalty=repitition_penalty) diff --git a/tests/structs/flow.py b/tests/structs/flow.py index edc4b9c7..84034a08 100644 --- a/tests/structs/flow.py +++ b/tests/structs/flow.py @@ -30,7 +30,9 @@ def basic_flow(mocked_llm): @pytest.fixture def flow_with_condition(mocked_llm): - return Flow(llm=mocked_llm, max_loops=5, stopping_condition=stop_when_repeats) + return Flow( + llm=mocked_llm, max_loops=5, stopping_condition=stop_when_repeats + ) # Basic Tests @@ -61,7 +63,9 @@ def test_provide_feedback(basic_flow): @patch("time.sleep", return_value=None) # to speed up tests def test_run_without_stopping_condition(mocked_sleep, basic_flow): response = basic_flow.run("Test task") - assert response == "Test task" # since our mocked llm doesn't modify the response + assert ( + response == "Test task" + ) # since our mocked llm doesn't modify the response @patch("time.sleep", return_value=None) # to speed up tests @@ -108,7 +112,9 @@ def test_flow_with_custom_stopping_condition(mocked_llm): def stopping_condition(x): return "terminate" in x.lower() - flow = Flow(llm=mocked_llm, max_loops=5, stopping_condition=stopping_condition) + flow = Flow( + llm=mocked_llm, max_loops=5, stopping_condition=stopping_condition + ) assert flow.stopping_condition("Please terminate now") assert not flow.stopping_condition("Continue the process") @@ -174,7 +180,9 @@ def test_save_different_memory(basic_flow, tmp_path): # Test the stopping condition check def test_check_stopping_condition(flow_with_condition): assert flow_with_condition._check_stopping_condition("Stop this process") - assert not flow_with_condition._check_stopping_condition("Continue the task") + assert not flow_with_condition._check_stopping_condition( + "Continue the task" + ) # Test without providing max loops (default value should be 5) @@ -370,7 +378,8 @@ def test_flow_autosave_path(flow_instance): def test_flow_response_length(flow_instance): # Test checking the length of the response response = flow_instance.run( - "Generate a 10,000 word long blog on mental clarity and the benefits of meditation." + "Generate a 10,000 word long blog on mental clarity and the benefits of" + " meditation." 
) assert len(response) > flow_instance.get_response_length_threshold() @@ -550,7 +559,10 @@ def test_flow_rollback(flow_instance): assert flow_instance.get_user_messages() == state1["user_messages"] assert flow_instance.get_response_history() == state1["response_history"] assert flow_instance.get_conversation_log() == state1["conversation_log"] - assert flow_instance.is_dynamic_pacing_enabled() == state1["dynamic_pacing_enabled"] + assert ( + flow_instance.is_dynamic_pacing_enabled() + == state1["dynamic_pacing_enabled"] + ) assert ( flow_instance.get_response_length_threshold() == state1["response_length_threshold"] @@ -565,7 +577,9 @@ def test_flow_contextual_intent(flow_instance): # Test contextual intent handling flow_instance.add_context("location", "New York") flow_instance.add_context("time", "tomorrow") - response = flow_instance.run("What's the weather like in {location} at {time}?") + response = flow_instance.run( + "What's the weather like in {location} at {time}?" + ) assert "New York" in response assert "tomorrow" in response @@ -689,7 +703,9 @@ def test_flow_clear_injected_messages(flow_instance): def test_flow_disable_message_history(flow_instance): # Test disabling message history recording flow_instance.disable_message_history() - response = flow_instance.run("This message should not be recorded in history.") + response = flow_instance.run( + "This message should not be recorded in history." + ) assert "This message should not be recorded in history." in response assert len(flow_instance.get_message_history()) == 0 # History is empty @@ -800,16 +816,24 @@ def test_flow_input_validation(flow_instance): ) # Invalid logger type, should raise ValueError with pytest.raises(ValueError): - flow_instance.add_context(None, "value") # None key, should raise ValueError + flow_instance.add_context( + None, "value" + ) # None key, should raise ValueError with pytest.raises(ValueError): - flow_instance.add_context("key", None) # None value, should raise ValueError + flow_instance.add_context( + "key", None + ) # None value, should raise ValueError with pytest.raises(ValueError): - flow_instance.update_context(None, "value") # None key, should raise ValueError + flow_instance.update_context( + None, "value" + ) # None key, should raise ValueError with pytest.raises(ValueError): - flow_instance.update_context("key", None) # None value, should raise ValueError + flow_instance.update_context( + "key", None + ) # None value, should raise ValueError def test_flow_conversation_reset(flow_instance): @@ -913,16 +937,24 @@ def test_flow_error_handling(flow_instance): ) # Invalid logger type, should raise ValueError with pytest.raises(ValueError): - flow_instance.add_context(None, "value") # None key, should raise ValueError + flow_instance.add_context( + None, "value" + ) # None key, should raise ValueError with pytest.raises(ValueError): - flow_instance.add_context("key", None) # None value, should raise ValueError + flow_instance.add_context( + "key", None + ) # None value, should raise ValueError with pytest.raises(ValueError): - flow_instance.update_context(None, "value") # None key, should raise ValueError + flow_instance.update_context( + None, "value" + ) # None key, should raise ValueError with pytest.raises(ValueError): - flow_instance.update_context("key", None) # None value, should raise ValueError + flow_instance.update_context( + "key", None + ) # None value, should raise ValueError def test_flow_context_operations(flow_instance): @@ -1089,7 +1121,9 @@ def 
test_flow_agent_history_prompt(flow_instance): system_prompt = "This is the system prompt." history = ["User: Hi", "AI: Hello"] - agent_history_prompt = flow_instance.agent_history_prompt(system_prompt, history) + agent_history_prompt = flow_instance.agent_history_prompt( + system_prompt, history + ) assert "SYSTEM_PROMPT: This is the system prompt." in agent_history_prompt assert "History: ['User: Hi', 'AI: Hello']" in agent_history_prompt diff --git a/tests/swarms/godmode.py b/tests/swarms/godmode.py index fa9e0c13..8f528026 100644 --- a/tests/swarms/godmode.py +++ b/tests/swarms/godmode.py @@ -16,7 +16,13 @@ def test_godmode_run(monkeypatch): godmode = GodMode(llms=[LLM] * 5) responses = godmode.run("task1") assert len(responses) == 5 - assert responses == ["response", "response", "response", "response", "response"] + assert responses == [ + "response", + "response", + "response", + "response", + "response", + ] @patch("builtins.print") diff --git a/tests/swarms/groupchat.py b/tests/swarms/groupchat.py index b25e7f91..56979d52 100644 --- a/tests/swarms/groupchat.py +++ b/tests/swarms/groupchat.py @@ -165,7 +165,9 @@ def test_groupchat_select_speaker(): # Simulate selecting the next speaker last_speaker = agent1 - next_speaker = manager.select_speaker(last_speaker=last_speaker, selector=selector) + next_speaker = manager.select_speaker( + last_speaker=last_speaker, selector=selector + ) # Ensure the next speaker is agent2 assert next_speaker == agent2 @@ -183,7 +185,9 @@ def test_groupchat_underpopulated_group(): # Simulate selecting the next speaker in an underpopulated group last_speaker = agent1 - next_speaker = manager.select_speaker(last_speaker=last_speaker, selector=selector) + next_speaker = manager.select_speaker( + last_speaker=last_speaker, selector=selector + ) # Ensure the next speaker is the same as the last speaker in an underpopulated group assert next_speaker == last_speaker @@ -207,7 +211,9 @@ def test_groupchat_max_rounds(): last_speaker = next_speaker # Try one more round, should stay with the last speaker - next_speaker = manager.select_speaker(last_speaker=last_speaker, selector=selector) + next_speaker = manager.select_speaker( + last_speaker=last_speaker, selector=selector + ) # Ensure the next speaker is the same as the last speaker after reaching max rounds assert next_speaker == last_speaker diff --git a/tests/swarms/multi_agent_collab.py b/tests/swarms/multi_agent_collab.py index 3f7a0729..bea2c795 100644 --- a/tests/swarms/multi_agent_collab.py +++ b/tests/swarms/multi_agent_collab.py @@ -115,7 +115,10 @@ def test_repr(collaboration): def test_load(collaboration): - state = {"step": 5, "results": [{"agent": "Agent1", "response": "Response1"}]} + state = { + "step": 5, + "results": [{"agent": "Agent1", "response": "Response1"}], + } with open(collaboration.saved_file_path_name, "w") as file: json.dump(state, file) @@ -140,7 +143,8 @@ def test_save(collaboration, tmp_path): # Example of parameterized test for different selection functions @pytest.mark.parametrize( - "selection_function", [select_next_speaker_director, select_speaker_round_table] + "selection_function", + [select_next_speaker_director, select_speaker_round_table], ) def test_selection_functions(collaboration, selection_function): collaboration.select_next_speaker = selection_function diff --git a/tests/swarms/multi_agent_debate.py b/tests/swarms/multi_agent_debate.py index a2687f54..25e15ae5 100644 --- a/tests/swarms/multi_agent_debate.py +++ b/tests/swarms/multi_agent_debate.py @@ -1,5 +1,9 @@ 
from unittest.mock import patch -from swarms.swarms.multi_agent_debate import MultiAgentDebate, Worker, select_speaker +from swarms.swarms.multi_agent_debate import ( + MultiAgentDebate, + Worker, + select_speaker, +) def test_multiagentdebate_initialization(): @@ -57,5 +61,6 @@ def test_multiagentdebate_format_results(): formatted_results = multiagentdebate.format_results(results) assert ( formatted_results - == "Agent Agent 1 responded: Hello, world!\nAgent Agent 2 responded: Goodbye, world!" + == "Agent Agent 1 responded: Hello, world!\nAgent Agent 2 responded:" + " Goodbye, world!" ) diff --git a/tests/tools/base.py b/tests/tools/base.py index 4f7e2b4b..007719b2 100644 --- a/tests/tools/base.py +++ b/tests/tools/base.py @@ -75,7 +75,9 @@ def test_tool_ainvoke_with_coroutine(): async def async_function(input_data): return input_data - tool = Tool(name="test_tool", coroutine=async_function, description="Test tool") + tool = Tool( + name="test_tool", coroutine=async_function, description="Test tool" + ) result = tool.ainvoke("input_data") assert result == "input_data" @@ -560,7 +562,9 @@ class TestTool: with pytest.raises(ValueError): tool(no_doc_func) - def test_tool_raises_error_runnable_without_object_schema(self, mock_runnable): + def test_tool_raises_error_runnable_without_object_schema( + self, mock_runnable + ): with pytest.raises(ValueError): tool(mock_runnable) diff --git a/tests/utils/subprocess_code_interpreter.py b/tests/utils/subprocess_code_interpreter.py index ab7c748f..c15c0e16 100644 --- a/tests/utils/subprocess_code_interpreter.py +++ b/tests/utils/subprocess_code_interpreter.py @@ -4,7 +4,10 @@ import time import pytest -from swarms.utils.code_interpreter import BaseCodeInterpreter, SubprocessCodeInterpreter +from swarms.utils.code_interpreter import ( + BaseCodeInterpreter, + SubprocessCodeInterpreter, +) @pytest.fixture @@ -53,7 +56,9 @@ def test_subprocess_code_interpreter_run_success(subprocess_code_interpreter): assert any("Hello, World!" in output.get("output", "") for output in result) -def test_subprocess_code_interpreter_run_with_error(subprocess_code_interpreter): +def test_subprocess_code_interpreter_run_with_error( + subprocess_code_interpreter, +): code = 'print("Hello, World")\nraise ValueError("Error!")' result = list(subprocess_code_interpreter.run(code)) assert any("Error!" in output.get("output", "") for output in result) @@ -62,9 +67,14 @@ def test_subprocess_code_interpreter_run_with_error(subprocess_code_interpreter) def test_subprocess_code_interpreter_run_with_keyboard_interrupt( subprocess_code_interpreter, ): - code = 'import time\ntime.sleep(2)\nprint("Hello, World")\nraise KeyboardInterrupt' + code = ( + 'import time\ntime.sleep(2)\nprint("Hello, World")\nraise' + " KeyboardInterrupt" + ) result = list(subprocess_code_interpreter.run(code)) - assert any("KeyboardInterrupt" in output.get("output", "") for output in result) + assert any( + "KeyboardInterrupt" in output.get("output", "") for output in result + ) def test_subprocess_code_interpreter_run_max_retries( @@ -78,7 +88,8 @@ def test_subprocess_code_interpreter_run_max_retries( code = 'print("Hello, World!")' result = list(subprocess_code_interpreter.run(code)) assert any( - "Maximum retries reached. Could not execute code." in output.get("output", "") + "Maximum retries reached. Could not execute code." 
+ in output.get("output", "") for output in result ) @@ -112,19 +123,25 @@ def test_subprocess_code_interpreter_run_retry_on_error( # Import statements and fixtures from the previous code block -def test_subprocess_code_interpreter_line_postprocessor(subprocess_code_interpreter): +def test_subprocess_code_interpreter_line_postprocessor( + subprocess_code_interpreter, +): line = "This is a test line" processed_line = subprocess_code_interpreter.line_postprocessor(line) assert processed_line == line # No processing, should remain the same -def test_subprocess_code_interpreter_preprocess_code(subprocess_code_interpreter): +def test_subprocess_code_interpreter_preprocess_code( + subprocess_code_interpreter, +): code = 'print("Hello, World!")' preprocessed_code = subprocess_code_interpreter.preprocess_code(code) assert preprocessed_code == code # No preprocessing, should remain the same -def test_subprocess_code_interpreter_detect_active_line(subprocess_code_interpreter): +def test_subprocess_code_interpreter_detect_active_line( + subprocess_code_interpreter, +): line = "Active line: 5" active_line = subprocess_code_interpreter.detect_active_line(line) assert active_line == 5 @@ -172,7 +189,9 @@ def test_subprocess_code_interpreter_handle_stream_output_stdout( subprocess_code_interpreter, ): line = "This is a test line" - subprocess_code_interpreter.handle_stream_output(threading.current_thread(), False) + subprocess_code_interpreter.handle_stream_output( + threading.current_thread(), False + ) subprocess_code_interpreter.process.stdout.write(line + "\n") subprocess_code_interpreter.process.stdout.flush() time.sleep(0.1) @@ -184,7 +203,9 @@ def test_subprocess_code_interpreter_handle_stream_output_stderr( subprocess_code_interpreter, ): line = "This is an error line" - subprocess_code_interpreter.handle_stream_output(threading.current_thread(), True) + subprocess_code_interpreter.handle_stream_output( + threading.current_thread(), True + ) subprocess_code_interpreter.process.stderr.write(line + "\n") subprocess_code_interpreter.process.stderr.flush() time.sleep(0.1) @@ -207,12 +228,13 @@ def test_subprocess_code_interpreter_run_with_exception( subprocess_code_interpreter, capsys ): code = 'print("Hello, World!")' - subprocess_code_interpreter.start_cmd = ( - "nonexistent_command" # Force an exception during subprocess creation + subprocess_code_interpreter.start_cmd = ( # Force an exception during subprocess creation + "nonexistent_command" ) result = list(subprocess_code_interpreter.run(code)) assert any( - "Maximum retries reached" in output.get("output", "") for output in result + "Maximum retries reached" in output.get("output", "") + for output in result ) @@ -245,4 +267,6 @@ def test_subprocess_code_interpreter_run_with_unicode_characters( ): code = 'print("こんにちは、世界")' # Contains unicode characters result = list(subprocess_code_interpreter.run(code)) - assert any("こんにちは、世界" in output.get("output", "") for output in result) + assert any( + "こんにちは、世界" in output.get("output", "") for output in result + ) From 9f20592612385196ea2e00ffec57cc5a365a9368 Mon Sep 17 00:00:00 2001 From: Kye Date: Thu, 23 Nov 2023 23:30:05 -0800 Subject: [PATCH 39/40] requirements.txt --- requirements.txt | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index bd7c513c..d75d5d1a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,5 @@ -# faiss-gpu +torch==2.1.1 transformers -revChatGPT pandas langchain nest_asyncio @@ -12,7 +11,6 @@ playwright 
 wget==3.2
 simpleaichat
 httpx
-torch==2.1.1
 open_clip_torch
 ggl
 beautifulsoup4
@@ -60,7 +58,6 @@ streamlit
 test-tube
 timm
 torchmetrics
-transformers
 webdataset
 marshmallow
 yapf

From 36a641b5f4c3535611e44105ac8e7cf4cfe5a611 Mon Sep 17 00:00:00 2001
From: Kye
Date: Thu, 23 Nov 2023 23:44:13 -0800
Subject: [PATCH 40/40] requirements.txt clean up

---
 .pre-commit-config.yaml               |  2 +-
 requirements.txt                      | 10 +---------
 swarms/structs/flow.py                |  6 +++---
 swarms/structs/sequential_workflow.py |  1 +
 4 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ae0a4fc0..0c936705 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -7,7 +7,7 @@ repos:
     rev: 'v0.0.255'
     hooks:
       - id: ruff
-        args: [--fix]
+        args: [--fix, --unsafe-fixes]
   - repo: https://github.com/nbQA-dev/nbQA
     rev: 1.6.3
     hooks:
diff --git a/requirements.txt b/requirements.txt
index d75d5d1a..067356df 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,9 +3,6 @@ transformers
 pandas
 langchain
 nest_asyncio
-pegasusx
-google-generativeai
-EdgeGPT
 langchain-experimental
 playwright
 wget==3.2
@@ -24,9 +21,7 @@ soundfile
 huggingface-hub
 google-generativeai
 sentencepiece
-duckduckgo-search
 PyPDF2
-agent-protocol
 accelerate
 chromadb
 tiktoken
@@ -54,7 +49,6 @@ openai
 opencv-python
 prettytable
 safetensors
-streamlit
 test-tube
 timm
 torchmetrics
@@ -62,7 +56,6 @@ webdataset
 marshmallow
 yapf
 autopep8
-dalle3
 cohere
 torchvision
 rich
@@ -71,5 +64,4 @@ rich
 mkdocs
 mkdocs-material
 mkdocs-glightbox
-
-pre-commit
+pre-commit
\ No newline at end of file
diff --git a/swarms/structs/flow.py b/swarms/structs/flow.py
index 166d619e..18a141a3 100644
--- a/swarms/structs/flow.py
+++ b/swarms/structs/flow.py
@@ -135,9 +135,9 @@ def parse_done_token(response: str) -> bool:
 
 class Flow:
     """
-    Flow is a chain like structure from langchain that provides the autonomy to language models
-    to generate sequential responses.
-
+    Flow is the structure that provides autonomy to any LLM in a reliable and effective fashion.
+    It is designed to work with any LLM and provides the following features:
+
     Features:
     * Interactive, AI generates, then user input
     * Message history and performance history fed -> into context -> truncate if too long
diff --git a/swarms/structs/sequential_workflow.py b/swarms/structs/sequential_workflow.py
index 1d7f411d..753ada15 100644
--- a/swarms/structs/sequential_workflow.py
+++ b/swarms/structs/sequential_workflow.py
@@ -149,6 +149,7 @@ class SequentialWorkflow:
         return {task.description: task.result for task in self.tasks}
 
     def remove_task(self, task_description: str) -> None:
+        """Remove a task from the workflow by its description."""
         self.tasks = [
             task for task in self.tasks if task.description != task_description
         ]
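
As a closing illustration of the remove_task behavior documented in PATCH 40/40, the sketch below mirrors the filter that SequentialWorkflow.remove_task applies. It is a minimal, self-contained example, not the swarms implementation: the Task dataclass here is a hypothetical stand-in carrying only the description and result fields this snippet needs.

# Self-contained sketch of the remove_task filter added in PATCH 40/40.
# `Task` is a hypothetical stand-in with only the fields used here; it
# is not the swarms Task class.
from dataclasses import dataclass


@dataclass
class Task:
    description: str
    result: str = ""


def remove_task(tasks: list, task_description: str) -> list:
    # Keep every task whose description does not match, exactly as in
    # SequentialWorkflow.remove_task above.
    return [
        task for task in tasks if task.description != task_description
    ]


tasks = [Task("summarize the report"), Task("draft a reply")]
tasks = remove_task(tasks, "draft a reply")
assert [task.description for task in tasks] == ["summarize the report"]

Because matching is by exact description string, a workflow that reuses a description will drop every task sharing it; descriptions effectively act as keys, consistent with the result dictionary keyed on task.description in the method directly above remove_task.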