diff --git a/.github/workflows/code_quality_control.yml b/.github/workflows/code_quality_control.yml
index 4b94b454..8103750a 100644
--- a/.github/workflows/code_quality_control.yml
+++ b/.github/workflows/code_quality_control.yml
@@ -22,7 +22,7 @@ jobs:
         run: pip install -r requirements.txt

       - name: Find Python files
-        run: find swarms -name "*.py" -type f -exec autopep8 --in-place --aggressive --aggressive {} +
+        run: find swarms_torch -name "*.py" -type f -exec autopep8 --in-place --aggressive --aggressive {} +

       - name: Push changes
         uses: ad-m/github-push-action@master
diff --git a/.github/workflows/lints.yml b/.github/workflows/lints.yml
index b61e471c..229f4b83 100644
--- a/.github/workflows/lints.yml
+++ b/.github/workflows/lints.yml
@@ -22,4 +22,4 @@ jobs:
         run: pip install -r requirements.txt

       - name: Run linters
-        run: pylint swarms
\ No newline at end of file
+        run: pylint swarms_torch
\ No newline at end of file
diff --git a/.github/workflows/pr_request_checks.yml b/.github/workflows/pr_request_checks.yml
index f6ca90ec..4819cd76 100644
--- a/.github/workflows/pr_request_checks.yml
+++ b/.github/workflows/pr_request_checks.yml
@@ -23,5 +23,5 @@ jobs:

       - name: Run tests and checks
         run: |
-          pytest tests/unit
-          pylint swarms
\ No newline at end of file
+          pytest tests/
+          pylint swarms_torch
\ No newline at end of file
diff --git a/.github/workflows/pull-request-links.yml b/.github/workflows/pull-request-links.yml
index 208fad31..e5812fbb 100644
--- a/.github/workflows/pull-request-links.yml
+++ b/.github/workflows/pull-request-links.yml
@@ -15,4 +15,4 @@ jobs:
     steps:
       - uses: readthedocs/actions/preview@v1
         with:
-          project-slug: zeta
\ No newline at end of file
+          project-slug: swarms_torch
\ No newline at end of file
diff --git a/.github/workflows/unit-test.yml b/.github/workflows/unit-test.yml
index d5f72c08..6a0a6481 100644
--- a/.github/workflows/unit-test.yml
+++ b/.github/workflows/unit-test.yml
@@ -24,22 +24,10 @@ jobs:
         run: pip install -r requirements.txt

       - name: Run Python unit tests
-        run: python3 -m unittest tests/swarms
+        run: python3 -m unittest tests/

       - name: Verify that the Docker image for the action builds
         run: docker build . --file Dockerfile

-      - name: Integration test 1
-        uses: ./
-        with:
-          input-one: something
-          input-two: true
-
-      - name: Integration test 2
-        uses: ./
-        with:
-          input-one: something else
-          input-two: false
-
       - name: Verify integration test results
-        run: python3 -m unittest unittesting/swarms
+        run: python3 -m unittest tests/
diff --git a/pyproject.toml b/pyproject.toml
index a10e3014..c2e52944 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -50,6 +50,7 @@ exxa = "*"
 open-interpreter = "*"
 tabulate = "*"
 termcolor = "*"
+black = "*"

 [tool.poetry.dev-dependencies]
 first_dependency = {git = "https://github.com/IDEA-Research/GroundingDINO.git"}
diff --git a/swarms/agents/conversabe_agent.py b/swarms/agents/conversabe_agent.py
index 35808c4b..1ef2f647 100644
--- a/swarms/agents/conversabe_agent.py
+++ b/swarms/agents/conversabe_agent.py
@@ -977,7 +977,7 @@ class ConversableAgent(Agent):
                 )
             elif lang in ["python", "Python"]:
                 if code.startswith("# filename: "):
-                    filename = code[11 : code.find("\n")].strip()
+                    filename = code[11: code.find("\n")].strip()
                 else:
                     filename = None
                 exitcode, logs, image = self.run_code(
diff --git a/swarms/agents/models/groundingdino/datasets/transforms.py b/swarms/agents/models/groundingdino/datasets/transforms.py
index c34a1453..5d6d2cfd 100644
--- a/swarms/agents/models/groundingdino/datasets/transforms.py
+++ b/swarms/agents/models/groundingdino/datasets/transforms.py
@@ -38,7 +38,7 @@ def crop(image, target, region):

     if "masks" in target:
         # FIXME should we update the area here if there are no boxes?
-        target["masks"] = target["masks"][:, i : i + h, j : j + w]
+        target["masks"] = target["masks"][:, i: i + h, j: j + w]
         fields.append("masks")

     # remove elements for which the boxes or masks that have zero area
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py
index a56f369e..91e74de4 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/backbone.py
@@ -159,7 +159,7 @@ class Backbone(BackboneBase):
         ), "Only resnet50 and resnet101 are available."
         assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
         num_channels_all = [256, 512, 1024, 2048]
-        num_channels = num_channels_all[4 - len(return_interm_indices) :]
+        num_channels = num_channels_all[4 - len(return_interm_indices):]
         super().__init__(backbone, train_backbone, num_channels, return_interm_indices)


@@ -224,7 +224,7 @@ def build_backbone(args):
             use_checkpoint=use_checkpoint,
         )

-        bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
+        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]
     else:
         raise NotImplementedError("Unknown backbone {}".format(args.backbone))
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
index 1a74ca36..b476627e 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/backbone/swin_transformer.py
@@ -649,7 +649,7 @@ class SwinTransformer(nn.Module):
                 qk_scale=qk_scale,
                 drop=drop_rate,
                 attn_drop=attn_drop_rate,
-                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
+                drop_path=dpr[sum(depths[:i_layer]): sum(depths[: i_layer + 1])],
                 norm_layer=norm_layer,
                 # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                 downsample=downsamplelist[i_layer],
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py b/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py
index 2ad9c020..7a46aa70 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/bertwarper.py
@@ -221,9 +221,9 @@ def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer
             position_ids[row, col] = 0
         else:
             attention_mask[
-                row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
+                row, previous_col + 1: col + 1, previous_col + 1: col + 1
             ] = True
-            position_ids[row, previous_col + 1 : col + 1] = torch.arange(
+            position_ids[row, previous_col + 1: col + 1] = torch.arange(
                 0, col - previous_col, device=input_ids.device
             )

@@ -273,13 +273,13 @@ def generate_masks_with_special_tokens_and_transfer_map(
             position_ids[row, col] = 0
         else:
             attention_mask[
-                row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
+                row, previous_col + 1: col + 1, previous_col + 1: col + 1
             ] = True
-            position_ids[row, previous_col + 1 : col + 1] = torch.arange(
+            position_ids[row, previous_col + 1: col + 1] = torch.arange(
                 0, col - previous_col, device=input_ids.device
             )
             c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
-            c2t_maski[previous_col + 1 : col] = True
+            c2t_maski[previous_col + 1: col] = True
             cate_to_token_mask_list[row].append(c2t_maski)

         previous_col = col
diff --git a/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py b/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py
index 2bb3e9b8..9488f827 100644
--- a/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py
+++ b/swarms/agents/models/groundingdino/models/GroundingDINO/utils.py
@@ -76,7 +76,7 @@ def gen_encoder_output_proposals(
     proposals = []
     _cur = 0
     for lvl, (H_, W_) in enumerate(spatial_shapes):
-        mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(
+        mask_flatten_ = memory_padding_mask[:, _cur: (_cur + H_ * W_)].view(
             N_, H_, W_, 1
         )
         valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
diff --git a/swarms/agents/models/groundingdino/util/utils.py b/swarms/agents/models/groundingdino/util/utils.py
index 7a0815ef..90af343d 100644
--- a/swarms/agents/models/groundingdino/util/utils.py
+++ b/swarms/agents/models/groundingdino/util/utils.py
@@ -619,7 +619,7 @@ def get_phrases_from_posmap(
 ):
     assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
     if posmap.dim() == 1:
-        posmap[0 : left_idx + 1] = False
+        posmap[0: left_idx + 1] = False
         posmap[right_idx:] = False
         non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
         token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
diff --git a/swarms/agents/models/groundingdino/util/vl_utils.py b/swarms/agents/models/groundingdino/util/vl_utils.py
index 4fd8592c..44ff4d5e 100644
--- a/swarms/agents/models/groundingdino/util/vl_utils.py
+++ b/swarms/agents/models/groundingdino/util/vl_utils.py
@@ -41,7 +41,7 @@ def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
                 positive_map[j, beg_pos] = 1
                 break
             else:
-                positive_map[j, beg_pos : end_pos + 1].fill_(1)
+                positive_map[j, beg_pos: end_pos + 1].fill_(1)

     return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)

diff --git a/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py b/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py
index f94bee1f..35170835 100644
--- a/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py
+++ b/swarms/agents/models/segment_anything/segment_anything/modeling/mask_decoder.py
@@ -139,7 +139,7 @@ class MaskDecoder(nn.Module):
         # Run the transformer
         hs, src = self.transformer(src, pos_src, tokens)
         iou_token_out = hs[:, 0, :]
-        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
+        mask_tokens_out = hs[:, 1: (1 + self.num_mask_tokens), :]

         # Upscale mask embeddings and predict masks using the mask tokens
         src = src.transpose(1, 2).view(b, c, h, w)
diff --git a/swarms/agents/models/segment_anything/segment_anything/utils/amg.py b/swarms/agents/models/segment_anything/segment_anything/utils/amg.py
index be064071..cb67232a 100644
--- a/swarms/agents/models/segment_anything/segment_anything/utils/amg.py
+++ b/swarms/agents/models/segment_anything/segment_anything/utils/amg.py
@@ -101,7 +101,7 @@ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
     ), "Batched iteration must have inputs of all the same size."
     n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
     for b in range(n_batches):
-        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
+        yield [arg[b * batch_size: (b + 1) * batch_size] for arg in args]


 def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
@@ -142,7 +142,7 @@ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
     idx = 0
     parity = False
     for count in rle["counts"]:
-        mask[idx : idx + count] = parity
+        mask[idx: idx + count] = parity
         idx += count
         parity ^= True
     mask = mask.reshape(w, h)
diff --git a/swarms/agents/multi_modal_visual_agent.py b/swarms/agents/multi_modal_visual_agent.py
index 7ec8b03d..e4d005db 100644
--- a/swarms/agents/multi_modal_visual_agent.py
+++ b/swarms/agents/multi_modal_visual_agent.py
@@ -213,12 +213,12 @@ def blend_gt2pt(old_image, new_image, sigma=0.15, steps=100):
     kernel[steps:-steps, :steps] = left
     kernel[steps:-steps, -steps:] = right

-    pt_gt_img = easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]]
+    pt_gt_img = easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]]
     gaussian_gt_img = (
         kernel * gt_img_array + (1 - kernel) * pt_gt_img
     )  # gt img with blur img
     gaussian_gt_img = gaussian_gt_img.astype(np.int64)
-    easy_img[pos_h : pos_h + old_size[1], pos_w : pos_w + old_size[0]] = gaussian_gt_img
+    easy_img[pos_h: pos_h + old_size[1], pos_w: pos_w + old_size[0]] = gaussian_gt_img
     gaussian_img = Image.fromarray(easy_img)
     return gaussian_img

diff --git a/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py b/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
index 2198af25..18d87578 100644
--- a/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
+++ b/swarms/agents/multi_modal_workers/omni_agent/omni_chat.py
@@ -317,7 +317,7 @@ def find_json(s):
     s = s.replace("'", '"')
     start = s.find("{")
     end = s.rfind("}")
-    res = s[start : end + 1]
+    res = s[start: end + 1]
     res = res.replace("\n", "")
     return res

diff --git a/swarms/embeddings/openai.py b/swarms/embeddings/openai.py
index 230dade9..2eba8c71 100644
--- a/swarms/embeddings/openai.py
+++ b/swarms/embeddings/openai.py
@@ -347,7 +347,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j : j + self.embedding_ctx_length])
+                tokens.append(token[j: j + self.embedding_ctx_length])
                 indices.append(i)

         batched_embeddings: List[List[float]] = []
@@ -366,7 +366,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         for i in _iter:
             response = embed_with_retry(
                 self,
-                input=tokens[i : i + _chunk_size],
+                input=tokens[i: i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
@@ -428,7 +428,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
                 disallowed_special=self.disallowed_special,
             )
             for j in range(0, len(token), self.embedding_ctx_length):
-                tokens.append(token[j : j + self.embedding_ctx_length])
+                tokens.append(token[j: j + self.embedding_ctx_length])
                 indices.append(i)

         batched_embeddings: List[List[float]] = []
@@ -436,7 +436,7 @@ class OpenAIEmbeddings(BaseModel, Embeddings):
         for i in range(0, len(tokens), _chunk_size):
             response = await async_embed_with_retry(
                 self,
-                input=tokens[i : i + _chunk_size],
+                input=tokens[i: i + _chunk_size],
                 **self._invocation_params,
             )
             batched_embeddings.extend(r["embedding"] for r in response["data"])
diff --git a/swarms/models/chat_openai.py b/swarms/models/chat_openai.py
index 380623c3..7ffc9136 100644
--- a/swarms/models/chat_openai.py
+++ b/swarms/models/chat_openai.py
@@ -458,7 +458,7 @@ class BaseOpenAI(BaseLLM):
             )
             params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
         sub_prompts = [
-            prompts[i : i + self.batch_size]
+            prompts[i: i + self.batch_size]
             for i in range(0, len(prompts), self.batch_size)
         ]
         return sub_prompts
@@ -469,7 +469,7 @@ class BaseOpenAI(BaseLLM):
         """Create the LLMResult from the choices and prompts."""
         generations = []
         for i, _ in enumerate(prompts):
-            sub_choices = choices[i * self.n : (i + 1) * self.n]
+            sub_choices = choices[i * self.n: (i + 1) * self.n]
             generations.append(
                 [
                     Generation(
diff --git a/swarms/structs/nonlinear_workflow.py b/swarms/structs/nonlinear_workflow.py
index 831d106e..33c9d5d3 100644
--- a/swarms/structs/nonlinear_workflow.py
+++ b/swarms/structs/nonlinear_workflow.py
@@ -1,6 +1,6 @@
-from typing import List, Dict, Any, Union
-from concurrent.futures import Executor, ThreadPoolExecutor, as_completed
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from graphlib import TopologicalSorter
+from typing import Dict, List


 class Task:
diff --git a/swarms/tools/autogpt.py b/swarms/tools/autogpt.py
index 0603d6f3..3d3e6600 100644
--- a/swarms/tools/autogpt.py
+++ b/swarms/tools/autogpt.py
@@ -128,7 +128,7 @@ class WebpageQATool(BaseTool):
         results = []
         # TODO: Handle this with a MapReduceChain
         for i in range(0, len(web_docs), 4):
-            input_docs = web_docs[i : i + 4]
+            input_docs = web_docs[i: i + 4]
             window_result = self.qa_chain(
                 {"input_documents": input_docs, "question": question},
                 return_only_outputs=True,
diff --git a/swarms/tools/developer.py b/swarms/tools/developer.py
index 04e4b30a..062f463b 100644
--- a/swarms/tools/developer.py
+++ b/swarms/tools/developer.py
@@ -306,7 +306,7 @@ class WriteCommand:
     @staticmethod
     def from_str(command: str) -> "WriteCommand":
         filepath = command.split(WriteCommand.separator)[0]
-        return WriteCommand(filepath, command[len(filepath) + 1 :])
+        return WriteCommand(filepath, command[len(filepath) + 1:])


 class CodeWriter:
@@ -433,7 +433,7 @@ class ReadCommand:
         if self.start == self.end:
             code = code[self.start - 1]
         else:
-            code = "".join(code[self.start - 1 : self.end])
+            code = "".join(code[self.start - 1: self.end])
         return code

     @staticmethod
@@ -590,9 +590,9 @@ class PatchCommand:

         lines[self.start.line] = (
             lines[self.start.line][: self.start.col]
             + self.content
-            + lines[self.end.line][self.end.col :]
+            + lines[self.end.line][self.end.col:]
         )
-        lines = lines[: self.start.line + 1] + lines[self.end.line + 1 :]
+        lines = lines[: self.start.line + 1] + lines[self.end.line + 1:]

         after = self.write_lines(lines)
diff --git a/swarms/utils/main.py b/swarms/utils/main.py
index 6e5907b2..369c5967 100644
--- a/swarms/utils/main.py
+++ b/swarms/utils/main.py
@@ -365,7 +365,7 @@ class FileHandler:
         try:
            if url.startswith(os.environ.get("SERVER", "http://localhost:8000")):
                 local_filepath = url[
-                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1 :
+                    len(os.environ.get("SERVER", "http://localhost:8000")) + 1:
                 ]
                 local_filename = Path("file") / local_filepath.split("/")[-1]
                 src = self.path / local_filepath