From d09c67d7c7366a8b4a1ac51959e922c6f15fe267 Mon Sep 17 00:00:00 2001
From: Kye
Date: Wed, 30 Aug 2023 11:16:35 -0400
Subject: [PATCH] model: no quantization

Former-commit-id: fa71bb910d80e4bccb6bc666110d1f2e1a41e570
---
 swarms/models/huggingface.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/swarms/models/huggingface.py b/swarms/models/huggingface.py
index 9fd795c3..d0607dcb 100644
--- a/swarms/models/huggingface.py
+++ b/swarms/models/huggingface.py
@@ -30,7 +30,7 @@ class HuggingFaceLLM:
 
         try:
             self.tokenizer = AutoTokenizer.from_pretrained(self.model_id)
-            self.model = AutoModelForCausalLM.from_pretrained(self.model_id, quantization_config=bnb_config)
+            self.model = AutoModelForCausalLM.from_pretrained(self.model_id)  # quantization_config=bnb_config)
             self.model.to(self.device)
         except Exception as e:
             self.logger.error(f"Failed to load the model or the tokenizer: {e}")
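
Note (not part of the patch): the change drops the bitsandbytes quantization_config from model loading. Below is a minimal sketch of how quantization could instead be made opt-in, assuming a hypothetical `quantize` flag and `bnb_config` name; it is not the repository's actual API, just one way to keep both paths.

    # Sketch only: optional 4-bit quantization via bitsandbytes (transformers API).
    # The `quantize` parameter and `bnb_config` name are illustrative assumptions.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

    def load_model(model_id: str, device: str = "cuda", quantize: bool = False):
        """Load a causal LM, optionally 4-bit quantized."""
        tokenizer = AutoTokenizer.from_pretrained(model_id)

        if quantize:
            # Requires the bitsandbytes package and a CUDA device.
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_compute_dtype=torch.bfloat16,
            )
            # Device placement is handled by the quantized loader; calling
            # .to(device) on a bitsandbytes-quantized model is not supported.
            model = AutoModelForCausalLM.from_pretrained(
                model_id, quantization_config=bnb_config, device_map="auto"
            )
        else:
            # Unquantized path, matching the behavior after this patch.
            model = AutoModelForCausalLM.from_pretrained(model_id)
            model.to(device)

        return tokenizer, model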