Update main_process/moe_router.py
main_process/moe_router.py CHANGED

@@ -60,6 +60,7 @@ class NState(dict):
 
 # ---------------- LLM used for free_narration ----------------
 class SalamandraClient:
+    '''
     def __init__(self, model_id="BSC-LT/salamandra-7b-instruct"):
         self.tokenizer = AutoTokenizer.from_pretrained(model_id)
         self.model = AutoModelForCausalLM.from_pretrained(
@@ -67,6 +68,7 @@ class SalamandraClient:
             device_map="auto",
             torch_dtype=torch.bfloat16
         )
+    '''
 
     def chat(self, prompt) -> str:
         return get_from_prompt(prompt)
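
Net effect of the hunks: the Hugging Face model load in SalamandraClient.__init__ (AutoTokenizer / AutoModelForCausalLM for BSC-LT/salamandra-7b-instruct) is fenced off inside a ''' ... ''' block, so instantiating the client no longer loads local weights, and chat() keeps delegating to get_from_prompt. The sketch below is a minimal illustration of the resulting behaviour, not the repository's exact code; the get_from_prompt stub is hypothetical and only stands in for the helper already defined in main_process/moe_router.py.

# Minimal sketch of SalamandraClient after this change, under the
# assumptions stated above. The stub exists only to make the snippet
# self-contained and runnable.

def get_from_prompt(prompt: str) -> str:
    # Hypothetical stand-in for the real helper in main_process/moe_router.py.
    return f"[routed response for: {prompt}]"


class SalamandraClient:
    # The original __init__ (tokenizer/model loading with device_map="auto"
    # and torch_dtype=torch.bfloat16) is now wrapped in a triple-quoted
    # string, so instantiation falls back to the default constructor and
    # loads nothing.

    def chat(self, prompt) -> str:
        # Generation is delegated to get_from_prompt instead of a local model.
        return get_from_prompt(prompt)


if __name__ == "__main__":
    client = SalamandraClient()  # cheap: no tokenizer or model is created
    print(client.chat("Describe the scene for the free_narration step."))

Because the added ''' block is the first statement in the class body, Python treats it as the class docstring; the wrapped loading code is parsed but never executed.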