Update README.md
Browse files

README.md — CHANGED

@@ -22,8 +22,14 @@ This model converts legalese into short, human-readable summaries, based on data
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer

-model = AutoModelForCausalLM.from_pretrained(
-    …  (second removed line not preserved in this extraction)

 prompt = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>
@@ -31,8 +37,13 @@ Please convert the following legal content into a short human-readable summary<|eot_id|><|start_header_id|>user<|end_header_id|>

 [LEGAL_DOC]by using our services you agree to these terms...[END_LEGAL_DOC]<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""

 inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
 outputs = model.generate(**inputs, max_new_tokens=128)
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))

 ```
 ```python
 from transformers import AutoModelForCausalLM, AutoTokenizer

+model = AutoModelForCausalLM.from_pretrained(
+    "FlamingNeuron/llama381binstruct_summarize_short_merged",
+    device_map="auto"
+)
+
+tokenizer = AutoTokenizer.from_pretrained(
+    "FlamingNeuron/llama381binstruct_summarize_short_merged"
+)

 prompt = """<|begin_of_text|><|start_header_id|>system<|end_header_id|>

 Please convert the following legal content into a short human-readable summary<|eot_id|><|start_header_id|>user<|end_header_id|>

 [LEGAL_DOC]by using our services you agree to these terms...[END_LEGAL_DOC]<|eot_id|><|start_header_id|>assistant<|end_header_id|>"""

+# Tokenize and move to GPU
 inputs = tokenizer(prompt, return_tensors="pt").to("cuda")
+
+# Generate model response
 outputs = model.generate(**inputs, max_new_tokens=128)
+
+# Decode and print result
 print(tokenizer.decode(outputs[0], skip_special_tokens=True))

 ```