This is the Dream-Coder-v0-Instruct-7B model with joint sampling enabled. Please refer to the paper below for details.
Here is a simple script for running the model. Setting the use_adjust flag to False generates from the base diffusion LM with naive parallel sampling.
import torch
from transformers import AutoModel, AutoTokenizer

model_path = "pbansal/Dream-Coder-v0-Instruct-7B-Adjust"

# Load the model and tokenizer; trust_remote_code is required for the custom diffusion generation code
model = AutoModel.from_pretrained(model_path, torch_dtype=torch.bfloat16, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = model.to("cuda").eval()
use_adjust = True  # Set to False to sample from the base model with naive parallel sampling
messages = [
    {"role": "user", "content": "Write a quick sort algorithm."}
]

# Build the chat-formatted prompt and move it to the GPU
inputs = tokenizer.apply_chat_template(
    messages, return_tensors="pt", return_dict=True, add_generation_prompt=True
)
input_ids = inputs.input_ids.to(device="cuda")
attention_mask = inputs.attention_mask.to(device="cuda")
# Run diffusion generation; steps == max_new_tokens decodes one token per step
output = model.diffusion_generate(
    input_ids,
    attention_mask=attention_mask,
    max_new_tokens=768,
    output_history=True,       # record the intermediate denoising states
    return_dict_in_generate=True,
    steps=768,
    temperature=0.1,
    top_p=0.95,
    alg="entropy",             # entropy-based order for choosing which tokens to unmask
    alg_temp=0.,
    use_adjust=use_adjust,     # enable adjusted joint sampling
)
# Decode only the newly generated tokens (strip the prompt) and cut at the first EOS
generations = [
    tokenizer.decode(g[len(p):].tolist())
    for p, g in zip(input_ids, output.sequences)
]
print(generations[0].split(tokenizer.eos_token)[0])
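Because output_history=True and return_dict_in_generate=True are set, the returned object should also carry the intermediate denoising states. Below is a minimal sketch for inspecting how the sequence fills in over the course of sampling; it assumes output.history is a sequence of token-id tensors, one per recorded step, as in the base Dream generation API.

# Sketch only: assumes output.history holds one (batch, seq_len) token-id tensor per recorded step
num_steps = len(output.history)
for step in (0, num_steps // 2, num_steps - 1):
    state = output.history[step][0]  # first (and only) sequence in the batch
    text = tokenizer.decode(state[len(input_ids[0]):].tolist())
    print(f"--- step {step} ---")
    print(text.split(tokenizer.eos_token)[0])

Early steps will still show mask tokens at positions that have not yet been committed, which makes it easy to see the order in which the sampler fills in the response.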