from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch
# Default Hugging Face Hub checkpoint used when no model name is given.
MODEL_NAME = "Jahnviy/ember"


def generate_text(text: str, model_name: str = MODEL_NAME) -> str:
    """Run beam-search generation on *text* with a seq2seq model.

    Loads the tokenizer and model from the Hugging Face Hub (network
    access / local cache required), encodes the stripped input, generates
    with beam search, and returns the decoded output string.

    Args:
        text: Raw input text; leading/trailing whitespace is stripped.
        model_name: Hub id or local path of the checkpoint to load.

    Returns:
        The generated text with special tokens removed.
    """
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Truncate long inputs so they fit the encoder's context window.
    inputs = tokenizer(
        text.strip(),
        return_tensors="pt",
        max_length=256,
        truncation=True,
    )
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        out_ids = model.generate(
            **inputs,
            max_length=40,  # NOTE: total output length cap, not new tokens
            min_length=8,
            num_beams=4,
            length_penalty=2.0,
            no_repeat_ngram_size=2,
            early_stopping=True,
        )
    return tokenizer.decode(out_ids[0], skip_special_tokens=True)


if __name__ == "__main__":
    # Same demo behavior as the original flat script, but importing this
    # module no longer downloads the model or runs generation.
    print(generate_text("your input text here"))