import gradio as gr
from transformers import BartTokenizer, BartForConditionalGeneration

# Load a BART model fine-tuned for summarization
model_name = "facebook/bart-large-cnn"  # Example BART model for demonstration
tokenizer = BartTokenizer.from_pretrained(model_name)
model = BartForConditionalGeneration.from_pretrained(model_name)

def generate_text(prompt):
    # bart-large-cnn takes the raw article text; a "summarize:" task prefix is a T5 convention and is not needed here
    inputs = tokenizer.encode(prompt, return_tensors="pt", max_length=1024, truncation=True)
    # Beam search with length constraints to keep the summary concise
    summary_ids = model.generate(inputs, max_length=150, min_length=40, length_penalty=2.0, num_beams=4, early_stopping=True)
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)

interface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(lines=5, placeholder="Enter Text Here..."),
    outputs="text",
    title="Text Summarization with BART",
    description="Enter text to generate a summary.",
)

if __name__ == "__main__":
    interface.launch()
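To sanity-check the model without starting the Gradio server, the summarization function can be called directly from the same module; the sample text below is only an illustrative placeholder, and a longer article will produce a more meaningful summary.

# Quick check of the summarizer outside the UI (sample text is illustrative)
sample = (
    "The Transformer architecture dispenses with recurrence and convolutions, "
    "relying entirely on attention, and now underpins most sequence-to-sequence "
    "models used for abstractive summarization."
)
print(generate_text(sample))  # prints a short abstractive summary of the sample text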