# Gradio app for LoRA GPT-2
# Author: Sudheer-N — commit 00a0cde
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
# Load base GPT-2 + your LoRA adapter from Hugging Face Hub
base_model = AutoModelForCausalLM.from_pretrained("gpt2")
# NOTE(review): tokenizer is loaded from the adapter repo, not from "gpt2" —
# presumably that repo ships (or falls back to) the base GPT-2 tokenizer files;
# confirm the two vocabularies match.
tokenizer = AutoTokenizer.from_pretrained("n-sudheer/ns-lora-gpt2-demo")
# Wrap the base model with the LoRA weights; from_pretrained returns the
# model ready for inference.
model = PeftModel.from_pretrained(base_model, "n-sudheer/ns-lora-gpt2-demo")
def generate_text(prompt, max_length=50):
    """Generate a continuation of *prompt* with the LoRA-adapted GPT-2.

    Args:
        prompt: Text to continue.
        max_length: Total length of the output in tokens, prompt included.
            Coerced to ``int`` because Gradio sliders deliver floats.

    Returns:
        The decoded generation (prompt + continuation) as a string.
    """
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_length=int(max_length),  # Slider values arrive as floats.
        do_sample=True,
        top_k=50,
        # GPT-2 defines no pad token; passing EOS explicitly avoids the
        # per-call "Setting pad_token_id to eos_token_id" warning.
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
# Gradio UI wiring: prompt text + length slider in, generated text out.
prompt_box = gr.Textbox(label="Prompt")
length_slider = gr.Slider(10, 200, value=50, step=5, label="Max length")
output_box = gr.Textbox(label="Generated Text")

demo = gr.Interface(
    fn=generate_text,
    inputs=[prompt_box, length_slider],
    outputs=output_box,
    title="LoRA GPT-2 Demo",
    description="A GPT-2 model fine-tuned with LoRA, deployed on Hugging Face Spaces.",
)

if __name__ == "__main__":
    # Start the local Gradio server (Spaces calls this entry point too).
    demo.launch()