Sudheer-N committed on
Commit
00a0cde
·
1 Parent(s): 51a1fe7

Add Gradio app for LoRA GPT-2

Browse files
Files changed (2) hide show
  1. app.py +32 -0
  2. requirements.txt +10 -0
app.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ from peft import PeftModel
4
+
5
+ # Load base GPT-2 + your LoRA adapter from Hugging Face Hub
6
+ base_model = AutoModelForCausalLM.from_pretrained("gpt2")
7
+ tokenizer = AutoTokenizer.from_pretrained("n-sudheer/ns-lora-gpt2-demo")
8
+ model = PeftModel.from_pretrained(base_model, "n-sudheer/ns-lora-gpt2-demo")
9
+
10
+ def generate_text(prompt, max_length=50):
11
+ inputs = tokenizer(prompt, return_tensors="pt")
12
+ outputs = model.generate(
13
+ **inputs,
14
+ max_length=max_length,
15
+ do_sample=True,
16
+ top_k=50
17
+ )
18
+ return tokenizer.decode(outputs[0], skip_special_tokens=True)
19
+
20
+ demo = gr.Interface(
21
+ fn=generate_text,
22
+ inputs=[
23
+ gr.Textbox(label="Prompt"),
24
+ gr.Slider(10, 200, value=50, step=5, label="Max length")
25
+ ],
26
+ outputs=gr.Textbox(label="Generated Text"),
27
+ title="LoRA GPT-2 Demo",
28
+ description="A GPT-2 model fine-tuned with LoRA, deployed on Hugging Face Spaces."
29
+ )
30
+
31
+ if __name__ == "__main__":
32
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ fastapi==0.117.1
3
+ huggingface-hub==0.35.1
4
+ pydantic==2.11.9
5
+ pydantic_core==2.33.2
6
+ torch==2.8.0
7
+ transformers==4.56.2
8
+ uvicorn==0.37.0
9
+ peft==0.17.1
10
+ gradio