Upload folder using huggingface_hub
app.py
CHANGED
@@ -8,12 +8,12 @@ import torch
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Load the tokenizer
-tokenizer = AutoTokenizer.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("rxpbtn21/t5-small-lora-summarizer")
 
 # Load the base model and then the LoRA adapter
 # Ensure the base model is also moved to the correct device
 base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small").to(device)
-model = PeftModel.from_pretrained(base_model, "
+model = PeftModel.from_pretrained(base_model, "rxpbtn21/t5-small-lora-summarizer")
 model.eval()
 
 def summarize(text):
@@ -26,11 +26,11 @@ def summarize(text):
         return summary
     except Exception as e:
         # Log the error and return an informative message
-        print(f"Error during summarization: {
-        return f"An error occurred during summarization. Please check the Space logs for details. Error: {
+        print(f"Error during summarization: {e}")
+        return f"An error occurred during summarization. Please check the Space logs for details. Error: {e}"
 
 # Create Gradio interface
 iface = gr.Interface(fn=summarize, inputs="text", outputs="text", title="LoRA Fine-tuned T5-small Summarizer")
 
 # Launch the interface
-iface.launch(share=False)
+iface.launch(share=False)
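
The hunks above skip the body of summarize() (lines 20-25 of app.py). For context, a minimal sketch of what that function plausibly looks like, assuming the conventional "summarize: " T5 task prefix and generic generation settings; the prefix, length limits, and beam settings are assumptions, and only the except block is taken from the diff:

def summarize(text):
    try:
        # T5 checkpoints expect a task prefix; "summarize: " is assumed here
        inputs = tokenizer(
            "summarize: " + text,
            return_tensors="pt",
            max_length=512,
            truncation=True,
        ).to(device)
        with torch.no_grad():
            output_ids = model.generate(
                **inputs,
                max_length=150,   # assumed cap on summary length
                num_beams=4,      # assumed beam search width
                early_stopping=True,
            )
        summary = tokenizer.decode(output_ids[0], skip_special_tokens=True)
        return summary
    except Exception as e:
        # Log the error and return an informative message
        print(f"Error during summarization: {e}")
        return f"An error occurred during summarization. Please check the Space logs for details. Error: {e}"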
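
A note on the loading pattern: PeftModel.from_pretrained keeps the LoRA adapter weights as a thin layer on top of the frozen t5-small base, which is why the base model is loaded first and moved to the device. If a single plain transformers model were ever needed (for example, to drop the peft dependency at inference time), the adapter could be folded in; a sketch, assuming this is a standard LoRA adapter so merge_and_unload() applies:

# Fold the LoRA weights into the base model and discard the PEFT wrapper,
# leaving an ordinary transformers seq2seq model for generation.
merged_model = model.merge_and_unload()
merged_model.eval()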