# modelrun.py
from transformers import AutoTokenizer, MT5ForConditionalGeneration, T5Tokenizer
from transformers import Trainer, TrainingArguments
import streamlit as st
import pandas as pd
import torch
from datasets import Dataset, DatasetDict
| prompt = st.text_input("Enter your proverb: ") | |
if prompt:
    # Tokenize the input prompt
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Generate the output ids
    output_ids = model.generate(input_ids, max_length=256)
    # Decode the generated ids back to text and display them
    output_text = tokenizer.decode(output_ids[0], skip_special_tokens=True)
    st.write(output_text)
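# Usage note (assumes Streamlit and Transformers are installed locally):
# run the app with
#   streamlit run modelrun.py
# then enter a proverb in the text box to see the model's generated output.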