kootaeng2 committed
Commit d2df06f · 1 Parent(s): 4099675
Refactor: Separate model storage to Hugging Face Hub
- .gitattributes +0 -4
- src/emotion_engine.py +13 -16
.gitattributes CHANGED
@@ -1,4 +0,0 @@
-korean-emotion-classifier-final/model.safetensors filter=lfs diff=lfs merge=lfs -text
-*safetensores filter=lfs diff=lfs merge=lfs -text
-*.safetensors filter=lfs diff=lfs merge=lfs -text
-*.pt filter=lfs diff=lfs merge=lfs -text
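With the Git LFS rules removed, the model weights no longer live in the Space repository itself; the code below expects them in a separate model repo on the Hugging Face Hub. As a rough sketch (not part of this commit; the script name, the use of huggingface_hub's create_repo/upload_folder, and the assumption that the repo id matches the one referenced in src/emotion_engine.py are all mine), the fine-tuned folder could be pushed to the Hub once like this:

# upload_model.py -- hypothetical one-off script, not included in this commit
from huggingface_hub import HfApi

api = HfApi()  # authenticates via `huggingface-cli login` or the HF_TOKEN environment variable

# Create the target model repo if it does not exist yet (id taken from src/emotion_engine.py).
api.create_repo(repo_id="koons/korean-emotion-classifier-final", repo_type="model", exist_ok=True)

# Upload the locally fine-tuned model folder (config, tokenizer files, model.safetensors).
api.upload_folder(
    folder_path="korean-emotion-classifier-final",
    repo_id="koons/korean-emotion-classifier-final",
    repo_type="model",
)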
src/emotion_engine.py CHANGED
@@ -4,29 +4,26 @@ import torch
 from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
 import os

+import torch
+from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
+import os
+
 def load_emotion_classifier():
     # --- Modify this part ---
-    #
-
-
-
-
-    base_dir = os.path.dirname(src_dir)
-    # Join the project root and the model folder name to build the exact path.
-    MODEL_PATH = os.path.join(base_dir, "korean-emotion-classifier-final")
-
-    print(f"--- Checking the model path in the deployment environment: [{MODEL_PATH}] ---")
-
+    # Use the model ID on the Hugging Face Hub instead of a local path.
+    MODEL_PATH = "koons/korean-emotion-classifier-final"  # "username/model-name" format
+
+    print(f"Loading the model from the Hugging Face Hub repo '{MODEL_PATH}'...")
+
     try:
-        # local_files_only
-        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH, local_files_only=True)
-        model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH, local_files_only=True)
-        print("✅ Model loaded successfully!")
+        # The local_files_only option is removed so the model is downloaded from the Hub.
+        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
+        model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
+        print("✅ Hugging Face Hub model loaded successfully!")

     except Exception as e:
         print(f"❌ Error while loading model: {e}")
         return None
-    # --- End of modifications ---

     device = 0 if torch.cuda.is_available() else -1
     emotion_classifier = pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
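For reference, a minimal usage sketch of the refactored loader, assuming load_emotion_classifier() returns the emotion_classifier pipeline further down in the file (the return statement, the import path src.emotion_engine, and the sample sentence are assumptions, not part of the shown hunk):

from src.emotion_engine import load_emotion_classifier

classifier = load_emotion_classifier()  # downloads from the Hub on first run, then reuses the local cache
if classifier is not None:
    # Hypothetical sample input; the label names depend on the fine-tuned model's config.
    print(classifier("오늘 정말 행복한 하루였어!"))  # e.g. [{'label': '...', 'score': 0.98}]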