Update test.py
test.py CHANGED
@@ -5,6 +5,8 @@ import numpy as np
 from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
 import streamlit as st
 
+device = 'cuda' if torch.cuda.is_available() else 'cpu'
+
 @st.cache_resource
 def get_model_and_tokenizer(model_name):
     return load_model(model_name)
@@ -14,7 +16,7 @@ default_model_name = "cahya/bert-base-indonesian-522M"
 tokenizer, model = load_model(default_model_name)
 
 # Move model to GPU
-model = model.to(
+model = model.to(device)
 
 # Prediction function
 def predict_hoax(title, content):
@@ -26,7 +28,7 @@ def predict_hoax(title, content):
 
     text = f"{title} [SEP] {content}"
     inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=256)
-    inputs = {key: value.to(
+    inputs = {key: value.to(device) for key, value in inputs.items()}  # Move inputs to GPU
 
     with torch.no_grad():
         outputs = model(**inputs)
@@ -41,7 +43,7 @@ def predict_proba_for_lime(texts):
     results = []
     for text in texts:
         inputs = tokenizer(text, return_tensors='pt', padding=True, truncation=True, max_length=256)
-        inputs = {key: value.to(
+        inputs = {key: value.to(device) for key, value in inputs.items()}  # Move inputs to GPU
 
         with torch.no_grad():
             outputs = model(**inputs)
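
For context, a minimal sketch of the pattern this commit adopts: select the compute device once at startup, move the model to it at load time, and move every tokenized batch to the same device before the forward pass. The removed lines are cut off in the diff render above but presumably hard-coded the CUDA device. load_model's real definition is outside the hunks shown, so the transformers-based stand-in below, and the softmax over two classes, are assumptions:

# Minimal sketch, assuming transformers + torch are installed and that
# load_model wraps AutoTokenizer/AutoModelForSequenceClassification
# (the app's actual load_model is defined outside the hunks shown).
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

device = 'cuda' if torch.cuda.is_available() else 'cpu'

def load_model(model_name):
    # Hypothetical stand-in for the app's load_model helper.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSequenceClassification.from_pretrained(model_name)
    return tokenizer, model

tokenizer, model = load_model("cahya/bert-base-indonesian-522M")
model = model.to(device)  # move the weights once, at startup
model.eval()              # inference mode: disables dropout

def predict_hoax(title, content):
    text = f"{title} [SEP] {content}"
    inputs = tokenizer(text, return_tensors='pt', padding=True,
                       truncation=True, max_length=256)
    # Input tensors must live on the same device as the model, otherwise
    # the forward pass fails with a cpu/cuda device-mismatch error.
    inputs = {key: value.to(device) for key, value in inputs.items()}
    with torch.no_grad():
        outputs = model(**inputs)
    return torch.softmax(outputs.logits, dim=-1).squeeze().tolist()

The @st.cache_resource decorator in the first hunk is Streamlit's cache for global resources such as models, so loading happens once per process rather than on every script rerun; note, though, that line 16 as rendered still calls load_model directly, bypassing the cached get_model_and_tokenizer wrapper.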
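The predict_proba_for_lime helper in the last hunk points at LIME's text explainer, which repeatedly calls a batch probability function on perturbed copies of the input and expects an (n_samples, n_classes) array back. A sketch of that wiring, reusing the tokenizer, model, and device from the sketch above; the class names and the example input are assumptions, not taken from this diff:

# Sketch of feeding the batch predict-proba function to LIME.
import numpy as np
import torch
from lime.lime_text import LimeTextExplainer

def predict_proba_for_lime(texts):
    results = []
    for text in texts:
        inputs = tokenizer(text, return_tensors='pt', padding=True,
                           truncation=True, max_length=256)
        inputs = {key: value.to(device) for key, value in inputs.items()}
        with torch.no_grad():
            outputs = model(**inputs)
        # One probability row per perturbed text; move back to CPU for numpy.
        results.append(torch.softmax(outputs.logits, dim=-1).cpu().numpy()[0])
    return np.array(results)  # shape (n_samples, n_classes), as LIME expects

explainer = LimeTextExplainer(class_names=['non-hoax', 'hoax'])  # assumed labels
explanation = explainer.explain_instance(
    "judul berita [SEP] isi berita",  # hypothetical input in the app's format
    predict_proba_for_lime,
    num_features=10,
)
print(explanation.as_list())  # top weighted tokens with their contributions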