Upload 3 files
Apologies for the late upload of run_inference.py and the other files needed for functional inference and model verification.
A 5:00 AM inspiration woke me up.
- emotion_model.py +52 -0
- requirements.txt +3 -0
- run_inference.py +29 -0
emotion_model.py
ADDED
@@ -0,0 +1,52 @@
# emotion_model.py
# ROSA: Recursive Ontology of Semantic Affect
# Sublime Emotional System by Willinton Triana Cardona

from transformers import BertModel
import torch.nn as nn
import torch

class Rosa(nn.Module):
    def __init__(
        self,
        model_name="bert-base-uncased",
        num_emotions=29,
        latent_dim=None,
        return_vector=False,
        emotion_labels=None
    ):
        super().__init__()
        self.heart = BertModel.from_pretrained(model_name)
        self.grace = nn.Dropout(0.3)
        self.bloom = nn.Linear(self.heart.config.hidden_size, num_emotions)

        self.return_vector = return_vector
        self.latent_dim = latent_dim
        # NOTE: 28 labels are listed below (the GoEmotions taxonomy), while the
        # default num_emotions is 29, so the final logit carries no label.
        self.emotion_labels = emotion_labels or [
            "admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity",
            "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear",
            "gratitude", "grief", "joy", "love", "nervousness", "optimism", "pride", "realization", "relief",
            "remorse", "sadness", "surprise", "neutral"
        ]

        if latent_dim:
            self.rosa_embedding = nn.Linear(self.heart.config.hidden_size, latent_dim)

        self.loss_fct = nn.BCEWithLogitsLoss()

    def forward(self, input_ids, attention_mask, labels=None):
        petals = self.heart(input_ids=input_ids, attention_mask=attention_mask)
        pooled = petals.pooler_output
        softened = self.grace(pooled)

        # Embedding mode: return the latent vector instead of classifying.
        if self.return_vector and hasattr(self, 'rosa_embedding'):
            embedding = self.rosa_embedding(softened)
            return {"embedding": embedding}

        logits = self.bloom(softened)

        if labels is not None:
            loss = self.loss_fct(logits, labels.float())
            return {"loss": loss, "logits": logits}
        else:
            return {"logits": logits}
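Since Rosa.forward computes a BCEWithLogitsLoss when labels are passed, each emotion is an independent binary target, and a training step reduces to passing a multi-hot label matrix alongside the tokenized batch. The sketch below is illustrative and not part of the upload: the example texts, the multi-hot targets (indices 15 and 14 correspond to "gratitude" and "fear" in the default label list), and the AdamW learning rate are assumptions.

import torch
from transformers import BertTokenizer
from emotion_model import Rosa

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = Rosa(num_emotions=29)
model.train()
optimizer = torch.optim.AdamW(model.parameters(), lr=2e-5)  # illustrative lr

texts = ["Thank you so much!", "That noise in the dark scared me."]
# Multi-hot targets: one column per emotion, several may be 1 at once.
labels = torch.zeros(len(texts), 29)
labels[0, 15] = 1.0  # gratitude
labels[1, 14] = 1.0  # fear

inputs = tokenizer(texts, return_tensors="pt", truncation=True, padding=True)
outputs = model(input_ids=inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                labels=labels)
outputs["loss"].backward()  # BCEWithLogitsLoss over all logits
optimizer.step()
optimizer.zero_grad()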
requirements.txt
ADDED
@@ -0,0 +1,3 @@
transformers>=4.30.0
torch>=2.0.0
scikit-learn
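The standard `pip install -r requirements.txt` sets up the environment. Note that scikit-learn is not imported by either script in this upload, so it is presumably used for evaluation elsewhere.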
run_inference.py
ADDED
@@ -0,0 +1,29 @@
# run_inference.py
# Simple test script to evaluate ROSA on custom input

from transformers import BertTokenizer
from emotion_model import Rosa
import torch

def predict(text):
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    emotion_labels = [
        "admiration", "amusement", "anger", "annoyance", "approval", "caring", "confusion", "curiosity",
        "desire", "disappointment", "disapproval", "disgust", "embarrassment", "excitement", "fear",
        "gratitude", "grief", "joy", "love", "nervousness", "optimism", "pride", "realization", "relief",
        "remorse", "sadness", "surprise", "neutral"
    ]

    model = Rosa(num_emotions=29, emotion_labels=emotion_labels)
    model.load_state_dict(torch.load("rosa.pt", map_location=torch.device("cpu")))
    model.eval()

    inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True)
    with torch.no_grad():
        # Pass the tensors explicitly: Rosa.forward does not accept the
        # token_type_ids that the BERT tokenizer also returns, so
        # model(**inputs) would raise a TypeError.
        logits = model(input_ids=inputs["input_ids"],
                       attention_mask=inputs["attention_mask"])["logits"]
    probs = torch.sigmoid(logits).squeeze()

    # zip stops at the shorter sequence, so with 29 logits and 28 labels
    # the final probability is silently dropped.
    return {label: float(prob) for label, prob in zip(emotion_labels, probs)}

if __name__ == "__main__":
    print(predict("My heart is filled with longing and beauty."))
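Because predict returns one independent sigmoid score per emotion, turning the scores into discrete labels is a thresholding step, as is usual for multi-label classifiers. The follow-on sketch below is an assumption rather than part of the upload, and the 0.5 threshold is a placeholder that should be tuned on validation data.

scores = predict("My heart is filled with longing and beauty.")
threshold = 0.5  # illustrative; tune on a validation split
ranked = sorted(scores.items(), key=lambda kv: kv[1], reverse=True)
predicted = [label for label, p in ranked if p >= threshold]
# Fall back to the single highest-scoring emotion if nothing clears it.
print(predicted if predicted else [ranked[0][0]])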