AlainDeLong committed
Commit 025ec54 · Parent: 59d1475

chore: push model from local

.gitignore CHANGED
@@ -206,5 +206,5 @@ marimo/_static/
 marimo/_lsp/
 __marimo__/
 
-# Kafka Commands
-command.txt
+# Temp files
+temp.py
app/models/twitter-roberta-base-sentiment-latest/config.json ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "/home/jupyter/misc/tweeteval/TweetEval_models/sentiment/sentiment_latest_2021/",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "gradient_checkpointing": false,
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 768,
+  "id2label": {
+    "0": "negative",
+    "1": "neutral",
+    "2": "positive"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 3072,
+  "label2id": {
+    "negative": 0,
+    "neutral": 1,
+    "positive": 2
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 12,
+  "num_hidden_layers": 12,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "torch_dtype": "float32",
+  "transformers_version": "4.13.0.dev0",
+  "type_vocab_size": 1,
+  "vocab_size": 50265
+}
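
For reference, the id2label / label2id entries in this config are what turn the classifier's output indices into sentiment labels. A minimal sketch of reading that mapping, assuming the transformers package is installed and the model folder added in this commit is present locally (the MODEL_DIR path below is illustrative):

from transformers import AutoConfig

# Illustrative local path, mirroring the folder added in this commit.
MODEL_DIR = "app/models/twitter-roberta-base-sentiment-latest"

# AutoConfig reads config.json and exposes the mappings shown above.
config = AutoConfig.from_pretrained(MODEL_DIR)
print(config.id2label)  # {0: 'negative', 1: 'neutral', 2: 'positive'}
print(config.label2id)  # {'negative': 0, 'neutral': 1, 'positive': 2}
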
app/models/twitter-roberta-base-sentiment-latest/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
app/models/twitter-roberta-base-sentiment-latest/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d24a3e32a88ed1c4e5b789fc6644e2e767500554e954b27dccf52a8e762cbae
+size 501045531
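
Note that pytorch_model.bin is stored via Git LFS, so a plain clone may contain only the small pointer file shown above rather than the ~500 MB weights, in which case loading the model would fail. A minimal standard-library sketch for catching that before loading (the path and helper name are illustrative, not part of this repository):

import os

# Illustrative local path, mirroring the file added in this commit.
WEIGHTS = "app/models/twitter-roberta-base-sentiment-latest/pytorch_model.bin"

def looks_like_lfs_pointer(path: str) -> bool:
    """Return True if the file is still a Git LFS pointer rather than real weights."""
    # LFS pointer files are tiny text files that start with a "version" header.
    if os.path.getsize(path) > 1024:
        return False
    with open(path, "rb") as f:
        return f.read(7) == b"version"

if looks_like_lfs_pointer(WEIGHTS):
    raise RuntimeError("pytorch_model.bin is an LFS pointer; run `git lfs pull` first.")
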
app/models/twitter-roberta-base-sentiment-latest/special_tokens_map.json ADDED
@@ -0,0 +1 @@
+{"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
app/models/twitter-roberta-base-sentiment-latest/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
app/services/sentiment_service.py CHANGED
@@ -1,6 +1,8 @@
 import os
+from pathlib import Path
 from typing import List, Dict, Any
-from app.core.config import settings
+
+# from app.core.config import settings
 
 import torch
 import numpy as np
@@ -27,14 +29,29 @@ class SentimentService:
             print("GPU not found. Loading model onto CPU.")
 
         # Load model, tokenizer, and config (for id2label mapping)
-        model_name = settings.SENTIMENT_MODEL
-        cache_dir = os.getenv("TRANSFORMERS_CACHE", "/tmp/hf_cache")
 
-        self.tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
-        self.config = AutoConfig.from_pretrained(model_name)
-        self.model = AutoModelForSequenceClassification.from_pretrained(
-            model_name, cache_dir=cache_dir
-        ).to(self.device)
+        BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+        MODEL_DIR = os.path.join(
+            BASE_DIR, "models", "twitter-roberta-base-sentiment-latest"
+        )
+        print(MODEL_DIR)
+
+        if not os.path.exists(MODEL_DIR):
+            raise FileNotFoundError(f"Model folder not found: {MODEL_DIR}")
+
+        # Load tokenizer, config, model
+        self.tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
+        self.config = AutoConfig.from_pretrained(MODEL_DIR)
+        self.model = AutoModelForSequenceClassification.from_pretrained(MODEL_DIR).to(
+            self.device
+        )
+
+        # self.tokenizer = AutoTokenizer.from_pretrained(model_source)
+        # self.config = AutoConfig.from_pretrained(model_source)
+        # self.model = AutoModelForSequenceClassification.from_pretrained(
+        #     model_source
+        # ).to(self.device)
+
         self.model.eval()  # set model to inference mode
         print("Sentiment model loaded successfully.")
 
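
For context, the rewritten initializer only loads the artifacts from the local folder; the sketch below shows one way the tokenizer, model, and id2label config could be combined for batched inference. It is illustrative only: the predict_sentiment helper and its signature are not part of this repository, and it assumes a SentimentService instance with the attributes set above.

import torch

def predict_sentiment(service, texts):
    """Illustrative inference helper built on the attributes set in __init__."""
    # Tokenize the batch and move the tensors to the model's device.
    encoded = service.tokenizer(
        texts, padding=True, truncation=True, return_tensors="pt"
    ).to(service.device)

    with torch.no_grad():  # no gradients needed at inference time
        logits = service.model(**encoded).logits

    probs = torch.softmax(logits, dim=-1)  # per-class probabilities
    preds = probs.argmax(dim=-1)

    # config.id2label maps class indices back to negative/neutral/positive.
    return [
        {"label": service.config.id2label[int(i)], "score": float(p[i])}
        for i, p in zip(preds, probs)
    ]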