iamrazi committed
Commit 71de4a8 · verified · 1 Parent(s): fb4c72b

Update README.md

Files changed (1): README.md +46 -1
README.md CHANGED
@@ -16,4 +16,49 @@ library_name: transformers
  tags:
  - moderation
  - abuse_detection
- ---
+
+
+ To use this model, follow the example below.
+
+ # Load model directly
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ tokenizer = AutoTokenizer.from_pretrained("iamrazi/text-moderation")
+ model = AutoModelForSequenceClassification.from_pretrained("iamrazi/text-moderation")
+
+ model.eval()  # Set model to evaluation mode
+
+ def predict_abuse(text: str, threshold: float = 0.5):
+     """
+     Predict whether a text is abusive.
+
+     Args:
+         text (str): Input text.
+         threshold (float): Probability threshold for classification.
+
+     Returns:
+         label (int): 0 for non-abusive, 1 for abusive.
+         prob (float): Probability of being abusive.
+     """
+     # Tokenize the input text
+     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=128)
+
+     # Forward pass (no gradients needed for inference)
+     with torch.no_grad():
+         outputs = model(**inputs)
+         logits = outputs.logits
+         probas = torch.sigmoid(logits)  # convert raw logits to probabilities
+
+     # For binary classification, take the probability of class 1
+     prob = probas[0][1].item() if probas.shape[1] > 1 else probas[0][0].item()
+
+     # Determine label
+     label = 1 if prob >= threshold else 0
+
+     return label, prob
+
+
+ text = "तुम बहुत गंदे हो 😡"  # Hindi example: "You are very dirty 😡"
+ label, proba = predict_abuse(text)
+ print(f"Label: {label}, Probability: {proba:.2f}")
+
+ Output: Label: 0, Probability: 0.08
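
As a side note, the same checkpoint can also be queried through the transformers text-classification pipeline. The sketch below is not part of this commit and rests on two assumptions: the label names returned come from whatever id2label mapping is stored in the model config, and the pipeline applies its default activation (softmax for single-label models), so scores may differ slightly from the sigmoid-based snippet above.

# Minimal sketch (not from this commit): same checkpoint via the pipeline API.
from transformers import pipeline

classifier = pipeline("text-classification", model="iamrazi/text-moderation")

# Label names depend on the model config's id2label mapping; the score uses
# the pipeline's default activation, so it may not match the sigmoid value above.
result = classifier("तुम बहुत गंदे हो 😡")
print(result)  # e.g. [{'label': ..., 'score': ...}] (illustrative shape only)

Whether to use the pipeline or the explicit predict_abuse helper is mostly a matter of how much control you need over the threshold and the activation applied to the logits.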