prelington committed on
Commit
5f8a165
·
verified ·
1 Parent(s): 9511c2e

Create ProTalk_Stable.py

Files changed (1)
  1. ProTalk_Stable.py +45 -0
ProTalk_Stable.py ADDED
@@ -0,0 +1,45 @@
+ !pip install -q transformers torch accelerate
+
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+
+ model_name = "distilgpt2"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name).to(device)
+
+ system_prompt = "You are ProTalk, a professional AI assistant. Answer politely, be witty, and remember the conversation context."
+
+ chat_history = []
+
+ MAX_HISTORY = 6  # only keep last 6 messages to avoid repetition
+
+ while True:
+     user_input = input("User: ")
+     if user_input.lower() == "exit":
+         break
+
+     chat_history.append(f"User: {user_input}")
+     # keep only last MAX_HISTORY entries
+     relevant_history = chat_history[-MAX_HISTORY:]
+     prompt = system_prompt + "\n" + "\n".join(relevant_history) + "\nProTalk:"
+
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=100,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+         repetition_penalty=1.2,
+         pad_token_id=tokenizer.eos_token_id
+     )
+
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     # clean response to remove prompt echo
+     response = response.replace(prompt, "").strip()
+
+     print(f"ProTalk: {response}")
+     chat_history.append(f"ProTalk: {response}")