Madras1 committed
Commit 1dfefd9 · verified · 1 Parent(s): fbe0aed

Update jade/heavy_mode.py

Files changed (1)
  jade/heavy_mode.py  +4 -4
jade/heavy_mode.py CHANGED
@@ -45,7 +45,7 @@ class JadeHeavyAgent:
         self.models = {
             "Kimi": "moonshotai/kimi-k2-instruct-0905", # Groq (Logic/Reasoning)
             "Mistral": "mistral-large-latest", # Mistral API
-            "Llama": "meta-llama/llama-4-maverick-17b-128e-instruct", # Groq
+            "Llama": "openai/gpt-oss-120b", # Groq
             "Qwen": "qwen/qwen3-coder:free" # OpenRouter (Fallback if key exists) or Groq equivalent
             # Note: The original script used qwen/qwen3-235b... on OpenRouter.
             # If no OpenRouter key, we might need a fallback on Groq or skip.
@@ -82,7 +82,7 @@ class JadeHeavyAgent:
         # If Mistral/OpenRouter key missing, fallback to Llama-3-70b on Groq for diversity?
         target_model = self.models.get(model_name)
         if not target_model or (model_name == "Mistral" and not self.mistral) or (model_name == "Qwen" and not self.openrouter):
-            target_model = "llama-3.3-70b-versatile" # Fallback
+            target_model = "openai/gpt-oss-120b" # Fallback
 
         resp = await self.groq_client.chat.completions.create(
             model=target_model,
@@ -117,12 +117,12 @@ class JadeHeavyAgent:
             resp = await self.mistral.chat.complete_async(model=self.models["Mistral"], messages=messages)
             content = resp.choices[0].message.content
         elif model_name == "Qwen" and self.openrouter:
-            resp = await self.openrouter.chat.completions.create(model="qwen/qwen3-235b-a22b:free", messages=messages)
+            resp = await self.openrouter.chat.completions.create(model="qwen/qwen3-coder:free", messages=messages)
             content = resp.choices[0].message.content
         else:
             target_model = self.models.get(model_name)
             if not target_model or (model_name == "Mistral" and not self.mistral) or (model_name == "Qwen" and not self.openrouter):
-                target_model = "llama-3.3-70b-versatile"
+                target_model = "openai/gpt-oss-120b"
 
             resp = await self.groq_client.chat.completions.create(
                 model=target_model,
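
For context, the touched branches implement a registry-plus-fallback dispatch: the requested alias is looked up in self.models, and when the alias is unknown or the provider client for that alias (Mistral or OpenRouter) is not configured, the request is routed to a single Groq-hosted fallback model, which this commit switches from llama-3.3-70b-versatile to openai/gpt-oss-120b. The sketch below is a minimal, assumption-labeled reconstruction of that fallback path only; the class name HeavyRoutingSketch, the ask method, and the environment-variable wiring are illustrative and not taken from jade/heavy_mode.py, and the direct Mistral/OpenRouter branches shown in the diff are intentionally omitted.

    # Minimal sketch of the registry + fallback routing pattern seen in this diff.
    # Assumptions (not from the actual file): class/method names and env-var setup.
    import os
    from groq import AsyncGroq

    FALLBACK_MODEL = "openai/gpt-oss-120b"  # Groq-hosted fallback chosen in this commit

    class HeavyRoutingSketch:
        def __init__(self):
            self.groq_client = AsyncGroq(api_key=os.environ["GROQ_API_KEY"])
            self.mistral = None      # would be set only if a Mistral API key is configured
            self.openrouter = None   # would be set only if an OpenRouter API key is configured
            self.models = {
                "Kimi": "moonshotai/kimi-k2-instruct-0905",
                "Mistral": "mistral-large-latest",
                "Llama": "openai/gpt-oss-120b",
                "Qwen": "qwen/qwen3-coder:free",
            }

        async def ask(self, model_name: str, messages: list[dict]) -> str:
            target_model = self.models.get(model_name)
            # Fall back to the Groq-hosted model when the alias is unknown or the
            # provider-specific client for that alias is missing.
            if (not target_model
                    or (model_name == "Mistral" and not self.mistral)
                    or (model_name == "Qwen" and not self.openrouter)):
                target_model = FALLBACK_MODEL
            resp = await self.groq_client.chat.completions.create(
                model=target_model, messages=messages
            )
            return resp.choices[0].message.content

Used as, for example, asyncio.run(HeavyRoutingSketch().ask("Qwen", [{"role": "user", "content": "hi"}])), this would hit openai/gpt-oss-120b on Groq whenever no OpenRouter client exists, mirroring the fallback behavior the commit changes.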