# model_manager.py
import asyncio

from transformers import pipeline


class ModelManager:
    """Loads local Hugging Face pipelines and routes task types to them."""

    def __init__(self, cache_dir="./model_cache"):
        self.models = {}
        self.models_loaded = False
        self.cache_dir = cache_dir

    async def load_models_async(self):
        async def load(key, model_name):
            try:
                # pipeline() is a blocking call; run it in a worker thread so the
                # event loop stays free and the models actually load concurrently.
                self.models[key] = await asyncio.to_thread(
                    pipeline,
                    "text-generation",
                    model=model_name,
                    device="cpu",
                    model_kwargs={"cache_dir": self.cache_dir},
                )
            except Exception:
                # A failed download or load stores None so callers can check
                # availability before using the pipeline.
                self.models[key] = None

        tasks = [
            load("chat", "microsoft/DialoGPT-small"),
            load("code", "microsoft/CodeGPT-small-py"),
            load("creative", "microsoft/DialoGPT-small"),
        ]
        await asyncio.gather(*tasks)
        self.models_loaded = True

    def get_model_for_task(self, task_type):
        # Map task labels to loaded pipelines; unknown labels fall back to chat.
        mapping = {
            "CONVERSATION": self.models.get("chat"),
            "CODE_GENERATION": self.models.get("code"),
            "CREATIVE_WRITING": self.models.get("creative"),
        }
        return mapping.get(task_type, self.models.get("chat"))
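

# Usage sketch (not part of the original file): a minimal way to drive the
# manager from an asyncio entry point. The task label and prompt below are
# illustrative values only.
if __name__ == "__main__":
    async def _demo():
        manager = ModelManager()
        await manager.load_models_async()
        generator = manager.get_model_for_task("CODE_GENERATION")
        if generator is not None:
            print(generator("def fibonacci(n):", max_new_tokens=40)[0]["generated_text"])

    asyncio.run(_demo())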


# api_agent.py
import requests


class APIAgent:
    """Thin wrapper around hosted chat-completion APIs (OpenAI, DeepSeek)."""

    def __init__(self, config):
        # config should already contain the keys loaded from /config
        self.config = config
        self.session = requests.Session()

    def call_openai(self, prompt, system_message=""):
        key = self.config.get("openai_api_key", "")
        if not key:
            return None
        url = "https://api.openai.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {key}", "Content-Type": "application/json"}
        payload = {
            "model": "gpt-3.5-turbo",
            "messages": [
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt},
            ],
            "temperature": self.config.get("temperature", 0.7),
            "max_tokens": self.config.get("max_tokens", 600),
        }
        try:
            r = self.session.post(url, headers=headers, json=payload,
                                  timeout=self.config.get("timeout", 30))
        except requests.RequestException:
            return None  # network errors are treated like an unusable response
        if r.ok:
            return r.json()["choices"][0]["message"]["content"]
        return None

    def call_deepseek(self, prompt, system_message=""):
        key = self.config.get("deepseek_api_key", "")
        if not key:
            return None
        # Same shape as the OpenAI call, with the payload adapted for DeepSeek.
        url = "https://api.deepseek.com/v1/chat/completions"
        headers = {"Authorization": f"Bearer {key}", "Content-Type": "application/json"}
        payload = {
            "model": "deepseek-chat",
            "messages": [
                {"role": "system", "content": system_message},
                {"role": "user", "content": prompt},
            ],
        }
        try:
            r = self.session.post(url, headers=headers, json=payload,
                                  timeout=self.config.get("timeout", 30))
        except requests.RequestException:
            return None
        if r.ok:
            return r.json()["choices"][0]["message"]["content"]
        return None
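

# Usage sketch (not part of the original file): the config dict mirrors the
# keys call_openai() reads above; the API key value is a placeholder.
if __name__ == "__main__":
    agent = APIAgent({
        "openai_api_key": "sk-...",  # placeholder, not a real key
        "temperature": 0.7,
        "max_tokens": 600,
        "timeout": 30,
    })
    reply = agent.call_openai("Say hello in one sentence.", system_message="You are concise.")
    print(reply if reply is not None else "No response (missing key or request failed).")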