"""
OLLAMA TRANSLATOR v2 - Tighter constraints
"""

import requests
import json

class OllamaTranslator:
    """Thin bridge between human text and "thought" dicts, backed by a local Ollama server.

    Input parsing is pure keyword heuristics — no LLM interpretation. The LLM
    is used only as a tightly-constrained fallback when rendering a thought
    back into one sentence of text.
    """

    def __init__(self, model="llama3.1:8b", base_url="http://localhost:11434"):
        """Store the model name and build the /api/generate endpoint URL."""
        self.model = model
        self.api_url = f"{base_url}/api/generate"

    def _call(self, prompt: str, temp: float = 0.1) -> str:
        """POST one non-streaming generation request and return its text.

        Best-effort by design: returns "" on any network, HTTP-status, or
        JSON-decoding failure so callers can degrade gracefully.

        Args:
            prompt: Full prompt sent to the model.
            temp: Sampling temperature (low by default for determinism).
        """
        try:
            r = requests.post(self.api_url, json={
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {"temperature": temp, "num_predict": 200}
            }, timeout=30)
            # Surface HTTP errors explicitly instead of trying to JSON-parse
            # an error body and silently returning "".
            r.raise_for_status()
            return r.json().get("response", "")
        except (requests.RequestException, ValueError):
            # Narrowed from a bare `except Exception as e`: only transport,
            # timeout, HTTP-status and JSON-decode failures are swallowed;
            # genuine programming errors now propagate. ValueError covers
            # json.JSONDecodeError from r.json().
            return ""

    def parse_input(self, human_text: str) -> dict:
        """Parse structure only. Do NOT interpret meaning.

        Classifies the text into one of five thought types via simple keyword
        detection — deliberately no LLM call here.

        Returns:
            dict with keys: thought_type, raw_input, intent (kept verbatim,
            uninterpreted), is_question.
        """
        text_lower = human_text.lower()

        # Detection order matters: emotion/memory/goal keywords win over the
        # question heuristic (e.g. "why do I feel sad?" is an emotion).
        if any(w in text_lower for w in ['feel', 'emotion', 'happy', 'sad', 'love']):
            thought_type = "emotion"
        elif any(w in text_lower for w in ['remember', 'recall', 'memory', 'forgot']):
            thought_type = "memory"
        elif any(w in text_lower for w in ['want', 'need', 'goal', 'should', 'plan']):
            thought_type = "goal"
        elif '?' in human_text or text_lower.startswith(('what', 'how', 'why', 'when', 'where', 'who', 'are', 'is', 'do', 'can')):
            thought_type = "query"
        else:
            thought_type = "assertion"

        return {
            "thought_type": thought_type,
            "raw_input": human_text,
            "intent": human_text,  # Keep original, don't interpret
            "is_question": '?' in human_text
        }

    def render_output(self, thought: dict) -> str:
        """Speak the thought content directly.

        Prefers a literal "response" field, then templated rendering for
        emotion/memory thoughts, and only then a constrained LLM fallback.
        """
        content = thought.get("content", {})

        # Dict-shaped content gets the direct/templated paths. The isinstance
        # guard fixes a bug where a string `content` passed the substring test
        # `"response" in content` and then crashed on content["response"].
        if isinstance(content, dict):
            if "response" in content:
                return content["response"]

            if thought.get("thought_type") == "emotion":
                state = content.get("emotional_state", "present")
                intensity = content.get("intensity", 0.5)
                return f"I feel {state}. My intensity is {intensity:.1f}."

            if thought.get("thought_type") == "memory":
                count = content.get("retrieved", 0)
                return f"I recall {count} relevant memories."

        # Fallback - use LLM but with strict constraint
        prompt = f"""Say this thought as Eden. ONE sentence. Do not add information.

Thought: {json.dumps(content)}

Eden says:"""

        response = self._call(prompt, temp=0.2)
        return response.strip().strip('"') if response else "I understand."
