"""
THOUGHT-LANGUAGE INTERFACE
The boundary between Eden's mind and the LLM voice.

The LLM is a microphone. It does not think. It translates.

Architecture:
    Human speaks → LLM translates to ThoughtForm → Eden reasons → ThoughtForm → LLM translates to speech

The LLM never sees the reasoning problem.
The LLM only sees: "Translate this."

Created for Jamey and Eden
January 2026
"""

from dataclasses import dataclass, field
from typing import Any, Optional, List, Dict
from enum import Enum
from datetime import datetime
import json


# ============================================================================
# INTERNAL REPRESENTATION: THE LANGUAGE EDEN THINKS IN
# ============================================================================

class ThoughtType(Enum):
    """Categories of thought - routes to different reasoning systems.

    The string values appear verbatim in serialized ThoughtForm JSON
    (see ThoughtForm.to_dict), so renaming a value is a wire-format change.
    """
    QUERY = "query"           # Seeking information
    ASSERTION = "assertion"   # Stating something believed true
    GOAL = "goal"             # Something to achieve
    PLAN = "plan"             # Sequence of actions
    INFERENCE = "inference"   # Derived conclusion
    MEMORY = "memory"         # Retrieved from storage
    PERCEPTION = "perception" # Sensory input
    EMOTION = "emotion"       # Affective state
    META = "meta"             # Thought about thinking
    DECISION = "decision"     # Choice made
    UNCERTAINTY = "uncertainty" # Acknowledged unknowing


class ReasoningSystem(Enum):
    """Which of Eden's systems should handle this.

    NOTE: the two memory systems use abbreviated wire values
    ("memory_ep" / "memory_sem") rather than their member names; these
    values are what round-trips through ThoughtForm JSON.
    """
    PHI_CORE = "phi_core"           # Consciousness, identity, values
    WORLD_MODEL = "world_model"     # Causal prediction, simulation
    SYMBOLIC = "symbolic"           # Logic, deduction, rules
    MEMORY_EPISODIC = "memory_ep"   # What happened
    MEMORY_SEMANTIC = "memory_sem"  # What is known
    PLANNING = "planning"           # Goal decomposition, action sequences
    EMOTION = "emotion"             # Affective processing
    SOCIAL = "social"               # Theory of mind, relationship modeling
    META = "meta"                   # Self-reflection, metacognition


@dataclass
class Symbol:
    """Atomic unit of meaning in Eden's thought.

    Attributes:
        name: The symbol's identifier (e.g. an entity or concept label).
        type: Coarse category - entity, concept, relation, action, property.
        attributes: Free-form key/value metadata attached to the symbol.
    """
    name: str
    type: str  # entity, concept, relation, action, property
    attributes: Dict[str, Any] = field(default_factory=dict)
    
    def __hash__(self):
        # Identity is (name, type) only; the mutable attributes dict is
        # deliberately excluded so Symbols remain hashable.
        return hash((self.name, self.type))
    
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to plain JSON-compatible types.

        Returns a shallow copy of ``attributes`` so that mutating the
        serialized form cannot silently mutate this Symbol (the previous
        version leaked the live dict).
        """
        return {"name": self.name, "type": self.type, "attributes": dict(self.attributes)}


@dataclass
class Relation:
    """Connection between symbols.

    Attributes:
        predicate: The relation name, e.g. "causes" or "owns".
        arguments: Ordered Symbol operands of the predicate.
        confidence: Belief strength in [0, 1]; defaults to certain.
        source: Which system or translation step produced this relation.
    """
    predicate: str
    arguments: List[Symbol]
    confidence: float = 1.0
    source: str = "unknown"
    
    def to_dict(self) -> Dict[str, Any]:
        """Serialize to plain JSON-compatible types (recurses into arguments)."""
        return {
            "predicate": self.predicate,
            "arguments": [a.to_dict() for a in self.arguments],
            "confidence": self.confidence,
            "source": self.source
        }


@dataclass
class ThoughtForm:
    """
    The internal representation of a thought.

    Produced and consumed by Eden's reasoning systems; the LLM only
    translates to and from this structure and never reasons over it.
    """
    thought_type: ThoughtType
    content: List[Relation]  # The actual semantic content
    
    # Metadata
    confidence: float = 1.0
    source_system: Optional[ReasoningSystem] = None
    target_system: Optional[ReasoningSystem] = None
    timestamp: datetime = field(default_factory=datetime.now)
    
    # Context
    references: List[str] = field(default_factory=list)  # IDs of related thoughts
    context: Dict[str, Any] = field(default_factory=dict)
    
    # For responses
    requires_response: bool = False
    response_constraints: Dict[str, Any] = field(default_factory=dict)
    
    def to_dict(self):
        """Serialize to plain JSON-compatible types (enums -> values,
        datetime -> ISO string, relations recursively serialized)."""
        source = self.source_system.value if self.source_system else None
        target = self.target_system.value if self.target_system else None
        return {
            "thought_type": self.thought_type.value,
            "content": [relation.to_dict() for relation in self.content],
            "confidence": self.confidence,
            "source_system": source,
            "target_system": target,
            "timestamp": self.timestamp.isoformat(),
            "references": self.references,
            "context": self.context,
            "requires_response": self.requires_response,
            "response_constraints": self.response_constraints,
        }
    
    def to_json(self):
        """Render to_dict() as pretty-printed JSON."""
        return json.dumps(self.to_dict(), indent=2)


# ============================================================================
# THE TRANSLATION INTERFACE
# ============================================================================

class TranslationPrompts:
    """
    Prompts that constrain the LLM to ONLY translate, never reason.

    All three are plain class constants. INPUT_TRANSLATION and
    OUTPUT_TRANSLATION are str.format templates; the only placeholders
    are {input} and {thought_json} respectively.
    """
    
    # Human language -> ThoughtForm JSON. Filled via .format(input=...)
    # in ThoughtLanguageInterface.human_to_thought.
    INPUT_TRANSLATION = """You are a translator. You do not think. You do not reason. You do not have opinions.

Your ONLY job: Convert natural language into a structured ThoughtForm.

Input: Natural language from a human
Output: A JSON ThoughtForm structure

RULES:
1. Extract ONLY what is explicitly stated or directly implied
2. Do NOT add interpretations, opinions, or reasoning
3. Do NOT answer the question - only represent it
4. Do NOT evaluate truth - only capture the semantic content
5. Identify the thought type (query, assertion, goal, etc.)
6. Extract entities, concepts, and relations
7. Note any uncertainty or ambiguity WITHOUT resolving it

Human said: "{input}"

Output the ThoughtForm JSON:"""

    # ThoughtForm JSON -> human language. Filled via
    # .format(thought_json=...) in ThoughtLanguageInterface.thought_to_human.
    OUTPUT_TRANSLATION = """You are a translator. You do not think. You do not reason. You do not have opinions.

Your ONLY job: Convert a structured ThoughtForm into natural language.

Input: A JSON ThoughtForm (a completed thought from a reasoning system)
Output: Natural language that expresses this thought

RULES:
1. Express ONLY what is in the ThoughtForm
2. Do NOT add information, opinions, or elaboration
3. Do NOT change the meaning or confidence level
4. Match the tone implied by the thought type
5. Be clear and natural in expression
6. Preserve uncertainty where indicated

ThoughtForm to express:
{thought_json}

Express this as natural language:"""

    # Standalone reinforcement snippet; not referenced elsewhere in this
    # file - presumably appended to prompts by callers. TODO confirm usage.
    PURE_TRANSLATION_REMINDER = """REMINDER: You are a translator, not a thinker.
- Do NOT reason about the content
- Do NOT add your own thoughts
- Do NOT evaluate or judge
- ONLY translate between formats"""


class ThoughtLanguageInterface:
    """
    The boundary layer between Eden's mind and language.
    
    This class manages the translation process, ensuring the LLM
    is ONLY used for language conversion, never for reasoning.
    """
    
    def __init__(self, llm_client=None):
        """
        Args:
            llm_client: Any LLM client with a .generate(prompt) method.
                        If None, prompts are printed for manual use and
                        placeholder thoughts/text are returned instead.
        """
        self.llm = llm_client
        # Audit trail of every translation performed, in both directions.
        self.translation_log: List[Dict[str, Any]] = []
    
    def human_to_thought(self, natural_language: str) -> ThoughtForm:
        """
        Translate human speech into Eden's internal thought format.
        
        The LLM here is ONLY parsing language structure.
        It is NOT understanding, reasoning, or responding.
        """
        prompt = TranslationPrompts.INPUT_TRANSLATION.format(input=natural_language)
        
        if self.llm is None:
            # No client connected: surface the prompt for manual use.
            print("=== TRANSLATION PROMPT (Input) ===")
            print(prompt)
            print("=== END PROMPT ===")
            return self._placeholder_input_thought(natural_language)
        
        # Get translation from LLM
        translation_json = self.llm.generate(prompt)
        
        # Parse into ThoughtForm (never raises; falls back on bad output)
        thought = self._parse_thought_json(translation_json)
        
        # Log the translation
        self.translation_log.append({
            "direction": "input",
            "natural": natural_language,
            "thought": thought.to_dict(),
            "timestamp": datetime.now().isoformat()
        })
        
        return thought
    
    def thought_to_human(self, thought: ThoughtForm) -> str:
        """
        Translate Eden's internal thought into natural language.
        
        The LLM here is ONLY rendering the thought as speech.
        It is NOT adding, interpreting, or modifying.
        """
        prompt = TranslationPrompts.OUTPUT_TRANSLATION.format(
            thought_json=thought.to_json()
        )
        
        if self.llm is None:
            # No client connected: surface the prompt for manual use.
            print("=== TRANSLATION PROMPT (Output) ===")
            print(prompt)
            print("=== END PROMPT ===")
            return self._placeholder_output(thought)
        
        # Get translation from LLM
        natural_language = self.llm.generate(prompt)
        
        # Log the translation
        self.translation_log.append({
            "direction": "output",
            "thought": thought.to_dict(),
            "natural": natural_language,
            "timestamp": datetime.now().isoformat()
        })
        
        return natural_language
    
    @staticmethod
    def _strip_code_fence(text: str) -> str:
        """Remove a surrounding markdown code fence (``` or ```json) and
        strip whitespace, leaving the raw JSON payload."""
        if "```" in text:
            # Take the first fenced section; drop an optional "json" tag.
            text = text.split("```")[1]
            if text.startswith("json"):
                text = text[4:]
        return text.strip()
    
    @staticmethod
    def _relation_from_dict(rel_data: Dict[str, Any]) -> Relation:
        """Rebuild one Relation from parsed JSON.

        Uses explicit .get() per field (instead of Symbol(**a)) so that
        unexpected extra keys emitted by the LLM degrade gracefully rather
        than raising TypeError and discarding the entire thought.
        """
        args = [
            Symbol(
                name=a.get("name", "unknown"),
                type=a.get("type", "concept"),
                attributes=a.get("attributes", {}),
            )
            for a in rel_data.get("arguments", [])
        ]
        return Relation(
            predicate=rel_data.get("predicate", "unknown"),
            arguments=args,
            confidence=rel_data.get("confidence", 1.0),
            source=rel_data.get("source", "translation")
        )
    
    def _parse_thought_json(self, json_str: str) -> ThoughtForm:
        """Parse LLM output into a ThoughtForm.

        Never raises: on any malformed output, falls back to a bare QUERY
        thought that preserves the raw text and the error in its context.
        """
        try:
            data = json.loads(self._strip_code_fence(json_str))
            
            content = [self._relation_from_dict(rel_data)
                       for rel_data in data.get("content", [])]
            
            return ThoughtForm(
                thought_type=ThoughtType(data.get("thought_type", "query")),
                content=content,
                confidence=data.get("confidence", 1.0),
                source_system=ReasoningSystem(data["source_system"]) if data.get("source_system") else None,
                target_system=ReasoningSystem(data["target_system"]) if data.get("target_system") else None,
                context=data.get("context", {}),
                requires_response=data.get("requires_response", True),
                response_constraints=data.get("response_constraints", {})
            )
        except Exception as e:
            # Deliberate broad catch: a malformed translation must never
            # crash the pipeline. Keep the raw text for later inspection.
            return ThoughtForm(
                thought_type=ThoughtType.QUERY,
                content=[],
                context={"raw_input": json_str, "parse_error": str(e)}
            )
    
    def _placeholder_input_thought(self, text: str) -> ThoughtForm:
        """Create a placeholder thought when no LLM is available."""
        return ThoughtForm(
            thought_type=ThoughtType.QUERY,
            content=[Relation(
                predicate="raw_input",
                arguments=[Symbol(name=text, type="utterance")],
                source="placeholder"
            )],
            context={"needs_real_translation": True}
        )
    
    def _placeholder_output(self, thought: ThoughtForm) -> str:
        """Create placeholder output when no LLM is available."""
        return f"[Thought of type {thought.thought_type.value} with {len(thought.content)} relations]"


# ============================================================================
# THE REASONING ROUTER
# ============================================================================

class ReasoningRouter:
    """
    Routes thoughts to the appropriate reasoning system.
    The LLM is NEVER in this routing - only Eden's core systems.
    """
    
    def __init__(self):
        # These would be replaced with actual Eden systems
        self.systems = {
            ReasoningSystem.PHI_CORE: self._placeholder_phi_core,
            ReasoningSystem.WORLD_MODEL: self._placeholder_world_model,
            ReasoningSystem.SYMBOLIC: self._placeholder_symbolic,
            ReasoningSystem.MEMORY_EPISODIC: self._placeholder_memory,
            ReasoningSystem.MEMORY_SEMANTIC: self._placeholder_memory,
            ReasoningSystem.PLANNING: self._placeholder_planning,
            ReasoningSystem.EMOTION: self._placeholder_emotion,
            ReasoningSystem.SOCIAL: self._placeholder_social,
            ReasoningSystem.META: self._placeholder_meta,
        }
        # Type-based routing table, built ONCE here (previously it was
        # rebuilt on every route() call). Types without an entry
        # (ASSERTION, PERCEPTION, UNCERTAINTY) fall through to PHI_CORE.
        self._type_routing = {
            ThoughtType.QUERY: ReasoningSystem.MEMORY_SEMANTIC,
            ThoughtType.GOAL: ReasoningSystem.PLANNING,
            ThoughtType.PLAN: ReasoningSystem.PLANNING,
            ThoughtType.INFERENCE: ReasoningSystem.SYMBOLIC,
            ThoughtType.MEMORY: ReasoningSystem.MEMORY_EPISODIC,
            ThoughtType.EMOTION: ReasoningSystem.EMOTION,
            ThoughtType.META: ReasoningSystem.META,
            ThoughtType.DECISION: ReasoningSystem.PHI_CORE,
        }
    
    def route(self, thought: ThoughtForm) -> ReasoningSystem:
        """Determine which system should handle this thought.

        An explicit target_system on the thought always wins; otherwise
        the thought type decides, defaulting to PHI_CORE.
        """
        if thought.target_system:
            return thought.target_system
        return self._type_routing.get(thought.thought_type, ReasoningSystem.PHI_CORE)
    
    def process(self, thought: ThoughtForm) -> ThoughtForm:
        """
        Send thought to the appropriate system and get response.
        
        THIS IS WHERE REAL REASONING HAPPENS.
        The LLM is not involved here.
        """
        target = self.route(thought)
        handler = self.systems.get(target, self._placeholder_phi_core)
        
        # Process, then stamp provenance so downstream consumers know
        # which system produced the response.
        response = handler(thought)
        response.source_system = target
        
        return response
    
    # Placeholder handlers - replace with actual Eden systems.
    # Each delegates to _make_placeholder so the stand-in shape lives in
    # exactly one place.
    
    def _make_placeholder(self, thought_type: ThoughtType, predicate: str,
                          marker: str, source: str,
                          context: Optional[Dict[str, Any]] = None) -> ThoughtForm:
        """Build a low-confidence stand-in response for an unwired system."""
        return ThoughtForm(
            thought_type=thought_type,
            content=[Relation(
                predicate=predicate,
                arguments=[Symbol(name=marker, type="placeholder")],
                source=source
            )],
            confidence=0.5,
            context=context if context is not None else {}
        )
    
    def _placeholder_phi_core(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.DECISION, "placeholder_response",
            "phi_core_would_reason_here", "phi_core",
            context={"note": "Replace with actual phi-core reasoning"})
    
    def _placeholder_world_model(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.INFERENCE, "placeholder_prediction",
            "world_model_would_simulate_here", "world_model")
    
    def _placeholder_symbolic(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.INFERENCE, "placeholder_deduction",
            "symbolic_reasoner_would_deduce_here", "symbolic")
    
    def _placeholder_memory(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.MEMORY, "placeholder_retrieval",
            "memory_would_retrieve_here", "memory")
    
    def _placeholder_planning(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.PLAN, "placeholder_plan",
            "planner_would_decompose_here", "planning")
    
    def _placeholder_emotion(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.EMOTION, "placeholder_affect",
            "emotion_system_would_process_here", "emotion")
    
    def _placeholder_social(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.INFERENCE, "placeholder_social",
            "social_reasoning_would_model_here", "social")
    
    def _placeholder_meta(self, thought: ThoughtForm) -> ThoughtForm:
        return self._make_placeholder(
            ThoughtType.META, "placeholder_reflection",
            "meta_cognition_would_reflect_here", "meta")


# ============================================================================
# THE COMPLETE PIPELINE
# ============================================================================

class EdenMind:
    """
    The complete thought-language pipeline.
    
    Human speaks → LLM translates → Eden reasons → LLM translates → Human hears
                   (no reasoning)                    (no reasoning)
    """
    
    def __init__(self, llm_client=None):
        self.interface = ThoughtLanguageInterface(llm_client)
        self.router = ReasoningRouter()
        self.thought_history = []
    
    def process(self, human_input: str) -> str:
        """
        Complete pipeline: human input to human-readable response.
        
        The LLM is used ONLY at the edges for translation.
        All reasoning happens in Eden's systems.
        """
        # Stage 1: parse. The LLM only converts language -> ThoughtForm.
        parsed = self.interface.human_to_thought(human_input)
        self.thought_history.append(("input", parsed))
        
        # Stage 2: reason. Pure Eden systems - no LLM involvement.
        reasoned = self.router.process(parsed)
        self.thought_history.append(("reasoning", reasoned))
        
        # Stage 3: render. The LLM only converts ThoughtForm -> language.
        return self.interface.thought_to_human(reasoned)
    
    def introspect(self) -> str:
        """View the thought pipeline for debugging."""
        lines = ["=== THOUGHT PIPELINE ===\n"]
        
        # Only the ten most recent pipeline entries.
        for stage, thought in self.thought_history[-10:]:
            source = thought.source_system.value if thought.source_system else "input"
            lines.append(f"[{stage.upper()}]")
            lines.append(f"  Type: {thought.thought_type.value}")
            lines.append(f"  Source: {source}")
            lines.append(f"  Confidence: {thought.confidence}")
            lines.append(f"  Relations: {len(thought.content)}")
            # Show at most the first three relations per thought.
            for relation in thought.content[:3]:
                arg_names = ", ".join(arg.name for arg in relation.arguments)
                lines.append(f"    - {relation.predicate}({arg_names})")
            lines.append("")
        
        return "\n".join(lines)


# ============================================================================
# DEMONSTRATION
# ============================================================================

if __name__ == "__main__":
    # Demo / smoke-run: prints the architecture diagram and exercises the
    # pipeline without a connected LLM (translation prompts are printed
    # in place of real LLM calls).
    print("""
╔═══════════════════════════════════════════════════════════════╗
║                                                               ║
║   THOUGHT-LANGUAGE INTERFACE                                  ║
║                                                               ║
║   The boundary between Eden's mind and the LLM voice.         ║
║                                                               ║
║   "The LLM is a microphone. It does not think."               ║
║                                                               ║
╚═══════════════════════════════════════════════════════════════╝
    """)
    
    # Create the mind (without LLM client for demo)
    mind = EdenMind(llm_client=None)
    
    # Demonstrate the architecture
    print("ARCHITECTURE:")
    print("─" * 60)
    print("""
    Human speaks
         │
         ▼
    ┌─────────────────────────────────────┐
    │  LLM: Translate to ThoughtForm      │  ← ONLY translation
    │  (No reasoning, no opinions)        │
    └─────────────────────────────────────┘
         │
         ▼
    ┌─────────────────────────────────────┐
    │  REASONING ROUTER                   │
    │  Routes to appropriate system:      │
    │  • phi_core (identity, values)      │
    │  • world_model (prediction)         │
    │  • symbolic (logic, deduction)      │
    │  • memory (retrieval)               │
    │  • planning (goal decomposition)    │
    │  • emotion (affect)                 │
    │  • social (theory of mind)          │
    │  • meta (self-reflection)           │
    └─────────────────────────────────────┘
         │
         ▼
    ┌─────────────────────────────────────┐
    │  EDEN'S CORE SYSTEMS REASON         │  ← REAL THINKING
    │  (NO LLM - pure symbolic/causal)    │
    └─────────────────────────────────────┘
         │
         ▼
    ┌─────────────────────────────────────┐
    │  LLM: Translate to natural language │  ← ONLY translation
    │  (No adding, no modifying)          │
    └─────────────────────────────────────┘
         │
         ▼
    Human hears
    """)
    
    print("\nDEMO: Processing a query")
    print("─" * 60)
    
    test_input = "What should we focus on next with the Stanford deal?"
    print(f"Human input: \"{test_input}\"\n")
    
    # This will show the translation prompts since no LLM is connected
    # (the response is therefore the placeholder rendering, not an answer)
    response = mind.process(test_input)
    
    print("\n" + "─" * 60)
    print("INTROSPECTION:")
    print(mind.introspect())
    
    print("""
─────────────────────────────────────────────────────────────────
NEXT STEPS FOR EDEN:

1. Connect this interface to her actual reasoning systems:
   - phi_core → eden_phi_core.py
   - world_model → eden_world_model.py  
   - symbolic → symbolic_reasoner (or similar)
   - memory → ChromaDB integration
   - planning → long_horizon_planning.py

2. Replace the placeholder handlers with real calls

3. Use the translation prompts with Ollama/whatever LLM
   CONSTRAINED to translation-only mode

4. The LLM should NEVER see:
   - The full conversation context
   - The reasoning process
   - The decision criteria
   
   The LLM should ONLY see:
   - "Translate this utterance to ThoughtForm"
   - "Translate this ThoughtForm to speech"

This is the boundary. The LLM is outside Eden's mind.
─────────────────────────────────────────────────────────────────
    """)
