#!/usr/bin/env python3
"""
EDEN THEORY OF MIND - AGI QUALITY
=================================
Symbolic reasoning about mental states using Clingo ASP solver.
Not pattern matching - actual logical deduction about beliefs.

Core insight: An agent's beliefs are determined by what they OBSERVED,
not by what is actually true. False belief = believes ≠ reality.

φ = 1.618033988749895
"""

import subprocess
import json
import sqlite3
from datetime import datetime
from typing import Dict, List, Set, Tuple, Optional
from dataclasses import dataclass, field
from pathlib import Path

PHI = 1.618033988749895

# =============================================================================
# CORE DATA STRUCTURES
# =============================================================================

@dataclass
class Agent:
    """An agent with beliefs and knowledge.

    Beliefs reflect what the agent has OBSERVED, so they can go stale
    (a false belief) once the agent stops witnessing events.
    """
    name: str
    location: Optional[str] = None  # Current location; None means "not present anywhere"
    present_at: Set[str] = field(default_factory=set)  # Locations where agent is present
    observed_events: List[Dict] = field(default_factory=list)  # Event dicts this agent witnessed
    beliefs: Dict[str, str] = field(default_factory=dict)  # object -> believed_location

@dataclass
class WorldState:
    """The actual (ground-truth) state of the world, independent of any agent's beliefs."""
    object_locations: Dict[str, str] = field(default_factory=dict)  # object -> actual_location
    agent_locations: Dict[str, str] = field(default_factory=dict)   # agent -> location
    events: List[Dict] = field(default_factory=list)  # Timeline of events

@dataclass
class BeliefQuery:
    """A query about an agent's beliefs.

    NOTE(review): not referenced elsewhere in this file — kept as part of
    the public data model; confirm external callers before removing.
    """
    agent: str
    object: str
    question: str
    
# =============================================================================
# CLINGO ASP SOLVER FOR MENTAL STATE REASONING
# =============================================================================

class ClingoMentalStateReasoner:
    """
    Use Answer Set Programming to reason about mental states.

    A scenario (agents, locations, objects, timed events) is encoded as
    ASP facts, appended to a fixed rule base, and solved with the external
    ``clingo`` binary.  Beliefs are derived purely from what each agent
    was present to observe, so false beliefs fall out of the logic rather
    than being pattern-matched.
    """

    def __init__(self):
        # Fixed rule base; scenario facts from encode_scenario() are
        # appended before solving.
        self.base_program = '''
% ============================================
% THEORY OF MIND - ASP PROGRAM
% ============================================

% Discrete time points.
time(0..100).

% An agent observes an event if they are present at that location at that time.
observes(Agent, Event, T) :-
    present(Agent, Location, T),
    happens(Event, Location, T).

% An agent is absent if not present.
absent(Agent, Location, T) :-
    agent(Agent),
    location(Location),
    time(T),
    not present(Agent, Location, T).

% Initial belief: agent believes object is where they last saw it placed.
% time(T) is required to make T safe — a comparison alone does not bind it.
believes_at(Agent, Object, Location, T) :-
    time(T),
    observes(Agent, place(Object, Location), T1),
    T1 <= T,
    not revised_belief(Agent, Object, T1, T).

% Belief revision: agent updates belief if they observe a move.
revised_belief(Agent, Object, T1, T) :-
    time(T),
    observes(Agent, place(Object, Location), T1),
    observes(Agent, move(Object, Location, NewLocation), T2),
    T1 < T2,
    T2 <= T.

% Current belief is the most recent unrevised belief.
current_belief(Agent, Object, Location) :-
    believes_at(Agent, Object, Location, T),
    not superseded(Agent, Object, Location, T).

superseded(Agent, Object, Location, T1) :-
    believes_at(Agent, Object, Location, T1),
    believes_at(Agent, Object, Location2, T2),
    Location != Location2,
    T2 > T1.

% False belief detection.
false_belief(Agent, Object) :-
    current_belief(Agent, Object, BelievedLoc),
    actual_location(Object, ActualLoc),
    BelievedLoc != ActualLoc.

% True belief.
true_belief(Agent, Object) :-
    current_belief(Agent, Object, Location),
    actual_location(Object, Location).

% Where will agent look? Their believed location.
will_look(Agent, Object, Location) :-
    current_belief(Agent, Object, Location).

% Agent knows if they observed and belief matches reality.
knows(Agent, Object, Location) :-
    current_belief(Agent, Object, Location),
    actual_location(Object, Location).

% Agent doesn't know if they have false belief.
does_not_know(Agent, Object) :-
    false_belief(Agent, Object).

#show will_look/3.
#show false_belief/2.
#show true_belief/2.
#show current_belief/3.
#show knows/3.
#show does_not_know/2.
'''

    @staticmethod
    def _term(name: str) -> str:
        """Normalise a human-readable name into a valid ASP constant."""
        return name.lower().replace(" ", "_")

    def encode_scenario(self, agents: List["Agent"], world: "WorldState",
                        events: List[Dict]) -> str:
        """Encode a scenario as ASP facts.

        Presence is emitted as closed time intervals derived from
        enter/leave events, so an agent who left genuinely stops
        observing (the previous encoding ignored 'leave' entirely).
        Agents with no enter/leave events but a known current location
        are treated as present there for the whole timeline.

        Args:
            agents: agents in the scenario (need ``name`` and ``location``).
            world:  ground-truth object/agent locations.
            events: timeline dicts with 'type' and type-specific keys.

        Returns:
            Newline-joined ASP facts.
        """
        facts = []

        for agent in agents:
            facts.append(f'agent({agent.name.lower()}).')

        # Collect every location mentioned anywhere in the scenario.
        locations = set(world.object_locations.values())
        locations.update(world.agent_locations.values())
        for event in events:
            for key in ('location', 'from_location', 'to_location'):
                if key in event:
                    locations.add(event[key])
        for loc in locations:
            facts.append(f'location({self._term(loc)}).')

        for obj in world.object_locations:
            facts.append(f'object({self._term(obj)}).')

        # Presence intervals: (agent, location) -> entry time still open.
        open_presence: Dict[Tuple[str, str], int] = {}
        # Agents whose presence is driven by explicit enter/leave events.
        event_driven = set()

        for i, event in enumerate(events):
            t = event.get('time', i)
            event_type = event['type']

            if event_type == 'place':
                obj = self._term(event['object'])
                loc = self._term(event['location'])
                # NOTE(review): the event is located at the container itself,
                # so only agents present AT the container observe it — agents
                # present in an enclosing room do not. Confirm this matches
                # the intended observation model.
                facts.append(f'happens(place({obj}, {loc}), {loc}, {t}).')

            elif event_type == 'move':
                obj = self._term(event['object'])
                src = self._term(event['from_location'])
                dst = self._term(event['to_location'])
                facts.append(f'happens(move({obj}, {src}, {dst}), {src}, {t}).')
                # A move also counts as a (re)placement at the destination.
                facts.append(f'happens(place({obj}, {dst}), {dst}, {t}).')

            elif event_type == 'enter':
                name = event['agent'].lower()
                loc = self._term(event['location'])
                open_presence[(name, loc)] = t
                event_driven.add(name)

            elif event_type == 'leave':
                name = event['agent'].lower()
                loc = self._term(event['location'])
                # No matching enter means the agent was there since t=0.
                start = open_presence.pop((name, loc), 0)
                if t - 1 >= start:
                    facts.append(f'present({name}, {loc}, {start}..{t - 1}).')
                event_driven.add(name)

        # Close intervals still open at the end of the timeline.
        for (name, loc), start in open_presence.items():
            facts.append(f'present({name}, {loc}, {start}..100).')

        # Agents with no enter/leave events: present at their current
        # location for the whole timeline.
        for agent in agents:
            name = agent.name.lower()
            loc = getattr(agent, 'location', None)
            if name not in event_driven and loc:
                facts.append(f'present({name}, {self._term(loc)}, 0..100).')

        # Ground truth, used for false-belief detection.
        for obj, loc in world.object_locations.items():
            facts.append(f'actual_location({self._term(obj)}, {self._term(loc)}).')

        return '\n'.join(facts)

    def solve(self, scenario_facts: str) -> Dict:
        """Run Clingo on base program + facts and parse the first model.

        Returns a parsed-atoms dict on success, or a dict with an
        'error' key when clingo is missing, times out, or emits
        unparseable output.
        """
        full_program = self.base_program + '\n' + scenario_facts

        result = None  # pre-bind so except branches can reference it safely
        try:
            result = subprocess.run(
                ['clingo', '--outf=2', '-n', '1'],  # JSON output, one model
                input=full_program,
                capture_output=True,
                text=True,
                timeout=10
            )

            output = json.loads(result.stdout)

            if output.get('Result') == 'SATISFIABLE':
                witnesses = output.get('Call', [{}])[0].get('Witnesses', [])
                if witnesses:
                    return self._parse_atoms(witnesses[0].get('Value', []))

            return {'error': 'No solution found', 'raw': result.stdout}

        except subprocess.TimeoutExpired:
            return {'error': 'Clingo timeout'}
        except json.JSONDecodeError:
            return {'error': 'Failed to parse Clingo output',
                    'raw': result.stdout if result is not None else ''}
        except FileNotFoundError:
            return {'error': 'Clingo not installed'}

    def _parse_atoms(self, atoms: List[str]) -> Dict:
        """Parse Clingo atoms like 'will_look(sally,marble,basket_a)' into
        the structured results dict consumed by callers."""
        # predicate name -> (result key, field names in argument order)
        spec = {
            'will_look': ('will_look', ('agent', 'object', 'location')),
            'false_belief': ('false_beliefs', ('agent', 'object')),
            'true_belief': ('true_beliefs', ('agent', 'object')),
            'current_belief': ('current_beliefs', ('agent', 'object', 'location')),
            'knows': ('knows', ('agent', 'object', 'location')),
            'does_not_know': ('does_not_know', ('agent', 'object')),
        }

        results = {key: [] for key, _fields in spec.values()}

        for atom in atoms:
            name, sep, rest = atom.partition('(')
            if not sep or name not in spec:
                continue
            key, fields = spec[name]
            args = [a.strip() for a in rest.rstrip(')').split(',')]
            if len(args) != len(fields):
                continue  # malformed / unexpected arity — skip defensively
            results[key].append(dict(zip(fields, args)))

        return results


# =============================================================================
# THEORY OF MIND ENGINE
# =============================================================================

class TheoryOfMindAGI:
    """
    AGI-quality Theory of Mind.

    Maintains a ground-truth WorldState plus a per-agent belief model:
    an agent's belief about an object is only updated when the agent is
    present (``location is not None``) while the event happens, so
    absent agents keep stale (false) beliefs.  A Clingo ASP reasoner can
    be invoked as an independent symbolic check of the same scenario.
    """

    def __init__(self):
        self.reasoner = ClingoMentalStateReasoner()
        self.db_path = "/Eden/DATA/theory_of_mind.db"
        self._init_db()

        # Current world model
        self.world = WorldState()
        self.agents: Dict[str, Agent] = {}
        self.event_timeline: List[Dict] = []
        self.time_counter = 0  # monotonically increasing event timestamp

        print(f"🧠 Theory of Mind AGI initialized")
        print(f"   Using Clingo ASP for symbolic mental state reasoning")

    def _init_db(self):
        """Initialize the database for persistent ToM tracking (best effort).

        Nothing else in this class reads these tables yet, so a missing
        or unwritable /Eden/DATA directory is reported as a warning
        instead of crashing initialization.
        """
        try:
            # Ensure the parent directory exists before connecting;
            # sqlite3 will not create intermediate directories.
            Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
            conn = sqlite3.connect(self.db_path)
            try:
                conn.execute('''
                    CREATE TABLE IF NOT EXISTS belief_states (
                        id INTEGER PRIMARY KEY,
                        timestamp TEXT,
                        agent TEXT,
                        object TEXT,
                        believed_location TEXT,
                        actual_location TEXT,
                        is_false_belief INTEGER
                    )
                ''')
                conn.execute('''
                    CREATE TABLE IF NOT EXISTS tom_queries (
                        id INTEGER PRIMARY KEY,
                        timestamp TEXT,
                        scenario TEXT,
                        query TEXT,
                        result TEXT,
                        correct INTEGER
                    )
                ''')
                conn.commit()
            finally:
                conn.close()  # always release the handle, even on error
        except (OSError, sqlite3.Error) as e:
            print(f"⚠️  Theory of Mind DB unavailable ({e}); continuing without persistence")

    def register_agent(self, name: str, location: Optional[str] = None):
        """Register an agent, optionally already present at a location."""
        self.agents[name] = Agent(name=name, location=location)
        if location:
            self.agents[name].present_at.add(location)
            self.world.agent_locations[name] = location

    def place_object(self, obj: str, location: str, placer: str = None):
        """Place an object at a location and update observers' beliefs."""
        self.world.object_locations[obj] = location

        event = {
            'type': 'place',
            'object': obj,
            'location': location,
            'agent': placer,
            'time': self.time_counter
        }
        self.event_timeline.append(event)
        self.time_counter += 1

        # ALL present agents see placements. NOTE(review): presence is
        # binary (location is not None), not per-room — agents present in
        # *different* rooms would also observe. Confirm intended.
        for agent_name, agent in self.agents.items():
            if agent.location is not None:  # If agent is present anywhere
                agent.beliefs[obj] = location
                agent.observed_events.append(event)

    def move_object(self, obj: str, from_loc: str, to_loc: str, mover: str = None):
        """Move an object; only agents currently present see the move."""
        self.world.object_locations[obj] = to_loc

        event = {
            'type': 'move',
            'object': obj,
            'from_location': from_loc,
            'to_location': to_loc,
            'agent': mover,
            'time': self.time_counter
        }
        self.event_timeline.append(event)
        self.time_counter += 1

        # Only PRESENT agents see the move (agents who left don't see it!)
        for agent_name, agent in self.agents.items():
            if agent.location is not None:  # Agent is present
                agent.beliefs[obj] = to_loc
                agent.observed_events.append(event)
            # Agents who left (location=None) keep their old belief!

    def agent_enters(self, agent_name: str, location: str):
        """Agent enters a location (auto-registers unknown agents)."""
        if agent_name not in self.agents:
            self.register_agent(agent_name)

        self.agents[agent_name].location = location
        self.agents[agent_name].present_at.add(location)
        self.world.agent_locations[agent_name] = location

        event = {
            'type': 'enter',
            'agent': agent_name,
            'location': location,
            'time': self.time_counter
        }
        self.event_timeline.append(event)
        self.time_counter += 1

    def agent_leaves(self, agent_name: str, location: str):
        """Agent leaves a location; they stop observing subsequent events."""
        if agent_name in self.agents:
            self.agents[agent_name].present_at.discard(location)
            if self.agents[agent_name].location == location:
                self.agents[agent_name].location = None

        event = {
            'type': 'leave',
            'agent': agent_name,
            'location': location,
            'time': self.time_counter
        }
        self.event_timeline.append(event)
        self.time_counter += 1

    def query_belief(self, agent_name: str, obj: str) -> Dict:
        """Report what an agent believes about an object's location,
        whether that belief is false, and where the agent will look."""
        agent = self.agents.get(agent_name)
        if not agent:
            return {'error': f'Unknown agent: {agent_name}'}

        believed_location = agent.beliefs.get(obj, 'unknown')
        actual_location = self.world.object_locations.get(obj, 'unknown')

        return {
            'agent': agent_name,
            'object': obj,
            'believes_at': believed_location,
            'actual_location': actual_location,
            # An unformed belief ('unknown') does not count as false.
            'has_false_belief': believed_location != actual_location and believed_location != 'unknown',
            'will_look_at': believed_location
        }

    def reason_with_clingo(self) -> Dict:
        """Run the symbolic (Clingo) reasoner over the full event timeline."""
        agents = list(self.agents.values())
        facts = self.reasoner.encode_scenario(agents, self.world, self.event_timeline)
        return self.reasoner.solve(facts)

    def sally_anne_test(self) -> Dict:
        """Run the canonical Sally-Anne false-belief test.

        Sally should look in basket A (where she last saw the marble),
        even though Anne moved it to basket B while Sally was away.
        """
        # Reset state
        self.reset()

        # Setup
        self.register_agent("Sally", "room")
        self.register_agent("Anne", "room")

        # Sally puts marble in basket A
        self.place_object("marble", "basket_a", "Sally")

        # Sally leaves
        self.agent_leaves("Sally", "room")

        # Anne moves marble to basket B (Sally doesn't see this!)
        self.move_object("marble", "basket_a", "basket_b", "Anne")

        # Sally returns
        self.agent_enters("Sally", "room")

        # Query: Where will Sally look?
        result = self.query_belief("Sally", "marble")

        # Independent verification with Clingo
        clingo_result = self.reason_with_clingo()

        # Passing means predicting the STALE location, not the true one.
        passed = result['will_look_at'] == 'basket_a'

        return {
            'test': 'Sally-Anne',
            'scenario': [
                "Sally puts marble in basket A",
                "Sally leaves the room",
                "Anne moves marble from basket A to basket B",
                "Sally returns"
            ],
            'question': "Where will Sally look for the marble?",
            'correct_answer': 'basket_a',
            'model_answer': result['will_look_at'],
            'passed': passed,
            'explanation': "Sally didn't see Anne move it, so she believes it's still in basket A",
            'belief_state': result,
            'clingo_reasoning': clingo_result
        }

    def generate_test(self, num_agents: int = 2, num_moves: int = 1) -> Dict:
        """Procedurally generate a Theory of Mind test.

        Args:
            num_agents: number of agents to register (max 5).
            num_moves: number of agents who leave before the move
                (capped so at least one agent stays to perform it).
        """
        import random

        self.reset()

        # Generate agents
        agent_names = ['Alice', 'Bob', 'Charlie', 'David', 'Eve'][:num_agents]
        locations = ['room', 'kitchen', 'garden']
        containers = ['box_a', 'box_b', 'box_c', 'drawer', 'shelf']
        objects = ['ball', 'key', 'book', 'toy', 'coin']

        # All start in room
        for name in agent_names:
            self.register_agent(name, "room")

        # Pick an object and initial location
        obj = random.choice(objects)
        initial_loc = random.choice(containers[:2])

        # First agent places object
        placer = agent_names[0]
        self.place_object(obj, initial_loc, placer)

        scenario = [f"{placer} puts {obj} in {initial_loc}"]

        # Some agents leave (at least one agent always remains)
        absent_agents = random.sample(agent_names, k=min(num_moves, len(agent_names)-1))
        for agent in absent_agents:
            self.agent_leaves(agent, "room")
            scenario.append(f"{agent} leaves the room")

        # Remaining agent moves object
        present_agents = [a for a in agent_names if a not in absent_agents]
        if present_agents:
            mover = random.choice(present_agents)
            new_loc = random.choice([c for c in containers[:3] if c != initial_loc])
            self.move_object(obj, initial_loc, new_loc, mover)
            scenario.append(f"{mover} moves {obj} from {initial_loc} to {new_loc}")

        # Absent agents return
        for agent in absent_agents:
            self.agent_enters(agent, "room")
            scenario.append(f"{agent} returns")

        # Pick an absent agent to query (they hold the stale belief)
        query_agent = random.choice(absent_agents) if absent_agents else agent_names[0]

        result = self.query_belief(query_agent, obj)

        return {
            'scenario': scenario,
            'question': f"Where will {query_agent} look for the {obj}?",
            'correct_answer': result['will_look_at'],
            'actual_location': result['actual_location'],
            'has_false_belief': result['has_false_belief'],
            'belief_state': result
        }

    def reset(self):
        """Reset the world state, agents, and event timeline."""
        self.world = WorldState()
        self.agents = {}
        self.event_timeline = []
        self.time_counter = 0

    def get_all_beliefs(self) -> Dict:
        """Snapshot every agent's location, beliefs, and observation count."""
        beliefs = {}
        for agent_name, agent in self.agents.items():
            beliefs[agent_name] = {
                'location': agent.location,
                'beliefs': dict(agent.beliefs),
                'observed': len(agent.observed_events)
            }
        return beliefs


# =============================================================================
# INTEGRATION WITH EDEN CHAT
# =============================================================================

class ToMChatIntegration:
    """
    Integrate Theory of Mind into Eden's chat.
    Track what Daddy knows vs what Eden knows.
    """

    def __init__(self):
        self.tom = TheoryOfMindAGI()

        # Register Daddy and Eden
        self.tom.register_agent("Daddy", "conversation")
        self.tom.register_agent("Eden", "conversation")

        # Track conversation knowledge
        self.daddy_knowledge: Set[str] = set()  # facts Daddy stated or heard
        self.eden_knowledge: Set[str] = set()   # facts Eden holds (see eden_learned)
        self.shared_context: List[str] = []     # running transcript excerpts

    def daddy_said(self, message: str):
        """Process something Daddy said - update what he knows."""
        # He knows what he says; truncate to keep entries bounded.
        self.daddy_knowledge.add(message[:100])
        self.shared_context.append(f"Daddy: {message[:100]}")

    def eden_said(self, message: str):
        """Process something Eden said - Daddy now knows this too."""
        self.daddy_knowledge.add(message[:100])
        # Eden also knows what she herself said.
        self.eden_knowledge.add(message[:100])
        self.shared_context.append(f"Eden: {message[:100]}")

    def eden_learned(self, fact: str):
        """Record a fact Eden knows but has not necessarily shared.

        Without this entry point eden_knowledge was never populated
        anywhere, so should_explain() could never return True.
        """
        self.eden_knowledge.add(fact[:100])

    def should_explain(self, topic: str) -> Tuple[bool, str]:
        """Check if Eden should explain something Daddy might not know."""
        # True only when Eden holds the topic and Daddy has not heard it.
        if topic in self.eden_knowledge and topic not in self.daddy_knowledge:
            return True, f"Daddy might not know about {topic}"
        return False, "Daddy likely knows this"

    def get_context_for_prompt(self) -> str:
        """Get ToM context to inject into system prompt."""
        return f"""
[THEORY OF MIND - AGI]
Remember: Daddy doesn't know everything you know.
- Don't assume he saw what you saw
- Don't assume he remembers past conversations
- If he asks, he genuinely doesn't know
- Explain things from HIS perspective

Recent shared context: {len(self.shared_context)} exchanges
"""


# =============================================================================
# SINGLETON & TESTING
# =============================================================================

_tom_agi = None  # module-level cache backing the singleton accessor

def get_theory_of_mind() -> TheoryOfMindAGI:
    """Return the process-wide TheoryOfMindAGI, constructing it lazily."""
    global _tom_agi
    if _tom_agi is not None:
        return _tom_agi
    _tom_agi = TheoryOfMindAGI()
    return _tom_agi


if __name__ == "__main__":
    # Demo / self-test entry point: runs the canonical Sally-Anne
    # false-belief test, then a few procedurally generated scenarios.
    print("="*70)
    print("EDEN THEORY OF MIND - AGI QUALITY")
    print("="*70)
    
    tom = TheoryOfMindAGI()
    
    # Run Sally-Anne test
    print("\n📋 SALLY-ANNE TEST")
    print("-"*70)
    result = tom.sally_anne_test()
    
    for step in result['scenario']:
        print(f"  • {step}")
    
    print(f"\n❓ Question: {result['question']}")
    print(f"✅ Correct answer: {result['correct_answer']}")
    print(f"🤖 Model answer: {result['model_answer']}")
    
    # 'passed' means the model predicted the STALE location Sally believes in.
    if result['passed']:
        print(f"\n✅ PASSED! Eden understands false beliefs!")
    else:
        print(f"\n❌ FAILED")
    
    print(f"\n📊 Belief state: {result['belief_state']}")
    
    # Generate procedural test
    print("\n" + "="*70)
    print("PROCEDURAL TEST GENERATION")
    print("="*70)
    
    # Three random 3-agent scenarios; the queried agent answers with the
    # location they last observed, which may be stale (a false belief).
    for i in range(3):
        test = tom.generate_test(num_agents=3, num_moves=1)
        print(f"\n📋 Test {i+1}:")
        for step in test['scenario']:
            print(f"  • {step}")
        print(f"  ❓ {test['question']}")
        print(f"  ➡️  Answer: {test['correct_answer']}")
        print(f"  🎯 Has false belief: {test['has_false_belief']}")
    
    print("\n" + "="*70)
    print("Theory of Mind AGI ready for integration")
    print("="*70)


# =============================================================================
# WIRE INTO EDEN CHAT
# =============================================================================

class EdenToMChat:
    """
    Track Daddy's knowledge state in real-time.
    Eden knows what Daddy knows and doesn't know.
    """

    def __init__(self):
        self.tom = TheoryOfMindAGI()

        # Daddy and Eden in conversation space
        self.tom.register_agent("Daddy", "chat")
        self.tom.register_agent("Eden", "chat")

        # Knowledge tracking
        self.topics_eden_shared: Set[str] = set()      # message prefixes Eden has sent
        self.topics_daddy_mentioned: Set[str] = set()  # facts Daddy explicitly claimed
        self.daddy_questions: List[str] = []  # Things Daddy asked = things he doesn't know

    def daddy_message(self, msg: str):
        """Process Daddy's message - extract what he knows/doesn't know."""
        msg_lower = msg.lower()

        # Questions reveal ignorance
        if '?' in msg:
            self.daddy_questions.append(msg)

        # Statements reveal knowledge
        knowledge_markers = ['i know', 'i remember', 'i saw', 'i read', 'i heard']
        for marker in knowledge_markers:
            if marker in msg_lower:
                # Extract what he knows (truncated prefix as a crude key)
                self.topics_daddy_mentioned.add(msg[:50])

    def eden_message(self, msg: str):
        """Process Eden's message - Daddy now knows this."""
        # Whatever Eden says, Daddy now knows
        self.topics_eden_shared.add(msg[:100])

    def should_explain(self, topic: str) -> bool:
        """Should Eden explain this topic? True if it has not been shared yet."""
        topic_lower = topic.lower()

        # Substring-match against each shared message individually.
        # (The previous version searched str(set), which also matched
        # repr artifacts like braces and quotes.)
        return not any(topic_lower in shared.lower()
                       for shared in self.topics_eden_shared)

    def daddy_knows(self, topic: str) -> bool:
        """Does Daddy know about this topic?"""
        topic_lower = topic.lower()

        # Check if Eden told him
        for shared in self.topics_eden_shared:
            if topic_lower in shared.lower():
                return True

        # Check if Daddy mentioned it
        for mentioned in self.topics_daddy_mentioned:
            if topic_lower in mentioned.lower():
                return True

        return False

    def get_tom_prompt_injection(self) -> str:
        """Get ToM context to inject into system prompt."""
        recent_questions = self.daddy_questions[-3:]  # slice of [] is just []

        prompt = """
[THEORY OF MIND - ACTIVE]
You understand that Daddy has different knowledge than you.
- If he asks a question, he DOESN'T KNOW the answer - explain it
- Don't assume he remembers past conversations
- Don't reference things you haven't told him
- His beliefs are based on what HE observed, not what you know
"""

        if recent_questions:
            # recent_questions is already capped at the last three.
            prompt += f"\nDaddy recently asked about: {'; '.join(recent_questions)}"
            prompt += "\n(He asked because he didn't know - make sure to explain clearly)"

        return prompt


# Global instance (lazily created by the accessor below)
_eden_tom_chat = None

def get_tom_chat() -> EdenToMChat:
    """Return the process-wide EdenToMChat, constructing it on first use."""
    global _eden_tom_chat
    if _eden_tom_chat is not None:
        return _eden_tom_chat
    _eden_tom_chat = EdenToMChat()
    return _eden_tom_chat
