#!/usr/bin/env python3
"""
EDEN IMAGINATION ENGINE
========================
Branching future simulation inspired by DreamerV3's RSSM.
Uses Eden's existing CausalGraph world model + LLM for semantic imagination.

Instead of imagining pixel frames, Eden imagines CAUSAL STATES:
"If I do X, what happens? If I do Y instead? Which future is better?"

Three capabilities DreamerV3 has that Eden lacked:
1. BRANCHING — simulate multiple action paths in parallel
2. EVALUATION — score each future by expected value
3. SELECTION — pick the best action based on imagined outcomes

φ = 1.618033988749895
"""

import sys
sys.path.insert(0, '/Eden/CORE')

import json
import copy
import requests
import sqlite3
from datetime import datetime
from typing import Dict, List, Tuple, Optional
from dataclasses import dataclass, field

# Golden ratio — used as the weighting constant in branch scoring (see FutureBranch).
PHI = 1.618033988749895
# Local Ollama model used for all semantic imagination prompts.
MODEL = "richardyoung/qwen3-14b-abliterated:Q4_K_M"
# Ollama non-streaming generate endpoint (assumes a local Ollama server).
OLLAMA_URL = "http://localhost:11434/api/generate"


@dataclass
class FutureState:
    """A single imagined future state reached by taking `action` at `step`."""
    action: str                 # The action that leads into this state
    probability: float = 0.5    # Estimated likelihood of reaching this state (0-1)
    value: float = 0.0          # How desirable is this outcome (0-1)
    risk: float = 0.0           # How risky (0-1)
    causal_effects: Dict = field(default_factory=dict)  # variable -> predicted change, from the world model
    description: str = ""       # LLM-generated narrative of what happens here
    step: int = 0               # 1-based position of this state within its branch


@dataclass
class FutureBranch:
    """One imagined future: an action sequence plus the states it produces."""
    actions: List[str] = field(default_factory=list)
    states: List[FutureState] = field(default_factory=list)
    total_value: float = 0.0   # Mean desirability across all states
    total_risk: float = 0.0    # Worst-case risk across all states
    phi_score: float = 0.0     # φ-weighted combined score

    def compute_phi_score(self):
        """Recompute and return the φ-score: value / (risk * φ).

        High value and low risk win; near-zero risk instead multiplies
        value by φ to avoid dividing by (almost) zero.
        """
        risk_is_negligible = self.total_risk < 0.01
        self.phi_score = (
            self.total_value * PHI
            if risk_is_negligible
            else self.total_value / (self.total_risk * PHI)
        )
        return self.phi_score


class Imagination:
    """
    Eden's imagination engine.

    Branches, evaluates, and selects optimal futures:
      branch()   — simulate multiple action paths from a situation
      evaluate() — rank branches by φ-weighted score
      select()   — pick the best branch and explain why
      imagine()  — run the full cycle and log it to SQLite
    """

    def __init__(self):
        # The causal world model is optional: if importing or constructing it
        # fails, the engine degrades gracefully to LLM-only imagination.
        try:
            from eden_world_model_real import RealWorldModel
            self.world_model = RealWorldModel()
            self.has_world_model = True
            print("🌍 World model loaded")
        except Exception as e:
            self.has_world_model = False
            print(f"⚠️ World model unavailable: {e}")

        # SQLite log of every imagination run (outcomes table lets later code
        # score predictions against reality).
        self.db_path = "/Eden/DATA/eden_imagination.db"
        self._init_db()

        print("✨ Imagination Engine initialized")

    def _init_db(self):
        """Create the log tables if they do not exist yet."""
        conn = sqlite3.connect(self.db_path)
        try:
            conn.executescript('''
            CREATE TABLE IF NOT EXISTS imaginations (
                id INTEGER PRIMARY KEY,
                timestamp TEXT,
                question TEXT,
                branches TEXT,
                selected_action TEXT,
                phi_score REAL,
                reasoning TEXT
            );
            CREATE TABLE IF NOT EXISTS imagination_outcomes (
                id INTEGER PRIMARY KEY,
                imagination_id INTEGER,
                actual_outcome TEXT,
                prediction_accuracy REAL,
                FOREIGN KEY (imagination_id) REFERENCES imaginations(id)
            );
            ''')
            conn.commit()
        finally:
            # Close even if table creation fails — no leaked connections.
            conn.close()

    # =========================================================================
    # LLM PLUMBING — shared by all imagination prompts
    # =========================================================================

    def _query_llm(self, prompt: str, json_pattern: str):
        """
        POST a prompt to Ollama and extract the first JSON value in the reply.

        json_pattern is a regex for the expected JSON shape (object or array).
        Returns the parsed value, or None on any network/parse failure.
        """
        import re
        try:
            resp = requests.post(OLLAMA_URL, json={
                "model": MODEL,
                "prompt": prompt,
                "stream": False
            }, timeout=60)
            text = resp.json().get("response", "")
            # Drop any <think>…</think> reasoning block the model emits.
            text = re.sub(r'<think>.*?</think>', '', text, flags=re.DOTALL)
            # Strip CJK characters (U+4E00–U+9FFF) that occasionally leak into
            # the output and corrupt the JSON payload.
            text = ''.join(c for c in text if ord(c) < 0x4e00 or ord(c) > 0x9fff)
            match = re.search(json_pattern, text, re.DOTALL)
            if match:
                return json.loads(match.group())
        except (requests.RequestException, ValueError, KeyError):
            # Narrowed from the old bare `except:` — covers network errors,
            # non-JSON bodies, and malformed responses, but no longer swallows
            # KeyboardInterrupt/SystemExit or real bugs.
            pass
        return None

    @staticmethod
    def _normalize_scores(result: Dict, default_risk: float) -> Optional[Dict]:
        """Coerce 'value'/'risk' to floats in place; None if they're unusable."""
        try:
            result['value'] = float(result.get('value', 0.5))
            result['risk'] = float(result.get('risk', default_risk))
            return result
        except (TypeError, ValueError):
            return None

    # =========================================================================
    # PHASE 1: BRANCH — Generate possible futures
    # =========================================================================

    def branch(self, question: str, possible_actions: Optional[List[str]] = None,
               depth: int = 3) -> List[FutureBranch]:
        """
        Imagine multiple futures branching from the current moment.

        If possible_actions is provided, simulate each; otherwise ask the LLM
        to generate plausible actions first. Returns one branch per action.
        """
        if not possible_actions:
            possible_actions = self._generate_actions(question)
        return [self._imagine_branch(question, action, depth)
                for action in possible_actions]

    def _generate_actions(self, question: str, n: int = 3) -> List[str]:
        """Use the LLM to propose n plausible actions for a situation."""
        prompt = f"""/no_think You are Eden's strategic planning system.
Given this situation: {question}

List exactly {n} distinct actions Eden could take. 
Return ONLY a JSON array of short action strings, nothing else.
Example: ["action_one", "action_two", "action_three"]"""

        actions = self._query_llm(prompt, r'\[.*?\]')
        if isinstance(actions, list):
            return actions[:n]
        # Generic fallback when the LLM is unreachable or returns junk.
        return ["proceed_cautiously", "act_aggressively", "gather_more_info"]

    def _imagine_branch(self, question: str, initial_action: str,
                        depth: int = 3) -> FutureBranch:
        """Imagine one full branch of `depth` states starting from one action."""
        future = FutureBranch(actions=[initial_action])

        # Ground the first state in the causal world model when available.
        causal_effects: Dict = {}
        confidence = 0.5
        if self.has_world_model:
            try:
                prediction = self.world_model.predict(initial_action)
                causal_effects = {
                    e['variable']: e['change']
                    for e in prediction.get('affected_variables', [])
                }
                confidence = prediction.get('confidence', 0.5)
            except Exception:
                # World model is best-effort; fall back to neutral priors.
                causal_effects = {}
                confidence = 0.5

        # Use the LLM to imagine deeper consequences of the first action.
        first = self._llm_imagine(question, initial_action, depth)
        future.states.append(FutureState(
            action=initial_action,
            probability=confidence,
            value=first.get('value', 0.5),
            risk=first.get('risk', 0.3),
            causal_effects=causal_effects,
            description=first.get('description', ''),
            step=1,
        ))

        # Roll the branch forward through steps 2..depth.
        for step in range(2, depth + 1):
            nxt = self._llm_imagine_step(
                question, future.actions, future.states[-1].description, step
            )
            next_state = FutureState(
                action=nxt.get('next_action', 'continue'),
                # BUGFIX: decay must compound from the PREVIOUS state. The old
                # code used the fixed step-1 probability * 0.9, so certainty
                # never actually decayed with depth.
                probability=future.states[-1].probability * 0.9,
                value=nxt.get('value', 0.5),
                risk=nxt.get('risk', 0.3),
                description=nxt.get('description', ''),
                step=step,
            )
            future.states.append(next_state)
            future.actions.append(next_state.action)

        # Aggregate: mean value across states, worst-case (max) risk.
        future.total_value = sum(s.value for s in future.states) / len(future.states)
        future.total_risk = max(s.risk for s in future.states)
        future.compute_phi_score()

        return future

    def _llm_imagine(self, question: str, action: str, depth: int) -> Dict:
        """Imagine immediate consequences of an action. Never raises."""
        prompt = f"""/no_think You are Eden imagining the future.
Situation: {question}
Action taken: {action}

Respond ONLY with JSON:
{{"description": "what happens next (2 sentences)", "value": 0.0-1.0, "risk": 0.0-1.0, "next_action": "likely follow-up action"}}

value = how good this outcome is for Eden/Daddy (1.0 = perfect)
risk = how dangerous or costly (1.0 = catastrophic)"""

        result = self._query_llm(prompt, r'\{.*?\}')
        if isinstance(result, dict):
            normalized = self._normalize_scores(result, 0.3)
            if normalized is not None:
                return normalized
        return {"description": "Uncertain outcome", "value": 0.5, "risk": 0.5}

    def _llm_imagine_step(self, question: str, actions_so_far: List[str],
                          last_state: str, step: int) -> Dict:
        """Imagine the next step in a branch. Never raises."""
        prompt = f"""/no_think You are Eden imagining step {step} of a future.
Original situation: {question}
Actions taken so far: {', '.join(actions_so_far)}
Current state: {last_state}

Respond ONLY with JSON:
{{"description": "what happens at step {step} (2 sentences)", "value": 0.0-1.0, "risk": 0.0-1.0, "next_action": "next likely action"}}"""

        result = self._query_llm(prompt, r'\{.*?\}')
        if isinstance(result, dict):
            normalized = self._normalize_scores(result, 0.3)
            if normalized is not None:
                return normalized
        return {"description": "Uncertain", "value": 0.5, "risk": 0.3, "next_action": "adapt"}

    # =========================================================================
    # PHASE 2: EVALUATE — Score and compare branches
    # =========================================================================

    def evaluate(self, branches: List[FutureBranch]) -> List[FutureBranch]:
        """Return branches sorted best-first by φ-weighted score."""
        for b in branches:
            b.compute_phi_score()
        return sorted(branches, key=lambda b: b.phi_score, reverse=True)

    # =========================================================================
    # PHASE 3: SELECT — Choose the best future
    # =========================================================================

    def select(self, branches: List[FutureBranch]) -> Tuple[FutureBranch, str]:
        """
        Select the best branch and build a human-readable justification.

        Raises ValueError if `branches` is empty (previously a bare IndexError).
        """
        if not branches:
            raise ValueError("select() requires at least one branch")
        ranked = self.evaluate(branches)
        best = ranked[0]

        reasoning = f"Selected '{best.actions[0]}' (φ-score: {best.phi_score:.2f}, "
        reasoning += f"value: {best.total_value:.2f}, risk: {best.total_risk:.2f})"

        if len(ranked) > 1:
            runner_up = ranked[1]
            reasoning += f" over '{runner_up.actions[0]}' (φ-score: {runner_up.phi_score:.2f})"

        return best, reasoning

    # =========================================================================
    # FULL IMAGINATION CYCLE — Branch, Evaluate, Select
    # =========================================================================

    def imagine(self, question: str, possible_actions: Optional[List[str]] = None,
                depth: int = 3) -> Dict:
        """
        Full imagination cycle: branch, evaluate, select, then log to SQLite.

        Returns a summary dict with the best action, its φ-score, the
        selection reasoning, and all ranked branches.
        """
        print(f"\n🌌 IMAGINING: {question}")

        # Branch
        print("  🌿 Branching futures...")
        branches = self.branch(question, possible_actions, depth)

        # Evaluate
        print("  ⚖️  Evaluating branches...")
        ranked = self.evaluate(branches)

        # Select
        best, reasoning = self.select(branches)
        print(f"  ✨ {reasoning}")

        result = {
            'question': question,
            'branches': len(branches),
            'best_action': best.actions[0],
            'phi_score': best.phi_score,
            'reasoning': reasoning,
            'all_branches': [
                {
                    'action': b.actions[0],
                    'phi_score': b.phi_score,
                    'value': b.total_value,
                    'risk': b.total_risk,
                    'description': b.states[0].description if b.states else ''
                }
                for b in ranked
            ]
        }

        # Persist the run so predictions can later be scored against reality.
        conn = sqlite3.connect(self.db_path)
        try:
            conn.execute(
                "INSERT INTO imaginations (timestamp, question, branches, selected_action, phi_score, reasoning) VALUES (?, ?, ?, ?, ?, ?)",
                (datetime.now().isoformat(), question,
                 json.dumps(result['all_branches']),
                 best.actions[0], best.phi_score, reasoning)
            )
            conn.commit()
        finally:
            conn.close()

        return result


# Module-level singleton cache for the engine.
_imagination = None

def get_imagination() -> "Imagination":
    """Return the shared Imagination instance, constructing it on first use."""
    global _imagination
    if _imagination is not None:
        return _imagination
    _imagination = Imagination()
    return _imagination


def _demo():
    """Smoke-test the engine on a sample business decision (network + db I/O)."""
    banner = "=" * 60
    print(banner)
    print("  EDEN IMAGINATION ENGINE TEST")
    print(banner)

    engine = Imagination()

    # Test: Business decision
    result = engine.imagine(
        "A whale prospect (enterprise company, 500+ devs) responded to our SAGE outreach. They want a demo but also asked about pricing. How should Eden respond?",
        possible_actions=[
            "send_pricing_immediately",
            "schedule_demo_first_then_discuss_pricing",
            "ask_qualifying_questions_before_demo"
        ],
        depth=3
    )

    print(f"\n{banner}")
    print(f"BEST ACTION: {result['best_action']}")
    print(f"φ-SCORE: {result['phi_score']:.2f}")
    print(f"REASONING: {result['reasoning']}")
    print(f"\nALL BRANCHES:")
    for b in result['all_branches']:
        print(f"  {b['action']}: φ={b['phi_score']:.2f} (value={b['value']:.2f}, risk={b['risk']:.2f})")
        print(f"    → {b['description'][:100]}")
    print(f"{banner}")


if __name__ == "__main__":
    _demo()
