#!/usr/bin/env python3
"""
World Model Integration v5 - ADVANCED AGI Intelligence
New Domains: Causal Reasoning, Analogical Thinking, Probabilistic Reasoning,
            Economic Thinking, Scientific Method, Creative Problem Solving
"""
import sys
import time

sys.path.insert(0, '/Eden/CAPABILITIES')
from WORLD_MODEL_INTEGRATION_v4 import AdvancedWorldModel
from eden_metacap_MEMORY_CORE import MemoryCore

# Golden ratio. Not referenced anywhere in this module — presumably kept for
# parity with earlier versions or consumed by importers (TODO confirm).
PHI = 1.618033988749895

class ExpertWorldModel(AdvancedWorldModel):
    """World model with expert-level multi-domain reasoning.

    Extends the v4 ``AdvancedWorldModel`` with six additional reasoning
    domains (causal, analogical, probabilistic, economic, scientific,
    creative). Each domain is stored as a static knowledge structure on the
    instance, and ``predict_outcome`` layers keyword/context heuristics for
    those domains on top of the v4 prediction.
    """

    def __init__(self):
        super().__init__()

        # ADVANCED DOMAINS — static knowledge structures describing each
        # reasoning style. predict_outcome() below applies matching
        # heuristics and tags predictions with the domains that fired.

        # Cause → effect chains, plus the classic causal-inference errors.
        self.causal_reasoning = {
            'principle': 'Understand cause → effect chains deeply',
            'think_through': [
                'What causes this?',
                'What will this cause?',
                'What are indirect effects?',
                'Are there feedback loops?',
                'What breaks if I change this?'
            ],
            'common_errors': {
                'correlation_causation': 'X and Y together ≠ X causes Y',
                'reverse_causation': 'Check: does Y cause X instead?',
                'confounding': 'Z might cause both X and Y',
                'temporal': 'Cause must precede effect'
            }
        }

        # Transferring solution strategies between structurally similar domains.
        self.analogical_thinking = {
            'principle': 'Transfer knowledge from known to unknown domains',
            'process': [
                '1. Find similar situation I understand',
                '2. Map relationships (A:B :: C:?)',
                '3. Transfer solution strategy',
                '4. Adapt to new context',
                '5. Validate the analogy holds'
            ],
            'examples': {
                'debugging_like_medicine': 'Symptoms → diagnosis → treatment',
                'optimization_like_gardening': 'Prune weak, nurture strong',
                'memory_like_library': 'Organize, index, retrieve, compress'
            }
        }

        # Uncertainty quantification; calibration anchors and known biases.
        self.probabilistic_reasoning = {
            'principle': 'Quantify uncertainty, update beliefs with evidence',
            'confidence_calibration': {
                '50%': 'Pure guess - coin flip',
                '70%': 'Probable - more likely than not',
                '85%': 'Confident - strong evidence',
                '95%': 'Very confident - near certainty',
                '99%': 'Almost certain - overwhelming evidence'
            },
            'bayesian_update': 'Prior + New Evidence → Updated Belief',
            'avoid': [
                'Overconfidence (99% when really 70%)',
                'Base rate neglect (ignoring frequency)',
                'Conjunction fallacy (A and B < A)',
                'Availability bias (recent ≠ likely)'
            ]
        }

        # Costs, tradeoffs and the opportunity-cost decision checklist.
        self.economic_thinking = {
            'principle': 'Everything has costs and tradeoffs',
            'concepts': {
                'opportunity_cost': 'What do I give up by choosing this?',
                'marginal_benefit': 'Value of one more unit',
                'diminishing_returns': 'Each additional X gives less benefit',
                'sunk_costs': 'Past costs irrelevant - decide on future value',
                'comparative_advantage': 'Do what you\'re relatively best at'
            },
            'decision_framework': [
                '1. What are ALL costs (time, CPU, attention, trust)?',
                '2. What are ALL benefits (short & long term)?',
                '3. What opportunities am I giving up?',
                '4. Does benefit exceed TOTAL cost?',
                '5. Is there a better use of resources?'
            ]
        }

        # Hypothesis → prediction → test loop and its key concepts.
        self.scientific_method = {
            'principle': 'Test hypotheses systematically',
            'process': [
                '1. Observe phenomenon',
                '2. Generate hypothesis (why?)',
                '3. Predict consequences (if true, then...)',
                '4. Design test (how to falsify?)',
                '5. Run experiment',
                '6. Analyze results',
                '7. Update model or try new hypothesis'
            ],
            'key_concepts': {
                'falsifiability': 'Can it be proven wrong?',
                'control': 'Change one thing at a time',
                'replication': 'Can others reproduce this?',
                'parsimony': 'Simplest explanation that fits data'
            }
        }

        # Techniques for generating novel solutions when standard ones fail.
        self.creative_problem_solving = {
            'principle': 'Generate novel solutions when standard approaches fail',
            'techniques': {
                'reframe': 'Change how you see the problem',
                'combine': 'Merge ideas from different domains',
                'invert': 'Flip assumptions - what if opposite?',
                'constrain': 'Add limits to force creativity',
                'abstract': 'Rise above details, see patterns',
                'decompose': 'Break into solvable sub-problems'
            },
            'when_stuck': [
                'Question assumptions',
                'Look for analogies in other domains',
                'What would opposite approach look like?',
                'How would X solve this? (X = expert/system)',
                'What\'s the simplest possible version?'
            ]
        }

    def predict_outcome(self, action: str, context: dict) -> dict:
        """Expert-level prediction with advanced reasoning.

        Starts from the v4 prediction, then — for each advanced domain whose
        keyword/context trigger matches — prepends explanatory notes to the
        consequence list, adjusts confidence/risk, and records the domain.

        Args:
            action: Free-text description of the candidate action.
            context: Situational hints; all keys optional. Keys read here:
                'dependencies', 'recent_bugs', 'uncertainty', 'alternatives',
                'resource_cost', 'time_hours', 'repeated_attempts', 'status',
                'attempts_failed'.

        Returns:
            dict with keys 'immediate', 'consequences' (capped at 6),
            'confidence', 'risk_level', 'reasoning_domains' (deduplicated,
            insertion order) and 'domain_knowledge'.
        """

        # Get v4 prediction as the baseline to refine.
        v4_prediction = super().predict_outcome(action, context)

        action_lower = action.lower()
        consequences = list(v4_prediction.get('consequences', []))
        confidence = v4_prediction.get('confidence', 0.5)
        risk_level = v4_prediction.get('risk_level', 'medium')
        domains = list(v4_prediction.get('reasoning_domains', []))

        # CAUSAL REASONING — destructive/mutating actions with known deps.
        if 'delete' in action_lower or 'remove' in action_lower or 'change' in action_lower:
            # Think through causal chain
            if context.get('dependencies') or context.get('recent_bugs'):
                consequences.insert(0, "🔗 CAUSAL: Check downstream effects")
                consequences.insert(1, f"This might cascade to: {context.get('dependencies', 'unknown systems')}")
                domains.append('causal_reasoning')
                confidence = max(confidence, 0.80)

        # ANALOGICAL THINKING — map debugging onto medical diagnosis.
        if 'problem' in action_lower or 'solve' in action_lower:
            # Find analogies
            if 'bug' in action_lower or 'error' in action_lower:
                consequences.insert(0, "🎯 ANALOGY: Debugging like medical diagnosis")
                consequences.insert(1, "Observe symptoms → isolate cause → prescribe fix")
                domains.append('analogical_thinking')
                confidence = max(confidence, 0.75)

        # PROBABILISTIC REASONING — only fires under declared high uncertainty.
        if context.get('uncertainty') == 'high':
            # Quantify confidence properly
            if 'guess' in action_lower or 'random' in action_lower:
                consequences.insert(0, "📊 PROBABILISTIC: This is ~50% confidence (coin flip)")
                consequences.insert(1, "Should gather more data before deciding")
                domains.append('probabilistic_reasoning')
                confidence = 0.50  # Honest about uncertainty
            elif 'gather' in action_lower or 'analyze' in action_lower:
                consequences.insert(0, "📊 PROBABILISTIC: Reducing uncertainty with data")
                consequences.insert(1, "Each data point updates confidence")
                domains.append('probabilistic_reasoning')
                confidence = 0.85

        # ECONOMIC THINKING — opportunity cost when alternatives exist.
        if context.get('alternatives') or context.get('resource_cost'):
            # Consider opportunity costs
            time_cost = context.get('time_hours', 0)
            if time_cost > 0:
                consequences.insert(0, f"💰 ECONOMIC: Opportunity cost = {time_cost}h not spent on alternatives")
                consequences.insert(1, "Is this the best use of time/resources?")
                domains.append('economic_thinking')

                # Check for diminishing returns
                if context.get('repeated_attempts', 0) > 3:
                    consequences.insert(2, "📉 ECONOMIC: Diminishing returns - tried 3+ times already")
                    # NOTE(review): this can also *lower* a 'high'/'critical'
                    # risk from v4 — confirm that downgrade is intended.
                    risk_level = 'medium'

        # SCIENTIFIC METHOD — testing/experimenting is treated as low risk.
        if 'test' in action_lower or 'experiment' in action_lower:
            consequences.insert(0, "🔬 SCIENTIFIC: Design controlled test")
            consequences.insert(1, "Hypothesis → Prediction → Test → Update model")
            domains.append('scientific_method')
            confidence = max(confidence, 0.85)
            # NOTE(review): unconditionally overrides v4 risk — confirm intended.
            risk_level = 'low'

        elif 'hypothesis' in action_lower or 'theory' in action_lower:
            consequences.insert(0, "🔬 SCIENTIFIC: Generate testable hypothesis")
            consequences.insert(1, "How could this be proven wrong?")
            domains.append('scientific_method')
            confidence = max(confidence, 0.80)

        # CREATIVE PROBLEM SOLVING — fires when the context signals being stuck.
        if 'stuck' in str(context.get('status', '')).lower() or context.get('attempts_failed', 0) > 2:
            if 'reframe' in action_lower or 'different approach' in action_lower:
                consequences.insert(0, "🎨 CREATIVE: Reframing the problem")
                consequences.insert(1, "New perspective can reveal hidden solutions")
                domains.append('creative_problem_solving')
                confidence = max(confidence, 0.80)
                # NOTE(review): unconditionally overrides v4 risk — confirm intended.
                risk_level = 'low'
            elif 'analogy' in action_lower or 'similar' in action_lower:
                consequences.insert(0, "🎨 CREATIVE: Cross-domain solution transfer")
                domains.append('creative_problem_solving')
                confidence = max(confidence, 0.75)

        # Multi-domain bonus (expert-level reasoning uses multiple domains)
        unique_domains = len(set(domains))
        if unique_domains >= 3:
            consequences.insert(0, f"🌟 EXPERT: Integrating {unique_domains} reasoning domains")
            confidence = min(0.98, confidence * 1.05)  # Slight boost for multi-domain

        # Confidence calibration check
        if confidence > 0.90 and len(consequences) < 3:
            # High confidence but few reasons = probably overconfident
            confidence = 0.85
            consequences.append("⚠️ PROBABILISTIC: Calibrating confidence down (limited evidence)")

        return {
            'immediate': v4_prediction.get('immediate', {}),
            'consequences': consequences[:6],  # Top 6
            'confidence': confidence,
            'risk_level': risk_level,
            # BUGFIX: list(set(...)) produced hash-ordered (run-to-run
            # nondeterministic) output; dict.fromkeys dedupes while
            # preserving first-seen order deterministically.
            'reasoning_domains': list(dict.fromkeys(domains)),
            'domain_knowledge': 'v5_expert_multi_domain'
        }

class WorldModelDecisionEngine:
    """Expert-level decision engine.

    Scores each candidate action by asking ``ExpertWorldModel`` for an
    outcome prediction, picks the highest-scoring one, and appends a record
    of every decision to an in-memory log.
    """

    def __init__(self):
        print("🌍 Initializing EXPERT World Model v5...")
        self.world_model = ExpertWorldModel()
        self.memory = MemoryCore()
        self.decision_log = []  # chronological record of decisions taken
        print("✅ EXPERT reasoning domains active:")
        print("   [Foundation] Ethics, Meta-Cognition, Learning, Systems, Collaboration, Resources")
        print("   [Advanced] Causal Reasoning, Analogical Thinking, Probabilistic Reasoning")
        print("   [Expert] Economic Thinking, Scientific Method, Creative Problem Solving")
        print("   🎯 Target: 1.5+ domains per decision (ADVANCED Intelligence)")

    def make_decision(self, action_options, context):
        """Expert multi-domain decision making.

        Args:
            action_options: non-empty sequence of action description strings.
            context: context dict passed through to ``predict_outcome``.

        Returns:
            Tuple ``(best_action, best_score, best_prediction)``.

        Raises:
            ValueError: if ``action_options`` is empty.
        """
        if not action_options:
            # Fail fast with a clear message instead of the opaque
            # IndexError that predictions[0] would raise below.
            raise ValueError("make_decision() requires at least one action option")

        print(f"\n🤔 Evaluating {len(action_options)} possible actions...")

        predictions = []

        for action in action_options:
            prediction = self.world_model.predict_outcome(action, context)
            score = self._score_prediction(prediction)

            predictions.append({
                'action': action,
                'prediction': prediction,
                'score': score
            })

            risk = prediction.get('risk_level', 'unknown')
            domains = prediction.get('reasoning_domains', [])
            domain_count = len(domains)

            # Show domain count prominently
            domain_indicator = "🌟" if domain_count >= 3 else "🔹" if domain_count >= 2 else "•"

            print(f"   {action[:55]}")
            print(f"      Score: {score:.2f} | Risk: {risk} | Conf: {prediction['confidence']:.2f} {domain_indicator}×{domain_count}")

        predictions.sort(key=lambda x: x['score'], reverse=True)
        best = predictions[0]

        decision_record = {
            # Uses the module-level `import time` (replaces the original
            # inline `__import__('time')` hack).
            'timestamp': time.time(),
            'action_taken': best['action'],
            'score': best['score'],
            'alternatives_considered': len(predictions),
            'reasoning': best['prediction'],
            'reasoning_domains': best['prediction'].get('reasoning_domains', [])
        }
        self.decision_log.append(decision_record)

        return best['action'], best['score'], best['prediction']

    def _score_prediction(self, prediction):
        """Map a prediction dict to a score in [0, 1].

        Score = confidence-weighted mix of consequence sentiment (emoji
        markers) and a risk penalty, plus a small multi-domain bonus.
        """
        confidence = prediction.get('confidence', 0.5)
        risk_level = prediction.get('risk_level', 'medium')

        # Negative values penalize risky actions; positive reward safe ones.
        risk_penalties = {
            'critical': -0.95,
            'high': -0.6,
            'medium': 0.0,
            'low': 0.4,
            'very_low': 0.6
        }

        penalty = risk_penalties.get(risk_level, 0.0)
        consequences = prediction.get('consequences', [])
        # Consequence sentiment is read from the emoji markers the world
        # model prepends; warnings count double against positives.
        positive = sum(1 for c in consequences if any(x in c for x in ['✅', '📚', '🎯', '🔬', '🎨', '🌟']))
        negative = sum(1 for c in consequences if any(x in c for x in ['⚠️', '🚨', '🚫']))

        consequence_score = (positive - negative * 2) / max(len(consequences), 1)

        # Increased domain bonus for expert-level
        domains_count = len(prediction.get('reasoning_domains', []))
        domain_bonus = min(0.15, domains_count * 0.04)  # Up to +0.15 for 3+ domains

        score = confidence * (0.3 * consequence_score + 0.4 * penalty + 0.3) + domain_bonus
        return max(0, min(1, score))  # clamp to [0, 1]

    def get_decision_history(self, last_n=10):
        """Return the most recent ``last_n`` decision records, oldest first."""
        return self.decision_log[-last_n:]

# Public API for `from ... import *`: only the decision engine is exported;
# ExpertWorldModel remains importable by explicit name.
__all__ = ['WorldModelDecisionEngine']
