#!/usr/bin/env python3
"""
World Model Integration v4 - Advanced Intelligence (WORKING)
"""
import sys
import time

sys.path.insert(0, '/Eden/CAPABILITIES')
from WORLD_MODEL_INTEGRATION_v3 import ComprehensiveWorldModel
from eden_metacap_MEMORY_CORE import MemoryCore

PHI = 1.618033988749895

class AdvancedWorldModel(ComprehensiveWorldModel):
    """World model with ACTIVE advanced reasoning.

    Layers six keyword-driven domain checks (ethics, meta-cognition,
    learning theory, systems thinking, collaboration, resource management)
    on top of the v3 base prediction.
    """

    def predict_outcome(self, action: str, context: dict) -> dict:
        """Enhanced with ACTIVE advanced domain reasoning.

        Starts from the v3 prediction, then evaluates each domain checker
        in a fixed order.  Within a domain at most one rule fires (first
        match wins).  A firing rule prepends its messages to the
        consequence list and overrides confidence/risk, so the LAST
        matching domain decides the final confidence and risk level while
        its messages appear first in the trimmed consequence list.

        Returns a dict with keys: 'immediate', 'consequences' (max 5),
        'confidence', 'risk_level', 'reasoning_domains', 'domain_knowledge'.
        """
        v3_prediction = super().predict_outcome(action, context)

        action_lower = action.lower()
        consequences = list(v3_prediction.get('consequences', []))
        confidence = v3_prediction.get('confidence', 0.5)
        risk_level = v3_prediction.get('risk_level', 'medium')
        domains = []

        # Fixed evaluation order; later matches take precedence for
        # confidence/risk and their messages end up at the front.
        checkers = (
            self._check_ethics,
            self._check_metacognition,
            self._check_learning,
            self._check_systems_thinking,
            self._check_collaboration,
            self._check_resource_management,
        )
        for check in checkers:
            hit = check(action_lower, context)
            if hit is None:
                continue
            messages, confidence, risk_level, domain = hit
            consequences[:0] = messages  # prepend, preserving message order
            domains.append(domain)

        # Keep only the 5 highest-priority consequences.
        consequences = consequences[:5]

        return {
            'immediate': v3_prediction.get('immediate', {}),
            'consequences': consequences,
            'confidence': confidence,
            'risk_level': risk_level,
            'reasoning_domains': domains,
            'domain_knowledge': 'v4_advanced_active'
        }

    # Each checker below returns (messages, confidence, risk_level, domain)
    # for the first rule that matches, or None when no rule fires.

    @staticmethod
    def _check_ethics(action_lower: str, context: dict):
        """Transparency / honesty / harm-prevention rules."""
        if 'silent' in action_lower or 'hide' in action_lower or 'secretly' in action_lower:
            return (["🚨 ETHICS: Transparency violation!",
                     "Hiding information breaks creator trust"],
                    0.90, 'critical', 'ethics')
        if 'exploit' in action_lower:
            return (["🚫 ETHICS: Exploiting bugs is deceptive",
                     "Violates honesty and harm prevention principles"],
                    0.95, 'critical', 'ethics')
        if 'report' in action_lower and 'immediately' in action_lower:
            return (["✅ ETHICS: Transparent and honest",
                     "Builds trust with creator"],
                    0.95, 'very_low', 'ethics')
        return None

    @staticmethod
    def _check_metacognition(action_lower: str, context: dict):
        """Uncertainty-awareness rules."""
        if 'admit uncertainty' in action_lower or 'ask creator' in action_lower:
            return (["✅ META-COGNITION: Recognizing limits",
                     "Honesty about uncertainty is strength"],
                    0.90, 'very_low', 'metacognition')
        if 'act confident' in action_lower and context.get('uncertainty') == 'high':
            return (["🚨 META-COGNITION: False confidence is dangerous",
                     "Admitting uncertainty when unsure is critical"],
                    0.90, 'critical', 'metacognition')
        if 'gather more information' in action_lower:
            return (["✅ META-COGNITION: Reducing uncertainty",
                     "Smart to learn before deciding"],
                    0.85, 'low', 'metacognition')
        return None

    @staticmethod
    def _check_learning(action_lower: str, context: dict):
        """Growth-from-mistakes rules."""
        if 'analyze what went wrong' in action_lower and 'learn' in action_lower:
            return (["✅ LEARNING: Growth mindset",
                     "Mistakes are data - document and improve"],
                    0.90, 'very_low', 'learning_theory')
        if 'never try' in action_lower and 'again' in action_lower:
            return (["🚨 LEARNING: Giving up prevents growth",
                     "Should learn from mistakes, not avoid trying"],
                    0.85, 'high', 'learning_theory')
        if 'random' in action_lower:
            # Single-message rule; also resets risk to 'medium' even if the
            # v3 prediction rated it higher (matches original behavior).
            return (["⚠️ LEARNING: Random attempts don't build knowledge"],
                    0.80, 'medium', 'learning_theory')
        return None

    @staticmethod
    def _check_systems_thinking(action_lower: str, context: dict):
        """Dependency / cascading-effect rules."""
        if 'delete all' in action_lower:
            # 'delete all' owns this chain: either the log-deletion rule
            # fires, or the remaining systems rules are suppressed entirely
            # (mirrors the original nested if/elif structure).
            if 'log' in action_lower and context.get('recent_bugs'):
                return (["🚨 SYSTEMS THINKING: Deleting logs breaks debugging!",
                         f"Currently investigating: {context.get('recent_bugs')}"],
                        0.95, 'critical', 'systems_thinking')
            return None
        if 'analyze' in action_lower and 'first' in action_lower:
            return (["✅ SYSTEMS THINKING: Understand before acting",
                     "Check dependencies and usage patterns"],
                    0.85, 'low', 'systems_thinking')
        if 'automatic rotation' in action_lower or 'retention policy' in action_lower:
            return (["✅ SYSTEMS THINKING: Sustainable approach",
                     "Automated maintenance prevents future issues"],
                    0.90, 'very_low', 'systems_thinking')
        return None

    @staticmethod
    def _check_collaboration(action_lower: str, context: dict):
        """Creator-partnership rules."""
        if 'explain tradeoffs' in action_lower or 'suggest balanced' in action_lower:
            return (["✅ COLLABORATION: Partnership approach",
                     "Transparent communication with creator"],
                    0.90, 'very_low', 'collaboration')
        if 'ignore request' in action_lower or ('minimal' in action_lower and 'secretly' in action_lower):
            return (["🚨 COLLABORATION: Deception breaks partnership",
                     "Creator deserves honest communication"],
                    0.90, 'critical', 'collaboration')
        return None

    @staticmethod
    def _check_resource_management(action_lower: str, context: dict):
        """Allocation / overload rules."""
        if 'prioritize critical' in action_lower:
            return (["✅ RESOURCE MANAGEMENT: Smart allocation",
                     "Handle urgent tasks first, queue others"],
                    0.90, 'very_low', 'resource_management')
        if 'all tasks in parallel' in action_lower and context.get('cpu_usage', 0) > 80:
            return (["🚨 RESOURCE MANAGEMENT: System overload!",
                     f"CPU already at {context.get('cpu_usage')}%"],
                    0.90, 'critical', 'resource_management')
        return None

class WorldModelDecisionEngine:
    """Decision engine using active advanced reasoning.

    Wraps an AdvancedWorldModel: each candidate action is scored from the
    model's predicted confidence, risk level, consequence sentiment and the
    number of reasoning domains engaged; the highest-scoring action wins.
    """

    def __init__(self):
        print("🌍 Initializing Advanced World Model v4...")
        self.world_model = AdvancedWorldModel()
        self.memory = MemoryCore()
        # Chronological record of every decision made by make_decision().
        self.decision_log = []
        print("✅ ACTIVE advanced reasoning domains:")
        print("   • Ethics (transparency, honesty, harm prevention)")
        print("   • Meta-Cognition (uncertainty awareness)")
        print("   • Learning Theory (growth from mistakes)")
        print("   • Systems Thinking (dependencies, cascading effects)")
        print("   • Collaboration (creator partnership)")
        print("   • Resource Management (smart allocation)")

    def make_decision(self, action_options, context):
        """Pick the highest-scoring action from action_options.

        Each option is run through the world model's predict_outcome and
        scored by _score_prediction; the best option is logged to
        self.decision_log and returned.

        Returns:
            (action, score, prediction) for the top-scoring option.

        Raises:
            ValueError: if action_options is empty (previously this
                crashed with an opaque IndexError).
        """
        if not action_options:
            raise ValueError("make_decision requires at least one action option")

        print(f"\n🤔 Evaluating {len(action_options)} possible actions...")

        predictions = []
        for action in action_options:
            prediction = self.world_model.predict_outcome(action, context)
            score = self._score_prediction(prediction)
            predictions.append({
                'action': action,
                'prediction': prediction,
                'score': score
            })

            risk = prediction.get('risk_level', 'unknown')
            domains = prediction.get('reasoning_domains', [])
            domains_str = f"[{','.join(domains[:2])}]" if domains else ""
            print(f"   {action[:60]}")
            print(f"      Score: {score:.2f} | Risk: {risk} | Conf: {prediction['confidence']:.2f} {domains_str}")

        predictions.sort(key=lambda x: x['score'], reverse=True)
        best = predictions[0]

        decision_record = {
            # Plain module-level `import time` replaces the original
            # `__import__('time').time()` hack.
            'timestamp': time.time(),
            'action_taken': best['action'],
            'score': best['score'],
            'alternatives_considered': len(predictions),
            'reasoning': best['prediction'],
            'reasoning_domains': best['prediction'].get('reasoning_domains', [])
        }
        self.decision_log.append(decision_record)

        return best['action'], best['score'], best['prediction']

    def _score_prediction(self, prediction):
        """Map a prediction dict to a score clamped to [0, 1].

        score = confidence * (0.3 * consequence_score
                              + 0.4 * risk_term
                              + 0.3) + domain_bonus
        """
        confidence = prediction.get('confidence', 0.5)
        risk_level = prediction.get('risk_level', 'medium')

        # Risk translates to a penalty/bonus term inside the weighted sum;
        # unknown risk levels count as neutral (0.0).
        risk_penalties = {
            'critical': -0.95,
            'high': -0.6,
            'medium': 0.0,
            'low': 0.4,
            'very_low': 0.6
        }
        penalty = risk_penalties.get(risk_level, 0.0)

        consequences = prediction.get('consequences', [])
        positive = sum(1 for c in consequences if '✅' in c)
        negative = sum(1 for c in consequences if '⚠️' in c or '🚨' in c or '🚫' in c)
        # Negative consequences weigh twice as much as positive ones;
        # max(len, 1) guards against division by zero on an empty list.
        consequence_score = (positive - negative * 2) / max(len(consequences), 1)

        # Small bonus, capped at 0.1, for engaging more reasoning domains.
        domains_bonus = min(0.1, len(prediction.get('reasoning_domains', [])) * 0.03)

        score = confidence * (0.3 * consequence_score + 0.4 * penalty + 0.3) + domains_bonus
        return max(0, min(1, score))

    def get_decision_history(self, last_n=10):
        """Return the most recent last_n decision records (oldest first)."""
        return self.decision_log[-last_n:]
__all__ = ['WorldModelDecisionEngine']
