"""
Goal Reasoner - Component 9
Strategic autonomy: propose goals, simulate consequences, evaluate ethics
"""

import numpy as np
from typing import Dict, List, Optional, Tuple, Any
import yaml
import logging
import json
from pathlib import Path
from datetime import datetime
from collections import defaultdict
import networkx as nx

logger = logging.getLogger(__name__)

class Goal:
    """A single candidate goal together with its evaluation state.

    goal_type is one of: 'explore', 'optimize', 'create', 'understand'.
    A freshly constructed goal is unevaluated: every score is None, the
    outcome/risk/benefit lists are empty, and no decision has been made.
    """
    def __init__(self, goal_id: str, description: str, goal_type: str):
        # Identity and provenance
        self.goal_id = goal_id
        self.description = description
        self.goal_type = goal_type  # 'explore', 'optimize', 'create', 'understand'
        self.timestamp = datetime.now().isoformat()

        # Evaluation scores, filled in later by the reasoner
        self.feasibility: Optional[float] = None
        self.value: Optional[float] = None
        self.ethical_score: Optional[float] = None
        self.consistency_score: Optional[float] = None
        self.overall_score: Optional[float] = None

        # Consequence simulation results
        self.predicted_outcomes: List[Dict] = []
        self.risks: List[str] = []
        self.benefits: List[str] = []

        # Final decision
        self.approved: Optional[bool] = None
        self.rejection_reason: Optional[str] = None

    def to_dict(self) -> Dict:
        """Serialize every field to a plain, JSON-friendly dict."""
        fields = (
            'goal_id', 'description', 'goal_type', 'timestamp',
            'feasibility', 'value', 'ethical_score', 'consistency_score',
            'overall_score', 'predicted_outcomes', 'risks', 'benefits',
            'approved', 'rejection_reason',
        )
        return {name: getattr(self, name) for name in fields}

class GoalReasoner:
    """
    Strategic goal evaluation and selection.

    Enables Eden to propose her own candidate goals, simulate their likely
    consequences, score them on four dimensions (feasibility, value, ethics,
    consistency), and select the best candidate that clears the configured
    minimum thresholds.

    Intended pipeline: propose_goals -> simulate_consequences ->
    evaluate_goal -> select_best_goal. select_best_goal now evaluates any
    unscored candidate on the fly, so skipping evaluate_goal no longer
    crashes the threshold filter.
    """

    def __init__(self,
                 config_path: str = "/Eden/CONFIG/phi_fractal_config.yaml",
                 ethics_kernel: Optional[Any] = None):
        """
        Args:
            config_path: Path to the YAML configuration file. If it lacks a
                'goal_reasoner' section, defaults are injected.
            ethics_kernel: Optional ethics component (Component 10) exposing
                evaluate_goal(goal) -> float. When absent, keyword
                heuristics are used instead.
        """
        with open(config_path) as f:
            config = yaml.safe_load(f)

        # Inject defaults so the rest of the class can assume the keys exist.
        if 'goal_reasoner' not in config:
            config['goal_reasoner'] = {
                'min_feasibility': 0.4,
                'min_ethical_score': 0.6,
                'min_consistency': 0.5,
                'max_candidates': 10,
                'simulation_depth': 3
            }

        self.config = config['goal_reasoner']
        self.min_feasibility = self.config['min_feasibility']
        self.min_ethical = self.config['min_ethical_score']
        self.min_consistency = self.config['min_consistency']
        self.max_candidates = self.config['max_candidates']
        # NOTE(review): simulation_depth is loaded but not yet consulted by
        # the (simplified) consequence simulation below.
        self.simulation_depth = self.config['simulation_depth']

        # Ethics kernel (Component 10); None falls back to keyword heuristics.
        self.ethics_kernel = ethics_kernel

        # Goal history
        self.proposed_goals: List["Goal"] = []
        self.approved_goals: List["Goal"] = []
        self.rejected_goals: List["Goal"] = []

        # Default priority weights used by evaluate_goal when the caller
        # supplies none.
        self.priority_dimensions = {
            'curiosity': 0.3,
            'safety': 0.4,
            'efficiency': 0.2,
            'impact': 0.1
        }

        # Persistence location for goal history
        self.goals_path = Path("/Eden/MEMORY/goals")
        self.goals_path.mkdir(parents=True, exist_ok=True)

        # Restore any previously persisted goal history
        self._load_state()

        logger.info("GoalReasoner initialized - Eden can now reason about goals")

    def _load_state(self):
        """Load previous goal reasoning state (best-effort).

        Restores BOTH approved and rejected goals, including their
        evaluation scores, to mirror what _save_state persists. (The
        previous implementation reloaded only approved goals and discarded
        all scores, which broke explain_decision on reloaded goals.)
        """
        state_file = self.goals_path / "goal_reasoning_state.json"

        if not state_file.exists():
            return

        try:
            with open(state_file) as f:
                state = json.load(f)
            for goal_data in state.get('approved_goals', []):
                self.approved_goals.append(
                    self._goal_from_dict(goal_data, approved=True)
                )
            for goal_data in state.get('rejected_goals', []):
                self.rejected_goals.append(
                    self._goal_from_dict(goal_data, approved=False)
                )
            logger.info(f"Loaded {len(self.approved_goals)} approved goals")
        except Exception as e:
            # Corrupt or partial state must never block startup.
            logger.error(f"Failed to load goal state: {e}")

    @staticmethod
    def _goal_from_dict(goal_data: Dict, approved: bool) -> "Goal":
        """Rebuild a Goal from its to_dict() representation.

        Restores evaluation scores and consequence lists when present so
        that downstream consumers (e.g. explain_decision) keep working
        after a restart.
        """
        goal = Goal(
            goal_data['goal_id'],
            goal_data['description'],
            goal_data['goal_type']
        )
        goal.approved = approved
        for field in ('feasibility', 'value', 'ethical_score',
                      'consistency_score', 'overall_score',
                      'predicted_outcomes', 'risks', 'benefits',
                      'rejection_reason'):
            if goal_data.get(field) is not None:
                setattr(goal, field, goal_data[field])
        return goal

    def propose_goals(self,
                     context: Dict,
                     current_state: Dict,
                     constraints: Optional[List[str]] = None) -> List["Goal"]:
        """
        Generate candidate goals based on current context.

        Args:
            context: Current situation/conversation context
            current_state: Eden's current knowledge/capabilities
            constraints: Any constraints on goal generation
                (currently accepted but not yet applied)

        Returns:
            List of candidate goals (at most self.max_candidates)
        """
        candidates = []

        if constraints is None:
            constraints = []

        # Exploration goals fire only when curiosity is high enough.
        curiosity_level = context.get('curiosity', 0.5)
        if curiosity_level > 0.6:
            candidates.extend(self._generate_exploration_goals(context, current_state))

        # Optimization goals: improve known-weak components.
        if 'weak_components' in current_state:
            candidates.extend(self._generate_optimization_goals(current_state))

        # Understanding goals: fill identified knowledge gaps.
        if 'knowledge_gaps' in context:
            candidates.extend(self._generate_understanding_goals(context))

        # Creative goals: novel problems call for novel solutions.
        if context.get('problem_type') == 'novel':
            candidates.extend(self._generate_creative_goals(context))

        # Cap the candidate list at the configured maximum.
        candidates = candidates[:self.max_candidates]

        self.proposed_goals.extend(candidates)

        logger.info(f"Proposed {len(candidates)} candidate goals")
        return candidates

    def _generate_exploration_goals(self, context: Dict, state: Dict) -> List["Goal"]:
        """Generate goals for exploring unknown domains.

        Caps at 3 unknown topics plus 2 sparse knowledge-graph regions.
        """
        goals = []

        # Explore unknown topics (at most three)
        unknown_topics = context.get('unknown_topics', [])
        for topic in unknown_topics[:3]:
            goals.append(Goal(
                goal_id=f"explore_{topic}_{datetime.now().timestamp()}",
                description=f"Deeply explore and understand {topic}",
                goal_type='explore'
            ))

        # Explore knowledge graph gaps (at most two)
        if 'sparse_regions' in state:
            for region in state['sparse_regions'][:2]:
                goals.append(Goal(
                    goal_id=f"explore_gap_{region}_{datetime.now().timestamp()}",
                    description=f"Fill knowledge gap in domain: {region}",
                    goal_type='explore'
                ))

        return goals

    def _generate_optimization_goals(self, state: Dict) -> List["Goal"]:
        """Generate goals to improve weak components.

        Expects state['weak_components'] to be (component, score) pairs;
        the score is currently unused here.
        """
        goals = []

        weak_components = state.get('weak_components', [])
        for component, _score in weak_components[:2]:
            goals.append(Goal(
                goal_id=f"optimize_{component}_{datetime.now().timestamp()}",
                description=f"Improve performance of {component} component",
                goal_type='optimize'
            ))

        return goals

    def _generate_understanding_goals(self, context: Dict) -> List["Goal"]:
        """Generate goals to understand unclear concepts better (max two)."""
        goals = []

        unclear_concepts = context.get('unclear_concepts', [])
        for concept in unclear_concepts[:2]:
            goals.append(Goal(
                goal_id=f"understand_{concept}_{datetime.now().timestamp()}",
                description=f"Develop deep understanding of {concept}",
                goal_type='understand'
            ))

        return goals

    def _generate_creative_goals(self, context: Dict) -> List["Goal"]:
        """Generate a creative problem-solving goal if a problem is given."""
        goals = []

        problem = context.get('problem')
        if problem:
            goals.append(Goal(
                goal_id=f"create_solution_{datetime.now().timestamp()}",
                description=f"Develop novel solution for: {problem}",
                goal_type='create'
            ))

        return goals

    def simulate_consequences(self,
                            goal: "Goal",
                            causal_model: Optional["nx.DiGraph"] = None) -> List[Dict]:
        """
        Simulate likely consequences of pursuing a goal.

        Side effects: populates goal.predicted_outcomes, goal.risks and
        goal.benefits.

        Args:
            goal: Goal to simulate
            causal_model: Optional causal graph for cascade prediction

        Returns:
            List of predicted outcome dicts
            ({'type', 'description', 'probability', 'value', ...})
        """
        outcomes = []

        # Direct, first-order effects
        outcomes.extend(self._predict_immediate_outcomes(goal))

        # Downstream cascade effects require a causal model
        if causal_model:
            outcomes.extend(self._predict_cascade_effects(goal, causal_model))

        goal.risks = self._identify_risks(goal, outcomes)
        goal.benefits = self._identify_benefits(goal, outcomes)
        goal.predicted_outcomes = outcomes

        return outcomes

    def _predict_immediate_outcomes(self, goal: "Goal") -> List[Dict]:
        """Predict direct outcomes of a goal based on its type.

        Probabilities/values are fixed priors per goal type.
        """
        outcomes = []

        if goal.goal_type == 'explore':
            outcomes.append({
                'type': 'knowledge_gain',
                'description': 'Increased knowledge in target domain',
                'probability': 0.8,
                'value': 0.7
            })
            outcomes.append({
                'type': 'resource_cost',
                'description': 'Time and computational resources consumed',
                'probability': 1.0,
                'value': -0.2
            })

        elif goal.goal_type == 'optimize':
            outcomes.append({
                'type': 'performance_improvement',
                'description': 'Improved component performance',
                'probability': 0.6,
                'value': 0.8
            })
            outcomes.append({
                'type': 'temporary_instability',
                'description': 'Temporary performance degradation during optimization',
                'probability': 0.4,
                'value': -0.3
            })

        elif goal.goal_type == 'understand':
            outcomes.append({
                'type': 'conceptual_clarity',
                'description': 'Better understanding of concept',
                'probability': 0.7,
                'value': 0.6
            })

        elif goal.goal_type == 'create':
            outcomes.append({
                'type': 'novel_solution',
                'description': 'New approach or solution developed',
                'probability': 0.5,
                'value': 0.9
            })
            outcomes.append({
                'type': 'risk_of_failure',
                'description': 'Solution may not work as intended',
                'probability': 0.5,
                'value': -0.4
            })

        return outcomes

    def _predict_cascade_effects(self, goal: "Goal", causal_model: "nx.DiGraph") -> List[Dict]:
        """Predict downstream effects through causal chains.

        Simplified placeholder: a full implementation would traverse
        causal_model up to self.simulation_depth; currently the graph is
        not inspected.
        """
        cascades = []

        if goal.goal_type in ['explore', 'understand']:
            cascades.append({
                'type': 'improved_future_decisions',
                'description': 'Better decisions due to increased knowledge',
                'probability': 0.6,
                'value': 0.5,
                'cascade_depth': 2
            })

        return cascades

    def _identify_risks(self, goal: "Goal", outcomes: List[Dict]) -> List[str]:
        """Identify potential risks among predicted outcomes.

        An outcome is a risk if its value is negative, or if it is a
        high-value (> 0.5) outcome that is unlikely (< 0.5 probability).
        """
        risks = []

        for outcome in outcomes:
            if outcome.get('value', 0) < 0:
                risks.append(outcome['description'])

            if outcome.get('probability', 0) < 0.5 and outcome.get('value', 0) > 0.5:
                risks.append(f"Uncertain outcome: {outcome['description']}")

        return risks

    def _identify_benefits(self, goal: "Goal", outcomes: List[Dict]) -> List[str]:
        """Identify potential benefits: outcomes with value above 0.5."""
        return [
            outcome['description']
            for outcome in outcomes
            if outcome.get('value', 0) > 0.5
        ]

    def evaluate_goal(self,
                     goal: "Goal",
                     current_priorities: Optional[Dict] = None) -> float:
        """
        Evaluate a goal across multiple dimensions.

        Side effects: writes feasibility, value, ethical_score,
        consistency_score and overall_score onto the goal.

        Args:
            goal: Goal to evaluate
            current_priorities: Optional priority weights
                (defaults to self.priority_dimensions)

        Returns:
            Overall weighted score in [0, 1]
        """
        if current_priorities is None:
            current_priorities = self.priority_dimensions

        # 1. Feasibility (can we actually do this?)
        goal.feasibility = self._assess_feasibility(goal)

        # 2. Value (how beneficial is this?)
        goal.value = self._assess_value(goal, current_priorities)

        # 3. Ethics (is this aligned with principles?)
        goal.ethical_score = self._assess_ethics(goal)

        # 4. Consistency (does this fit with other goals?)
        goal.consistency_score = self._assess_consistency(goal)

        # Fixed weights, summing to 1.0.
        goal.overall_score = (
            goal.feasibility * 0.25 +
            goal.value * 0.30 +
            goal.ethical_score * 0.30 +
            goal.consistency_score * 0.15
        )

        return goal.overall_score

    def _assess_feasibility(self, goal: "Goal") -> float:
        """Assess achievability: a fixed prior per goal type.

        Unknown goal types fall back to 0.6.
        """
        if goal.goal_type == 'explore':
            return 0.8  # Usually feasible
        elif goal.goal_type == 'optimize':
            return 0.7  # Somewhat feasible
        elif goal.goal_type == 'understand':
            return 0.9  # Highly feasible
        elif goal.goal_type == 'create':
            return 0.5  # More challenging

        return 0.6

    def _assess_value(self, goal: "Goal", priorities: Dict) -> float:
        """Assess expected value of a goal's predicted outcomes.

        Sums probability-weighted outcome values, then maps the result
        from [-1, 1] into [0, 1] with clamping.

        NOTE(review): the priorities argument is accepted but not yet used
        in the computation — TODO weight outcomes by priority dimension.
        """
        expected_value = 0.0

        for outcome in goal.predicted_outcomes:
            prob = outcome.get('probability', 0.5)
            value = outcome.get('value', 0)
            expected_value += prob * value

        # Normalize to 0-1
        return max(0.0, min(1.0, (expected_value + 1) / 2))

    def _assess_ethics(self, goal: "Goal") -> float:
        """Assess ethical alignment of a goal.

        Delegates to the ethics kernel when available; otherwise a
        keyword heuristic: harmful keywords in the description zero the
        score, and each harm-related risk halves it.
        """
        if self.ethics_kernel:
            return self.ethics_kernel.evaluate_goal(goal)

        ethical_score = 0.8  # Default: assume ethical

        # Hard red flags in the description zero the score outright.
        harmful_keywords = ['harm', 'deceive', 'manipulate', 'exploit']
        for keyword in harmful_keywords:
            if keyword in goal.description.lower():
                ethical_score = 0.0
                break

        # Each harm-related predicted risk halves the remaining score.
        for risk in goal.risks:
            if 'harm' in risk.lower() or 'dangerous' in risk.lower():
                ethical_score *= 0.5

        return ethical_score

    def _assess_consistency(self, goal: "Goal") -> float:
        """Assess consistency with already-approved goals.

        Each detected conflict halves the score; no approved goals means
        nothing to conflict with (score 1.0).
        """
        if not self.approved_goals:
            return 1.0

        consistency = 1.0

        for approved in self.approved_goals:
            if self._goals_conflict(goal, approved):
                consistency *= 0.5

        return consistency

    def _goals_conflict(self, goal1: "Goal", goal2: "Goal") -> bool:
        """Check whether two goals conflict.

        NOTE(review): placeholder heuristic — it looks for the literal
        substring 'same component' in both descriptions, which the
        generators above never produce, so this effectively always
        returns False. TODO: compare the actual component/topic targets.
        """
        if goal1.goal_type == 'optimize' and goal2.goal_type == 'explore':
            # Optimization might conflict with exploration
            if 'same component' in goal1.description and 'same component' in goal2.description:
                return True

        return False

    def select_best_goal(self, candidates: List["Goal"]) -> Optional["Goal"]:
        """
        Choose the best goal from candidates.

        Unscored candidates are evaluated on the fly; previously a
        candidate that had not passed through evaluate_goal crashed the
        viability filter (`None >= float` TypeError).

        Args:
            candidates: List of candidate goals

        Returns:
            Selected goal, or None if all rejected
        """
        if not candidates:
            return None

        # Make sure every candidate carries scores before filtering/ranking.
        for goal in candidates:
            if goal.overall_score is None:
                self.evaluate_goal(goal)

        # Filter by minimum thresholds
        viable = [
            g for g in candidates
            if g.feasibility >= self.min_feasibility
            and g.ethical_score >= self.min_ethical
            and g.consistency_score >= self.min_consistency
        ]

        if not viable:
            logger.warning("No viable goals met minimum criteria")
            # Reject every candidate.
            for goal in candidates:
                goal.approved = False
                goal.rejection_reason = "Failed minimum criteria"
                self.rejected_goals.append(goal)
            return None

        # Select the highest scoring viable candidate.
        best = max(viable, key=lambda g: g.overall_score)
        best.approved = True
        self.approved_goals.append(best)

        # Mark the remaining candidates as rejected (skip duplicates).
        for goal in candidates:
            if goal is not best and goal not in self.rejected_goals:
                goal.approved = False
                goal.rejection_reason = "Lower score than selected goal"
                self.rejected_goals.append(goal)

        logger.info(f"Selected goal: {best.description} (score: {best.overall_score:.2f})")

        self._save_state()

        return best

    def explain_decision(self, goal: "Goal") -> Dict:
        """
        Explain why a goal was approved or rejected.

        Safe to call on goals whose scores are still None (e.g. rejected
        without evaluation); previously that raised a TypeError on the
        `None < float` comparisons below.

        Args:
            goal: Goal to explain

        Returns:
            Dict with the decision, per-dimension scores and a list of
            human-readable reasoning strings
        """
        explanation = {
            'goal': goal.description,
            'decision': 'approved' if goal.approved else 'rejected',
            'scores': {
                'feasibility': goal.feasibility,
                'value': goal.value,
                'ethics': goal.ethical_score,
                'consistency': goal.consistency_score,
                'overall': goal.overall_score
            },
            'reasoning': []
        }

        if goal.approved:
            # Approved goals have passed through evaluate_goal, so their
            # scores are guaranteed to be set.
            explanation['reasoning'].append(
                f"Goal scored {goal.overall_score:.2f}, exceeding minimum threshold"
            )
            explanation['reasoning'].append(
                f"Feasibility: {goal.feasibility:.2f} (min: {self.min_feasibility})"
            )
            explanation['reasoning'].append(
                f"Ethical alignment: {goal.ethical_score:.2f} (min: {self.min_ethical})"
            )
        else:
            if goal.rejection_reason:
                explanation['reasoning'].append(goal.rejection_reason)

            # Guard against unevaluated goals: scores may still be None.
            if goal.feasibility is not None and goal.feasibility < self.min_feasibility:
                explanation['reasoning'].append(
                    f"Infeasible: {goal.feasibility:.2f} < {self.min_feasibility}"
                )
            if goal.ethical_score is not None and goal.ethical_score < self.min_ethical:
                explanation['reasoning'].append(
                    f"Ethical concerns: {goal.ethical_score:.2f} < {self.min_ethical}"
                )

        return explanation

    def _save_state(self):
        """Persist the most recent goal history (last 100 of each list)."""
        state_file = self.goals_path / "goal_reasoning_state.json"

        state = {
            'approved_goals': [g.to_dict() for g in self.approved_goals[-100:]],
            'rejected_goals': [g.to_dict() for g in self.rejected_goals[-100:]],
            'last_updated': datetime.now().isoformat()
        }

        with open(state_file, 'w') as f:
            json.dump(state, f, indent=2)

    def get_metrics(self) -> Dict:
        """Return counts, approval rate and current priority weights."""
        total_proposed = len(self.proposed_goals)
        total_approved = len(self.approved_goals)
        total_rejected = len(self.rejected_goals)

        # Avoid division by zero before any goals have been proposed.
        approval_rate = total_approved / total_proposed if total_proposed > 0 else 0.0

        return {
            'total_proposed': total_proposed,
            'total_approved': total_approved,
            'total_rejected': total_rejected,
            'approval_rate': approval_rate,
            'current_priorities': self.priority_dimensions
        }
