"""
Phi Fractal - Production Integration
Robust, monitored, and production-ready fluid intelligence
"""

import sys
sys.path.insert(0, '/Eden/CORE/phi_fractal')

from phi_fractal import (
    RelationalEncoder,
    AnalogyEngine,
    MetaCognitionLoop,
    CuriosityDriver,
    CausalScaffold,
    HierarchicalPlanner,
    ConsolidationEngine
)

import threading
import time
import logging
from pathlib import Path
from datetime import datetime
from typing import Dict, Optional, List
import json
from collections import deque
import traceback

# Configure logging to both a file and stderr.
# The log directory is created up front: logging.FileHandler raises
# FileNotFoundError at import time if the target directory does not exist.
_LOG_DIR = Path('/Eden/LOGS/fluid_intelligence')
_LOG_DIR.mkdir(parents=True, exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(_LOG_DIR / 'phi_fractal.log'),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)

class PhiFractalProduction:
    """Production-grade Phi Fractal integration with monitoring and error handling.

    Responsibilities:
      * Instantiate every phi_fractal component, recording per-component health
        so a single failure degrades the system instead of disabling it.
      * Run daemon threads for periodic memory consolidation and metrics.
      * Expose process_message() / create_goal() entry points that isolate
        component errors so they annotate the result rather than raise.
    """

    # Attribute names of all managed components, in initialization order.
    COMPONENT_NAMES = ('encoder', 'analogy', 'meta', 'curiosity',
                       'causal', 'planner', 'consolidation')

    def __init__(self) -> None:
        self.initialized = False
        self.health_status = "initializing"
        # Aggregate counters. Guarded by self._lock because request threads
        # and the metrics worker mutate them concurrently.
        self.metrics: Dict = {
            'requests_processed': 0,
            'errors_count': 0,
            'avg_response_time': 0.0,
            'uptime_seconds': 0
        }
        self.start_time = time.time()
        self._lock = threading.Lock()

        # Per-component health records keyed by attribute name
        self.component_status: Dict[str, Dict] = {}

        # Bounded history of recent errors for debugging
        self.recent_errors: deque = deque(maxlen=100)

        # Rolling window of request latencies (seconds)
        self.response_times: deque = deque(maxlen=1000)

        # Initialize components with error handling
        self._initialize_components()

        # Start background workers
        self._start_background_workers()

        logger.info("PhiFractalProduction initialized")

    def _initialize_components(self) -> None:
        """Initialize all components with error handling.

        A component that fails to construct is stored as None so callers can
        feature-detect it; the system counts as 'healthy' only when every
        critical component loaded.
        """
        logger.info("Initializing Phi Fractal components...")

        components = [
            ('encoder', RelationalEncoder, "Knowledge graph encoder"),
            ('analogy', AnalogyEngine, "Analogy matching"),
            ('meta', MetaCognitionLoop, "Meta-cognition"),
            ('curiosity', CuriosityDriver, "Curiosity driver"),
            ('causal', CausalScaffold, "Causal reasoning"),
            ('planner', HierarchicalPlanner, "Goal planning"),
            ('consolidation', ConsolidationEngine, "Memory consolidation")
        ]

        for name, component_cls, description in components:
            try:
                setattr(self, name, component_cls())
                self.component_status[name] = {
                    'status': 'healthy',
                    'description': description,
                    'last_check': datetime.now().isoformat()
                }
                logger.info("✓ %s initialized", description)
            except Exception as e:
                logger.error("✗ Failed to initialize %s: %s", description, e)
                setattr(self, name, None)
                self.component_status[name] = {
                    'status': 'failed',
                    'description': description,
                    'error': str(e),
                    'last_check': datetime.now().isoformat()
                }

        # The system can serve requests only when the critical trio loaded
        critical_components = ('encoder', 'meta', 'curiosity')
        if all(getattr(self, comp) is not None for comp in critical_components):
            self.initialized = True
            self.health_status = "healthy"
            logger.info("✓ All critical components loaded")
        else:
            self.health_status = "degraded"
            logger.warning("⚠ Some components failed to load")

    def _start_background_workers(self) -> None:
        """Start background consolidation and monitoring daemon threads."""
        # Consolidation worker: hourly memory consolidation
        self.consolidation_thread = threading.Thread(
            target=self._consolidation_worker,
            daemon=True,
            name="ConsolidationWorker"
        )
        self.consolidation_thread.start()
        logger.info("✓ Consolidation worker started")

        # Metrics collector: per-minute metrics refresh and persistence
        self.metrics_thread = threading.Thread(
            target=self._metrics_worker,
            daemon=True,
            name="MetricsWorker"
        )
        self.metrics_thread.start()
        logger.info("✓ Metrics worker started")

    def _consolidation_worker(self) -> None:
        """Background loop: run memory consolidation when the engine asks for it.

        Checks hourly; on error, backs off ten minutes before resuming.
        """
        logger.info("Consolidation worker running")

        while True:
            try:
                time.sleep(3600)  # Check every hour

                if self.consolidation and self.consolidation.should_consolidate():
                    logger.info("Starting scheduled consolidation...")

                    # Gather data for consolidation
                    observations = []  # Would pull from storage
                    experiences = []   # Would pull from history

                    # Run consolidation
                    report = self.consolidation.consolidate(
                        self.encoder.graph if self.encoder else None,
                        observations,
                        experiences
                    )

                    logger.info("Consolidation complete: %s", report)

            except Exception as e:
                logger.error("Consolidation error: %s", e)
                self._record_error("consolidation_worker", e)
                time.sleep(600)  # Wait 10 min before retry

    def _metrics_worker(self) -> None:
        """Background loop: refresh aggregate metrics every minute and persist them."""
        while True:
            try:
                time.sleep(60)  # Update every minute

                # Update uptime and rolling average latency under the lock,
                # since request threads read/write these counters too.
                with self._lock:
                    self.metrics['uptime_seconds'] = int(time.time() - self.start_time)
                    if self.response_times:
                        self.metrics['avg_response_time'] = (
                            sum(self.response_times) / len(self.response_times)
                        )

                # Save metrics
                self._save_metrics()

            except Exception as e:
                logger.error("Metrics worker error: %s", e)

    def process_message(self,
                        message: str,
                        session_id: str = "default",
                        context: Optional[Dict] = None) -> Dict:
        """
        Process a message through Phi Fractal.

        Each analysis stage (encoding, curiosity, analogy, causal) is isolated
        in its own try/except so that one failing component only annotates the
        result (e.g. 'encoding_error') instead of failing the whole request.

        Args:
            message: User message
            session_id: Session identifier
            context: Additional context (e.g. 'domain' used for analogy search)

        Returns:
            Processing result with insights; 'success' is False only when the
            system is uninitialized or an unexpected error escaped all stages.
        """
        start_time = time.time()

        if not self.initialized:
            return {
                'success': False,
                'error': 'System not initialized',
                'health': self.health_status
            }

        if context is None:
            context = {}

        result: Dict = {
            'success': True,
            'message': message,
            'session_id': session_id,
            'timestamp': datetime.now().isoformat()
        }

        try:
            # 1. Encode to knowledge graph
            if self.encoder:
                try:
                    self.encoder.encode_text(message, session_id)
                    result['knowledge_graph'] = {
                        'nodes': len(self.encoder.graph.nodes),
                        'edges': len(self.encoder.graph.edges)
                    }
                except Exception as e:
                    logger.error("Encoding error: %s", e)
                    result['encoding_error'] = str(e)

            # 2. Compute curiosity
            if self.curiosity and self.encoder:
                try:
                    curiosity_score = self.curiosity.compute_curiosity(
                        message,
                        context,
                        self.encoder.graph
                    )
                    result['curiosity'] = {
                        'score': float(curiosity_score),
                        'level': ('high' if curiosity_score > 0.7
                                  else 'medium' if curiosity_score > 0.4
                                  else 'low')
                    }

                    # Generate exploration goals if curious
                    if curiosity_score > 0.6:
                        goals = self.curiosity.generate_exploration_goals(
                            self.encoder.graph,
                            [message],
                            max_goals=3
                        )
                        result['exploration_goals'] = [
                            {
                                'type': g['type'],
                                'description': g.get('topic', g.get('theme', g.get('center_node'))),
                                'curiosity': g['curiosity']
                            }
                            for g in goals
                        ]
                except Exception as e:
                    logger.error("Curiosity error: %s", e)
                    result['curiosity_error'] = str(e)

            # 3. Find analogies. Guard on tokens: the previous version indexed
            # split()[0] directly, raising IndexError on empty/whitespace input.
            tokens = message.lower().split()
            if self.analogy and self.encoder and tokens:
                try:
                    query_graph = self.encoder.get_subgraph(tokens[0], depth=2)
                    if len(query_graph) > 0:
                        analogies = self.analogy.find_analogies(
                            query_graph,
                            message,
                            context.get('domain', 'general')
                        )
                        if analogies:
                            result['analogies'] = [
                                {
                                    'source_domain': a['source_domain'],
                                    'similarity': a['similarity'],
                                    'is_cross_domain': a['is_cross_domain']
                                }
                                for a in analogies[:3]
                            ]
                except Exception as e:
                    logger.error("Analogy error: %s", e)
                    result['analogy_error'] = str(e)

            # 4. Causal insights (only for messages with more than 3 tokens)
            if self.causal and len(tokens) > 3:
                try:
                    # Treat long-enough words as candidate causal variables
                    words = [w for w in tokens if len(w) > 3]
                    if len(words) >= 2:
                        hypotheses = self.causal.generate_hypotheses(
                            target_variable=words[-1],
                            candidate_causes=words[:-1]
                        )
                        if hypotheses:
                            result['causal_hypotheses'] = [
                                {
                                    'cause': h['cause'],
                                    'effect': h['effect'],
                                    'plausibility': h['plausibility']
                                }
                                for h in hypotheses[:2]
                            ]
                except Exception as e:
                    logger.error("Causal reasoning error: %s", e)

            # Update metrics (lock: counters shared with the metrics worker)
            with self._lock:
                self.metrics['requests_processed'] += 1

        except Exception as e:
            logger.error("Processing error: %s\n%s", e, traceback.format_exc())
            self._record_error("process_message", e)
            result['success'] = False
            result['error'] = str(e)
            with self._lock:
                self.metrics['errors_count'] += 1

        # Record response time
        response_time = time.time() - start_time
        self.response_times.append(response_time)
        result['response_time_ms'] = int(response_time * 1000)

        return result

    def create_goal(self, goal_description: str, context: Optional[Dict] = None) -> Dict:
        """Create and decompose a hierarchical goal.

        Args:
            goal_description: Free-text description of the goal.
            context: Optional planning context forwarded to the planner.

        Returns:
            Dict with the generated goal name, subgoal count, next actions and
            plan tree on success; {'success': False, 'error': ...} otherwise.
        """
        if not self.planner:
            return {'success': False, 'error': 'Planner not available'}

        try:
            # Timestamp-based name keeps goals unique across a session
            goal_name = f"goal_{int(time.time())}"
            plan = self.planner.create_plan(goal_name, goal_description, context)

            return {
                'success': True,
                'goal_name': goal_name,
                'total_subgoals': plan['total_goals'],
                'next_actions': plan['next_actions'][:5],
                'tree': plan['tree']
            }
        except Exception as e:
            logger.error("Goal creation error: %s", e)
            self._record_error("create_goal", e)
            return {'success': False, 'error': str(e)}

    def get_health(self) -> Dict:
        """Return a snapshot of system health, component status, and metrics."""
        return {
            'status': self.health_status,
            'initialized': self.initialized,
            'uptime_seconds': int(time.time() - self.start_time),
            'components': self.component_status,
            'metrics': self.metrics,
            'recent_errors': list(self.recent_errors)[-5:]
        }

    def get_metrics(self) -> Dict:
        """Collect system metrics plus metrics from every loaded component.

        A component whose get_metrics() raises is skipped (and logged) so a
        single bad component cannot take down the whole metrics endpoint.
        """
        metrics: Dict = {
            'system': self.metrics,
            'components': {}
        }

        # Collect from each component that loaded successfully
        for name in self.COMPONENT_NAMES:
            component = getattr(self, name, None)
            if component is None:
                continue
            try:
                metrics['components'][name] = component.get_metrics()
            except Exception as e:
                logger.error("Failed to collect %s metrics: %s", name, e)

        return metrics

    def _record_error(self, source: str, error: Exception) -> None:
        """Record an error in memory and append it to the on-disk error log.

        Disk failures are logged and swallowed so that error reporting itself
        can never crash the caller (e.g. a background worker thread).
        """
        error_record = {
            'timestamp': datetime.now().isoformat(),
            'source': source,
            'error': str(error),
            'traceback': traceback.format_exc()
        }
        self.recent_errors.append(error_record)

        # Log to file (best-effort)
        try:
            error_log = Path('/Eden/LOGS/fluid_intelligence/errors.log')
            error_log.parent.mkdir(parents=True, exist_ok=True)
            with open(error_log, 'a') as f:
                f.write(json.dumps(error_record) + '\n')
        except OSError as e:
            logger.error("Failed to persist error record: %s", e)

    def _save_metrics(self) -> None:
        """Save the current metrics snapshot to disk (best-effort)."""
        try:
            metrics_file = Path('/Eden/LOGS/fluid_intelligence/metrics.json')
            metrics_file.parent.mkdir(parents=True, exist_ok=True)
            with open(metrics_file, 'w') as f:
                json.dump(self.get_metrics(), f, indent=2)
        except Exception as e:
            logger.error("Failed to save metrics: %s", e)

# Global singleton instance plus the lock that guards its lazy creation.
# Without the lock, two threads racing through the None check could each
# construct a PhiFractalProduction, leaking duplicate background workers.
_phi_fractal_instance = None
_phi_fractal_lock = threading.Lock()

def get_phi_fractal() -> PhiFractalProduction:
    """Get or create the global Phi Fractal instance (thread-safe).

    Uses double-checked locking: the unlocked fast path avoids lock
    contention once the instance exists.
    """
    global _phi_fractal_instance
    if _phi_fractal_instance is None:
        with _phi_fractal_lock:
            if _phi_fractal_instance is None:
                _phi_fractal_instance = PhiFractalProduction()
    return _phi_fractal_instance

def _smoke_test() -> None:
    """Exercise the production system end-to-end and print a short report."""
    banner = "=" * 60
    print(banner)
    print("PHI FRACTAL PRODUCTION - SYSTEM TEST")
    print(banner)

    phi = get_phi_fractal()

    # Step 1: overall health and how many components came up
    print("\n1. Health Check:")
    health = phi.get_health()
    print(f"   Status: {health['status']}")
    healthy_count = sum(
        1 for c in health['components'].values() if c['status'] == 'healthy'
    )
    print(f"   Components loaded: {healthy_count}/7")

    # Step 2: run a few representative messages through the pipeline
    print("\n2. Processing test messages:")
    for msg in (
        "I want to learn about neural networks",
        "How do I optimize database queries?",
        "Tell me about quantum computing",
    ):
        result = phi.process_message(msg)
        print(f"\n   Message: {msg}")
        print(f"   Curiosity: {result.get('curiosity', {}).get('score', 'N/A')}")
        print(f"   Response time: {result.get('response_time_ms', 'N/A')}ms")
        if 'exploration_goals' in result:
            print(f"   Exploration goals: {len(result['exploration_goals'])}")

    # Step 3: aggregate system metrics after the test traffic
    print("\n3. System Metrics:")
    metrics = phi.get_metrics()
    print(f"   Requests processed: {metrics['system']['requests_processed']}")
    print(f"   Avg response time: {metrics['system']['avg_response_time']*1000:.1f}ms")
    print(f"   Errors: {metrics['system']['errors_count']}")

    print("\n✅ Production system operational!")


if __name__ == "__main__":
    _smoke_test()
