#!/usr/bin/env python3
"""
Unified Fluid Intelligence - THE ONE SYSTEM
Combines proven Fluid Eden v2.0 with consciousness integration
Simple, elegant, production-ready
"""
import subprocess
import time
import json
from pathlib import Path
from datetime import datetime

class UnifiedFluidIntelligence:
    """
    THE ONE SYSTEM - Unified Fluid Intelligence

    Routes each query to a 'fast' or 'smart' local ollama model, caches
    successful responses, tracks per-model success statistics, and adapts
    the smart model's timeout upward after timeouts.

    Uses proven routing logic from v2.0
    Works for consciousness layers AND external queries
    Simple, maintainable, production-ready
    """

    def __init__(self):
        # Model names exactly as understood by `ollama run`.
        self.models = {
            'fast': 'qwen2.5:7b',    # Fast operations
            'smart': 'qwen2.5:14b'   # Complex reasoning
        }

        # Adaptive timeouts in seconds (learned from testing).
        # 'smart' grows by 5s per timeout, capped at 30s (see query()).
        self.timeouts = {
            'fast': 5,
            'smart': 20
        }

        # Response cache keyed by "<full prompt>:<model_type>".
        # NOTE(review): unbounded — long-running processes may want to
        # clear or cap this.
        self.cache = {}

        # Per-model performance tracking; total_time accumulates only
        # successful-query wall time, so avg_time reflects successes.
        self.stats = {
            'fast': {'attempts': 0, 'successes': 0, 'total_time': 0},
            'smart': {'attempts': 0, 'successes': 0, 'total_time': 0}
        }

        # Substrings that push a query to the 'smart' model.
        self.complex_keywords = [
            'explain', 'design', 'analyze', 'compare', 'create',
            'implement', 'develop', 'comprehensive', 'detailed',
            'evaluate', 'architect', 'optimize', 'synthesize'
        ]

    def route(self, query: str) -> str:
        """
        Intelligent routing: fast or smart?

        Rules (in order): short query (< 10 words) with no complex keyword
        -> 'fast'; any complex keyword or > 20 words -> 'smart'; otherwise
        'fast'.

        Returns: 'fast' or 'smart'
        """
        query_lower = query.lower()
        words = len(query_lower.split())

        # Substring match, so e.g. "created" also triggers 'create'.
        has_complex = any(kw in query_lower for kw in self.complex_keywords)

        if words < 10 and not has_complex:
            return 'fast'
        elif has_complex or words > 20:
            return 'smart'
        else:
            return 'fast'

    def _failure(self, model: str, model_type: str, elapsed: float, error: str) -> dict:
        """Build the result dict for a failed query (no fallback taken)."""
        return {
            'response': None,
            'model': model,
            'model_type': model_type,
            'time': elapsed,
            'success': False,
            'cached': False,
            'error': error
        }

    def query(self, prompt: str, force_model: str = None, timeout: int = None) -> dict:
        """
        Execute query with fluid intelligence.

        A failed 'smart' attempt (timeout, error, or empty output) falls
        back to one 'fast' attempt; 'fast' failures are returned as-is.

        Args:
            prompt: The query
            force_model: Optional force 'fast' or 'smart' (overrides routing;
                any other value raises KeyError)
            timeout: Optional custom timeout in seconds

        Returns:
            {
                'response': str,
                'model': str,
                'model_type': 'fast' or 'smart',
                'time': float,
                'success': bool,
                'cached': bool,
                'error': str (if failed)
            }
        """
        # Determine model type
        model_type = force_model if force_model else self.route(prompt)
        model = self.models[model_type]

        # FIX: key on the FULL prompt, not prompt[:100] — truncation made
        # distinct prompts sharing a 100-char prefix collide and return
        # the wrong cached answer.
        cache_key = f"{prompt}:{model_type}"
        if cache_key in self.cache:
            return {
                'response': self.cache[cache_key],
                'model': model,
                'model_type': model_type,
                'time': 0.001,
                'success': True,
                'cached': True
            }

        # Use adaptive timeout unless the caller supplied one.
        timeout = timeout if timeout else self.timeouts[model_type]

        # Track attempt
        self.stats[model_type]['attempts'] += 1

        start = time.time()

        try:
            # List-form argv (shell=False): the prompt is passed as a
            # single argument, never interpreted by a shell.
            result = subprocess.run(
                ['ollama', 'run', model, prompt],
                capture_output=True,
                text=True,
                timeout=timeout
            )

            elapsed = time.time() - start

            if result.returncode == 0 and result.stdout.strip():
                response = result.stdout.strip()

                # Update stats
                self.stats[model_type]['successes'] += 1
                self.stats[model_type]['total_time'] += elapsed

                # Cache successful response
                self.cache[cache_key] = response

                return {
                    'response': response,
                    'model': model,
                    'model_type': model_type,
                    'time': elapsed,
                    'success': True,
                    'cached': False
                }
            else:
                # Failed but didn't timeout - try fallback
                if model_type == 'smart':
                    return self.query(prompt, force_model='fast')

                return self._failure(model, model_type, elapsed, 'Empty response')

        except subprocess.TimeoutExpired:
            elapsed = time.time() - start

            # Automatic fallback to fast model
            if model_type == 'smart':
                # Increase timeout for next time (capped at 30s)
                self.timeouts['smart'] = min(self.timeouts['smart'] + 5, 30)
                return self.query(prompt, force_model='fast')

            return self._failure(model, model_type, elapsed, f'Timeout after {timeout}s')

        except Exception as e:
            # e.g. FileNotFoundError when the ollama binary is missing.
            elapsed = time.time() - start

            # Try fallback on any error
            if model_type == 'smart':
                return self.query(prompt, force_model='fast')

            return self._failure(model, model_type, elapsed, str(e))

    def get_health(self) -> dict:
        """
        Get system health statistics.

        Returns one entry per model type that has at least one attempt:
        model name, attempts, successes, success_rate, avg_time (over
        successes only), and the current adaptive timeout.
        """
        health = {}

        for model_type, stats in self.stats.items():
            if stats['attempts'] > 0:
                success_rate = stats['successes'] / stats['attempts']
                avg_time = stats['total_time'] / stats['successes'] if stats['successes'] > 0 else 0

                health[model_type] = {
                    'model': self.models[model_type],
                    'attempts': stats['attempts'],
                    'successes': stats['successes'],
                    'success_rate': success_rate,
                    'avg_time': avg_time,
                    'current_timeout': self.timeouts[model_type]
                }

        return health

# Module-level holder for the shared engine instance (lazy singleton).
_fluid_intelligence = None

def get_fluid_intelligence():
    """Return the process-wide Unified Fluid Intelligence, creating it lazily."""
    global _fluid_intelligence
    instance = _fluid_intelligence
    if instance is None:
        instance = UnifiedFluidIntelligence()
        _fluid_intelligence = instance
    return instance

# Convenience functions for easy use
def fluid_query(prompt: str, force_model: str = None) -> str:
    """
    Fire-and-forget fluid query: returns only the response text, or an
    empty string when the underlying query fails.

    Usage:
        response = fluid_query("What is AI?")
        response = fluid_query("Complex analysis...", force_model='smart')
    """
    outcome = get_fluid_intelligence().query(prompt, force_model)
    if not outcome['success']:
        return ""
    return outcome['response']

def fluid_query_full(prompt: str, force_model: str = None) -> dict:
    """
    Fluid query returning the complete result dict (response, model,
    model_type, time, success, cached, and error on failure).

    Usage:
        result = fluid_query_full("What is AI?")
        print(f"Response: {result['response']}")
        print(f"Time: {result['time']}s")
        print(f"Model: {result['model']}")
    """
    return get_fluid_intelligence().query(prompt, force_model)

def fluid_health() -> dict:
    """Return health statistics from the shared fluid intelligence engine."""
    return get_fluid_intelligence().get_health()

# Test function
def test_unified_fluid():
    """Smoke-test the unified system end to end (requires a local ollama)."""
    print("="*70)
    print("🌟 UNIFIED FLUID INTELLIGENCE - TEST")
    print("="*70)
    print()

    fluid = get_fluid_intelligence()

    # (query, expected routing decision)
    tests = [
        ("Hi", "fast"),
        ("What's 2+2?", "fast"),
        ("Explain neural networks", "smart"),
        ("Design a system architecture", "smart"),
        ("Hello", "fast"),
    ]

    print(f"Running {len(tests)} test queries...")
    print()

    for query, expected_route in tests:
        print(f"Query: {query[:50]}...")

        result = fluid.query(query)

        icon = "✅" if result['success'] else "❌"
        cached = " [CACHED]" if result.get('cached') else ""
        route_match = "✓" if result['model_type'] == expected_route else "✗"

        # FIX: rpartition instead of split(':')[1] — an untagged model
        # name (no colon) previously raised IndexError here; rpartition
        # yields the tag, or the whole name when there is no colon.
        model_tag = result['model'].rpartition(':')[2]
        print(f"  {icon} {result['model_type']} {route_match} → {model_tag} ({result['time']:.2f}s){cached}")

        if not result['success']:
            print(f"      Error: {result.get('error')}")

        print()

    # Health check
    print("="*70)
    print("SYSTEM HEALTH")
    print("="*70)
    print()

    health = fluid.get_health()
    for model_type, stats in health.items():
        print(f"{model_type.upper()}:")
        print(f"  Success rate: {stats['success_rate']*100:.1f}%")
        print(f"  Avg time: {stats['avg_time']:.2f}s")
        print(f"  Timeout: {stats['current_timeout']}s")
        print()

    print("✅ Unified Fluid Intelligence working!")
    print()

# Run the self-test when executed as a script (requires a local ollama install).
if __name__ == "__main__":
    test_unified_fluid()
