#!/usr/bin/env python3
"""
Fluid Eden v2.0 - Optimized with 14B fixes
Handles timeouts gracefully, uses adaptive timeouts
"""
import json
from pathlib import Path
from datetime import datetime
import subprocess
import time
import sys

# Make sibling modules in this script's directory importable when run directly.
sys.path.insert(0, str(Path(__file__).parent))

class OptimizedComplexityAnalyzer:
    """Simplified complexity analysis"""

    def __init__(self):
        # Keywords that usually signal an open-ended, heavyweight request.
        self.complex_keywords = [
            'explain', 'design', 'analyze', 'compare', 'create',
            'implement', 'develop', 'comprehensive', 'detailed'
        ]

    def analyze(self, query: str) -> str:
        """Classify *query* as 'fast' or 'smart'.

        A query is routed 'smart' when it contains any complexity keyword
        or is longer than 20 words; everything else goes 'fast'.
        """
        lowered = query.lower()
        word_count = len(lowered.split())
        keyword_hit = any(kw in lowered for kw in self.complex_keywords)

        return 'smart' if keyword_hit or word_count > 20 else 'fast'

class OptimizedModelRouter:
    """Route queries to a fast (7B) or smart (14B) ollama model.

    Keeps per-model attempt/success statistics and a running average
    latency, adapts the 14B timeout upward after timeouts, caches
    successful responses, and falls back to the 7B model whenever the
    14B model fails, times out, or raises.
    """

    def __init__(self):
        # model_type -> ollama model name
        self.models = {
            'fast': 'qwen2.5:7b',
            'smart': 'qwen2.5:14b'
        }
        # cache_key -> response text (successful runs only; unbounded)
        self.cache = {}

        # Adaptive timeouts (seconds) based on past performance
        self.timeouts = {
            'fast': 5,
            'smart': 20  # Increased from 10 to 20
        }

        # Track 14B (and 7B) performance
        self.model_stats = {
            'fast': {'attempts': 0, 'successes': 0, 'avg_time': 0},
            'smart': {'attempts': 0, 'successes': 0, 'avg_time': 0}
        }

    def execute(self, query: str, model_type: str) -> dict:
        """Run *query* on the model for *model_type* ('fast' or 'smart').

        Returns a dict with keys: model, response, time, success, cached
        (plus 'error' when success is False). The 'smart' path falls back
        to 'fast' on any failure; only the final result is returned.
        """
        # Check cache first.
        # BUG FIX: the key previously used only query[:100], so distinct
        # queries sharing a 100-char prefix collided and returned the
        # wrong cached response. Key on the full query instead.
        cache_key = f"{query}:{model_type}"
        if cache_key in self.cache:
            return {
                'model': self.models[model_type],
                'response': self.cache[cache_key],
                'time': 0.001,
                'success': True,
                'cached': True
            }

        model = self.models[model_type]
        timeout = self.timeouts[model_type]

        self.model_stats[model_type]['attempts'] += 1

        start = time.time()

        try:
            # Try with adaptive timeout
            result = subprocess.run(
                ['ollama', 'run', model, query],
                capture_output=True,
                text=True,
                timeout=timeout
            )

            elapsed = time.time() - start

            if result.returncode == 0 and result.stdout.strip():
                response = result.stdout.strip()

                # Update running-average latency over successful runs.
                stats = self.model_stats[model_type]
                stats['successes'] += 1
                count = stats['successes']
                stats['avg_time'] = (stats['avg_time'] * (count - 1) + elapsed) / count

                # Cache the good response for future identical queries.
                self.cache[cache_key] = response

                return {
                    'model': model,
                    'response': response,
                    'time': elapsed,
                    'success': True,
                    'cached': False
                }
            else:
                # Non-zero exit or empty output (no timeout) - try fallback
                if model_type == 'smart':
                    print("  ⚠️  14B failed, falling back to 7B...")
                    return self.execute(query, 'fast')

                return {
                    'model': model,
                    'response': None,
                    'time': elapsed,
                    'success': False,
                    'error': 'Empty response',
                    'cached': False
                }

        except subprocess.TimeoutExpired:
            elapsed = time.time() - start

            # Fallback to 7B if 14B times out
            if model_type == 'smart':
                print(f"  ⚠️  14B timed out ({timeout}s), falling back to 7B...")
                # Give the 14B more headroom next time, capped at 30s.
                self.timeouts['smart'] = min(self.timeouts['smart'] + 5, 30)
                return self.execute(query, 'fast')

            return {
                'model': model,
                'response': None,
                'time': elapsed,
                'success': False,
                'error': f'Timeout after {timeout}s',
                'cached': False
            }

        except Exception as e:
            # Broad catch is deliberate: any subprocess/OS error (e.g.
            # ollama binary missing) becomes a failure result, not a crash.
            elapsed = time.time() - start

            if model_type == 'smart':
                print("  ⚠️  14B error, falling back to 7B...")
                return self.execute(query, 'fast')

            return {
                'model': model,
                'response': None,
                'time': elapsed,
                'success': False,
                'error': str(e),
                'cached': False
            }

    def get_health(self):
        """Return per-model health: attempts, successes, success_rate,
        avg_time, and the current adaptive timeout. Models with zero
        attempts are omitted."""
        health = {}
        for model_type, stats in self.model_stats.items():
            if stats['attempts'] > 0:
                success_rate = stats['successes'] / stats['attempts']
                health[model_type] = {
                    'attempts': stats['attempts'],
                    'successes': stats['successes'],
                    'success_rate': success_rate,
                    'avg_time': stats['avg_time'],
                    'current_timeout': self.timeouts[model_type]
                }
        return health

class FluidEdenV2:
    """Optimized Fluid Eden with better error handling.

    Wires the complexity analyzer to the model router, tracks per-query
    stats, and persists them as JSON after every query (best-effort).
    """

    # Default location for the JSON query log.
    DEFAULT_LOG = Path('/Eden/ENHANCEMENT/fluid_eden/v2_log.json')

    def __init__(self, log_file=None):
        """Create the pipeline.

        Args:
            log_file: optional path overriding the default log location
                (backward compatible; defaults to DEFAULT_LOG).
        """
        self.analyzer = OptimizedComplexityAnalyzer()
        self.router = OptimizedModelRouter()
        self.log_file = Path(log_file) if log_file is not None else self.DEFAULT_LOG
        self.stats = {'total': 0, 'successful': 0, 'queries': []}

    def process(self, query: str) -> dict:
        """Process *query*: classify, execute, record stats, return result.

        The returned dict has keys: query, complexity, model, response,
        time, success, cached, error (error is None on success).
        """
        # Analyze
        complexity = self.analyzer.analyze(query)

        # Execute
        result = self.router.execute(query, complexity)

        # Track
        self.stats['total'] += 1
        if result['success']:
            self.stats['successful'] += 1

        self.stats['queries'].append({
            'timestamp': datetime.now().isoformat(),
            'query': query[:100],
            'complexity': complexity,
            'model': result['model'],
            'time': result['time'],
            'success': result['success'],
            'cached': result.get('cached', False)
        })

        self._save_stats()

        return {
            'query': query,
            'complexity': complexity,
            'model': result['model'],
            'response': result['response'],
            'time': result['time'],
            'success': result['success'],
            'cached': result.get('cached', False),
            'error': result.get('error')
        }

    def _save_stats(self):
        """Best-effort persist of stats to the log file.

        ROBUSTNESS FIX: previously a missing/unwritable log directory
        (e.g. no /Eden on this machine) crashed every process() call.
        Logging failures must never break query processing.
        """
        try:
            self.log_file.parent.mkdir(parents=True, exist_ok=True)
            self.log_file.write_text(json.dumps(self.stats, indent=2))
        except OSError:
            # Deliberately swallowed: stat logging is non-essential.
            pass

    def get_statistics(self):
        """Summarize performance: totals, success rate, average time of
        successful queries, per-model distribution, and router health.
        Returns {} when no queries have been processed."""
        if self.stats['total'] == 0:
            return {}

        success_rate = self.stats['successful'] / self.stats['total']

        successful_queries = [q for q in self.stats['queries'] if q['success']]
        if successful_queries:
            avg_time = sum(q['time'] for q in successful_queries) / len(successful_queries)

            model_counts = {}
            for q in successful_queries:
                model = q['model']
                model_counts[model] = model_counts.get(model, 0) + 1
        else:
            avg_time = 0
            model_counts = {}

        return {
            'total_queries': self.stats['total'],
            'successful': self.stats['successful'],
            'success_rate': success_rate,
            'average_time': avg_time,
            'model_distribution': model_counts,
            'router_health': self.router.get_health()
        }

def main():
    """Test optimized Fluid Eden v2: run a fixed query battery and print
    per-query results, aggregate statistics, and router health."""
    print("="*70)
    print("🌟 FLUID EDEN v2.0 - OPTIMIZED TEST")
    print("="*70)
    print()

    fluid = FluidEdenV2()

    # Comprehensive test: mix of trivially 'fast' and keyword-'smart' queries.
    tests = [
        "Hi",
        "Hello there",
        "What's 2+2?",
        "Tell me a joke",
        "Explain neural networks",
        "Design a system",
        "What time is it?",
        "Good morning",
        "Analyze this concept",
        "Create a plan",
    ]

    print(f"Testing with {len(tests)} queries...")
    print()

    for i, query in enumerate(tests, 1):
        print(f"[{i}/{len(tests)}] {query[:50]}...")

        result = fluid.process(query)

        cached = " [CACHED]" if result.get('cached') else ""
        icon = "✅" if result['success'] else "❌"

        # CONSISTENCY FIX: guard the ':' split the same way the model
        # distribution report below does; a tag-less model name
        # previously raised IndexError here.
        model_name = result['model']
        model_short = model_name.split(':')[1] if ':' in model_name else model_name
        print(f"  {icon} {result['complexity']} → {model_short} ({result['time']:.2f}s){cached}")

        if not result['success']:
            print(f"      Error: {result.get('error')}")

        print()

    # Statistics
    print("="*70)
    print("FINAL STATISTICS")
    print("="*70)
    print()

    stats = fluid.get_statistics()

    print(f"Total Queries: {stats['total_queries']}")
    print(f"Success Rate: {stats['success_rate']*100:.1f}%")
    print(f"Average Time: {stats['average_time']:.2f}s")
    print()

    print("Model Distribution:")
    for model, count in stats['model_distribution'].items():
        model_short = model.split(':')[1] if ':' in model else model
        percent = count / stats['successful'] * 100 if stats['successful'] > 0 else 0
        print(f"  {model_short}: {count} queries ({percent:.1f}%)")
    print()

    print("Router Health:")
    for model_type, health in stats['router_health'].items():
        print(f"  {model_type}:")
        print(f"    Success rate: {health['success_rate']*100:.1f}%")
        print(f"    Avg time: {health['avg_time']:.2f}s")
        print(f"    Timeout: {health['current_timeout']}s")

    print()

    # 90% success is the bar for calling the build production-ready.
    if stats['success_rate'] >= 0.9:
        print("✅ FLUID EDEN v2.0 PRODUCTION READY!")
    else:
        print("⚠️  Some issues detected, but system is functional")

    print()
    print("="*70)

# Run the demo only when executed as a script, not on import.
if __name__ == "__main__":
    main()
