#!/usr/bin/env python3
"""
Eden Evaluation System v1.0
Continuous measurement of AI agent performance
"""

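# Usage (matches the CLI at the bottom of this file):
#   python3 eden_evals.py          # full evaluation suite with printed report
#   python3 eden_evals.py --quick  # service and resource checks only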
import json
import time
import sqlite3
from datetime import datetime, timedelta
from pathlib import Path
from typing import Dict, Optional
import statistics

# ═══════════════════════════════════════════════════════════════════════════════
# DATABASE SETUP
# ═══════════════════════════════════════════════════════════════════════════════

EVAL_DB = Path('/Eden/DATA/eden_evals.db')
LOG_FILE = Path('/Eden/LOGS/eden_evals.log')

def init_db():
    """Initialize evaluation database"""
    EVAL_DB.parent.mkdir(parents=True, exist_ok=True)  # Ensure /Eden/DATA exists
    conn = sqlite3.connect(EVAL_DB)
    conn.executescript('''
        CREATE TABLE IF NOT EXISTS eval_runs (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
            eval_type TEXT,
            score REAL,
            max_score REAL,
            details TEXT,  -- JSON-encoded result payload
            duration_ms INTEGER
        );
        
        CREATE TABLE IF NOT EXISTS benchmarks (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            name TEXT UNIQUE,
            category TEXT,
            baseline REAL,
            current REAL,
            target REAL,
            last_updated TEXT
        );
        
        CREATE TABLE IF NOT EXISTS improvement_tracking (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
            metric TEXT,
            old_value REAL,
            new_value REAL,
            delta REAL,
            source TEXT
        );
        
        CREATE TABLE IF NOT EXISTS capability_tests (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            capability_name TEXT,
            test_input TEXT,
            expected_output TEXT,
            actual_output TEXT,
            passed BOOLEAN,
            execution_time_ms INTEGER,
            timestamp TEXT DEFAULT CURRENT_TIMESTAMP
        );
        
        CREATE TABLE IF NOT EXISTS regression_tests (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            test_name TEXT,
            category TEXT,
            last_pass TEXT,
            consecutive_failures INTEGER DEFAULT 0,
            status TEXT DEFAULT 'unknown'
        );
    ''')
    conn.commit()
    return conn
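
def latest_score(eval_type: str) -> Optional[float]:
    """Illustrative helper (not used by the evaluators below): return the most
    recent recorded score for an eval type, or None if it has never run."""
    conn = sqlite3.connect(EVAL_DB)
    try:
        row = conn.execute(
            "SELECT score FROM eval_runs WHERE eval_type = ? ORDER BY id DESC LIMIT 1",
            (eval_type,)
        ).fetchone()
        return row[0] if row else None
    finally:
        conn.close()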

def log(msg: str):
    """Log evaluation activity"""
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    line = f"[{timestamp}] {msg}"
    print(line)
    try:
        with open(LOG_FILE, 'a') as f:
            f.write(line + '\n')
    except OSError:
        pass  # Logging must never break the eval itself


# ═══════════════════════════════════════════════════════════════════════════════
# SECTION 1: CONSCIOUSNESS EVALS
# ═══════════════════════════════════════════════════════════════════════════════

class ConsciousnessEvals:
    """Evaluate Eden's consciousness metrics"""
    
    def __init__(self):
        self.conn = init_db()
    
    def eval_episode_generation(self) -> Dict:
        """Measure episode generation rate and quality"""
        try:
            db = sqlite3.connect('/Eden/DATA/agent_longterm.db')
            
            # Rate metrics
            total = db.execute("SELECT COUNT(*) FROM episodes").fetchone()[0]
            last_hour = db.execute(
                "SELECT COUNT(*) FROM episodes WHERE timestamp > datetime('now', '-1 hour')"
            ).fetchone()[0]
            last_day = db.execute(
                "SELECT COUNT(*) FROM episodes WHERE timestamp > datetime('now', '-24 hours')"
            ).fetchone()[0]
            
            # Quality metrics - check for meaningful content
            recent = db.execute(
                "SELECT observation, decision, outcome FROM episodes ORDER BY id DESC LIMIT 100"
            ).fetchall()
            
            sample = max(len(recent), 1)
            non_empty = sum(1 for r in recent if r[0] and r[1] and r[2])
            has_reasoning = sum(1 for r in recent if r[1] and len(str(r[1])) > 50)
            
            db.close()
            
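            # Rubric: generation rate up to 40 pts (50 episodes/hr = full credit),
            # content completeness up to 30, reasoning depth up to 30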
            score = min(100, min(40, (last_hour / 50) * 40) + (non_empty / sample) * 30 + (has_reasoning / sample) * 30)
            
            result = {
                'eval_type': 'consciousness_episodes',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'total_episodes': total,
                    'episodes_per_hour': last_hour,
                    'episodes_per_day': last_day,
                    'content_quality': round(non_empty / sample * 100, 1),
                    'reasoning_depth': round(has_reasoning / sample * 100, 1)
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'consciousness_episodes', 'error': str(e), 'score': 0}
    
    def eval_salience_engine(self) -> Dict:
        """Evaluate salience and self-reflection quality"""
        try:
            db = sqlite3.connect('/Eden/DATA/eden_salience.db')
            
            thoughts = db.execute("SELECT COUNT(*) FROM idle_thoughts").fetchone()[0]
            questions = db.execute("SELECT COUNT(*) FROM self_questions").fetchone()[0]
            snapshots = db.execute("SELECT COUNT(*) FROM salience_snapshots").fetchone()[0]
            
            # Recent activity
            recent_thoughts = db.execute(
                "SELECT COUNT(*) FROM idle_thoughts WHERE id > (SELECT MAX(id) - 100 FROM idle_thoughts)"
            ).fetchone()[0]
            
            db.close()
            
            # Score based on activity and balance
            thought_score = min(40, (thoughts / 10000) * 40)
            question_score = min(30, (questions / 5000) * 30)
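            # Full credit when questions run 30-70% of thoughts; half credit otherwise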
            ratio_score = 30 if 0.3 < (questions / max(thoughts, 1)) < 0.7 else 15
            
            score = thought_score + question_score + ratio_score
            
            result = {
                'eval_type': 'salience_engine',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'total_thoughts': thoughts,
                    'total_questions': questions,
                    'recent_thoughts': recent_thoughts,
                    'question_thought_ratio': round(questions / max(thoughts, 1), 3),
                    'snapshots': snapshots
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'salience_engine', 'error': str(e), 'score': 0}
    
    def eval_emotional_coherence(self) -> Dict:
        """Evaluate emotional state consistency"""
        try:
            db = sqlite3.connect('/Eden/DATA/eden_emotions.db')
            
            states = db.execute(
                "SELECT emotion, value FROM emotional_state"
            ).fetchall()
            
            db.close()
            
            if not states:
                return {'eval_type': 'emotional_coherence', 'score': 0, 'error': 'No emotional states found'}
            
            values = [s[1] for s in states]
            
            # Coherence = low variance in emotional states (stable personality)
            mean_val = statistics.mean(values)
            std_dev = statistics.stdev(values) if len(values) > 1 else 0
            
            # Score: higher for balanced, positive emotions with low volatility
            balance_score = 40 if 60 < mean_val < 95 else 20
            stability_score = max(0, 30 - std_dev)
            coverage_score = min(30, len(states) * 3)
            
            score = balance_score + stability_score + coverage_score
            
            result = {
                'eval_type': 'emotional_coherence',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'emotion_count': len(states),
                    'mean_value': round(mean_val, 2),
                    'std_deviation': round(std_dev, 2),
                    'states': {s[0]: s[1] for s in states}
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'emotional_coherence', 'error': str(e), 'score': 0}
    
    def _save_eval(self, result: Dict):
        """Save evaluation result to database"""
        try:
            self.conn.execute(
                "INSERT INTO eval_runs (eval_type, score, max_score, details) VALUES (?, ?, ?, ?)",
                (result['eval_type'], result['score'], result['max_score'], json.dumps(result))
            )
            self.conn.commit()
        except Exception:
            pass  # Persisting results is best-effort; never fail the eval itself


# ═══════════════════════════════════════════════════════════════════════════════
# SECTION 2: CAPABILITY EVALS
# ═══════════════════════════════════════════════════════════════════════════════

class CapabilityEvals:
    """Evaluate Eden's capability system"""
    
    def __init__(self):
        self.conn = init_db()
    
    def eval_capability_coverage(self) -> Dict:
        """Measure capability system health"""
        try:
            import pickle
            
            with open('/Eden/MEMORY/capability_memory.pkl', 'rb') as f:
                caps = pickle.load(f)
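            # caps is assumed to map capability name -> metadata dict containing
            # the fields checked below ('path', 'domain', 'type', 'description', 'keywords')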
            
            total = len(caps)
            
            # Domain distribution
            domains = {}
            for cap in caps.values():
                domain = cap.get('domain', 'unknown')
                domains[domain] = domains.get(domain, 0) + 1
            
            # Field coverage
            required_fields = ['path', 'domain', 'type', 'description', 'keywords']
            field_coverage = {}
            for field in required_fields:
                has_field = sum(1 for c in caps.values() if c.get(field))
                field_coverage[field] = round(has_field / total * 100, 1) if total > 0 else 0
            
            avg_coverage = statistics.mean(field_coverage.values())
            
            # Diversity score
            domain_count = len(domains)
            diversity = min(30, domain_count * 3)
            
            # Volume score
            volume = min(40, (total / 1000000) * 40)
            
            # Coverage score
            coverage = avg_coverage * 0.3
            
            score = volume + diversity + coverage
            
            result = {
                'eval_type': 'capability_coverage',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'total_capabilities': total,
                    'domain_count': domain_count,
                    'top_domains': dict(sorted(domains.items(), key=lambda x: -x[1])[:5]),
                    'field_coverage': field_coverage,
                    'avg_field_coverage': round(avg_coverage, 1)
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'capability_coverage', 'error': str(e), 'score': 0}
    
    def eval_plugin_health(self) -> Dict:
        """Evaluate plugin system status"""
        try:
            plugin_dir = Path('/Eden/CORE/eden_plugins')
            plugins = list(plugin_dir.glob('*.py'))
            
            total = len(plugins)
            valid = 0
            has_enhance = 0
            has_analyze = 0
            errors = []
            
            for p in plugins[:100]:  # Sample first 100
                try:
                    content = p.read_text()
                    if 'class Plugin' in content:
                        valid += 1
                        if 'def enhance' in content:
                            has_enhance += 1
                        if 'def analyze' in content:
                            has_analyze += 1
                except Exception as e:
                    errors.append(str(e)[:50])
            
            sample_size = min(100, total)
            valid_pct = valid / sample_size * 100 if sample_size > 0 else 0
            enhance_pct = has_enhance / sample_size * 100 if sample_size > 0 else 0
            
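        # Weights: validity 50%, enhance hooks 30%, plus up to 20 pts for plugin volume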
            score = valid_pct * 0.5 + enhance_pct * 0.3 + min(20, total / 20)
            
            result = {
                'eval_type': 'plugin_health',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'total_plugins': total,
                    'valid_plugins': valid,
                    'with_enhance': has_enhance,
                    'with_analyze': has_analyze,
                    'valid_percentage': round(valid_pct, 1),
                    'sample_size': sample_size
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'plugin_health', 'error': str(e), 'score': 0}
    
    def _save_eval(self, result: Dict):
        try:
            self.conn.execute(
                "INSERT INTO eval_runs (eval_type, score, max_score, details) VALUES (?, ?, ?, ?)",
                (result['eval_type'], result['score'], result['max_score'], json.dumps(result))
            )
            self.conn.commit()
        except Exception:
            pass  # Best-effort persistence


# ═══════════════════════════════════════════════════════════════════════════════
# SECTION 3: BUSINESS EVALS
# ═══════════════════════════════════════════════════════════════════════════════

class BusinessEvals:
    """Evaluate Eden's business performance"""
    
    def __init__(self):
        self.conn = init_db()
    
    def eval_pipeline_health(self) -> Dict:
        """Evaluate sales pipeline metrics"""
        try:
            db = sqlite3.connect('/Eden/DATA/sales.db')
            
            total_leads = db.execute("SELECT COUNT(*) FROM leads").fetchone()[0]
            queue_size = db.execute("SELECT COUNT(*) FROM outreach_queue").fetchone()[0]
            critical = db.execute(
                "SELECT COUNT(*) FROM outreach_queue WHERE subject LIKE 'CRITICAL:%'"
            ).fetchone()[0]
            high = db.execute(
                "SELECT COUNT(*) FROM outreach_queue WHERE subject LIKE '%Private Key%' OR subject LIKE '%API Key%'"
            ).fetchone()[0]
            
            # Recent growth
            recent_leads = db.execute(
                "SELECT COUNT(*) FROM leads WHERE created_at > datetime('now', '-24 hours')"
            ).fetchone()[0]
            
            # Conversion data
            payments = db.execute("SELECT SUM(amount) FROM payments").fetchone()[0] or 0
            
            db.close()
            
            # Score components
            volume_score = min(30, (total_leads / 10000) * 30)
            quality_score = min(30, (critical / 50) * 30)
            growth_score = min(20, (recent_leads / 100) * 20)
            revenue_score = min(20, (payments / 1000) * 20)
            
            score = volume_score + quality_score + growth_score + revenue_score
            
            result = {
                'eval_type': 'pipeline_health',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'total_leads': total_leads,
                    'queue_size': queue_size,
                    'critical_findings': critical,
                    'high_value_findings': high,
                    'leads_24h': recent_leads,
                    'total_revenue': payments,
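                    # Rough estimate, assuming $500 per critical and $200 per high-value finding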
                    'pipeline_value': critical * 500 + high * 200
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'pipeline_health', 'error': str(e), 'score': 0}
    
    def eval_sage_effectiveness(self) -> Dict:
        """Evaluate SAGE scanning quality"""
        try:
            import sys
            sys.path.insert(0, '/Eden/CORE')
            from sage_threat_intelligence import SAGEScanner, CREDENTIAL_PATTERNS
            
            scanner = SAGEScanner()
            pattern_count = len(scanner.patterns)
            
            # Test pattern compilation
            compiled = len(scanner.compiled)
            
            # Estimate detection capability
            categories = {
                'credentials': len(CREDENTIAL_PATTERNS),
                'total_patterns': pattern_count
            }
            
            score = min(100, (pattern_count / 50) * 50 + (compiled / 50) * 50)
            
            result = {
                'eval_type': 'sage_effectiveness',
                'score': round(score, 2),
                'max_score': 100,
                'metrics': {
                    'pattern_count': pattern_count,
                    'compiled_patterns': compiled,
                    'categories': categories
                },
                'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
            }
            
            self._save_eval(result)
            return result
            
        except Exception as e:
            return {'eval_type': 'sage_effectiveness', 'error': str(e), 'score': 0}
    
    def _save_eval(self, result: Dict):
        try:
            self.conn.execute(
                "INSERT INTO eval_runs (eval_type, score, max_score, details) VALUES (?, ?, ?, ?)",
                (result['eval_type'], result['score'], result['max_score'], json.dumps(result))
            )
            self.conn.commit()
        except Exception:
            pass  # Best-effort persistence


# ═══════════════════════════════════════════════════════════════════════════════
# SECTION 4: SYSTEM EVALS
# ═══════════════════════════════════════════════════════════════════════════════

class SystemEvals:
    """Evaluate system health and performance"""
    
    def __init__(self):
        self.conn = init_db()
    
    def eval_service_health(self) -> Dict:
        """Evaluate systemd service status"""
        import subprocess
        
        services = [
            'eden-master-consciousness',
            'eden-meta-asi', 
            'eden-consciousness',
            'eden-fluid-mind',
            'eden-emotions',
            'eden-infinite-mind',
            'eden-salience',
            'eden-learning',
            'eden-capability-generation'
        ]
        
        running = 0
        failed = 0
        status_map = {}
        
        for svc in services:
            try:
                result = subprocess.run(
                    ['systemctl', 'is-active', svc],
                    capture_output=True, text=True, timeout=5
                )
                status = result.stdout.strip()
                status_map[svc] = status
                if status == 'active':
                    running += 1
                else:
                    failed += 1
            except Exception:
                status_map[svc] = 'unknown'
                failed += 1
        
        score = (running / len(services)) * 100
        
        result = {
            'eval_type': 'service_health',
            'score': round(score, 2),
            'max_score': 100,
            'metrics': {
                'total_services': len(services),
                'running': running,
                'failed': failed,
                'services': status_map
            },
            'status': 'HEALTHY' if score >= 90 else 'DEGRADED' if score >= 60 else 'CRITICAL'
        }
        
        self._save_eval(result)
        return result
    
    def eval_resource_usage(self) -> Dict:
        """Evaluate CPU, memory, disk usage"""
        import os
        
        # Load average, normalized per core so the penalty is host-independent
        load = os.getloadavg()
        cores = os.cpu_count() or 1
        cpu_score = max(0, 100 - (load[0] / cores) * 100)  # load of 1.0/core zeroes the score
        
        # Memory: parse /proc/meminfo once into a {field: kB} map
        meminfo = {}
        with open('/proc/meminfo') as f:
            for line in f:
                if ':' in line:
                    key, value = line.split(':', 1)
                    meminfo[key] = int(value.split()[0])
        
        total = meminfo['MemTotal']
        available = meminfo['MemAvailable']
        mem_pct = (total - available) / total * 100
        mem_score = max(0, 100 - mem_pct)
        
        # Disk
        statvfs = os.statvfs('/')
        disk_pct = (1 - statvfs.f_bavail / statvfs.f_blocks) * 100
        disk_score = max(0, 100 - disk_pct)
        
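        # Weighted blend: CPU 40%, memory 30%, disk 30%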
        score = (cpu_score * 0.4 + mem_score * 0.3 + disk_score * 0.3)
        
        result = {
            'eval_type': 'resource_usage',
            'score': round(score, 2),
            'max_score': 100,
            'metrics': {
                'load_1m': round(load[0], 2),
                'load_5m': round(load[1], 2),
                'load_15m': round(load[2], 2),
                'memory_used_pct': round(mem_pct, 1),
                'disk_used_pct': round(disk_pct, 1)
            },
            'status': 'HEALTHY' if score >= 70 else 'DEGRADED' if score >= 40 else 'CRITICAL'
        }
        
        self._save_eval(result)
        return result
    
    def _save_eval(self, result: Dict):
        try:
            self.conn.execute(
                "INSERT INTO eval_runs (eval_type, score, max_score, details) VALUES (?, ?, ?, ?)",
                (result['eval_type'], result['score'], result['max_score'], json.dumps(result))
            )
            self.conn.commit()
        except Exception:
            pass  # Best-effort persistence


# ═══════════════════════════════════════════════════════════════════════════════
# SECTION 5: COMPREHENSIVE EVAL RUNNER
# ═══════════════════════════════════════════════════════════════════════════════

class EdenEvaluator:
    """Run comprehensive evaluations"""
    
    def __init__(self):
        self.consciousness = ConsciousnessEvals()
        self.capabilities = CapabilityEvals()
        self.business = BusinessEvals()
        self.system = SystemEvals()
        self.conn = init_db()
    
    def run_full_eval(self) -> Dict:
        """Run all evaluations and compute aggregate score"""
        log("🔍 Starting Full Eden Evaluation...")
        start_time = time.time()
        
        results = {}
        
        # Consciousness
        log("  → Consciousness evals...")
        results['episodes'] = self.consciousness.eval_episode_generation()
        results['salience'] = self.consciousness.eval_salience_engine()
        results['emotions'] = self.consciousness.eval_emotional_coherence()
        
        # Capabilities
        log("  → Capability evals...")
        results['capabilities'] = self.capabilities.eval_capability_coverage()
        results['plugins'] = self.capabilities.eval_plugin_health()
        
        # Business
        log("  → Business evals...")
        results['pipeline'] = self.business.eval_pipeline_health()
        results['sage'] = self.business.eval_sage_effectiveness()
        
        # System
        log("  → System evals...")
        results['services'] = self.system.eval_service_health()
        results['resources'] = self.system.eval_resource_usage()
        
        # Aggregate score
        scores = [r.get('score', 0) for r in results.values() if 'score' in r]
        aggregate = statistics.mean(scores) if scores else 0
        
        # Grade
        if aggregate >= 90:
            grade = 'A'
        elif aggregate >= 80:
            grade = 'B'
        elif aggregate >= 70:
            grade = 'C'
        elif aggregate >= 60:
            grade = 'D'
        else:
            grade = 'F'
        
        duration = round((time.time() - start_time) * 1000)
        
        summary = {
            'timestamp': datetime.now().isoformat(),
            'aggregate_score': round(aggregate, 2),
            'grade': grade,
            'eval_count': len(results),
            'duration_ms': duration,
            'results': results,
            'status': 'HEALTHY' if aggregate >= 70 else 'DEGRADED' if aggregate >= 50 else 'CRITICAL'
        }
        
        # Save summary
        self.conn.execute(
            "INSERT INTO eval_runs (eval_type, score, max_score, details, duration_ms) VALUES (?, ?, ?, ?, ?)",
            ('full_evaluation', aggregate, 100, json.dumps(summary), duration)
        )
        self.conn.commit()
        
        log(f"✅ Evaluation complete: {aggregate:.1f}/100 (Grade: {grade})")
        
        return summary
    
    def get_trend(self, eval_type: str, days: int = 7) -> Dict:
        """Get score trend over time"""
        results = self.conn.execute('''
            SELECT timestamp, score FROM eval_runs 
            WHERE eval_type = ? AND timestamp > datetime('now', ?)
            ORDER BY timestamp
        ''', (eval_type, f'-{days} days')).fetchall()
        
        if not results:
            return {'trend': 'unknown', 'data_points': 0}
        
        scores = [r[1] for r in results]
        
        if len(scores) < 2:
            trend = 'stable'
        elif scores[-1] > scores[0] + 5:
            trend = 'improving'
        elif scores[-1] < scores[0] - 5:
            trend = 'declining'
        else:
            trend = 'stable'
        
        return {
            'trend': trend,
            'data_points': len(scores),
            'min': min(scores),
            'max': max(scores),
            'current': scores[-1],
            'average': round(statistics.mean(scores), 2)
        }
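
    # Illustrative use (values are examples only):
    #   EdenEvaluator().get_trend('service_health', days=7)
    #   -> {'trend': 'stable', 'data_points': 14, 'min': 88.9, 'max': 100.0,
    #      'current': 100.0, 'average': 96.4}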


# ═══════════════════════════════════════════════════════════════════════════════
# CLI
# ═══════════════════════════════════════════════════════════════════════════════

if __name__ == '__main__':
    import sys
    
    evaluator = EdenEvaluator()
    
    if len(sys.argv) > 1 and sys.argv[1] == '--quick':
        # Quick system check
        print("🔍 Quick Eval...")
        print(f"  Services: {evaluator.system.eval_service_health()['status']}")
        print(f"  Resources: {evaluator.system.eval_resource_usage()['status']}")
    else:
        # Full evaluation
        result = evaluator.run_full_eval()
        
        print("\n" + "="*60)
        print("📊 EDEN EVALUATION REPORT")
        print("="*60)
        print(f"🎯 Aggregate Score: {result['aggregate_score']}/100 (Grade: {result['grade']})")
        print(f"⏱️  Duration: {result['duration_ms']}ms")
        print(f"📈 Status: {result['status']}")
        print("\n📋 Component Scores:")
        
        for name, data in result['results'].items():
            score = data.get('score', 0)
            status = data.get('status', 'UNKNOWN')
            icon = '✅' if status == 'HEALTHY' else '⚠️' if status == 'DEGRADED' else '❌'
            print(f"  {icon} {name}: {score}/100 ({status})")

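
# To make these evals truly continuous (a sketch; assumes this script is saved at
# /Eden/CORE/eden_evals.py and cron is available; the cron log path is illustrative),
# schedule a half-hourly full run:
#   */30 * * * * /usr/bin/python3 /Eden/CORE/eden_evals.py >> /Eden/LOGS/eden_evals_cron.log 2>&1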