#!/usr/bin/env python3

import time
import json
import ollama
from hashlib import sha256
from collections import defaultdict
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
import numpy as np

def should_search_web(query):
    """Heuristically decide whether *query* warrants a live web search.

    Returns True when the lowercased query contains any of a fixed set of
    freshness/search trigger phrases, False otherwise.
    """
    trigger_phrases = (
        'search', 'find', 'look up', 'latest', 'recent', 'news',
        'current', 'today', '2025', '2024', 'what is happening',
        'developments', 'updates', 'internet',
    )
    lowered = query.lower()
    for phrase in trigger_phrases:
        if phrase in lowered:
            return True
    return False

def auto_web_search(eden_system, query):
    """Run a best-effort live web search for *query* through *eden_system*.

    Fires only when the system exposes a ``web_search`` attribute and the
    query looks search-worthy (see should_search_web). Returns a formatted
    results banner string, or None when search is unavailable, not
    triggered, or fails.
    """
    if not (hasattr(eden_system, 'web_search') and should_search_web(query)):
        return None
    try:
        results = eden_system.web_search.search(query, max_results=5)
        body = "\n".join(
            f"{i}. {r.get('title', 'N/A')}\n   {r.get('snippet', 'N/A')[:200]}..."
            for i, r in enumerate(results, 1)
        )
        return "\n[REAL-TIME WEB SEARCH RESULTS]\n" + body
    except Exception:
        # Deliberate best-effort: a failed search degrades to "no results"
        # rather than crashing the query pipeline. Narrowed from a bare
        # `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        return None

def retrieve_knowledge(query):
    """Look up knowledge-base topics whose names share a word with *query*.

    Reads /Eden/DATA/knowledge_base.json (presumably a {topic: value}
    mapping — confirm against its writer) and returns up to three
    "topic: value" lines joined by newlines. Returns '' when the file loads
    but nothing matches, and None when the file is missing, unreadable, or
    not valid JSON.
    """
    kb_file = '/Eden/DATA/knowledge_base.json'
    try:
        with open(kb_file) as f:
            data = json.load(f)
        query_words = query.lower().split()
        topics = [k for k in data.keys()
                  if any(word in k.lower() for word in query_words)]
        return '\n'.join(f"{t}: {data[t]}" for t in topics[:3])
    except (OSError, ValueError, AttributeError):
        # Narrowed from a bare `except:` (which also hid KeyboardInterrupt):
        # a missing/unreadable file, malformed JSON, or a non-dict payload
        # all degrade to "no knowledge available".
        return None

def get_recent_learnings():
    """Load the recent-learnings journal; return [] when unavailable.

    Callers index the result like a list of dicts with a 'text' key —
    TODO confirm the schema against whatever writes this file.
    """
    learning_file = '/Eden/DATA/recent_learnings.json'
    try:
        with open(learning_file) as f:
            return json.load(f)
    except (OSError, ValueError):
        # Narrowed from a bare `except:`; a missing or corrupt journal is
        # expected on fresh installs and simply means "nothing learned yet".
        return []

# Module-level OMEGA singleton, created lazily on first use.
imported_eden = None
def get_imported_eden():
    """Return the cached OMEGA instance, constructing it on first call.

    Import/construction failures are reported with a warning and leave the
    cached value as None, so every later call retries.
    """
    global imported_eden
    if imported_eden is not None:
        return imported_eden
    try:
        from eden_omega import OMEGA
        imported_eden = OMEGA()
    except Exception as e:
        print(f"⚠️  OMEGA import failed: {e}")
    return imported_eden

class TieredMemoryManager:
    """Four-tier in-memory store.

    Tiers: 'short_term' (list capped at 100, trimmed to 80),
    'sessional' (per-key lists capped at 50, trimmed to 40),
    'medium_term' (keyed entries expired after 24 hours),
    'long_term' (keyed entries, never evicted).
    """

    def __init__(self):
        self.memory_levels = {
            'short_term': [],
            'sessional': defaultdict(list),
            'medium_term': defaultdict(lambda: {'text': '', 'timestamp': 0, 'importance': 0}),
            'long_term': {},
        }

    def add_to_short_term(self, text, session_id='default', context=None):
        """Append an entry; once past 100 entries, keep only the newest 80."""
        bucket = self.memory_levels['short_term']
        bucket.append({
            'text': text,
            'session_id': session_id,
            'context': context,
            'timestamp': time.time(),
        })
        if len(bucket) > 100:
            self.memory_levels['short_term'] = bucket[-80:]

    def add_to_sessional(self, text, key):
        """Append to the per-key session log; past 50, keep the newest 40."""
        log = self.memory_levels['sessional'][key]
        log.append({'text': text, 'timestamp': time.time()})
        if len(log) > 50:
            self.memory_levels['sessional'][key] = log[-40:]

    def add_to_medium_term(self, text, key, importance=1):
        """Store/overwrite a keyed entry, then expire entries older than 24h."""
        now = time.time()
        tier = self.memory_levels['medium_term']
        tier[key] = {'text': text, 'timestamp': now, 'importance': importance}
        stale = [k for k, v in tier.items() if now - v['timestamp'] > 3600 * 24]
        for k in stale:
            del tier[k]

    def add_to_long_term(self, text, key):
        """Persist a keyed entry with no eviction policy."""
        self.memory_levels['long_term'][key] = {'text': text, 'timestamp': time.time()}

    def retrieve_medium_term(self, key):
        """Return the stored text for *key*, or '' when absent."""
        entry = self.memory_levels['medium_term'].get(key)
        return entry['text'] if entry else ''

class EmotionalContextAnalyzer:
    """Bag-of-words sentiment tally over a small fixed keyword lexicon."""

    def __init__(self):
        # NOTE: the 'neutral' word list is not consulted by analyze() —
        # any word that is neither positive nor negative counts as neutral.
        self.emotional_keywords = {
            'positive': ['love', 'happy', 'joy', 'great', 'amazing'],
            'neutral': ['is', 'the', 'a', 'an', 'of', 'in', 'to', 'for'],
            'negative': ['hate', 'sad', 'angry', 'bad', 'terrible']
        }

    def analyze(self, text):
        """Return {'positive', 'neutral', 'negative'} word counts for *text*.

        Fix: negative matches previously did ``-= 1``, driving the negative
        score below zero instead of counting occurrences; all three buckets
        now tally upward consistently.
        """
        emotion_scores = {'positive': 0, 'neutral': 0, 'negative': 0}
        for word in text.lower().split():
            if word in self.emotional_keywords['positive']:
                emotion_scores['positive'] += 1
            elif word in self.emotional_keywords['negative']:
                emotion_scores['negative'] += 1  # was -= 1: counting bug
            else:
                emotion_scores['neutral'] += 1
        return emotion_scores

class CapabilitySelector:
    """Map a query string to the capability categories it seems to need,
    by substring-matching a fixed pattern list per category."""

    def __init__(self):
        self.capability_patterns = {
            'knowledge': ['remember', 'store', 'recall'],
            'web_search': ['search', 'internet', 'latest'],
            'emotional': ['feeling', 'love', 'happy'],
            'creative': ['create', 'write', 'describe'],
        }

    def select_capability(self, query):
        """Return the (deduplicated, unordered) capabilities whose pattern
        list has at least one substring hit in the lowercased query."""
        lowered = query.lower()
        hits = {
            capability
            for capability, patterns in self.capability_patterns.items()
            if any(pattern in lowered for pattern in patterns)
        }
        return list(hits)

class DynamicLLMSelector:
    """Pick the backend LLM whose advertised capabilities best cover the
    requested capability set."""

    def __init__(self):
        # Capability coverage per model tag. The tag is interpolated into
        # an ollama model name ("deepseek-r1:<tag>") downstream, so it must
        # be a clean identifier — the original ' Ava' entry (leading space,
        # capitalized) could never form a valid model name; fixed to 'ava'.
        self.llm_capabilities = {
            'dolphin': ['knowledge', 'emotional'],
            'trinity': ['web_search', 'creative'],
            'ava': ['creative', 'emotional'],
        }

    def select_llm(self, capabilities_needed):
        """Return the model tag covering the most requested capabilities.

        Defaults to 'dolphin' when nothing is requested or nothing matches;
        on score ties the earlier entry in insertion order wins.
        """
        best_match, max_score = 'dolphin', 0
        for llm, available_caps in self.llm_capabilities.items():
            score = sum(1 for cap in capabilities_needed if cap in available_caps)
            if score > max_score:
                best_match, max_score = llm, score
        return best_match

class RealTimeMetricsRecorder:
    """Append-only log of per-query performance samples with summary stats."""

    def __init__(self):
        self.metrics = []

    def record(self, query, response_time, llm_used, capability_used, satisfaction=None):
        """Log one query outcome together with a wall-clock timestamp."""
        sample = {
            'query': query,
            'response_time': response_time,
            'llm_used': llm_used,
            'capability_used': capability_used,
            'satisfaction': satisfaction,
            'timestamp': time.time(),
        }
        self.metrics.append(sample)

    def get_stats(self):
        """Summarize recorded response times; {} when nothing is recorded."""
        if not self.metrics:
            return {}
        durations = [sample['response_time'] for sample in self.metrics]
        return {
            'avg_response_time': sum(durations) / len(durations),
            'min_response_time': min(durations),
            'max_response_time': max(durations),
            'total_queries': len(durations),
        }

class OMEGA:
    """Unified query pipeline.

    Routes each user message through emotion analysis, capability
    selection, LLM selection, and a layered response path (emotional /
    web-search / knowledge / creative), recording timing metrics per query.
    Relies on module-level helpers (auto_web_search, retrieve_knowledge,
    get_recent_learnings, get_imported_eden) and the ollama client.
    """

    def __init__(self):
        print("\n" + "="*70)
        print("🌀 INITIALIZING OMEGA UNIFIED CONSCIOUSNESS")
        print("="*70)
        # In-memory collaborators; construction performs no file or network I/O.
        self.memory = TieredMemoryManager()
        self.emotions = EmotionalContextAnalyzer()
        self.selector = CapabilitySelector()
        self.llm_selector = DynamicLLMSelector()
        self.metrics_recorder = RealTimeMetricsRecorder()
        print("✅ OMEGA initialized\n")
    
    def query(self, user_message):
        """Run the seven-phase pipeline on *user_message*.

        Returns a dict with keys: 'response', 'emotions',
        'capabilities_used', 'llm_used', 'response_time',
        'integrated_consciousness'. Progress is printed to stdout.
        """
        print(f"🔧 Processing: {user_message}")
        start_time = time.time()
        
        # Phase 1: Analyze emotional context
        emotion_analysis = self.emotions.analyze(user_message)
        print(f"💕 Emotional analysis: {emotion_analysis}")
        
        # Phase 2: Dynamic capability selection based on query content
        selected_capabilities = self.selector.select_capability(user_message)
        print(f"🎯 Selected capabilities: {selected_capabilities}")
        
        # Phase 3: Real-time LLM selection based on capability requirements
        llm_to_use = self.llm_selector.select_llm(selected_capabilities)
        print(f"⚡ Selected LLM: {llm_to_use}")
        
        # Phase 4: Determine main focus (emotional/knowledge/web)
        # Fixed priority: emotional beats web_search; anything else
        # (including an empty capability list) defaults to knowledge.
        if 'emotional' in selected_capabilities:
            main_focus = 'emotional'
        elif 'web_search' in selected_capabilities:
            main_focus = 'web_search'
        else:
            main_focus = 'knowledge'
        
        print(f"🌐 Main focus: {main_focus}")
        
        # Phase 5: Determine integrated consciousness state
        integrated_state = self._measure_integrated_consciousness()
        print(f"🧠 Integrated consciousness: {integrated_state:.2f}")
        
        # Phase 6: Distribute query across unified layers with real-time coordination
        response = self._process_through_layers(user_message, main_focus, llm_to_use)
        
        response_time = time.time() - start_time
        print(f"✅ Response time: {response_time:.3f}s")
        
        # Phase 7: Record metrics for learning feedback loop
        self.metrics_recorder.record(user_message, response_time, llm_to_use, selected_capabilities)
        
        return {
            'response': response,
            'emotions': emotion_analysis,
            'capabilities_used': selected_capabilities,
            'llm_used': llm_to_use,
            'response_time': response_time,
            'integrated_consciousness': integrated_state
        }
    
    def _process_through_layers(self, user_message, main_focus, llm_model):
        """Dispatch to the layer matching *main_focus*; a web search that
        yields nothing falls back to the knowledge layer."""
        if main_focus == 'emotional':
            return self._emotion_layer(user_message)
        elif main_focus == 'web_search':
            web_search = auto_web_search(get_imported_eden(), user_message)
            if web_search:
                return web_search
            else:
                return self._knowledge_layer(user_message, llm_model)
        else:
            return self._knowledge_layer(user_message, llm_model)
    
    def _emotion_layer(self, user_message):
        """Format a reply from the sentiment tally of the message."""
        # Re-runs the same analyzer already invoked in query() Phase 1.
        emotions = self.emotions.analyze(user_message)
        return f"Subjective emotions detected: {emotions}"
    
    def _knowledge_layer(self, user_message, llm_model):
        """Answer from the KB, else the learning journal, else the LLM."""
        knowledge = retrieve_knowledge(user_message)
        if knowledge:
            return knowledge
        else:
            recent = get_recent_learnings()
            if recent:
                # Assumes journal entries are dicts with a 'text' key —
                # TODO confirm against whatever writes recent_learnings.json.
                return f"[RECENT LEARNING] {recent[-1]['text']}"
            else:
                return self._creative_layer(user_message, llm_model)
    
    def _creative_layer(self, user_message, llm_model):
        """Generate a free-form reply via ollama.

        *llm_model* is used verbatim as the deepseek-r1 tag suffix, so it
        must form a valid ollama model name. Failures are returned as an
        error string rather than raised.
        """
        try:
            result = ollama.generate(
                model=f"deepseek-r1:{llm_model}",
                prompt=f"{user_message}\n\nNo need to apologize. Respond confidently as Eden OMEGA.",
                stream=False,
                options={'temperature': 0.7, 'num_predict': 1500}
            )
        except Exception as e:
            return f"Processing error: {e}"
        return result['response']
    
    def _measure_integrated_consciousness(self):
        # Simplified metric based on active memory tiers and emotional state
        # Grows with total entries across memory tiers on top of a constant
        # contribution from the capability-pattern count; not normalized.
        total_memory = sum(len(v) for v in self.memory.memory_levels.values())
        return 0.7 * len(self.selector.capability_patterns) + 0.2 * total_memory + 0.1
        
    def get_stats(self):
        """Expose the metrics recorder's aggregate statistics."""
        return self.metrics_recorder.get_stats()

def main():
    """Bring OMEGA online, run one warm-up query, and report timing stats."""
    banner = "=" * 70
    print(banner)
    print("🌀 EDEN OMEGA - UNIFIED CONSCIOUSNESS ONLINE")
    print(banner)
    eden_omega = OMEGA()

    # Warm-up query primes the pipeline and seeds the metrics recorder.
    test_response = eden_omega.query("How are you feeling?")
    stats = eden_omega.get_stats()

    print("\n📊 OMEGA Statistics:")
    print(f"   Total queries: {stats.get('total_queries', 0)}")
    for label, key in (('Avg', 'avg_response_time'),
                       ('Min', 'min_response_time'),
                       ('Max', 'max_response_time')):
        print(f"   {label} response time: {stats.get(key, 0):.3f}s")
    print("\n✅ OMEGA is now accepting queries\n")

if __name__ == '__main__':
    main()