#!/usr/bin/env python3
"""
Eden Semantic Lattice - Universal Cross-Domain Knowledge Graph

The ultimate abstraction: connecting hierarchies across ALL domains
through structural isomorphism.

Key Innovation: Problems in different domains that share the SAME STRUCTURE
connect at their abstract level, enabling:

1. Cross-domain reasoning (database → network → workflow)
2. Structural analogies (not just keyword matching)
3. Universal patterns (flow, bottleneck, optimization, distribution)
4. True domain-general intelligence

Example:
  Database slow query ─┐
                       ├─→ [BOTTLENECK PATTERN] ←─┐
  Network congestion ──┘                           ├─→ [FLOW OPTIMIZATION]
  Workflow jam ────────────────────────────────────┘

All three problems share the abstract structure "constrained flow"
and can benefit from the same class of solutions at different levels.
"""

import json
import numpy as np
from dataclasses import dataclass, asdict, field
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any, Set, Tuple
from collections import defaultdict
import logging

# Configuration
# NOTE(review): paths are hard-coded to an absolute /Eden/CORE tree — this
# module assumes it runs inside the Eden deployment; confirm for other hosts.
EDEN_ROOT = Path("/Eden/CORE")
LATTICE_LOG = EDEN_ROOT / "logs" / "phi_lattice.log"
LATTICE_STATE = EDEN_ROOT / "phi_fractal" / "semantic_lattice" / "lattice_state.json"

# Create the state/log directories up front so the FileHandler below and
# save_state() never fail on a missing parent directory.
LATTICE_STATE.parent.mkdir(parents=True, exist_ok=True)
LATTICE_LOG.parent.mkdir(parents=True, exist_ok=True)

# Log both to the lattice log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - LATTICE - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LATTICE_LOG),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


@dataclass
class AbstractPattern:
    """
    Universal pattern that exists across domains
    
    Examples:
    - BOTTLENECK: constrained flow point
    - DISTRIBUTION: spreading load across resources
    - CACHING: storing frequently accessed data
    - INDEXING: creating fast-lookup structures
    - PARALLELIZATION: concurrent execution
    """
    id: str
    name: str
    description: str
    structural_properties: List[str]  # What makes this pattern what it is?
    manifestations: Dict[str, str]  # domain → how it appears
    solution_templates: List[str]  # Generic solutions
    abstraction_level: float  # 0=concrete, 1=most abstract
    # Specific case IDs that exhibit this pattern.
    # Declared as a real dataclass field (previously it was injected in
    # __post_init__, so asdict() dropped it and the list was lost on every
    # save/load round-trip). default_factory gives each instance its own
    # list; the default keeps old state files (without the key) loadable.
    instances: List[str] = field(default_factory=list)


@dataclass
class DomainNode:
    """One field of knowledge (e.g. databases, networks, organizations) in the lattice."""
    id: str
    name: str
    hierarchies: List[str]  # Concept hierarchy IDs belonging to this domain
    patterns: Dict[str, float]  # pattern_id → accumulated strength in this domain

    def add_pattern(self, pattern_id: str, strength: float = 1.0):
        """Accumulate evidence that `pattern_id` occurs in this domain."""
        self.patterns[pattern_id] = self.patterns.get(pattern_id, 0.0) + strength


@dataclass
class StructuralBridge:
    """
    A cross-domain link established through a shared abstract pattern.

    Example: Database indexing ≅ Network routing table ≅ Org skill matrix
    All three are "fast-lookup structures" (same abstract pattern)
    """
    id: str
    pattern_id: str  # The abstract pattern both domains share
    domain_a: str
    domain_b: str
    strength: float  # Degree of structural similarity
    examples: List[Tuple[str, str]]  # (case_a, case_b) pairs showing the analogy

    @property
    def is_valid(self) -> bool:
        """A bridge only counts once its similarity clears the 0.3 noise floor."""
        return 0.3 < self.strength


class SemanticLattice:
    """
    The Universal Brain Map
    
    A multi-dimensional semantic network connecting:
    - Abstract patterns (universal structures)
    - Domain nodes (specific fields)
    - Structural bridges (cross-domain connections)
    - Hierarchies (within-domain abstraction)
    
    Enables Eden to reason: "This database problem is STRUCTURALLY SIMILAR
    to that network problem, even though the domains are different"
    """
    
    def __init__(self):
        # pattern_id → AbstractPattern / domain_id → DomainNode
        self.patterns: Dict[str, AbstractPattern] = {}
        self.domains: Dict[str, DomainNode] = {}
        self.bridges: List[StructuralBridge] = []
        
        # Counters used to mint sequential ids ("pattern_001", ...); load_state
        # re-syncs them with persisted data so new ids never collide.
        self.pattern_count = 0
        self.domain_count = 0
        self.bridge_count = 0
        
        self.load_state()
        logger.info("🌐 Semantic Lattice initialized")
    
    # ------------------------------------------------------------------
    # Internal lookups
    # ------------------------------------------------------------------
    
    def _pattern_by_name(self, name: str) -> Optional[AbstractPattern]:
        """Return the first registered pattern with this name, or None."""
        return next((p for p in self.patterns.values() if p.name == name), None)
    
    def _domain_by_name(self, name: str) -> Optional[DomainNode]:
        """Return the domain whose name matches case-insensitively, or None."""
        lowered = name.lower()
        return next((d for d in self.domains.values() if d.name.lower() == lowered), None)
    
    # ------------------------------------------------------------------
    # Persistence
    # ------------------------------------------------------------------
    
    def load_state(self):
        """Load lattice from disk; starts empty when no saved state exists."""
        if not LATTICE_STATE.exists():
            return
        try:
            with open(LATTICE_STATE, 'r') as f:
                data = json.load(f)
            
            for pid, pdata in data.get('patterns', {}).items():
                self.patterns[pid] = AbstractPattern(**pdata)
            
            for did, ddata in data.get('domains', {}).items():
                self.domains[did] = DomainNode(**ddata)
            
            for bdata in data.get('bridges', []):
                self.bridges.append(StructuralBridge(**bdata))
            
            # Keep the id counters in sync with what was loaded.
            self.pattern_count = len(self.patterns)
            self.domain_count = len(self.domains)
            self.bridge_count = len(self.bridges)
            
            logger.info(f"Loaded {len(self.patterns)} patterns, {len(self.domains)} domains, {len(self.bridges)} bridges")
        except Exception as e:
            # Best effort: a corrupt state file must not prevent startup.
            logger.error(f"Failed to load state: {e}")
    
    def save_state(self):
        """Persist the lattice as JSON (best effort; errors are only logged)."""
        try:
            data = {
                'patterns': {pid: asdict(p) for pid, p in self.patterns.items()},
                'domains': {did: asdict(d) for did, d in self.domains.items()},
                'bridges': [asdict(b) for b in self.bridges],
                'last_updated': datetime.now().isoformat()
            }
            
            with open(LATTICE_STATE, 'w') as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save state: {e}")
    
    # ------------------------------------------------------------------
    # Pattern bootstrap
    # ------------------------------------------------------------------
    
    def _register_pattern(self, name: str, description: str,
                          structural_properties: List[str],
                          manifestations: Dict[str, str],
                          solution_templates: List[str],
                          abstraction_level: float) -> AbstractPattern:
        """
        Create and register one universal pattern.
        
        Idempotent: when a pattern with this name already exists (e.g. it was
        loaded from disk), the existing one is returned instead of minting a
        duplicate id.
        """
        existing = self._pattern_by_name(name)
        if existing is not None:
            return existing
        
        self.pattern_count += 1
        pattern = AbstractPattern(
            id=f"pattern_{self.pattern_count:03d}",
            name=name,
            description=description,
            structural_properties=structural_properties,
            manifestations=manifestations,
            solution_templates=solution_templates,
            abstraction_level=abstraction_level
        )
        self.patterns[pattern.id] = pattern
        logger.info(f"   Created pattern: {pattern.name}")
        return pattern
    
    def initialize_universal_patterns(self):
        """
        Bootstrap the lattice with fundamental universal patterns
        These are the "atoms" of intelligence - patterns that appear EVERYWHERE
        
        Safe to call more than once: existing patterns are not duplicated.
        """
        logger.info("🌟 Initializing universal patterns...")
        
        # Pattern 1: BOTTLENECK (constrained flow)
        self._register_pattern(
            name="BOTTLENECK",
            description="A constraint point limiting throughput or performance",
            structural_properties=[
                "input_flow > processing_capacity",
                "queue_formation",
                "performance_degradation",
                "resource_saturation"
            ],
            manifestations={
                "database": "Slow queries due to missing indexes",
                "network": "Packet loss due to bandwidth limits",
                "cpu": "Processing delays due to limited cores",
                "workflow": "Task backlog due to single approval point",
                "traffic": "Congestion at narrow intersection"
            },
            solution_templates=[
                "Increase processing capacity",
                "Add fast-lookup structure (indexing)",
                "Parallelize processing",
                "Cache frequent requests",
                "Distribute load"
            ],
            abstraction_level=0.9
        )
        
        # Pattern 2: DISTRIBUTION (spreading load)
        self._register_pattern(
            name="DISTRIBUTION",
            description="Spreading work across multiple resources for scalability",
            structural_properties=[
                "workload_partitioning",
                "parallel_execution",
                "load_balancing",
                "horizontal_scaling"
            ],
            manifestations={
                "database": "Sharding data across servers",
                "network": "Load balancing across nodes",
                "cpu": "Multi-threading across cores",
                "cloud": "Container orchestration",
                "organization": "Team distribution"
            },
            solution_templates=[
                "Partition data/work",
                "Add coordinator/balancer",
                "Enable parallel processing",
                "Implement consensus protocol"
            ],
            abstraction_level=0.85
        )
        
        # Pattern 3: CACHING (temporary storage)
        self._register_pattern(
            name="CACHING",
            description="Storing frequently accessed data for faster retrieval",
            structural_properties=[
                "temporal_locality",
                "access_frequency",
                "fast_storage_layer",
                "invalidation_strategy"
            ],
            manifestations={
                "database": "Query result caching",
                "network": "CDN edge caching",
                "cpu": "L1/L2 cache hierarchy",
                "web": "Browser caching",
                "memory": "Working set in RAM"
            },
            solution_templates=[
                "Add caching layer",
                "Implement LRU/LFU eviction",
                "Set TTL for freshness",
                "Use write-through/write-back"
            ],
            abstraction_level=0.80
        )
        
        # Pattern 4: INDEXING (fast lookup)
        self._register_pattern(
            name="INDEXING",
            description="Creating auxiliary structures for O(1) or O(log n) lookup",
            structural_properties=[
                "key_based_access",
                "reduced_search_space",
                "tradeoff_space_for_time",
                "maintenance_overhead"
            ],
            manifestations={
                "database": "B-tree indexes on columns",
                "network": "Routing tables",
                "filesystem": "Directory structures",
                "search": "Inverted indexes",
                "memory": "Hash tables"
            },
            solution_templates=[
                "Build auxiliary lookup structure",
                "Trade space for time",
                "Maintain index consistency",
                "Choose appropriate data structure"
            ],
            abstraction_level=0.75
        )
        
        # Pattern 5: OPTIMIZATION (iterative improvement)
        self._register_pattern(
            name="OPTIMIZATION",
            description="Iteratively improving performance through measurement and refinement",
            structural_properties=[
                "measure_baseline",
                "identify_bottleneck",
                "apply_improvement",
                "measure_impact",
                "iterate"
            ],
            manifestations={
                "algorithm": "Complexity reduction",
                "query": "Query plan optimization",
                "neural_net": "Gradient descent",
                "manufacturing": "Lean optimization",
                "business": "Process improvement"
            },
            solution_templates=[
                "Profile to find bottleneck",
                "Apply targeted improvement",
                "Measure and iterate",
                "Use greedy/gradient methods"
            ],
            abstraction_level=0.95
        )
        
        logger.info(f"✅ Initialized {len(self.patterns)} universal patterns")
    
    # ------------------------------------------------------------------
    # Domains and case mapping
    # ------------------------------------------------------------------
    
    def create_domain(self, name: str, related_hierarchies: Optional[List[str]] = None) -> DomainNode:
        """Create a new domain node and register it."""
        self.domain_count += 1
        domain = DomainNode(
            id=f"domain_{self.domain_count:03d}",
            name=name,
            hierarchies=related_hierarchies or [],
            patterns={}
        )
        self.domains[domain.id] = domain
        logger.info(f"Created domain: {name}")
        return domain
    
    def map_cases_to_patterns(self, cases: List[Any]):
        """
        Analyze cases and map them to universal patterns
        This is where Eden learns which patterns appear in which domains
        
        Each case must expose `.problem`, `.solution`, `.problem_type` and
        `.id` (duck-typed; see the analogical engine's case objects).
        Detection is a simple keyword/substring heuristic.
        """
        logger.info(f"🔍 Mapping {len(cases)} cases to patterns...")
        
        # (pattern name, which text to scan, trigger keywords) — order matters
        # only for the order in which instances are recorded.
        keyword_rules = [
            ("BOTTLENECK", "problem", ['slow', 'performance', 'lag', 'bottleneck']),
            ("DISTRIBUTION", "solution", ['shard', 'distribute', 'parallel', 'balance']),
            ("CACHING", "solution", ['cache', 'caching']),
            ("INDEXING", "solution", ['index']),
        ]
        
        # Track pattern occurrences per domain (problem_type)
        domain_cases = defaultdict(list)
        
        for case in cases:
            texts = {
                "problem": case.problem.lower(),
                "solution": case.solution.lower(),
            }
            
            # Detect patterns in this case. Missing patterns (e.g. when
            # initialize_universal_patterns was never called) are skipped
            # instead of raising IndexError as the old [..][0] lookup did.
            detected_patterns = []
            for pattern_name, which_text, keywords in keyword_rules:
                pattern = self._pattern_by_name(pattern_name)
                if pattern and any(kw in texts[which_text] for kw in keywords):
                    detected_patterns.append(pattern.id)
            
            # Record this case on each detected pattern
            for pattern_id in detected_patterns:
                pattern = self.patterns.get(pattern_id)
                if pattern is None:
                    continue
                if not hasattr(pattern, 'instances'):
                    pattern.instances = []
                pattern.instances.append(case.id)
            
            domain_cases[case.problem_type].extend(detected_patterns)
        
        # Create/update one domain per problem_type and accumulate strengths
        for domain_type, pattern_ids in domain_cases.items():
            domain = self._domain_by_name(domain_type)
            if domain is None:
                domain = self.create_domain(domain_type)
            
            for pattern_id in pattern_ids:
                domain.add_pattern(pattern_id, strength=1.0)
        
        logger.info(f"✅ Mapped cases to {len(self.domains)} domains")
    
    # ------------------------------------------------------------------
    # Bridges and reasoning
    # ------------------------------------------------------------------
    
    def build_cross_domain_bridges(self):
        """
        Build structural bridges between domains
        Find where different domains share the same abstract patterns
        
        Idempotent: an existing bridge for the same (pattern, domain pair)
        is never duplicated on re-runs over loaded state.
        """
        logger.info("🌉 Building cross-domain bridges...")
        
        existing = {(b.pattern_id, frozenset((b.domain_a, b.domain_b)))
                    for b in self.bridges}
        domain_list = list(self.domains.values())
        
        for i, domain_a in enumerate(domain_list):
            for domain_b in domain_list[i+1:]:
                # Patterns both domains exhibit
                shared_patterns = set(domain_a.patterns) & set(domain_b.patterns)
                
                for pattern_id in shared_patterns:
                    key = (pattern_id, frozenset((domain_a.id, domain_b.id)))
                    if key in existing:
                        continue
                    
                    strength_a = domain_a.patterns[pattern_id]
                    strength_b = domain_b.patterns[pattern_id]
                    
                    # Ratio of weaker to stronger occurrence; the extra 1.0 in
                    # max() also guards against division by zero.
                    bridge_strength = min(strength_a, strength_b) / max(strength_a, strength_b, 1.0)
                    
                    if bridge_strength > 0.3:
                        self.bridge_count += 1
                        bridge = StructuralBridge(
                            id=f"bridge_{self.bridge_count:03d}",
                            pattern_id=pattern_id,
                            domain_a=domain_a.id,
                            domain_b=domain_b.id,
                            strength=bridge_strength,
                            examples=[]
                        )
                        self.bridges.append(bridge)
                        existing.add(key)
                        
                        pattern = self.patterns.get(pattern_id)
                        if pattern:
                            logger.info(f"   Bridge: {domain_a.name} ≅ {domain_b.name} via {pattern.name} ({bridge_strength:.0%})")
        
        logger.info(f"✅ Built {len(self.bridges)} cross-domain bridges")
    
    def reason_across_domains(self, problem: str, source_domain: str, 
                             features: Dict[str, Any]) -> Dict[str, Any]:
        """
        THE KEY CAPABILITY: Cross-domain reasoning
        
        Given a problem in one domain, find structurally similar problems
        in OTHER domains and transfer solutions
        
        Args:
            problem: free-text problem statement
            source_domain: name of the domain the problem belongs to
            features: extra problem features (currently unused; kept for API compatibility)
        
        Returns:
            dict with 'success'; on success also the detected pattern, its
            solution templates and a list of cross-domain insights.
        """
        logger.info(f"🌐 Cross-domain reasoning for: {problem[:60]}...")
        logger.info(f"   Source domain: {source_domain}")
        
        # Step 1: Identify which abstract pattern(s) this problem exhibits
        problem_patterns = []
        problem_lower = problem.lower()
        
        for pattern in self.patterns.values():
            score = 0.0
            
            # Structural-property keywords. NOTE: substring containment, so
            # very short words match aggressively — crude but cheap scoring.
            for prop in pattern.structural_properties:
                prop_words = prop.replace('_', ' ').split()
                if any(word in problem_lower for word in prop_words):
                    score += 0.3
            
            # Bonus when the pattern has a known manifestation in this domain
            manifestation = pattern.manifestations.get(source_domain)
            if manifestation and any(word in problem_lower for word in manifestation.lower().split()):
                score += 0.5
            
            if score > 0.3:
                problem_patterns.append((pattern, score))
        
        problem_patterns.sort(key=lambda x: x[1], reverse=True)
        
        if not problem_patterns:
            return {'success': False, 'reason': 'No matching abstract patterns'}
        
        primary_pattern, pattern_score = problem_patterns[0]
        logger.info(f"   Detected pattern: {primary_pattern.name} ({pattern_score:.0%})")
        
        # Step 2: Find bridges to OTHER domains via this pattern. Bridges
        # store domain *ids*, so resolve the source domain's name to its id —
        # the old code compared the name against ids, which never matched and
        # could return the source domain itself as its own "analogy".
        source_node = self._domain_by_name(source_domain)
        source_id = source_node.id if source_node else None
        
        relevant_bridges = [
            b for b in self.bridges
            if b.pattern_id == primary_pattern.id and b.is_valid
        ]
        if source_id is not None:
            # Only bridges that actually touch the source domain
            relevant_bridges = [
                b for b in relevant_bridges
                if source_id in (b.domain_a, b.domain_b)
            ]
        
        if not relevant_bridges:
            logger.info("   No cross-domain bridges found")
            return {
                'success': True,
                'pattern': primary_pattern.name,
                'pattern_score': pattern_score,
                'solution_templates': primary_pattern.solution_templates,
                'cross_domain_insights': []
            }
        
        # Step 3: Gather insights from connected domains
        cross_domain_insights = []
        
        for bridge in relevant_bridges:
            if source_id is not None:
                # Pick the endpoint that is NOT the source domain
                other_domain_id = bridge.domain_b if bridge.domain_a == source_id else bridge.domain_a
            else:
                # Source domain not registered: fall back to the legacy pick
                other_domain_id = bridge.domain_b if bridge.domain_a != source_domain else bridge.domain_a
            other_domain = self.domains.get(other_domain_id)
            
            if other_domain:
                insight = {
                    'domain': other_domain.name,
                    'pattern': primary_pattern.name,
                    'manifestation': primary_pattern.manifestations.get(other_domain.name, 'Unknown'),
                    'bridge_strength': bridge.strength,
                    'analogy': f"This {source_domain} problem is structurally similar to {primary_pattern.manifestations.get(other_domain.name, 'problems')} in {other_domain.name}"
                }
                cross_domain_insights.append(insight)
                logger.info(f"   Cross-domain: {source_domain} ≅ {other_domain.name} ({bridge.strength:.0%})")
        
        return {
            'success': True,
            'pattern': primary_pattern.name,
            'pattern_score': pattern_score,
            'description': primary_pattern.description,
            'solution_templates': primary_pattern.solution_templates,
            'cross_domain_insights': cross_domain_insights,
            'reasoning': f"Identified as {primary_pattern.name} pattern with {len(cross_domain_insights)} cross-domain analogies"
        }
    
    # ------------------------------------------------------------------
    # Reporting
    # ------------------------------------------------------------------
    
    def visualize_lattice(self) -> str:
        """Generate a plain-text visualization of the semantic lattice."""
        lines = []
        lines.append("SEMANTIC LATTICE - Universal Brain Map")
        lines.append("=" * 70)
        lines.append("")
        
        # Patterns, most abstract first
        lines.append("UNIVERSAL PATTERNS (Abstract Structures):")
        lines.append("-" * 70)
        for pattern in sorted(self.patterns.values(), key=lambda p: p.abstraction_level, reverse=True):
            lines.append(f"  {pattern.name} (abstraction: {pattern.abstraction_level:.0%})")
            lines.append(f"    {pattern.description}")
            lines.append(f"    Appears in: {len(pattern.manifestations)} domains")
        
        lines.append("")
        lines.append("DOMAINS (Specific Fields):")
        lines.append("-" * 70)
        for domain in self.domains.values():
            lines.append(f"  {domain.name}")
            # Show only the first few pattern ids to keep the report compact
            lines.append(f"    Patterns: {list(domain.patterns.keys())[:3]}...")
        
        lines.append("")
        lines.append("STRUCTURAL BRIDGES (Cross-Domain Connections):")
        lines.append("-" * 70)
        for bridge in self.bridges:
            if bridge.is_valid:
                pattern = self.patterns.get(bridge.pattern_id)
                domain_a = self.domains.get(bridge.domain_a)
                domain_b = self.domains.get(bridge.domain_b)
                # Skip bridges with dangling references rather than crashing
                if pattern and domain_a and domain_b:
                    lines.append(f"  {domain_a.name} ≅ {domain_b.name}")
                    lines.append(f"    via {pattern.name} ({bridge.strength:.0%} similarity)")
        
        return "\n".join(lines)
    
    def get_statistics(self) -> Dict[str, Any]:
        """Return summary counts/aggregates for the lattice."""
        return {
            'patterns': len(self.patterns),
            'domains': len(self.domains),
            'bridges': len(self.bridges),
            'valid_bridges': len([b for b in self.bridges if b.is_valid]),
            # np.mean returns np.float64; callers only format it, which is fine
            'avg_patterns_per_domain': np.mean([len(d.patterns) for d in self.domains.values()]) if self.domains else 0,
            'most_abstract_pattern': max(self.patterns.values(), key=lambda p: p.abstraction_level).name if self.patterns else None
        }


def test_semantic_lattice():
    """
    End-to-end smoke test of the universal brain map.

    Initializes the lattice, maps cases from the analogical engine onto the
    universal patterns, builds cross-domain bridges, saves state, and runs one
    cross-domain reasoning query, printing the results at each step.
    """
    print("\n" + "=" * 70)
    print("🌐 EDEN SEMANTIC LATTICE - UNIVERSAL BRAIN MAP")
    print("=" * 70 + "\n")
    
    # Initialize lattice (loads any previously saved state from disk)
    lattice = SemanticLattice()
    lattice.initialize_universal_patterns()
    
    # Load cases from the project-local analogical engine.
    # NOTE(review): hard-coded sys.path hack; assumes the /Eden/CORE layout —
    # this only works inside the Eden deployment.
    import sys
    sys.path.append('/Eden/CORE/phi_fractal')
    from analogical_engine import AnalogicalEngine
    
    analogical = AnalogicalEngine()
    cases = list(analogical.cases.values())
    
    # Map cases to patterns
    lattice.map_cases_to_patterns(cases)
    
    # Build bridges
    lattice.build_cross_domain_bridges()
    
    # Save
    lattice.save_state()
    
    # Show structure
    print("\n" + "=" * 70)
    print("🌐 LATTICE STRUCTURE")
    print("=" * 70 + "\n")
    print(lattice.visualize_lattice())
    
    # Statistics
    print("\n" + "=" * 70)
    print("📊 STATISTICS")
    print("=" * 70 + "\n")
    stats = lattice.get_statistics()
    print(f"Universal Patterns: {stats['patterns']}")
    print(f"Domains: {stats['domains']}")
    print(f"Cross-Domain Bridges: {stats['valid_bridges']}")
    print(f"Most Abstract Pattern: {stats['most_abstract_pattern']}")
    
    # Test cross-domain reasoning on a canned workflow problem
    print("\n" + "=" * 70)
    print("🧪 TEST: Cross-Domain Reasoning")
    print("=" * 70 + "\n")
    
    test_problem = "Our workflow has a bottleneck at the approval stage"
    test_domain = "workflow"
    test_features = {}
    
    print(f"Problem: {test_problem}")
    print(f"Domain: {test_domain}\n")
    
    result = lattice.reason_across_domains(test_problem, test_domain, test_features)
    
    # Pretty-print the reasoning result (keys per reason_across_domains)
    if result['success']:
        print(f"✅ Pattern Identified: {result['pattern']}")
        print(f"   Description: {result['description']}")
        print(f"   Confidence: {result['pattern_score']:.0%}")
        
        print(f"\n📋 Solution Templates:")
        for template in result['solution_templates']:
            print(f"   • {template}")
        
        if result['cross_domain_insights']:
            print(f"\n🌐 Cross-Domain Analogies ({len(result['cross_domain_insights'])}):")
            for insight in result['cross_domain_insights']:
                print(f"   • {insight['domain']}: {insight['manifestation']}")
                print(f"     Similarity: {insight['bridge_strength']:.0%}")
                print(f"     💡 {insight['analogy']}")
        
        print(f"\n🧠 Reasoning: {result['reasoning']}")
    else:
        print(f"❌ {result['reason']}")
    
    print(f"\n💾 Lattice saved to: {LATTICE_STATE}")
    print("\n🌐 Eden now reasons across ALL domains via structural isomorphism! 🌐\n")


if __name__ == "__main__":
    import sys
    # Run the end-to-end demo only when explicitly requested ("test" argument);
    # otherwise just print usage so a bare invocation stays side-effect free.
    if len(sys.argv) > 1 and sys.argv[1] == "test":
        test_semantic_lattice()
    else:
        print("Eden Semantic Lattice - Universal Brain Map")
        print("Usage: python3 semantic_lattice.py test")
