#!/usr/bin/env python3
"""
Eden Concept Graph - Hierarchical Knowledge Representation

Evolution from flat cases to conceptual scaffolding:
1. Cluster cases → form concepts
2. Extract rules from patterns
3. Build concept graph (nodes + edges)
4. Multi-concept reasoning
5. Meta-reflection on concept utility

This is the shift from "I remember this case" to "I understand this CLASS of problems"
"""

import json
import numpy as np
from dataclasses import dataclass, asdict, field
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple, Set
from collections import defaultdict
from sklearn.cluster import DBSCAN
from sklearn.metrics.pairwise import cosine_similarity
import logging

# Configuration
EDEN_ROOT = Path("/Eden/CORE")
CONCEPT_LOG = EDEN_ROOT / "logs" / "phi_concept_graph.log"
CONCEPT_STATE = EDEN_ROOT / "phi_fractal" / "concept_graph" / "graph_state.json"
CONCEPTS_DIR = EDEN_ROOT / "phi_fractal" / "concept_graph" / "concepts"

# Create directories
CONCEPT_STATE.parent.mkdir(parents=True, exist_ok=True)
CONCEPTS_DIR.mkdir(parents=True, exist_ok=True)
CONCEPT_LOG.parent.mkdir(parents=True, exist_ok=True)

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - CONCEPT - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(CONCEPT_LOG),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


@dataclass
class ConceptNode:
    """A higher-order concept abstracted from a group of related cases."""
    id: str
    name: str
    description: str
    case_ids: List[str]  # ids of the member cases that formed this concept
    structural_features: Dict[str, Any]  # abstract structural characteristics
    confidence: float  # how well-formed the concept is (intended range 0-1)
    usage_count: int = 0
    success_count: int = 0
    timestamp: str = ""

    def __post_init__(self):
        # Stamp creation time unless the caller supplied a timestamp.
        self.timestamp = self.timestamp or datetime.now().isoformat()

    @property
    def success_rate(self) -> float:
        """Observed success ratio; a neutral 0.5 prior before first use."""
        if self.usage_count == 0:
            return 0.5
        return self.success_count / self.usage_count


@dataclass
class ConceptRule:
    """A reusable condition→action rule distilled from a concept's cases."""
    id: str
    concept_id: str
    condition: str  # situation in which the rule applies
    action: str  # recommended intervention
    confidence: float  # estimated reliability of the rule
    support_count: int  # number of cases supporting the rule
    exceptions: List[str] = field(default_factory=list)

    def matches(self, features: Dict[str, Any]) -> bool:
        """Decide whether this rule applies to the given feature set.

        Currently a stub that accepts everything; real matching logic is
        intended to be added later.
        """
        return True


@dataclass
class ConceptEdge:
    """Directed, weighted relationship between two concepts in the graph."""
    source: str
    target: str
    relationship: str  # 'causes', 'analogous_to', 'generalizes', 'requires'
    strength: float  # 0-1


class ConceptClusterer:
    """Groups cases into higher-order concepts via density-based clustering."""

    def __init__(self, min_cluster_size: int = 2):
        # Minimum number of cases required for DBSCAN to form a dense cluster.
        self.min_cluster_size = min_cluster_size

    def extract_structural_features(self, cases: List[Any]) -> np.ndarray:
        """
        Map each case to a fixed-order numeric feature vector.

        Goes beyond keywords to structural patterns: problem symptoms,
        solution techniques, and an estimated complexity level.
        """
        # Keyword signals scanned against the problem text.
        problem_signals = {
            'has_performance_issue': ('slow', 'lag', 'performance', 'speed'),
            'has_scalability_issue': ('scale', 'growth', 'load', 'capacity'),
            'has_resource_constraint': ('memory', 'cpu', 'disk', 'resource'),
        }
        # Keyword signals scanned against the solution text.
        solution_signals = {
            'requires_indexing': ('index',),
            'requires_caching': ('cache', 'caching'),
            'requires_parallelization': ('parallel', 'async'),
            'requires_distribution': ('distribut', 'shard'),
        }

        vectors = []
        for case in cases:
            problem = case.problem.lower()
            solution = case.solution.lower()

            # Build the row in a fixed key order so every vector aligns.
            row: Dict[str, Any] = {}
            for name, keywords in problem_signals.items():
                row[name] = 1 if any(k in problem for k in keywords) else 0
            for name, keywords in solution_signals.items():
                row[name] = 1 if any(k in solution for k in keywords) else 0

            # Rough complexity estimate from the declared problem type.
            if case.problem_type in ('system_design', 'distributed_systems'):
                row['complexity_level'] = 0.8
            elif case.problem_type in ('debugging', 'refactoring'):
                row['complexity_level'] = 0.5
            else:
                row['complexity_level'] = 0.3

            # Reserved dimensions, not yet derived from case content.
            row['data_volume'] = 0
            row['concurrency_level'] = 0

            vectors.append(list(row.values()))

        return np.array(vectors)

    def cluster_cases(self, cases: List[Any]) -> Dict[int, List[Any]]:
        """
        Cluster cases by structural similarity.

        Returns a mapping {cluster_id: [cases]}; DBSCAN labels noise as -1.
        Too few cases to cluster are returned as a single group under key 0.
        """
        if len(cases) < self.min_cluster_size:
            return {0: cases}

        matrix = self.extract_structural_features(cases)

        # Density-based clustering finds natural groupings without a
        # preset cluster count.
        labels = DBSCAN(eps=0.3, min_samples=self.min_cluster_size).fit_predict(matrix)

        grouped: Dict[int, List[Any]] = defaultdict(list)
        for label, case in zip(labels, cases):
            grouped[int(label)].append(case)

        logger.info(f"Formed {len(grouped)} clusters from {len(cases)} cases")
        return dict(grouped)


class RuleExtractor:
    """Derives generalizable rules from the cases belonging to a concept."""

    def extract_rules(self, concept: ConceptNode, cases: List[Any]) -> List[ConceptRule]:
        """
        Mine recurring problem/solution patterns and emit ConceptRules.
        """
        # Bucket cases by problem symptom and by solution technique.
        problem_patterns: Dict[str, List[Any]] = defaultdict(list)
        solution_patterns: Dict[str, List[Any]] = defaultdict(list)

        for case in cases:
            problem_text = case.problem.lower()
            solution_text = case.solution.lower()

            if 'performance' in problem_text:
                problem_patterns['performance'].append(case)
            if 'scale' in problem_text:
                problem_patterns['scalability'].append(case)

            if 'index' in solution_text:
                solution_patterns['indexing'].append(case)
            if 'cache' in solution_text:
                solution_patterns['caching'].append(case)

        rules: List[ConceptRule] = []
        rule_id = 0

        # Rule: performance problems that were repeatedly solved by indexing.
        perf_cases = problem_patterns.get('performance')
        idx_cases = solution_patterns.get('indexing')
        if perf_cases and idx_cases:
            overlap = len({c.id for c in perf_cases} & {c.id for c in idx_cases})
            # Require at least two supporting cases before generalizing.
            if overlap >= 2:
                rule_id += 1
                rules.append(ConceptRule(
                    id=f"{concept.id}_rule_{rule_id}",
                    concept_id=concept.id,
                    condition="Performance issue + data structure present",
                    action="Add indexing/fast-lookup structure",
                    confidence=overlap / len(perf_cases),
                    support_count=overlap
                ))

        # Rule: scalability bottlenecks in a cluster that includes
        # distribution-type cases.
        if 'scalability' in problem_patterns and any(c.problem_type == 'distribution' for c in cases):
            rule_id += 1
            rules.append(ConceptRule(
                id=f"{concept.id}_rule_{rule_id}",
                concept_id=concept.id,
                condition="Scalability bottleneck + single instance",
                action="Distribute workload across multiple nodes",
                confidence=0.8,
                support_count=len(problem_patterns['scalability'])
            ))

        logger.info(f"Extracted {len(rules)} rules from concept {concept.id}")
        return rules


class ConceptGraph:
    """The complete concept graph - Eden's semantic knowledge"""
    
    def __init__(self):
        self.concepts: Dict[str, ConceptNode] = {}
        self.rules: Dict[str, ConceptRule] = {}
        self.edges: List[ConceptEdge] = []
        self.clusterer = ConceptClusterer()
        self.rule_extractor = RuleExtractor()
        self.concept_count = 0
        
        self.load_state()
        logger.info("🧠 Concept Graph initialized")
    
    def load_state(self):
        """Load concept graph from disk"""
        if CONCEPT_STATE.exists():
            try:
                with open(CONCEPT_STATE, 'r') as f:
                    data = json.load(f)
                
                for cid, cdata in data.get('concepts', {}).items():
                    self.concepts[cid] = ConceptNode(**cdata)
                
                for rid, rdata in data.get('rules', {}).items():
                    self.rules[rid] = ConceptRule(**rdata)
                
                for edata in data.get('edges', []):
                    self.edges.append(ConceptEdge(**edata))
                
                self.concept_count = len(self.concepts)
                logger.info(f"Loaded {len(self.concepts)} concepts, {len(self.rules)} rules")
            except Exception as e:
                logger.error(f"Failed to load state: {e}")
    
    def save_state(self):
        """Save concept graph"""
        try:
            data = {
                'concepts': {cid: asdict(c) for cid, c in self.concepts.items()},
                'rules': {rid: asdict(r) for rid, r in self.rules.items()},
                'edges': [asdict(e) for e in self.edges],
                'last_updated': datetime.now().isoformat()
            }
            
            with open(CONCEPT_STATE, 'w') as f:
                json.dump(data, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save state: {e}")
    
    def build_from_cases(self, cases: List[Any]) -> None:
        """
        Build concept graph from flat case library
        This is the transformation from episodic to semantic memory
        """
        logger.info(f"🏗️  Building concept graph from {len(cases)} cases...")
        
        # Step 1: Cluster cases into concepts
        clusters = self.clusterer.cluster_cases(cases)
        
        # Step 2: Create concept nodes
        for cluster_id, cluster_cases in clusters.items():
            if cluster_id == -1:  # Noise cluster from DBSCAN
                continue
            
            self.concept_count += 1
            concept_id = f"concept_{self.concept_count:03d}"
            
            # Determine concept name based on patterns
            concept_name = self._name_concept(cluster_cases)
            
            # Extract structural features
            features = self._extract_concept_features(cluster_cases)
            
            concept = ConceptNode(
                id=concept_id,
                name=concept_name,
                description=f"Cluster of {len(cluster_cases)} related cases",
                case_ids=[c.id for c in cluster_cases],
                structural_features=features,
                confidence=0.7 + (len(cluster_cases) * 0.05)  # More cases = higher confidence
            )
            
            self.concepts[concept_id] = concept
            logger.info(f"   Created {concept_id}: {concept_name} ({len(cluster_cases)} cases)")
            
            # Step 3: Extract rules
            rules = self.rule_extractor.extract_rules(concept, cluster_cases)
            for rule in rules:
                self.rules[rule.id] = rule
        
        # Step 4: Build edges (relationships between concepts)
        self._build_concept_edges()
        
        self.save_state()
        logger.info(f"✅ Built graph: {len(self.concepts)} concepts, {len(self.rules)} rules, {len(self.edges)} edges")
    
    def _name_concept(self, cases: List[Any]) -> str:
        """Determine conceptual name from case patterns"""
        # Analyze problem types
        problem_types = [c.problem_type for c in cases]
        most_common = max(set(problem_types), key=problem_types.count) if problem_types else "general"
        
        # Analyze problem words
        all_words = []
        for c in cases:
            all_words.extend(c.problem.lower().split())
        
        # Common patterns
        if sum('performance' in c.problem.lower() or 'slow' in c.problem.lower() for c in cases) > len(cases) * 0.5:
            return "Performance_Optimization"
        elif sum('scale' in c.problem.lower() for c in cases) > len(cases) * 0.5:
            return "Scalability_Issues"
        elif sum('bug' in c.problem.lower() or 'error' in c.problem.lower() for c in cases) > len(cases) * 0.5:
            return "Error_Resolution"
        elif 'system' in most_common:
            return "System_Design_Patterns"
        else:
            return f"{most_common.title()}_Solutions"
    
    def _extract_concept_features(self, cases: List[Any]) -> Dict[str, Any]:
        """Extract abstract structural features defining this concept"""
        features = {
            'typical_complexity': np.mean([0.5 for _ in cases]),  # Placeholder
            'requires_indexing': sum('index' in c.solution.lower() for c in cases) / len(cases),
            'requires_caching': sum('cache' in c.solution.lower() for c in cases) / len(cases),
            'requires_distribution': sum('distribut' in c.solution.lower() for c in cases) / len(cases),
            'problem_domains': list(set(c.problem_type for c in cases))
        }
        return features
    
    def _build_concept_edges(self):
        """Build relationships between concepts"""
        concept_list = list(self.concepts.values())
        
        for i, concept1 in enumerate(concept_list):
            for concept2 in concept_list[i+1:]:
                # Check for analogical relationships
                similarity = self._concept_similarity(concept1, concept2)
                
                if similarity > 0.6:
                    self.edges.append(ConceptEdge(
                        source=concept1.id,
                        target=concept2.id,
                        relationship='analogous_to',
                        strength=similarity
                    ))
    
    def _concept_similarity(self, c1: ConceptNode, c2: ConceptNode) -> float:
        """Compute similarity between concepts"""
        # Compare structural features
        f1 = c1.structural_features
        f2 = c2.structural_features
        
        common_keys = set(f1.keys()) & set(f2.keys())
        if not common_keys:
            return 0.0
        
        similarities = []
        for key in common_keys:
            if isinstance(f1[key], (int, float)) and isinstance(f2[key], (int, float)):
                diff = abs(f1[key] - f2[key])
                similarities.append(1.0 - diff)
        
        return np.mean(similarities) if similarities else 0.0
    
    def reason_with_concepts(self, problem: str, problem_features: Dict[str, Any]) -> Dict[str, Any]:
        """
        Multi-concept reasoning - THE KEY INNOVATION
        Instead of matching one case, synthesize solution from multiple concepts
        """
        logger.info(f"🧠 Reasoning with concepts for: {problem[:60]}...")
        
        # Find relevant concepts
        concept_matches = []
        for concept in self.concepts.values():
            # Match based on structural features
            relevance = self._match_problem_to_concept(problem, problem_features, concept)
            if relevance > 0.3:
                concept_matches.append((concept, relevance))
        
        # Sort by relevance
        concept_matches.sort(key=lambda x: x[1], reverse=True)
        
        if not concept_matches:
            logger.info("   No matching concepts")
            return {'success': False, 'reason': 'No matching concepts'}
        
        # Multi-concept synthesis
        logger.info(f"   Matched {len(concept_matches)} concepts")
        
        primary_concept, primary_relevance = concept_matches[0]
        
        # Get applicable rules
        applicable_rules = [
            rule for rule in self.rules.values()
            if rule.concept_id == primary_concept.id
        ]
        
        # Synthesize solution
        if applicable_rules:
            best_rule = max(applicable_rules, key=lambda r: r.confidence)
            
            solution = {
                'success': True,
                'primary_concept': primary_concept.name,
                'relevance': primary_relevance,
                'action': best_rule.action,
                'confidence': best_rule.confidence * primary_relevance,
                'reasoning': f"Matched concept '{primary_concept.name}' → Applied rule '{best_rule.condition}'"
            }
            
            # Add secondary concepts if available
            if len(concept_matches) > 1:
                secondary = [c.name for c, _ in concept_matches[1:3]]
                solution['related_concepts'] = secondary
            
            logger.info(f"   ✅ Solution: {best_rule.action}")
            return solution
        
        return {'success': False, 'reason': 'No applicable rules'}
    
    def _match_problem_to_concept(self, problem: str, features: Dict[str, Any], 
                                  concept: ConceptNode) -> float:
        """Compute how well problem matches concept"""
        score = 0.0
        
        # Keyword matching
        problem_lower = problem.lower()
        if 'performance' in problem_lower and 'Performance' in concept.name:
            score += 0.4
        if 'scale' in problem_lower and 'Scalability' in concept.name:
            score += 0.4
        
        # Feature matching
        if features and concept.structural_features:
            # Simple overlap
            score += 0.2
        
        return min(1.0, score)
    
    def meta_reflect(self) -> Dict[str, Any]:
        """
        Meta-reflection: evaluate concept quality
        Which concepts are useful? Which should be refined?
        """
        logger.info("🔍 Meta-reflecting on concept graph...")
        
        concept_stats = []
        for concept in self.concepts.values():
            stats = {
                'id': concept.id,
                'name': concept.name,
                'usage': concept.usage_count,
                'success_rate': concept.success_rate,
                'confidence': concept.confidence,
                'quality': concept.success_rate * concept.confidence
            }
            concept_stats.append(stats)
        
        # Identify high/low quality concepts
        sorted_concepts = sorted(concept_stats, key=lambda x: x['quality'], reverse=True)
        
        return {
            'total_concepts': len(self.concepts),
            'total_rules': len(self.rules),
            'best_concepts': sorted_concepts[:3],
            'needs_refinement': [c for c in sorted_concepts if c['quality'] < 0.3]
        }
    
    def get_statistics(self) -> Dict[str, Any]:
        """Get concept graph statistics"""
        return {
            'concepts': len(self.concepts),
            'rules': len(self.rules),
            'edges': len(self.edges),
            'avg_cases_per_concept': np.mean([len(c.case_ids) for c in self.concepts.values()]) if self.concepts else 0,
            'concept_names': [c.name for c in self.concepts.values()]
        }


def test_concept_graph():
    """Smoke-test the full concept-graph pipeline and print a report.

    NOTE(review): depends on the Eden deployment — it imports
    AnalogicalEngine from /Eden/CORE/phi_fractal, so it only runs inside
    that environment.
    """
    print("\n" + "=" * 70)
    print("🧠 EDEN CONCEPT GRAPH - TEST MODE")
    print("=" * 70 + "\n")
    
    # Load cases from analogical engine (episodic memory source)
    import sys
    sys.path.append('/Eden/CORE/phi_fractal')
    from analogical_engine import AnalogicalEngine
    
    analogical = AnalogicalEngine()
    cases = list(analogical.cases.values())
    
    print(f"Loaded {len(cases)} cases from memory\n")
    
    # Build concept graph from the flat case library
    print("🏗️  Building concept graph...")
    graph = ConceptGraph()
    graph.build_from_cases(cases)
    
    print(f"\n📊 Concept Graph Statistics:")
    stats = graph.get_statistics()
    print(f"  Concepts: {stats['concepts']}")
    print(f"  Rules: {stats['rules']}")
    print(f"  Edges: {stats['edges']}")
    print(f"  Avg cases/concept: {stats['avg_cases_per_concept']:.1f}")
    
    print(f"\n🏷️  Concepts formed:")
    for name in stats['concept_names']:
        print(f"  • {name}")
    
    # Exercise multi-concept reasoning on a synthetic problem
    print("\n" + "=" * 70)
    print("🧪 TEST: Concept-Based Reasoning")
    print("=" * 70 + "\n")
    
    test_problem = "API endpoints experiencing high latency during peak load"
    test_features = {
        'has_performance_issue': 1,
        'has_scalability_issue': 1,
        'complexity_level': 0.7
    }
    
    print(f"Problem: {test_problem}\n")
    result = graph.reason_with_concepts(test_problem, test_features)
    
    if result['success']:
        print(f"✅ Concept Match: {result['primary_concept']}")
        print(f"   Relevance: {result['relevance']:.0%}")
        print(f"   Action: {result['action']}")
        print(f"   Confidence: {result['confidence']:.0%}")
        print(f"   Reasoning: {result['reasoning']}")
        if 'related_concepts' in result:
            print(f"   Related: {', '.join(result['related_concepts'])}")
    else:
        print(f"❌ {result['reason']}")
    
    # Evaluate concept quality via meta-reflection
    print("\n" + "=" * 70)
    print("🔍 META-REFLECTION")
    print("=" * 70 + "\n")
    
    reflection = graph.meta_reflect()
    print(f"Total concepts: {reflection['total_concepts']}")
    print(f"Total rules: {reflection['total_rules']}")
    
    if reflection['best_concepts']:
        print(f"\nBest performing concepts:")
        for concept in reflection['best_concepts']:
            print(f"  • {concept['name']}: quality={concept['quality']:.2f}")
    
    print(f"\n💾 Concept graph saved to: {CONCEPT_STATE}")
    print("\n🧠 Eden now thinks in CONCEPTS, not just cases! 🧠\n")


if __name__ == "__main__":
    import sys

    # Run the self-test when invoked as: python3 concept_graph.py test
    args = sys.argv
    if len(args) > 1 and args[1] == "test":
        test_concept_graph()
    else:
        print("Eden Concept Graph")
        print("Usage: python3 concept_graph.py test")