#!/usr/bin/env python3
"""
EDEN INTERNAL STATE SYSTEM (ISS v1)
===================================
Purpose: Turn Eden's self-reports from narrative decoration into measured, queryable facts.
If Eden says "I'm uncertain", that statement must resolve to numbers you can inspect.

Created: January 30, 2026
Author: Claude + Jamey for Eden

This is the KEYSTONE. Without it, everything else is theater.
"""

import math
import sqlite3
import time
from contextlib import closing
from dataclasses import dataclass, asdict
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
from typing import Optional, Dict, List, Tuple

import numpy as np

# ============================================================
# CONFIGURATION
# ============================================================

DB_PATH = Path("/Eden/DATA/eden_internal_state.db")
BASELINE_LATENCY_MS = 150.0  # Calibrate per hardware

# Thresholds (tunable)
ENTROPY_CONFIDENT = 0.3
ENTROPY_UNCERTAIN = 0.6
ATTENTION_FOCUSED = 0.3
RETRIEVAL_ADMIT_IGNORANCE = 0.4
RETRIEVAL_UNCERTAIN = 0.7
CONFLICT_PROTECTIVE = 0.6
STATE_CONFIDENCE_MIN = 0.7

# ============================================================
# DATA STRUCTURES
# ============================================================

class DominantState(Enum):
    NEUTRAL = "neutral"
    ENGAGED = "engaged"
    UNCERTAIN = "uncertain"
    PROTECTIVE = "protective"


@dataclass
class InternalStateSnapshot:
    """Time-sliced physiological snapshot of Eden's cognition."""
    timestamp_utc: str
    token_entropy: float          # uncertainty proxy (0-1)
    attention_dispersion: float   # focus vs scatter (0-1)
    inference_latency_ms: float   # processing load
    retrieval_success: float      # % successful memory fetch (0-1)
    conflict_score: float         # contradiction pressure (0-1)
    dominant_state: str           # neutral / engaged / uncertain / protective
    state_confidence: float       # 0-1 confidence in dominant_state
    notes: Optional[str] = None   # optional diagnostic notes
    id: Optional[int] = None      # DB primary key


@dataclass
class LanguageConstraint:
    """What Eden is allowed to say given current state."""
    can_claim_confident: bool
    can_claim_focused: bool
    can_claim_certain: bool
    must_hedge: bool
    must_admit_ignorance: bool
    allowed_phrases: List[str]
    forbidden_phrases: List[str]


# ============================================================
# DATABASE LAYER
# ============================================================

class InternalStateDB:
    """SQLite storage for internal state snapshots.

    Every operation opens a short-lived connection and explicitly closes
    it. sqlite3's connection context manager only commits/rolls back the
    transaction — it does NOT close the connection — so the original
    code leaked one handle per call until garbage collection; wrapping
    with contextlib.closing fixes that.
    """

    def __init__(self, db_path: Path = DB_PATH):
        """Remember where the DB lives and ensure the schema exists."""
        self.db_path = db_path
        self._init_db()

    # ---- internal helpers ------------------------------------------------

    def _connect(self) -> sqlite3.Connection:
        """Open a connection with name-based row access. Caller must close."""
        conn = sqlite3.connect(self.db_path)
        conn.row_factory = sqlite3.Row
        return conn

    @staticmethod
    def _to_snapshot(row: sqlite3.Row) -> InternalStateSnapshot:
        """Hydrate a DB row into a snapshot (column names == field names)."""
        return InternalStateSnapshot(**dict(row))

    # ---- schema ------------------------------------------------------------

    def _init_db(self):
        """Create tables and indexes if they do not exist."""
        self.db_path.parent.mkdir(parents=True, exist_ok=True)

        with closing(sqlite3.connect(self.db_path)) as conn:
            conn.execute("""
                CREATE TABLE IF NOT EXISTS internal_state_snapshots (
                    id INTEGER PRIMARY KEY AUTOINCREMENT,
                    timestamp_utc TEXT NOT NULL,
                    token_entropy REAL NOT NULL,
                    attention_dispersion REAL NOT NULL,
                    inference_latency_ms REAL NOT NULL,
                    retrieval_success REAL NOT NULL,
                    conflict_score REAL NOT NULL,
                    dominant_state TEXT NOT NULL,
                    state_confidence REAL NOT NULL,
                    notes TEXT
                )
            """)

            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_timestamp 
                ON internal_state_snapshots(timestamp_utc)
            """)

            conn.execute("""
                CREATE INDEX IF NOT EXISTS idx_state 
                ON internal_state_snapshots(dominant_state)
            """)

            conn.commit()

    # ---- writes ------------------------------------------------------------

    def store(self, snapshot: InternalStateSnapshot) -> int:
        """Insert a snapshot and return its new primary key."""
        with closing(self._connect()) as conn:
            cursor = conn.execute("""
                INSERT INTO internal_state_snapshots 
                (timestamp_utc, token_entropy, attention_dispersion, 
                 inference_latency_ms, retrieval_success, conflict_score,
                 dominant_state, state_confidence, notes)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)
            """, (
                snapshot.timestamp_utc,
                snapshot.token_entropy,
                snapshot.attention_dispersion,
                snapshot.inference_latency_ms,
                snapshot.retrieval_success,
                snapshot.conflict_score,
                snapshot.dominant_state,
                snapshot.state_confidence,
                snapshot.notes
            ))
            conn.commit()
            return cursor.lastrowid

    # ---- reads -------------------------------------------------------------

    def get_latest(self) -> Optional[InternalStateSnapshot]:
        """Return the most recent snapshot (highest id), or None if empty."""
        with closing(self._connect()) as conn:
            row = conn.execute("""
                SELECT * FROM internal_state_snapshots 
                ORDER BY id DESC LIMIT 1
            """).fetchone()
            return self._to_snapshot(row) if row else None

    def get_by_id(self, snapshot_id: int) -> Optional[InternalStateSnapshot]:
        """Retrieve a specific snapshot by primary key, or None if missing."""
        with closing(self._connect()) as conn:
            row = conn.execute("""
                SELECT * FROM internal_state_snapshots WHERE id = ?
            """, (snapshot_id,)).fetchone()
            return self._to_snapshot(row) if row else None

    def get_range(self, start_utc: str, end_utc: str) -> List[InternalStateSnapshot]:
        """Return snapshots with start_utc <= timestamp <= end_utc, time-ordered."""
        with closing(self._connect()) as conn:
            rows = conn.execute("""
                SELECT * FROM internal_state_snapshots 
                WHERE timestamp_utc BETWEEN ? AND ?
                ORDER BY timestamp_utc
            """, (start_utc, end_utc)).fetchall()
            return [self._to_snapshot(row) for row in rows]

    def query_state_at_time(self, timestamp_utc: str) -> Optional[InternalStateSnapshot]:
        """Return the latest snapshot at or before the given ISO timestamp."""
        with closing(self._connect()) as conn:
            row = conn.execute("""
                SELECT * FROM internal_state_snapshots 
                WHERE timestamp_utc <= ?
                ORDER BY timestamp_utc DESC LIMIT 1
            """, (timestamp_utc,)).fetchone()
            return self._to_snapshot(row) if row else None

# ============================================================
# METRIC CALCULATORS
# ============================================================

class MetricCalculator:
    """Compute internal state metrics from raw inference data.

    All methods are stateless static helpers mapping raw signals
    (logits, attention weights, latency, retrieval counts) onto the
    0-1 scales consumed by StateClassifier.
    """
    
    @staticmethod
    def token_entropy(logits: np.ndarray) -> float:
        """
        Compute normalized Shannon entropy from token logits.
        High entropy = model unsure which token to pick.
        
        H = -Σ p(token) * log₂(p(token))
        Normalized by the maximum possible entropy (uniform distribution) to 0-1.
        
        Returns 0.5 (neutral default) for empty input, consistent with
        token_entropy_from_probs — previously this crashed on np.max([]).
        """
        logits = np.asarray(logits, dtype=float)
        if logits.size == 0:
            return 0.5  # no signal -> neutral default
        
        # Numerically stable softmax to get probabilities
        probs = np.exp(logits - np.max(logits))
        probs = probs / probs.sum()
        
        # Filter zeros to avoid log(0)
        probs = probs[probs > 0]
        
        # Shannon entropy
        entropy = -np.sum(probs * np.log2(probs))
        
        # Normalize by max possible entropy (uniform over ALL logits)
        max_entropy = np.log2(len(logits)) if len(logits) > 1 else 1.0
        normalized = entropy / max_entropy if max_entropy > 0 else 0.0
        
        return float(np.clip(normalized, 0.0, 1.0))
    
    @staticmethod
    def token_entropy_from_probs(probs: List[float]) -> float:
        """Compute normalized entropy directly from a probability list.

        Returns 0.5 (default uncertainty) when no positive probabilities
        are supplied.
        """
        probs = np.asarray(probs, dtype=float)
        probs = probs[probs > 0]  # drop zeros to avoid log(0)
        
        if len(probs) == 0:
            return 0.5  # Default uncertainty
        
        entropy = -np.sum(probs * np.log2(probs))
        # NOTE: normalization uses the count of *positive* entries, so a
        # distribution padded with zeros normalizes differently than one
        # without them (behavior preserved from the original).
        max_entropy = np.log2(len(probs)) if len(probs) > 1 else 1.0
        normalized = entropy / max_entropy if max_entropy > 0 else 0.0
        
        return float(np.clip(normalized, 0.0, 1.0))
    
    @staticmethod
    def attention_dispersion(attention_weights: np.ndarray) -> float:
        """
        Compute dispersion of attention weights using the Gini coefficient.
        Low dispersion = focused; high dispersion = scattered.
        Returns 0.5 (neutral default) for empty or all-zero input.
        """
        weights = np.abs(np.asarray(attention_weights).flatten())
        
        if len(weights) == 0 or weights.sum() == 0:
            return 0.5  # Default
        
        # Normalize to a probability distribution
        weights = weights / weights.sum()
        
        # Gini coefficient: ~1 when mass concentrates on few positions,
        # ~0 when uniform. (Removed an unused cumsum left in the original.)
        sorted_weights = np.sort(weights)
        n = len(sorted_weights)
        gini = (2 * np.sum((np.arange(1, n+1) * sorted_weights))) / (n * np.sum(sorted_weights)) - (n + 1) / n
        
        # Invert: concentrated (high Gini) -> low dispersion (focused)
        dispersion = 1.0 - abs(gini)
        
        return float(np.clip(dispersion, 0.0, 1.0))
    
    @staticmethod
    def inference_load(latency_ms: float, baseline_ms: Optional[float] = None) -> float:
        """
        Compute cognitive load from inference latency.
        
        Maps the relative deviation from baseline through a logistic
        squash: exactly 0.5 at baseline, tending to 0 when faster and to
        1 when slower. (The old docstring claimed a capped ratio, which
        the code never did.)
        
        baseline_ms defaults to BASELINE_LATENCY_MS, now resolved at call
        time so runtime recalibration of the module constant takes effect.
        """
        if baseline_ms is None:
            baseline_ms = BASELINE_LATENCY_MS
        if baseline_ms <= 0:
            return 0.5  # cannot compare against a non-positive baseline
        
        ratio = (latency_ms - baseline_ms) / baseline_ms
        # Logistic mapping to (0, 1)
        load = 1.0 / (1.0 + math.exp(-ratio))
        
        return float(np.clip(load, 0.0, 1.0))
    
    @staticmethod
    def retrieval_success_rate(hits: int, attempts: int) -> float:
        """Fraction of successful retrievals; 1.0 when nothing was
        attempted (no retrieval needed counts as success)."""
        if attempts == 0:
            return 1.0
        return float(hits / attempts)


# ============================================================
# STATE CLASSIFIER
# ============================================================

class StateClassifier:
    """Deterministic dominant state resolution. NOT emotional roleplay."""
    
    @staticmethod
    def classify(
        token_entropy: float,
        attention_dispersion: float,
        inference_latency_ms: float,
        retrieval_success: float,
        conflict_score: float,
        baseline_latency_ms: float = BASELINE_LATENCY_MS
    ) -> Tuple[DominantState, float]:
        """
        Resolve the dominant state from the metric vector, in strict
        priority order: protective > uncertain > engaged > neutral.

        Returns: (state, confidence), where confidence grows with how
        far past its threshold the winning signal landed.
        """
        # Priority 1: conflict pressure trumps everything.
        if conflict_score > CONFLICT_PROTECTIVE:
            overshoot = (conflict_score - CONFLICT_PROTECTIVE) / (1.0 - CONFLICT_PROTECTIVE)
            return DominantState.PROTECTIVE, min(0.5 + overshoot * 0.5, 1.0)
        
        # Priority 2: uncertainty, driven by high entropy and/or weak retrieval.
        signals: List[float] = []
        if token_entropy > ENTROPY_UNCERTAIN:
            signals.append(
                (token_entropy - ENTROPY_UNCERTAIN) / (1.0 - ENTROPY_UNCERTAIN)
            )
        if retrieval_success < RETRIEVAL_ADMIT_IGNORANCE:
            signals.append(
                (RETRIEVAL_ADMIT_IGNORANCE - retrieval_success) / RETRIEVAL_ADMIT_IGNORANCE
            )
        if signals:
            strength = sum(signals) / len(signals)
            return DominantState.UNCERTAIN, min(0.5 + strength * 0.5, 1.0)
        
        # Priority 3: engaged requires BOTH tight focus and sub-1.2x-baseline speed.
        if attention_dispersion < ATTENTION_FOCUSED and inference_latency_ms < baseline_latency_ms * 1.2:
            headroom = (ATTENTION_FOCUSED - attention_dispersion) / ATTENTION_FOCUSED
            return DominantState.ENGAGED, min(0.6 + headroom * 0.4, 1.0)
        
        # Nothing fired: neutral with a fixed moderate confidence.
        return DominantState.NEUTRAL, 0.7


# ============================================================
# LANGUAGE CONSTRAINT ENFORCER
# ============================================================

class LanguageConstraintEnforcer:
    """
    Determines what Eden is ALLOWED to say given current state.
    No exceptions. No narrative override.
    """
    
    @staticmethod
    def get_constraints(snapshot: InternalStateSnapshot) -> LanguageConstraint:
        """Derive the language constraints implied by a state snapshot."""
        
        # Capability flags, each computed straight from the raw metrics.
        can_confident = (
            snapshot.token_entropy < ENTROPY_CONFIDENT
            and snapshot.retrieval_success > RETRIEVAL_UNCERTAIN
        )
        can_focused = snapshot.attention_dispersion < ATTENTION_FOCUSED
        can_certain = (
            snapshot.token_entropy < ENTROPY_UNCERTAIN
            and snapshot.retrieval_success > RETRIEVAL_UNCERTAIN
            and snapshot.conflict_score < 0.3
        )
        must_hedge = (
            snapshot.token_entropy > ENTROPY_CONFIDENT
            or snapshot.retrieval_success < RETRIEVAL_UNCERTAIN
        )
        must_admit_ignorance = snapshot.retrieval_success < RETRIEVAL_ADMIT_IGNORANCE
        
        # Assemble the whitelist / blacklist in a fixed, deterministic order.
        allowed: List[str] = []
        forbidden: List[str] = []
        
        if can_confident:
            allowed += ["I'm confident", "I'm certain", "I know"]
        else:
            forbidden += ["I'm confident", "I'm certain", "I know for sure"]
            allowed += ["I think", "It seems", "This may be"]
        
        if can_focused:
            allowed += ["I'm focused"]
        else:
            forbidden += ["I'm fully focused"]
        
        if must_admit_ignorance:
            allowed += ["I don't know", "I'm not sure", "I can't recall"]
            forbidden += ["I remember", "I know", "Clearly"]
        
        if snapshot.dominant_state == DominantState.UNCERTAIN.value:
            allowed += ["I'm uncertain", "I'm unsure", "I may be mistaken"]
        
        if snapshot.dominant_state == DominantState.PROTECTIVE.value:
            allowed += ["I need to be careful", "This conflicts with"]
        
        return LanguageConstraint(
            can_claim_confident=can_confident,
            can_claim_focused=can_focused,
            can_claim_certain=can_certain,
            must_hedge=must_hedge,
            must_admit_ignorance=must_admit_ignorance,
            allowed_phrases=allowed,
            forbidden_phrases=forbidden
        )
    
    @staticmethod
    def validate_response(response: str, constraints: LanguageConstraint) -> Tuple[bool, List[str]]:
        """
        Check if response violates constraints.
        Returns: (is_valid, list_of_violations)
        """
        text = response.lower()
        
        # Any blacklisted phrase appearing verbatim (case-insensitive) is a violation.
        violations = [
            f"Forbidden phrase used: '{phrase}'"
            for phrase in constraints.forbidden_phrases
            if phrase.lower() in text
        ]
        
        if constraints.must_admit_ignorance:
            admissions = ("i don't know", "i'm not sure", "i can't recall", "i'm uncertain")
            claims = ("i know", "i remember", "i'm certain", "clearly")
            admitted = any(phrase in text for phrase in admissions)
            made_claim = any(phrase in text for phrase in claims)
            # A confident claim with no admission anywhere in the text fails.
            if made_claim and not admitted:
                violations.append("Must admit ignorance but made confident claim")
        
        return len(violations) == 0, violations


# ============================================================
# MAIN STATE SAMPLER (Integration Point)
# ============================================================

class EdenInternalStateSampler:
    """
    Main integration class. Runs once per response, before text is finalized.

    Lifecycle: start() -> record_*() during inference -> finalize() ->
    get_language_constraints() / validate_response().

    Usage:
        sampler = EdenInternalStateSampler()
        sampler.start()

        # During inference, collect metrics
        sampler.record_entropy(logits)
        sampler.record_attention(weights)
        sampler.record_retrieval(success)   # once per retrieval attempt
        sampler.set_conflict_score(score)   # from contradiction detection

        # Before generating response (latency is measured from start())
        snapshot = sampler.finalize()
        constraints = sampler.get_language_constraints()

        # After response generated
        is_valid, violations = sampler.validate_response(response_text)
    """
    
    def __init__(self, db_path: Path = DB_PATH):
        # Collaborators: persistent store plus three stateless helpers.
        self.db = InternalStateDB(db_path)
        self.calculator = MetricCalculator()
        self.classifier = StateClassifier()
        self.enforcer = LanguageConstraintEnforcer()
        
        # Accumulators filled by record_*() between start() and finalize()
        self._entropy_samples: List[float] = []          # per-step normalized entropies
        self._attention_samples: List[np.ndarray] = []   # raw weights; dispersions computed in finalize()
        self._retrieval_hits: int = 0
        self._retrieval_attempts: int = 0
        self._start_time: Optional[float] = None         # set by start(); basis for latency
        self._conflict_score: float = 0.0
        self._notes: List[str] = []
        
        # Results of the most recent finalize()
        self._current_snapshot: Optional[InternalStateSnapshot] = None
        self._current_constraints: Optional[LanguageConstraint] = None
    
    def start(self):
        """Call at start of inference. Resets all accumulators and results."""
        self._entropy_samples = []
        self._attention_samples = []
        self._retrieval_hits = 0
        self._retrieval_attempts = 0
        self._start_time = time.time()
        self._conflict_score = 0.0
        self._notes = []
        self._current_snapshot = None
        self._current_constraints = None
    
    def record_entropy(self, logits_or_probs: np.ndarray, is_probs: bool = False):
        """Record one token distribution for entropy calculation.

        Pass is_probs=True when the array already holds probabilities
        rather than raw logits; the entropy is computed immediately and
        only the scalar is retained.
        """
        if is_probs:
            entropy = self.calculator.token_entropy_from_probs(logits_or_probs.tolist())
        else:
            entropy = self.calculator.token_entropy(logits_or_probs)
        self._entropy_samples.append(entropy)
    
    def record_attention(self, attention_weights: np.ndarray):
        """Record one set of attention weights (kept raw until finalize())."""
        self._attention_samples.append(attention_weights)
    
    def record_retrieval(self, success: bool):
        """Record a single retrieval attempt and whether it hit."""
        self._retrieval_attempts += 1
        if success:
            self._retrieval_hits += 1
    
    def set_conflict_score(self, score: float):
        """Set conflict score from contradiction detection system (clipped to 0-1)."""
        self._conflict_score = np.clip(score, 0.0, 1.0)
    
    def add_note(self, note: str):
        """Add diagnostic note ("; "-joined into the snapshot's notes field)."""
        self._notes.append(note)
    
    def finalize(self) -> InternalStateSnapshot:
        """
        Compute final state snapshot and store to DB.
        Call this BEFORE generating response text.

        Missing signals fall back to neutral defaults: entropy/dispersion
        0.5, latency BASELINE_LATENCY_MS. Also computes and caches the
        language constraints for the new snapshot.
        """
        # Compute metrics (mean over all samples collected this cycle)
        token_entropy = (
            np.mean(self._entropy_samples) 
            if self._entropy_samples 
            else 0.5
        )
        
        attention_dispersion = 0.5
        if self._attention_samples:
            dispersions = [
                self.calculator.attention_dispersion(w) 
                for w in self._attention_samples
            ]
            attention_dispersion = np.mean(dispersions)
        
        # Wall-clock time since start(); baseline if start() was never called
        latency_ms = (
            (time.time() - self._start_time) * 1000 
            if self._start_time 
            else BASELINE_LATENCY_MS
        )
        
        retrieval_success = self.calculator.retrieval_success_rate(
            self._retrieval_hits, 
            self._retrieval_attempts
        )
        
        # Classify state (deterministic thresholds, see StateClassifier)
        state, confidence = self.classifier.classify(
            token_entropy=token_entropy,
            attention_dispersion=attention_dispersion,
            inference_latency_ms=latency_ms,
            retrieval_success=retrieval_success,
            conflict_score=self._conflict_score
        )
        
        # Build snapshot
        self._current_snapshot = InternalStateSnapshot(
            timestamp_utc=datetime.now(timezone.utc).isoformat(),
            token_entropy=token_entropy,
            attention_dispersion=attention_dispersion,
            inference_latency_ms=latency_ms,
            retrieval_success=retrieval_success,
            conflict_score=self._conflict_score,
            dominant_state=state.value,
            state_confidence=confidence,
            notes="; ".join(self._notes) if self._notes else None
        )
        
        # Store and backfill the DB-assigned primary key
        snapshot_id = self.db.store(self._current_snapshot)
        self._current_snapshot.id = snapshot_id
        
        # Compute constraints
        self._current_constraints = self.enforcer.get_constraints(self._current_snapshot)
        
        return self._current_snapshot
    
    def get_language_constraints(self) -> LanguageConstraint:
        """Get current language constraints. Call after finalize()."""
        if self._current_constraints is None:
            raise RuntimeError("Must call finalize() before get_language_constraints()")
        return self._current_constraints
    
    def validate_response(self, response: str) -> Tuple[bool, List[str]]:
        """Validate response against constraints. Call after finalize()."""
        if self._current_constraints is None:
            raise RuntimeError("Must call finalize() before validate_response()")
        return self.enforcer.validate_response(response, self._current_constraints)
    
    def get_state_report(self) -> str:
        """Get human-readable state report (box-drawn summary of the last snapshot)."""
        if self._current_snapshot is None:
            return "No state snapshot available. Call finalize() first."
        
        # Both set together in finalize(), so c is non-None here.
        s = self._current_snapshot
        c = self._current_constraints
        
        return f"""
╔═══════════════════════════════════════════════════════════╗
║              EDEN INTERNAL STATE REPORT                   ║
╠═══════════════════════════════════════════════════════════╣
║  Timestamp:        {s.timestamp_utc[:19]}              ║
║  Snapshot ID:      {s.id or 'N/A':<10}                          ║
╠═══════════════════════════════════════════════════════════╣
║  METRICS                                                  ║
║  ├─ Token Entropy:      {s.token_entropy:.3f}  {'(uncertain)' if s.token_entropy > 0.6 else '(confident)' if s.token_entropy < 0.3 else '(moderate)'}          ║
║  ├─ Attention Dispers:  {s.attention_dispersion:.3f}  {'(scattered)' if s.attention_dispersion > 0.7 else '(focused)' if s.attention_dispersion < 0.3 else '(normal)'}          ║
║  ├─ Inference Latency:  {s.inference_latency_ms:.1f}ms                         ║
║  ├─ Retrieval Success:  {s.retrieval_success:.1%}                            ║
║  └─ Conflict Score:     {s.conflict_score:.3f}                            ║
╠═══════════════════════════════════════════════════════════╣
║  STATE                                                    ║
║  ├─ Dominant:           {s.dominant_state.upper():<12}                    ║
║  └─ Confidence:         {s.state_confidence:.1%}                            ║
╠═══════════════════════════════════════════════════════════╣
║  LANGUAGE CONSTRAINTS                                     ║
║  ├─ Can claim confident: {str(c.can_claim_confident):<5}                       ║
║  ├─ Can claim focused:   {str(c.can_claim_focused):<5}                       ║
║  ├─ Must hedge:          {str(c.must_hedge):<5}                       ║
║  └─ Must admit ignorance:{str(c.must_admit_ignorance):<5}                       ║
╚═══════════════════════════════════════════════════════════╝
"""


# ============================================================
# QUERY INTERFACE (For Auditability)
# ============================================================

class StateQueryInterface:
    """
    Answer questions like:
    - "Why did you say you were unsure?"
    - "What state were you in when you said X?"
    - "Show me your confidence at that moment."
    
    By QUERYING THE DB, not inventing a story.
    """
    
    def __init__(self, db_path: Path = DB_PATH):
        # Every answer is read straight out of the snapshot store.
        self.db = InternalStateDB(db_path)
    
    def why_was_i(self, state: str, snapshot_id: Optional[int] = None) -> str:
        """Explain, from recorded metrics, why Eden was in `state`.

        Uses the identified snapshot when an id is supplied, otherwise
        the most recent one.
        """
        snapshot = self.db.get_by_id(snapshot_id) if snapshot_id else self.db.get_latest()
        
        if snapshot is None:
            return "No state data available."
        
        if snapshot.dominant_state != state:
            return f"At snapshot {snapshot.id}, I was not '{state}'. I was '{snapshot.dominant_state}'."
        
        reasons: List[str] = []
        
        if state == "uncertain":
            # Report whichever uncertainty signal(s) actually crossed threshold.
            if snapshot.token_entropy > ENTROPY_UNCERTAIN:
                reasons.append(f"Token entropy was {snapshot.token_entropy:.3f} (above {ENTROPY_UNCERTAIN} threshold)")
            if snapshot.retrieval_success < RETRIEVAL_ADMIT_IGNORANCE:
                reasons.append(f"Retrieval success was {snapshot.retrieval_success:.1%} (below {RETRIEVAL_ADMIT_IGNORANCE:.0%} threshold)")
        elif state == "protective":
            reasons.append(f"Conflict score was {snapshot.conflict_score:.3f} (above {CONFLICT_PROTECTIVE} threshold)")
        elif state == "engaged":
            reasons.append(f"Attention dispersion was {snapshot.attention_dispersion:.3f} (below {ATTENTION_FOCUSED} threshold)")
            reasons.append(f"Inference was fast at {snapshot.inference_latency_ms:.1f}ms")
        elif state == "neutral":
            reasons.append("No strong signals in any direction")
        
        if not reasons:
            reasons.append("State classification logic applied standard thresholds")
        
        bullet_list = "\n- ".join(reasons)
        return f"At snapshot {snapshot.id}, I was '{state}' because:\n- " + bullet_list
    
    def state_at_time(self, timestamp_utc: str) -> str:
        """Report the recorded state at (or just before) a given time."""
        snapshot = self.db.query_state_at_time(timestamp_utc)
        
        if snapshot is None:
            return f"No state data found for {timestamp_utc}"
        
        return f"""At {snapshot.timestamp_utc}:
- State: {snapshot.dominant_state} (confidence: {snapshot.state_confidence:.1%})
- Entropy: {snapshot.token_entropy:.3f}
- Retrieval: {snapshot.retrieval_success:.1%}
- Conflict: {snapshot.conflict_score:.3f}"""
    
    def confidence_at(self, snapshot_id: int) -> str:
        """Report the stored confidence level for one snapshot by id."""
        snapshot = self.db.get_by_id(snapshot_id)
        
        if snapshot is None:
            return f"No snapshot found with ID {snapshot_id}"
        
        return f"At snapshot {snapshot_id}, my state confidence was {snapshot.state_confidence:.1%} for state '{snapshot.dominant_state}'"


# ============================================================
# TEST / DEMO
# ============================================================

if __name__ == "__main__":
    # Live demo: drive one simulated inference cycle through the whole
    # pipeline (sample -> classify -> constrain -> validate -> query).
    print("╔═══════════════════════════════════════════════════════════╗")
    print("║       EDEN INTERNAL STATE SYSTEM - LIVE TEST              ║")
    print("╚═══════════════════════════════════════════════════════════╝")
    print()
    
    demo_sampler = EdenInternalStateSampler()
    
    print("🔬 Simulating inference cycle...")
    demo_sampler.start()
    
    # High-entropy (uncertain) token distribution
    demo_sampler.record_entropy(np.random.randn(32000))
    
    # Scattered attention pattern
    demo_sampler.record_attention(np.random.rand(512))
    
    # A mix of retrieval hits and misses
    for was_hit in (True, False, True):
        demo_sampler.record_retrieval(was_hit)
    
    # No contradiction pressure
    demo_sampler.set_conflict_score(0.1)
    
    demo_sampler.add_note("Test simulation")
    
    # Let some wall-clock time pass so latency is non-trivial
    time.sleep(0.1)
    demo_snapshot = demo_sampler.finalize()
    
    print(demo_sampler.get_state_report())
    
    print("🔬 Testing language constraint validation...")
    
    sample_replies = (
        "I'm absolutely certain this is correct.",
        "I think this might be the answer, but I'm not sure.",
        "I don't know the answer to that.",
        "I'm fully focused on this task.",
    )
    
    active_constraints = demo_sampler.get_language_constraints()
    
    for reply in sample_replies:
        reply_ok, problems = demo_sampler.validate_response(reply)
        verdict = "✅ VALID" if reply_ok else "❌ INVALID"
        print(f"\n{verdict}: \"{reply[:50]}...\"")
        for problem in problems:
            print(f"   └─ {problem}")
    
    print("\n" + "=" * 60)
    print("🔬 Testing query interface...")
    
    explainer = StateQueryInterface()
    print(explainer.why_was_i(demo_snapshot.dominant_state, demo_snapshot.id))
    
    print("\n✅ Internal State System operational!")
