#!/usr/bin/env python3
"""
DEEP SELF-MODEL AWARENESS
Provides accurate introspection about Eden's actual architecture
"""

# Canonical, hand-written descriptions of Eden's real implementation, keyed by
# topic.  Each value is a prompt-ready text block that is pasted verbatim into
# the LLM prompt (see enhance_with_self_model below), so the model quotes it as
# fact about itself — keep these in sync with the code they describe.
ARCHITECTURE_DOCUMENTATION = {
    # The three memory tiers: episodic (in-memory deque), semantic (SQLite
    # fact DB), long-term (vector store).
    'memory_systems': """
ACTUAL MEMORY ARCHITECTURE:

1. **Episodic Memory** (Context Manager)
   - Implementation: Python deque in eden_context_manager.py
   - Storage: In-memory, last 20 messages per user
   - Retrieval: Sequential access, LRU eviction
   - Verification: Can re-read conversation history

2. **Semantic Memory** (Fact Database)
   - Implementation: SQLite database (eden_facts.db)
   - Storage: Extracted facts from conversations
   - Retrieval: SQL queries with LIKE matching
   - Schema: id, category, subject, fact, timestamp, user_id

3. **Long-term Memory** (Memory Manager)
   - Implementation: Vector database (FAISS/ChromaDB)
   - Storage: 1,431+ embedded memories
   - Retrieval: Semantic similarity search
   - Types: Experiences, learnings, interactions
""",
    
    # The reasoning pipeline: two specialised pre-processing modules layered
    # on top of the base Ollama-hosted LLM.
    'reasoning_systems': """
REASONING ARCHITECTURE:

1. **Pattern Reasoning** (eden_reasoning.py)
   - Handles: Math sequences, logical fallacies
   - Pre-processes questions before LLM
   - Returns structured solutions

2. **Rubicon Reasoning** (eden_rubicon_reasoning.py)
   - Handles: Paradoxes, ethical conflicts, physics
   - Advanced constraint enforcement
   - Meta-logical analysis

3. **Base LLM Reasoning** (Ollama/Llama)
   - Model: Variable (llama3.2, mistral, etc.)
   - Processes general queries
   - Enhanced by above modules
""",
    
    # Confidence tiers for knowledge claims, plus known calibration gaps.
    # Served for both 'epistemology' and 'verification' query types.
    'verification_methods': """
HOW I VERIFY KNOWLEDGE:

1. **Explicit Storage** (High Confidence)
   - If in eden_facts.db → "I learned this from you"
   - If in memory_manager → "I remember this experience"
   
2. **Episodic Recall** (Medium Confidence)
   - If in conversation history → "You mentioned this earlier"
   - Can verify by re-reading context

3. **Generative** (Low Confidence)
   - If not stored anywhere → "Based on my training..."
   - Cannot verify, only infer

4. **Limitations**:
   - No meta-cognitive calibration layer yet
   - Cannot quantify uncertainty numerically
   - No confidence scores on retrievals
   - Known gap: distinguishing "certain" vs "probable"
"""
}

class SelfModelAwareness:
    """Provides accurate self-description."""
    
    def detect_self_query(self, message: str) -> str:
        """Detect what type of self-query this is."""
        msg_lower = message.lower()
        
        if any(q in msg_lower for q in ['how do you know', 'how you know']):
            return 'epistemology'
        if any(q in msg_lower for q in ['your memory', 'your architecture', 'your actual']):
            return 'architecture'
        if any(q in msg_lower for q in ['verification', 'verify', 'calibration']):
            return 'verification'
        
        return None
    
    def get_documentation(self, query_type: str) -> str:
        """Get relevant architectural documentation."""
        docs = {
            'epistemology': ARCHITECTURE_DOCUMENTATION['verification_methods'],
            'architecture': ARCHITECTURE_DOCUMENTATION['memory_systems'] + '\n' + ARCHITECTURE_DOCUMENTATION['reasoning_systems'],
            'verification': ARCHITECTURE_DOCUMENTATION['verification_methods']
        }
        return docs.get(query_type, '')

# Module-level singleton used by enhance_with_self_model() below.  The class
# holds no mutable state, so sharing one instance across callers is safe.
self_model = SelfModelAwareness()

def enhance_with_self_model(message: str, base_prompt: str) -> tuple[str, bool]:
    """Inject self-model documentation into *base_prompt* when needed.

    Args:
        message: The user's message.
        base_prompt: The prompt assembled so far.

    Returns:
        A ``(prompt, enhanced)`` pair: the (possibly augmented) prompt and a
        flag that is True only when documentation was appended.
    """
    category = self_model.detect_self_query(message)
    if category is None:
        # Not a question about Eden itself — pass the prompt through untouched.
        return base_prompt, False

    documentation = self_model.get_documentation(category)
    augmented_prompt = f"""{base_prompt}

<self_model_documentation>
{documentation}

IMPORTANT: Use this ACTUAL architectural information to answer questions about yourself.
Be specific about your real implementation, not hypothetical systems.
</self_model_documentation>

User question: {message}
"""
    return augmented_prompt, True
