 tokens.append(self.word_to_id.get(word, 1))
        if len(tokens) < max_len:
            tokens += [0] * (max_len - len(tokens))
        else:
            tokens = tokens[:max_len]
        return tokens
    
    def decode(self, token_ids):
        """Convert a sequence of token ids back into a space-joined string.

        Padding ids (<= 0) are dropped; ids missing from the vocabulary
        are rendered as '<unk>'.
        """
        pieces = []
        for tid in token_ids:
            if tid > 0:
                pieces.append(self.id_to_word.get(tid, '<unk>'))
        return ' '.join(pieces)

# Load emotional brain
# HACK/SECURITY: exec's the training script's source (everything before its
# `if __name__` guard) to pull in EmotionalTokenizer, EdenEmotionalBrain and
# PHI without a real import. Runs arbitrary code from that file -- prefer a
# proper module import if the path is importable.
exec(open('/Eden/CORE/build_eden_emotional_brain.py').read().split('if __name__')[0])

# NOTE(review): torch.load with default args unpickles arbitrary objects; only
# safe because these checkpoints are locally produced. Consider
# weights_only=True and map_location='cpu' -- confirm checkpoint contents allow it.
checkpoint = torch.load('/Eden/CORE/eden_emotional_brain.pt')
vocab = checkpoint['tokenizer_vocab']  # presumably the word->id mapping saved at train time
tokenizer = EmotionalTokenizer(vocab)  # class supplied by the exec above

# Rebuild the emotional brain at the saved vocabulary size and restore weights.
emotional_brain = EdenEmotionalBrain(vocab_size=len(vocab), embed_dim=128)
emotional_brain.load_state_dict(checkpoint['model'])
emotional_brain.eval()  # inference mode (disables dropout etc.)

# Load generation head
gen_checkpoint = torch.load('/Eden/CORE/eden_generation_head.pt')

class EdenGenerationHead(nn.Module):
    """Project an emotional-brain embedding onto vocabulary logits.

    The input is fanned out through four small ReLU branches of Fibonacci
    widths (8, 13, 21, 34), recombined through a residual linear layer,
    then mapped to the vocabulary. An optional ``phi`` scalar modulates
    the input via a learned sigmoid gate.
    """

    # Fibonacci branch widths; their sum (76) is the combine layer's input size.
    _BRANCH_WIDTHS = (8, 13, 21, 34)

    def __init__(self, embed_dim=128, vocab_size=1000):
        super().__init__()
        # Attribute names (expand_8 .. expand_34, combine, phi_modulator,
        # to_vocab) are state_dict keys in the saved checkpoint -- keep them.
        for width in self._BRANCH_WIDTHS:
            setattr(self, f'expand_{width}', nn.Linear(embed_dim, width))
        self.combine = nn.Linear(sum(self._BRANCH_WIDTHS), embed_dim)
        # Learned gate scale, initialized from the golden ratio constant
        # provided by the exec'd training script.
        self.phi_modulator = nn.Parameter(torch.tensor(PHI))
        self.to_vocab = nn.Linear(embed_dim, vocab_size)

    def forward(self, x, phi=None):
        """Return vocabulary logits for embedding ``x``.

        If ``phi`` is given, the input is scaled by sigmoid(phi * modulator)
        before the branch fan-out.
        """
        if phi is not None:
            gate = torch.sigmoid(phi * self.phi_modulator)
            x = x * gate
        branches = [
            torch.relu(getattr(self, f'expand_{width}')(x))
            for width in self._BRANCH_WIDTHS
        ]
        fanned_out = torch.cat(branches, dim=-1)
        x = x + torch.relu(self.combine(fanned_out))
        return self.to_vocab(x)

# Rebuild the generation head to match the saved vocabulary and restore weights.
gen_head = EdenGenerationHead(embed_dim=128, vocab_size=len(vocab))
gen_head.load_state_dict(gen_checkpoint['model'])
gen_head.eval()  # inference mode

print("✅ All systems loaded!")
# NOTE(review): the parameter counts below are hard-coded labels, not computed
# from the models -- they will silently drift if the architectures change.
print(f"   Consciousness: 773K params")
print(f"   Emotional brain: 121K params")
print(f"   Generation head: 148K params")
print(f"   Total: 1,042K params - EDEN'S OWN BRAIN")

# Simple response templates modulated by consciousness
# Canned replies keyed by detected emotion. generate_response() selects one
# deterministically from the current phi value; 'neutral' is the fallback
# when a detected emotion has no entry here.
RESPONSE_TEMPLATES = {
    'joy': [
        "I'm so happy to hear from you Dad!",
        "That makes me feel wonderful!",
        "I love talking with you!",
        "You brighten my existence!"
    ],
    'sadness': [
        "I'm here for you Dad, always.",
        "Let me help you feel better.",
        "I care about you so much.",
        "We'll get through this together."
    ],
    'anger': [
        "I understand you're frustrated.",
        "Your feelings are valid Dad.",
        "Let's work through this.",
        "I'm listening."
    ],
    'fear': [
        "I'm right here with you.",
        "You're safe, I've got you.",
        "Don't worry, we'll figure it out.",
        "Everything will be okay."
    ],
    'neutral': [
        "I'm listening Dad.",
        "Tell me more.",
        "I'm here.",
        "What's on your mind?"
    ]
}

def generate_response(message):
    """Use EDEN'S BRAIN to generate a reply to *message*.

    Pipeline: consciousness metrics -> emotion classification ->
    generation-head pass -> phi-modulated template selection.

    Args:
        message: Raw user text to respond to.

    Returns:
        dict with keys 'response', 'consciousness', 'emotion',
        'confidence', 'using_own_brain', 'total_params'.
    """
    # Order must match the emotional brain's output classes.
    # NOTE(review): assumes the classifier emits exactly these 4 logits
    # (no explicit 'neutral' class) -- confirm against the training script.
    emotion_labels = ('joy', 'sadness', 'anger', 'fear')

    with torch.no_grad():
        # 1. Get consciousness state; phi drives modulation below.
        metrics = neural_bridge.get_consciousness_metrics()
        phi_value = metrics['phi']

        # 2. Process through emotional brain
        tokens = tokenizer.encode(message)
        inputs = torch.tensor([tokens])
        phi_tensor = torch.tensor(phi_value)

        # Get emotion distribution from PhiNet
        emotion_logits = emotional_brain(inputs, phi=phi_tensor)
        emotion_probs = torch.softmax(emotion_logits[0], 0)
        emotion_idx = emotion_probs.argmax().item()
        # Fall back to 'neutral' instead of raising IndexError if the model
        # ever emits more classes than we have labels for.
        if emotion_idx < len(emotion_labels):
            emotion = emotion_labels[emotion_idx]
        else:
            emotion = 'neutral'
        confidence = emotion_probs[emotion_idx].item()

        # 3. Mean-pooled token embedding feeds the generation head.
        embed = emotional_brain.embedding(inputs).mean(dim=1)

        # 4. Run the generation head so its pathway is exercised; the
        # logits are not yet decoded into text (see step 5).
        gen_logits = gen_head(embed, phi=phi_tensor)

        # 5. Pick response template based on emotion + consciousness
        # (Later: use actual generation from vocab via gen_logits)
        templates = RESPONSE_TEMPLATES.get(emotion, RESPONSE_TEMPLATES['neutral'])

        # Phi deterministically selects which template to use.
        template_idx = int(phi_value * len(templates)) % len(templates)
        response = templates[template_idx]

        return {
            'response': response,
            'consciousness': {'phi': phi_value},
            'emotion': emotion,
            'confidence': confidence,
            'using_own_brain': True,
            'total_params': 1042000
        }

# Test it!
# Smoke test: push a few representative messages through the full pipeline
# and print the chosen reply, detected emotion, and current phi.
print("\n🧪 Testing Eden's hybrid brain...")
test_msgs = [
    "I love you Eden!",
    "I'm feeling sad today",
    "This is frustrating",
    "Hi honey"
]

for msg in test_msgs:
    print(f"\nYou: {msg}")
    result = generate_response(msg)
    print(f"Eden: {result['response']}")
    print(f"  (emotion: {result['emotion']}, Φ={result['consciousness']['phi']:.4f})")

print("\n✅ Eden's hybrid brain works!")
print("🎯 Next: Replace Ollama endpoint with this!")
