#!/usr/bin/env python3
"""
Meta-Learning V4 - Simplified proven approach
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np

class SimpleMetaNet(nn.Module):
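    """Small MLP: 10-d input -> 128 -> 128 -> 2-class logits."""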
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(10, 128)
        self.fc2 = nn.Linear(128, 128)
        self.fc3 = nn.Linear(128, 2)
    
    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        return self.fc3(x)

class MetaLearnerV4:
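    """Meta-trains SimpleMetaNet so a few SGD steps adapt it to a new task."""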
    def __init__(self):
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        print("Initializing meta-learner V4...")
        self.model = SimpleMetaNet().to(self.device)
        print("✅ Meta-learning V4 ready!")
    
    def create_task(self):
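        """Sample a fresh task: 60 points in R^10 labeled by the sign of a
        random unit-norm hyperplane (callers split 40 support / 20 query)."""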
        weight = torch.randn(10, device=self.device)
        weight = weight / weight.norm()
        data = torch.randn(60, 10, device=self.device)
        labels = ((data @ weight) > 0).long()
        return data, labels
    
    def meta_train(self, n_meta_epochs=200):
        """First-order MAML: adapt a clone per task, then step the meta-model
        with the clones' accumulated query-loss gradients."""
        meta_opt = torch.optim.Adam(self.model.parameters(), lr=0.0005)
        print(f"\nMeta-training for {n_meta_epochs} epochs...")
        
        for epoch in range(n_meta_epochs):
            epoch_loss = 0.0
            meta_opt.zero_grad()
            for _ in range(20):
                data, labels = self.create_task()
                support_x, support_y = data[:40], labels[:40]
                query_x, query_y = data[40:], labels[40:]
                
                # Clone the meta-model; inner-loop steps must not touch it.
                adapted = SimpleMetaNet().to(self.device)
                adapted.load_state_dict(self.model.state_dict())
                inner_opt = torch.optim.SGD(adapted.parameters(), lr=0.05)
                
                # Inner loop: adapt the clone on the support set.
                for _ in range(5):
                    pred = adapted(support_x)
                    loss = F.cross_entropy(pred, support_y)
                    inner_opt.zero_grad()
                    loss.backward()
                    inner_opt.step()
                
                # Outer loss: the adapted clone's error on the query set.
                pred_query = adapted(query_x)
                meta_loss = F.cross_entropy(pred_query, query_y)
                
                # load_state_dict copies values, not graph edges, so backprop
                # through the clone never reaches self.model. First-order MAML
                # fix: compute the clone's query gradients, then accumulate
                # them onto the meta-parameters by hand.
                inner_opt.zero_grad()
                meta_loss.backward()
                with torch.no_grad():
                    for meta_p, fast_p in zip(self.model.parameters(),
                                              adapted.parameters()):
                        if meta_p.grad is None:
                            meta_p.grad = torch.zeros_like(meta_p)
                        meta_p.grad += fast_p.grad
                
                epoch_loss += meta_loss.item()
            
            meta_opt.step()
            
            if (epoch + 1) % 40 == 0:
                print(f"Epoch {epoch+1}: Loss: {epoch_loss:.4f}")
        
        print("✅ Meta-training complete")
    
    def test_adaptation(self, n_tests=10):
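        """Clone the meta-model, adapt it to each new task with 10 SGD steps
        on the support set, and return the query-set accuracies."""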
        accs = []
        for _ in range(n_tests):
            data, labels = self.create_task()
            support_x, support_y = data[:40], labels[:40]
            query_x, query_y = data[40:], labels[40:]
            
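            # Fresh clone initialized from the meta-learned weights.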
            adapted = SimpleMetaNet().to(self.device)
            adapted.load_state_dict(self.model.state_dict())
            opt = torch.optim.SGD(adapted.parameters(), lr=0.05)
            
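            # Few-shot adaptation: 10 SGD steps on the support set.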
            for _ in range(10):
                pred = adapted(support_x)
                loss = F.cross_entropy(pred, support_y)
                opt.zero_grad()
                loss.backward()
                opt.step()
            
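            # Evaluate the adapted clone on the held-out query points.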
            with torch.no_grad():
                pred = adapted(query_x)
                acc = (pred.argmax(dim=1) == query_y).float().mean().item()
                accs.append(acc)
        
        return accs

def test_meta_v4():
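    """Meta-train, then check few-shot adaptation on 10 unseen tasks."""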
    print("\n" + "="*70)
    print("TESTING META-LEARNING V4")
    print("="*70)
    
    ml = MetaLearnerV4()
    ml.meta_train(n_meta_epochs=200)
    
    print("\nTesting on 10 new tasks...")
    accs = ml.test_adaptation(n_tests=10)
    
    for i, acc in enumerate(accs, 1):
        print(f"Task {i}: {acc*100:.1f}%")
    
    avg = np.mean(accs)
    print(f"\nAverage: {avg*100:.1f}%")
    
    if avg >= 0.80:
        print("✅ EXCELLENT - Meta-learning working!")
        return True
    elif avg >= 0.70:
        print("✅ GOOD - Strong adaptation!")
        return True
    else:
        print("⚠️ Partial success")
        return False

def main():
    if test_meta_v4():
        print("\n✅ CAPABILITY #13 COMPLETE (V4)")

if __name__ == "__main__":
    main()
