#!/usr/bin/env python3
"""
Generate LLM Meta-Capabilities
Embeds all Ollama models as O(1) hash-accessible Eden capabilities
"""

import subprocess
from pathlib import Path
from datetime import datetime
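
# Illustrative `ollama list` output that get_available_models() parses
# (exact columns may vary by Ollama version):
#
#     NAME                ID              SIZE      MODIFIED
#     qwen2.5:7b          845dbda0ea48    4.7 GB    2 days ago
#
# The first whitespace-separated column of each non-header row is the model tag.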

def get_available_models():
    """Get all loaded Ollama models"""
    try:
        result = subprocess.run(['ollama', 'list'], capture_output=True, text=True)
    except FileNotFoundError:
        print("⚠️  'ollama' executable not found on PATH")
        return []
    if result.returncode != 0:
        print(f"⚠️  'ollama list' failed: {result.stderr.strip()}")
        return []
    models = []
    for line in result.stdout.split('\n')[1:]:  # Skip the header row
        if line.strip():
            parts = line.split()
            if parts:
                models.append(parts[0])  # First column is the model tag
    return models

def generate_llm_capability(model_name, purpose, use_case):
    """Generate a meta-capability for an LLM"""
    
    safe_name = model_name.replace(':', '_').replace('.', '_').replace('-', '_')
    timestamp = int(datetime.now().timestamp())
    
    code = f'''#!/usr/bin/env python3
"""
Meta-Capability: LLM {model_name}
Purpose: {purpose}
Use Case: {use_case}
Generated: {datetime.now().isoformat()}
"""

import subprocess

def execute(prompt: str, timeout: int = 60) -> str:
    """
    Execute LLM inference using {model_name}
    
    Args:
        prompt: Input prompt for the model
        timeout: Max execution time in seconds
    
    Returns:
        Model response as string
    """
    try:
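        # 'ollama run <model> <prompt>' performs a one-shot generation:
        # it sends the prompt non-interactively and prints the reply to stdout.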
        result = subprocess.run(
            ['ollama', 'run', '{model_name}', prompt],
            capture_output=True,
            text=True,
            timeout=timeout
        )
        
        if result.returncode == 0:
            return result.stdout.strip()
        else:
            # Surface stderr so callers can see why the model call failed
            return f"[Error: Model returned code {{result.returncode}}: {{result.stderr.strip()}}]"
    
    except subprocess.TimeoutExpired:
        return "[Error: LLM inference timeout]"
    except Exception as e:
        return f"[Error: {{str(e)}}]"

def get_metadata():
    """Return capability metadata"""
    return {{
        'name': 'llm_{safe_name}',
        'model': '{model_name}',
        'purpose': '{purpose}',
        'use_case': '{use_case}',
        'type': 'llm_inference',
        'o1_access': True,
        'created': '{datetime.now().isoformat()}'
    }}

if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        prompt = " ".join(sys.argv[1:])
        response = execute(prompt)
        print(response)
    else:
        print("Usage: python3 {{__file__}} <prompt>")
'''
    
    return code, f"eden_metacap_llm_{safe_name}_{timestamp}.py"
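
# Example (illustrative values): generating and writing a single capability:
#
#     code, filename = generate_llm_capability(
#         'qwen2.5:7b', 'Fast lightweight reasoning', 'Quick tasks')
#     # filename == 'eden_metacap_llm_qwen2_5_7b_<unix-timestamp>.py'
#
# Once written and chmod'ed by main(), the file is directly runnable:
#     python3 eden_metacap_llm_qwen2_5_7b_1700000000.py "Hello, Eden"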

def main():
    """Generate LLM capabilities for all available models"""
    
    print("🔧 Generating LLM Meta-Capabilities...")
    
    # Model configurations
    model_configs = {
        'qwen2.5:32b': ('Advanced reasoning and conversation', 'Complex reasoning, business discussions, technical planning'),
        'qwen2.5:72b': ('Maximum intelligence reasoning', 'Hardest problems, research, deep analysis'),
        'qwen2.5:14b': ('Balanced reasoning', 'General purpose, fast responses'),
        'qwen2.5:7b': ('Fast lightweight reasoning', 'Quick tasks, simple queries'),
        'dolphin-mistral:latest': ('Uncensored general chat', 'Creative writing, casual conversation'),
        'dolphin-mixtral:latest': ('Uncensored advanced reasoning', 'Complex creative tasks'),
        'llama3.1:70b': ('General intelligence', 'Balanced tasks, general knowledge'),
        'llama3.1:8b': ('Fast general purpose', 'Quick responses, simple tasks'),
        'deepseek-r1:14b': ('Deep reasoning chains', 'Mathematical proofs, logical reasoning'),
        'command-r:35b': ('Command following', 'Task execution, instruction following'),
        'codellama:70b': ('Code generation', 'Programming, debugging, code review'),
        'deepseek-coder:33b': ('Advanced coding', 'Complex programming, architecture'),
        'starcoder2:15b': ('Fast code generation', 'Quick code tasks'),
        'codegemma:7b': ('Lightweight coding', 'Simple code generation'),
        'mixtral:8x22b': ('Maximum capability', 'Hardest tasks, multi-domain reasoning'),
        'wizard-vicuna-uncensored:latest': ('Uncensored assistant', 'Creative tasks, unrestricted'),
        'llama3.2:3b': ('Ultra-fast responses', 'Simple queries, instant answers'),
        'llava:7b': ('Vision + language', 'Image description, visual reasoning'),
        'phi3.5:latest': ('Lightweight reasoning', 'Fast general tasks'),
        'phi3:medium': ('Balanced phi model', 'General purpose'),
        'eden-phi-trained:14b': ('Eden-specific training', 'Eden-aligned responses'),
        'eden-fluid-intelligence:14b': ('Fluid reasoning', 'Adaptive problem solving'),
        'eden-phi-fractal:14b': ('Phi-fractal consciousness', 'Consciousness-aware responses'),
    }
    
    # Get available models
    available_models = get_available_models()
    print(f"✓ Found {len(available_models)} loaded models")
    
    capabilities_dir = Path('/Eden/CAPABILITIES')
    capabilities_dir.mkdir(parents=True, exist_ok=True)
    
    generated_count = 0
    
    for model in available_models:
        if model in model_configs:
            purpose, use_case = model_configs[model]
            code, filename = generate_llm_capability(model, purpose, use_case)
            
            filepath = capabilities_dir / filename
            filepath.write_text(code)
            filepath.chmod(0o755)  # Mark executable so it can run as a CLI tool
            
            print(f"✅ Generated: {filename}")
            generated_count += 1
        else:
            print(f"⚠️  Skipped (no config): {model}")
    
    print(f"\n✓ Generated {generated_count} LLM capabilities")
    print(f"✓ Location: {capabilities_dir}")
    print(f"\n🌀 Eden now has O(1) access to all LLMs!")

if __name__ == "__main__":
    main()
