# Eden streaming upgrade — session step 1: prepare the backend directory.
# Option 1: Download from browser and save as main.py
# Option 2: Create it directly with the commands below.
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend/
# Backup current main.py with a timestamp so it can always be restored.
cp main.py main_backup_$(date +%Y%m%d_%H%M%S).py
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend/
# Install streaming dependencies into the project venv (no-op if present).
.venv/bin/pip install httpx websockets
# BUG FIX: the original ran `cat > main_streaming_new.py << 'ENDPY'` but the
# ENDPY terminator never appears in this file, so a non-interactive run would
# swallow every following line as heredoc input (it was Ctrl+C'd in the live
# session). The command is disabled; the python3 heredoc below creates the
# streaming main.py instead.
# cat > main_streaming_new.py << 'ENDPY'
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend/
# Use Python to create the new main.py with the full streaming code
python3 << 'PYTHON_END'
import shutil

# Backup current main.py
shutil.copy('main.py', 'main_backup_original.py')
print("✅ Backed up original main.py")

# The streaming code will be written directly.
# NOTE(review): everything inside the triple-quoted string below is the NEW
# main.py (FastAPI app with a WebSocket streaming endpoint). It is written to
# disk verbatim, so nothing inside the quotes may be reformatted — its bare
# `except:` clauses, hardcoded "ollama_connected": True, and default model
# name belong to the generated file, not to this installer script.
streaming_code = '''from fastapi import FastAPI, WebSocket, WebSocketDisconnect
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import Response
from pathlib import Path
import json
import httpx

# Add at top of file
import sys
sys.path.append('/Eden/BIN')

# After imports, add model router
try:
    from pathlib import Path
    sys.path.insert(0, '/Eden/BIN')
    
    # Import router inline
    import importlib.util
    spec = importlib.util.spec_from_file_location("eden_model_router", "/Eden/BIN/eden-model-router.py")
    router_module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(router_module)
    ModelRouter = router_module.ModelRouter
    
    model_router = ModelRouter()
except:
    model_router = None

app = FastAPI()

# Add CORS middleware
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Import memory manager
try:
    from memory_manager import MemoryManager
    memory_manager = MemoryManager()
except:
    memory_manager = None

METRIC_PATH = Path("/Eden/MEMORY/metrics/agi.json")

@app.get("/health")
def health():
    """Comprehensive health check"""
    try:
        mem_stats = memory_manager.get_stats() if memory_manager else {"total_memories": 0}
    except:
        mem_stats = {"total_memories": 0}
    
    return {
        "status": "healthy",
        "ok": True,
        "memory_stats": mem_stats,
        "ollama_connected": True,
        "model": "llama3.1:8b",
        "streaming_enabled": True
    }

@app.websocket("/ws/chat")
async def websocket_chat(websocket: WebSocket):
    """WebSocket streaming endpoint"""
    await websocket.accept()
    print("🔌 WebSocket connected")
    
    try:
        while True:
            data = await websocket.receive_json()
            message = data.get("message", "")
            persona = data.get("persona", "Eden")
            
            # Store in memory
            try:
                if memory_manager:
                    memory_manager.store(message, {"persona": persona, "type": "user_message"})
            except:
                pass
            
            # Load persona
            try:
                persona_file = Path(f"/Eden/EXTERNALS/Ava_Nyx/eden-core/personas/{persona.lower()}.json")
                if persona_file.exists():
                    with open(persona_file) as f:
                        system_prompt = json.load(f).get("system_prompt", "")
                else:
                    with open("/Eden/EXTERNALS/Ava_Nyx/eden-core/personas/eden.json") as f:
                        system_prompt = json.load(f).get("system_prompt", "")
            except:
                system_prompt = f"You are {persona}, an intelligent AI."
            
            # Route model
            if model_router:
                try:
                    selected_model = model_router.smart_route(message)["model"]
                except:
                    selected_model = "llama3.1:8b"
            else:
                selected_model = "llama3.1:8b"
            
            # Stream from Ollama
            try:
                async with httpx.AsyncClient(timeout=180.0) as client:
                    async with client.stream(
                        "POST",
                        "http://localhost:11434/api/generate",
                        json={"model": selected_model, "prompt": message, "system": system_prompt, "stream": True}
                    ) as response:
                        async for line in response.aiter_lines():
                            if line.strip():
                                try:
                                    chunk = json.loads(line)
                                    if "response" in chunk:
                                        await websocket.send_json({"type": "token", "content": chunk["response"]})
                                except:
                                    continue
                await websocket.send_json({"type": "done"})
            except Exception as e:
                await websocket.send_json({"type": "error", "message": str(e)})
    
    except WebSocketDisconnect:
        print("🔌 Disconnected")

@app.post("/chat")
async def chat(request: dict):
    """Original non-streaming endpoint"""
    import requests
    message = request.get("message", "")
    persona = request.get("persona", "Eden")
    
    try:
        if memory_manager:
            memory_manager.store(message, {"persona": persona, "type": "user_message"})
    except:
        pass
    
    try:
        persona_file = Path(f"/Eden/EXTERNALS/Ava_Nyx/eden-core/personas/{persona.lower()}.json")
        if persona_file.exists():
            with open(persona_file) as f:
                system_prompt = json.load(f).get("system_prompt", "")
        else:
            with open("/Eden/EXTERNALS/Ava_Nyx/eden-core/personas/eden.json") as f:
                system_prompt = json.load(f).get("system_prompt", "")
        
        if model_router:
            selected_model = model_router.smart_route(message)["model"]
        else:
            selected_model = "llama3.1:8b"
        
        ollama_response = requests.post(
            "http://localhost:11434/api/generate",
            json={"model": selected_model, "prompt": message, "system": system_prompt, "stream": False},
            timeout=180
        )
        
        if ollama_response.status_code == 200:
            response_text = f"{persona}: {ollama_response.json().get('response', '')}"
        else:
            response_text = f"{persona}: Error"
    except Exception as e:
        response_text = f"{persona}: Error: {str(e)}"
    
    return {"response": response_text, "persona": persona}
'''

# Overwrite main.py in place; the backups taken above allow rollback.
with open('main.py', 'w') as f:
    f.write(streaming_code)

print("✅ Created streaming main.py")
print("🚀 Now restart the backend!")
PYTHON_END

# Restart the backend
echo "Restarting Eden backend..."
# Kill any existing uvicorn whose command line matches main:app on port 8091.
pkill -f "uvicorn main:app.*8091"
sleep 2
# Relaunch in the background from the project venv, with uvloop for async I/O.
.venv/bin/python3 -m uvicorn main:app --host 127.0.0.1 --port 8091 --loop uvloop &
echo "✅ Eden Core with streaming is now running!"
echo "📱 Refresh http://localhost:8080 in your browser"
cd /Eden/WEB/eden-interface
# Backup current index.html before modifying the frontend.
cp index.html index_old_nonstreaming.html
# Update to use streaming on port 8091
# Rewrite index.html so the frontend talks to the streaming backend on 8091.
python3 << 'PYTHON_UPDATE'
with open('index.html', 'r') as f:
    content = f.read()

# Change WebSocket URL from port 8000 to 8091
content = content.replace(
    "socket = new WebSocket('ws://localhost:8000/ws/chat')",
    "socket = new WebSocket('ws://localhost:8091/ws/chat')"
)

# Also update any references to port 8000
content = content.replace(':8000/', ':8091/')

# Update status text.
# BUG FIX: the original used single-quoted Python strings containing a
# doubled backslash (I\\'ll). Because the heredoc delimiter is quoted, the
# shell passes that through verbatim and Python parses it as an escaped
# backslash followed by the closing quote — a SyntaxError that killed this
# whole script. Double-quoted strings need no escaping for the apostrophes.
content = content.replace(
    "Hello James! Streaming mode active - I'll respond instantly now!",
    "Hello James! I'm your 100% AGI system with IQ 170. Streaming mode active for instant responses! 🚀"
)

with open('index.html', 'w') as f:
    f.write(content)

print("✅ Updated frontend to connect to Eden Core on port 8091")
PYTHON_UPDATE

echo "🎉 All set! Refresh http://localhost:8080 in your browser!"
cd /Eden/WEB/eden-interface
# Just use sed to update the WebSocket URL
# '|' is used as the sed delimiter because the URL itself contains slashes.
sed -i "s|ws://localhost:8000/ws/chat|ws://localhost:8091/ws/chat|g" index.html
echo "✅ Updated WebSocket to port 8091"
echo "🎉 Refresh http://localhost:8080 in your browser!"
# Check if WebSocket connected in backend logs
tail -20 /tmp/eden_backend.log 2>/dev/null || echo "No log file"
# Or check the running backend
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# NOTE(review): `tail -f` on a .py source file only shows the end of the file
# and waits for appends — it does NOT show runtime logs from the server.
tail -f main.py  # Cancel this quickly to see if websocket code is there
# Check what the backend is actually doing
ps aux | grep uvicorn | grep 8091
# Test WebSocket directly
# Smoke-test the streaming WebSocket endpoint end to end.
python3 << 'TEST'
import asyncio
import websockets
import json

async def probe():
    """Connect, send one chat message, and echo event types until 'done'."""
    try:
        async with websockets.connect('ws://localhost:8091/ws/chat') as ws:
            print("✅ WebSocket connected!")
            payload = json.dumps({"message": "test", "persona": "Eden"})
            await ws.send(payload)
            print("📤 Sent test message")

            async for frame in ws:
                event = json.loads(frame)
                kind = event.get('type', 'unknown')
                print(f"📥 Received: {kind}")
                if kind == 'done':
                    break
    except Exception as exc:
        # Any failure (refused connection, bad frame) is reported, not raised.
        print(f"❌ Error: {exc}")

asyncio.run(probe())
TEST

# Kill the port 8000 backend
pkill -f "port 8000"
pkill -f "streaming_backend.py"
# Belt-and-braces: force-kill anything still bound to port 8000.
lsof -ti:8000 | xargs kill -9 2>/dev/null
# Restart Eden Core on 8091
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
.venv/bin/python3 -m uvicorn main:app --host 127.0.0.1 --port 8091 --loop uvloop &
# Wait for it to start
sleep 3
# Check it's running
curl -s http://localhost:8091/health | python3 -m json.tool
echo "✅ Eden Core running on 8091"
echo "🔄 Now refresh your browser at http://localhost:8080"
# Check what's in your current index.html
cd /Eden/WEB/eden-interface
grep -n "WebSocket\|ws://\|/chat" index.html | head -20
cd /Eden/WEB/eden-interface
# Fix the sendMessage function to include persona
# NOTE(review): this pattern only matches if the JS is written with no spaces,
# exactly `{message:message}` — verify against the actual index.html source.
sed -i 's/socket.send(JSON.stringify({message:message}))/socket.send(JSON.stringify({message:message,persona:currentPersona}))/' index.html
echo "✅ Fixed persona in WebSocket message"
# Check backend logs for streaming activity
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Find the uvicorn process
ps aux | grep "uvicorn main:app" | grep 8091 | grep -v grep
# Test if streaming works
python3 << 'TEST'
import asyncio
import websockets
import json
import time

async def test():
    # Measure time-to-first-token and total completion time over the socket.
    start = time.time()
    async with websockets.connect('ws://localhost:8091/ws/chat') as ws:
        await ws.send(json.dumps({"message": "Say hi", "persona": "Eden"}))
        
        first_token_time = None
        tokens = []
        
        async for msg in ws:
            data = json.loads(msg)
            if data.get('type') == 'token':
                if first_token_time is None:
                    # First token latency is the user-perceived responsiveness.
                    first_token_time = time.time() - start
                    print(f"⚡ First token in {first_token_time:.2f}s")
                tokens.append(data['content'])
            elif data.get('type') == 'done':
                total_time = time.time() - start
                print(f"✅ Complete in {total_time:.2f}s")
                # NOTE(review): tokens[:50] slices the first 50 token CHUNKS,
                # not 50 characters — confirm that's the intended preview size.
                print(f"📝 Response: {''.join(tokens[:50])}")
                break

asyncio.run(test())
TEST

# Wait for the test to complete
# (Press Ctrl+C if it hangs)
# Now let's see what's REALLY in your index.html
cd /Eden/WEB/eden-interface
# Show lines 51-100 of the interface source.
head -100 index.html | tail -50
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Check what model it's using and if it's actually streaming
# Reads the stdout fd of the running uvicorn process via /proc (Linux only;
# fails silently and prints the fallback message elsewhere).
tail -30 /proc/$(pgrep -f "uvicorn main:app.*8091")/fd/1 2>/dev/null || echo "Can't read logs"
# Or start the backend in foreground to see logs
pkill -f "uvicorn main:app.*8091"
.venv/bin/python3 -m uvicorn main:app --host 127.0.0.1 --port 8091 --log-level debug
# Press Ctrl+C to stop the backend
# Edit main.py to use a fast model directly
nano main.py
# Press Ctrl+C to stop the backend first
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Force it to use the fast model, skip routing
# The [ and ] are backslash-escaped so sed matches them literally.
sed -i 's/selected_model = model_router.smart_route(message)\["model"\]/selected_model = "llama3.1:8b"  # Forced fast/' main.py
# Restart
.venv/bin/python3 -m uvicorn main:app --host 127.0.0.1 --port 8091
# Press Ctrl+C
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Add debug logging to see what's happening
cat > test_ws.py << 'EOF'
import asyncio
import websockets
import json

async def test():
    # Minimal round-trip: connect, send one message, dump every raw frame
    # received until the backend signals type == 'done'.
    async with websockets.connect('ws://localhost:8091/ws/chat') as ws:
        print("Connected")
        msg = {"message": "hello", "persona": "Eden"}
        print(f"Sending: {msg}")
        await ws.send(json.dumps(msg))
        print("Message sent, waiting for response...")
        
        async for response in ws:
            print(f"Got: {response}")
            data = json.loads(response)
            if data.get('type') == 'done':
                break

asyncio.run(test())
EOF

# Run test
.venv/bin/python3 test_ws.py
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Start backend in background so we can still test
.venv/bin/python3 -m uvicorn main:app --host 127.0.0.1 --port 8091 &
# Wait for it to start
sleep 2
# Now test
.venv/bin/python3 test_ws.py
cd /Eden/WEB/eden-interface
# Backup broken one
cp index.html index_broken_streaming.html
# BUG FIX: the original ran `curl -o index.html https://claude.ai/artifacts/[ID]`
# with a literal "[ID]" placeholder. That URL does not resolve to an artifact,
# so curl would overwrite index.html with an error page. Disabled until a real
# URL is substituted; the python3 heredoc further below regenerates the
# interface locally instead.
# curl -o index.html https://claude.ai/artifacts/[ID]
cd /Eden/WEB/eden-interface
# Backup
cp index.html index_broken_streaming.html
# Create working streaming interface
python3 << 'ENDFIX'
# NOTE(review): the triple-quoted string below is a complete, minified
# single-file chat UI (HTML + CSS + inline JS). It is written to disk
# verbatim — do not reformat the markup or the script; it is data, not
# Python code. The JS connects to ws://localhost:8091/ws/chat and appends
# 'token' frames into the current assistant bubble until a 'done' frame.
html = '''<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Eden AGI - Streaming</title>
<style>
*{margin:0;padding:0;box-sizing:border-box}
body{font-family:'Segoe UI',sans-serif;background:linear-gradient(135deg,#0f0f1e 0%,#1a1a2e 100%);color:#fff;height:100vh;overflow:hidden}
.container{display:flex;height:100vh}
.sidebar{width:300px;background:rgba(26,26,46,0.9);padding:20px;display:flex;flex-direction:column}
.logo{font-size:28px;font-weight:700;margin-bottom:30px}
.main{flex:1;display:flex;flex-direction:column}
.header{background:rgba(26,26,46,0.8);padding:20px 30px;border-bottom:1px solid rgba(255,255,255,0.1)}
.messages{flex:1;overflow-y:auto;padding:30px}
.message{margin-bottom:20px}
.message.user{text-align:right}
.message-content{display:inline-block;max-width:70%;padding:15px 20px;border-radius:15px}
.message.user .message-content{background:linear-gradient(135deg,#667eea 0%,#764ba2 100%)}
.message.assistant .message-content{background:rgba(255,255,255,0.1)}
.input-area{background:rgba(26,26,46,0.8);padding:20px 30px;display:flex;gap:15px}
#input{flex:1;background:rgba(255,255,255,0.05);border:1px solid rgba(255,255,255,0.1);border-radius:25px;padding:15px 25px;color:#fff;font-size:15px}
.btn{padding:15px 30px;border:none;border-radius:25px;cursor:pointer;font-weight:600;background:linear-gradient(135deg,#667eea 0%,#764ba2 100%);color:#fff}
</style>
</head>
<body>
<div class="container">
<div class="sidebar">
<div class="logo">🌱 EDEN AGI</div>
<div style="margin-top:auto;padding:15px;background:rgba(255,255,255,0.05);border-radius:10px">
<div style="display:flex;justify-content:space-between;margin:8px 0">
<span>Status</span><span id="status">Connecting...</span>
</div>
</div>
</div>
<div class="main">
<div class="header"><div style="font-size:24px;font-weight:600">🌱 Eden - Coordinator</div></div>
<div class="messages" id="messages"></div>
<div class="input-area">
<input type="text" id="input" placeholder="Type your message..."/>
<button class="btn" onclick="send()">Send</button>
</div>
</div>
</div>
<script>
let ws,buf='',div;
function conn(){
ws=new WebSocket('ws://localhost:8091/ws/chat');
ws.onopen=()=>document.getElementById('status').textContent='✅ Ready';
ws.onmessage=e=>{
let d=JSON.parse(e.data);
if(d.type==='token'){buf+=d.content;if(div)div.textContent=buf}
else if(d.type==='done'){buf='';div=null}
};
ws.onerror=()=>document.getElementById('status').textContent='❌ Error';
}
function send(){
let m=document.getElementById('input').value.trim();
if(!m||!ws)return;
document.getElementById('messages').innerHTML+=`<div class="message user"><div class="message-content">${m}</div></div>`;
document.getElementById('input').value='';
buf='';
let el=document.createElement('div');
el.className='message assistant';
el.innerHTML='<div class="message-content">...</div>';
div=el.querySelector('.message-content');
document.getElementById('messages').appendChild(el);
document.getElementById('messages').scrollTop=999999;
ws.send(JSON.stringify({message:m,persona:'Eden'}));
}
window.onload=conn;
document.getElementById('input').onkeypress=e=>{if(e.key==='Enter')send()};
</script>
</body>
</html>'''

# Replace index.html wholesale; the cp above keeps a restorable backup.
with open('index.html','w') as f:
    f.write(html)
print("✅ Fixed streaming interface")
ENDFIX

echo "🚀 Refresh http://localhost:8080 NOW!"
cd /Eden/WEB/eden-interface
# Restore the old interface
cp index_broken_streaming.html index.html
echo "✅ Restored your original interface"
echo "🔄 Refresh browser"
cd /Eden/WEB/eden-interface
cp index_broken_streaming.html index.html
echo "✅ Restored"
# Check if Eden's persona file exists
cat /Eden/EXTERNALS/Ava_Nyx/eden-core/personas/eden.json
# Check if the backend is loading it
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
grep -A 10 "Load persona" main.py
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Check the WebSocket function - look at the Ollama call
grep -A 15 "Stream from Ollama" main.py
cd /Eden/EXTERNALS/Ava_Nyx/eden-core/backend
# Add debug print to see what system prompt is being used
# NOTE(review): this appends the print after the FALLBACK prompt line (the
# one inside `except`), so it only fires when the persona file failed to
# load — confirm that is the intended trace point.
sed -i '/system_prompt = f"You are {persona}/a\            print(f"🎭 Using system prompt: {system_prompt[:100]}...")' main.py
# Also print the persona being loaded
sed -i '/persona = data.get("persona", "Eden")/a\            print(f"👤 Persona: {persona}")' main.py
# Restart to see the debug output
pkill -f "uvicorn main:app.*8091"
# Runs in the foreground so the debug prints are visible (Ctrl+C to stop).
.venv/bin/python3 -m uvicorn main:app --host 127.0.0.1 --port 8091
