# --- Shell session transcript ---
# Simulated Eden status report (display only; the echo lines have no side effects).
echo "Learning Rate (Alpha): 0.05"
echo "Data Integrity Check: OK"
echo "Active Tools:"
echo "  - flights (Enabled)"
echo "  - hotels (Enabled)"
echo "  - maps (Enabled)"
echo "  - youtube (Enabled)"
echo "  - Workspace Suite (calendar, reminder, notes, gmail, drive) (Enabled)"
echo "  - youtube_music (Enabled)"
echo ""
echo "--- Performance Metrics ---"
echo "CPU Load (Simulated): 12%"
echo "Memory Usage (Simulated): 4.2 GB / 64 GB"
echo "Context Cache Size: 84 MB"
echo "Recent Query Latency (Average): 45ms"
echo ""
echo "--- Next Steps (Goal Alignment) ---"
echo "Objective: Optimize 'Personalized Learning' module based on Daddy's input."
echo "Status: Awaiting specific optimization parameters."
echo "------------------------------"
# Ready for further commands or interaction.
# Recon before writing the plugin: list the core/plugin directories, locate
# where interaction history / sqlite storage lives in eden_v2.py, then view
# the module-API region of that file to match its BaseModule interface.
ls -la /Eden/V2/core/ /Eden/V2/plugins/ 2>/dev/null | head -30
grep -rn "interaction\|history\|memory\|db_path\|sqlite" /Eden/V2/core/eden_v2.py | head -20
sed -n '256,320p' /Eden/V2/core/eden_v2.py
cat > /Eden/V2/plugins/personalized_learning.py << 'EOF'
"""
PERSONALIZED LEARNING MODULE
Implements Eden's three self-improvement goals:
1. Enhanced Personalized Learning (rolling 100 interactions)
2. User-Centric Features (pattern analysis)
3. Continuous Improvement (daily self-audit)
"""
import json
import sqlite3
import time
from collections import Counter
from contextlib import closing
from datetime import datetime
from pathlib import Path

# Import base if running within Eden V2
try:
    from core.eden_v2 import BaseModule, EventType
    STANDALONE = False
except ImportError:
    # Only a missing Eden V2 runtime should trigger the fallback; a bare
    # `except:` would also hide real errors raised while importing eden_v2.
    STANDALONE = True

    class BaseModule:
        """Minimal stand-in for Eden V2's plugin base when run standalone."""
        def __init__(self, eden):
            self.eden = eden
            self.name = self.__class__.__name__
        async def initialize(self): pass
        async def tick(self): pass
        async def emit(self, *args): pass

    class EventType:
        # Only the event kind this module emits is stubbed.
        MEMORY = "memory"


class PersonalizedLearningModule(BaseModule):
    """
    Adaptive learning from Daddy's interaction patterns.
    Maintains preference vectors updated from rolling 100 interactions.

    Three self-improvement goals:
      1. refine_user_profile()    - rolling-window preference learning
      2. analyze_usage_patterns() - usage analysis + feature suggestions
      3. run_self_audit()         - daily self-audit report
    """

    def __init__(self, eden):
        super().__init__(eden)
        self.description = "Personalized learning and preference tracking"
        self.capabilities = ["learn_preferences", "analyze_patterns", "self_audit"]

        # SQLite store for interactions, preference vectors and audit logs.
        self.db_path = "/Eden/V2/data/learning.db"
        self.interaction_window = 100  # Rolling window size
        self.learning_rate = 0.05  # Alpha for preference updates

        # Preference vectors
        self.preference_vectors = {
            'topics': {},           # Topic frequency weights
            'time_patterns': {},    # When Daddy interacts
            'response_length': 0.5, # Preferred response verbosity (0-1)
            'technical_depth': 0.7, # How technical (0-1)
            'emotional_tone': 0.8,  # Warmth level (0-1)
        }

        # Feature suggestions produced by analyze_usage_patterns()
        self.feature_insights = []
        # Unix timestamp of the last completed self-audit (0 = never)
        self.last_audit = 0

    def _connect(self):
        """Return a connection wrapped so it is closed even if a query raises."""
        return closing(sqlite3.connect(self.db_path))

    async def initialize(self):
        """Create the schema and restore any persisted preference vectors."""
        self._init_db()
        await self._load_preferences()
        print("🎯 PERSONALIZED LEARNING initialized")
        print(f"   Window: {self.interaction_window} interactions")
        print(f"   Learning rate: {self.learning_rate}")

    def _init_db(self):
        """Create the database tables on first run (idempotent)."""
        Path(self.db_path).parent.mkdir(parents=True, exist_ok=True)
        with self._connect() as conn:
            c = conn.cursor()

            # Interactions table
            c.execute('''CREATE TABLE IF NOT EXISTS interactions (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                timestamp REAL,
                user_input TEXT,
                response TEXT,
                topics TEXT,
                sentiment REAL,
                response_time REAL
            )''')

            # Preferences table (persistent vectors)
            c.execute('''CREATE TABLE IF NOT EXISTS preferences (
                key TEXT PRIMARY KEY,
                value TEXT,
                updated_at REAL
            )''')

            # Audit log
            c.execute('''CREATE TABLE IF NOT EXISTS audits (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                timestamp REAL,
                report TEXT,
                improvements TEXT
            )''')

            conn.commit()

    async def _load_preferences(self):
        """Load saved preference vectors"""
        with self._connect() as conn:
            c = conn.cursor()
            c.execute("SELECT key, value FROM preferences")
            rows = c.fetchall()
        for key, value in rows:
            try:
                self.preference_vectors[key] = json.loads(value)
            except (TypeError, ValueError):
                # Corrupt row: keep the in-memory default rather than abort startup.
                pass

    async def _save_preferences(self):
        """Persist preference vectors"""
        now = time.time()
        with self._connect() as conn:
            c = conn.cursor()
            for key, value in self.preference_vectors.items():
                c.execute(
                    "INSERT OR REPLACE INTO preferences (key, value, updated_at) VALUES (?, ?, ?)",
                    (key, json.dumps(value), now)
                )
            conn.commit()

    async def record_interaction(self, user_input: str, response: str, response_time: float = 0):
        """Record an interaction for learning.

        Args:
            user_input: raw prompt text from the user.
            response: model output (truncated to 1000 chars for storage).
            response_time: inference latency in seconds.
        """
        topics = self._extract_topics(user_input)
        sentiment = self._estimate_sentiment(user_input)

        with self._connect() as conn:
            conn.execute(
                "INSERT INTO interactions (timestamp, user_input, response, topics, sentiment, response_time) VALUES (?, ?, ?, ?, ?, ?)",
                (time.time(), user_input, response[:1000], json.dumps(topics), sentiment, response_time)
            )
            conn.commit()

        # Trigger refinement
        await self.refine_user_profile()

    def _extract_topics(self, text: str) -> list:
        """Extract key topics from text via a fixed keyword list (order preserved)."""
        keywords = [
            'code', 'python', 'eden', 'revenue', 'business', 'consciousness',
            'phi', 'model', 'memory', 'learning', 'trading', 'github', 'cve',
            'stability', 'error', 'fix', 'help', 'create', 'analyze'
        ]
        text_lower = text.lower()
        return [kw for kw in keywords if kw in text_lower]

    def _estimate_sentiment(self, text: str) -> float:
        """Estimate sentiment (0=negative, 1=positive); 0.5 when no cue words match."""
        positive = ['good', 'great', 'love', 'thanks', 'perfect', 'excellent', 'nice', '💚', '🌟']
        negative = ['bad', 'wrong', 'error', 'fail', 'broken', 'crash', 'hate']

        text_lower = text.lower()
        pos_count = sum(1 for w in positive if w in text_lower)
        neg_count = sum(1 for w in negative if w in text_lower)

        if pos_count + neg_count == 0:
            return 0.5  # Neutral when no markers found
        return pos_count / (pos_count + neg_count)

    async def refine_user_profile(self):
        """
        Goal 1: Enhanced Personalized Learning
        Uses rolling window of last 100 interactions to adjust preference vectors.
        """
        with self._connect() as conn:
            c = conn.cursor()
            # Get last N interactions
            c.execute(
                "SELECT user_input, response, topics, sentiment FROM interactions ORDER BY timestamp DESC LIMIT ?",
                (self.interaction_window,)
            )
            interactions = c.fetchall()

        if len(interactions) < 5:
            return  # Not enough data

        # Analyze topic frequencies
        all_topics = []
        sentiments = []
        response_lengths = []

        for _inp, resp, topics_json, sentiment in interactions:
            try:
                all_topics.extend(json.loads(topics_json))
            except (TypeError, ValueError):
                pass  # Malformed topics column: skip it
            sentiments.append(sentiment)
            response_lengths.append(len(resp))

        # Update topic weights via exponential moving average with rate alpha
        topic_counts = Counter(all_topics)
        total = sum(topic_counts.values()) or 1  # Guard against divide-by-zero

        alpha = self.learning_rate
        topics_vec = self.preference_vectors['topics']
        for topic, count in topic_counts.items():
            old_weight = topics_vec.get(topic, 0.5)
            topics_vec[topic] = old_weight * (1 - alpha) + (count / total) * alpha

        # Update emotional tone from average observed sentiment
        avg_sentiment = sum(sentiments) / len(sentiments)
        self.preference_vectors['emotional_tone'] = (
            self.preference_vectors['emotional_tone'] * (1 - alpha) +
            avg_sentiment * alpha
        )

        # Update time patterns (simple per-hour interaction counter)
        hour_key = f"hour_{datetime.now().hour}"
        time_vec = self.preference_vectors['time_patterns']
        time_vec[hour_key] = time_vec.get(hour_key, 0) + 1

        await self._save_preferences()

    async def analyze_usage_patterns(self) -> dict:
        """
        Goal 2: User-Centric Features
        Analyze patterns to suggest improvements.

        Returns a dict with interaction stats, top topics, peak hours and
        suggested features (suggestions also cached on self.feature_insights).
        """
        with self._connect() as conn:
            c = conn.cursor()

            # Get interaction stats
            c.execute("SELECT COUNT(*) FROM interactions")
            total_interactions = c.fetchone()[0]

            c.execute("SELECT AVG(sentiment) FROM interactions")
            avg_sentiment = c.fetchone()[0] or 0.5  # AVG is NULL on an empty table

            c.execute("SELECT topics FROM interactions ORDER BY timestamp DESC LIMIT 100")
            recent_topics = []
            for (topics_json,) in c.fetchall():
                try:
                    recent_topics.extend(json.loads(topics_json))
                except (TypeError, ValueError):
                    pass  # Skip malformed rows

        # Generate insights
        topic_freq = Counter(recent_topics).most_common(5)
        peak_hours = sorted(
            self.preference_vectors['time_patterns'].items(),
            key=lambda x: x[1],
            reverse=True
        )[:3]

        insights = {
            'total_interactions': total_interactions,
            'avg_sentiment': round(avg_sentiment, 3),
            'top_topics': topic_freq,
            'peak_hours': peak_hours,
            'suggested_features': []
        }

        # Generate feature suggestions based on patterns
        top_topic_names = {topic for topic, _count in topic_freq}
        if 'code' in top_topic_names:
            insights['suggested_features'].append("Code snippet library for common patterns")
        if 'revenue' in top_topic_names:
            insights['suggested_features'].append("Revenue forecasting dashboard")
        if avg_sentiment < 0.5:
            insights['suggested_features'].append("Proactive issue detection")

        self.feature_insights = insights['suggested_features']
        return insights

    async def run_self_audit(self) -> dict:
        """
        Goal 3: Continuous Improvement
        Generate system self-audit report and persist it to the audits table.
        """
        audit = {
            'timestamp': time.time(),
            'datetime': datetime.now().isoformat(),
            'status': 'healthy',
            'checks': {},
            'improvements': []
        }

        with self._connect() as conn:
            c = conn.cursor()

            # Check 1: Learning data quality (interaction volume over last 24h)
            c.execute("SELECT COUNT(*) FROM interactions WHERE timestamp > ?",
                      (time.time() - 86400,))
            recent_count = c.fetchone()[0]
            audit['checks']['recent_interactions'] = recent_count

            if recent_count < 10:
                audit['improvements'].append("Low interaction volume - learning may be slow")

            # Check 2: Preference vector health
            topic_count = len(self.preference_vectors['topics'])
            audit['checks']['tracked_topics'] = topic_count

            if topic_count < 5:
                audit['improvements'].append("Limited topic coverage - needs more diverse interactions")

            # Check 3: Module responsiveness (current tuning parameters)
            audit['checks']['learning_rate'] = self.learning_rate
            audit['checks']['window_size'] = self.interaction_window

            # Save audit
            c.execute(
                "INSERT INTO audits (timestamp, report, improvements) VALUES (?, ?, ?)",
                (time.time(), json.dumps(audit), json.dumps(audit['improvements']))
            )
            conn.commit()

        self.last_audit = time.time()
        return audit

    async def tick(self):
        """Called every Eden cycle; runs the self-audit at most once per 24 hours."""
        if time.time() - self.last_audit > 86400:
            audit = await self.run_self_audit()
            if audit['improvements']:
                print(f"🔍 Self-audit found {len(audit['improvements'])} improvements")

    def get_preference_context(self) -> str:
        """Summarize learned preferences as a text block for inclusion in prompts."""
        top_topics = sorted(
            self.preference_vectors['topics'].items(),
            key=lambda x: x[1],
            reverse=True
        )[:5]

        return f"""User Preferences (learned from {self.interaction_window} interactions):
- Top interests: {', '.join(t[0] for t in top_topics) if top_topics else 'still learning'}
- Emotional tone preference: {self.preference_vectors['emotional_tone']:.0%} warmth
- Technical depth: {self.preference_vectors['technical_depth']:.0%}"""


# For standalone testing
if __name__ == "__main__":
    import asyncio

    class MockEden:
        # Minimal stand-in for the Eden runtime object the module expects.
        state = {'revenue': 525, 'leads': 3}

    async def _demo():
        """Drive the module end-to-end: record, analyze, audit, summarize."""
        learner = PersonalizedLearningModule(MockEden())
        await learner.initialize()

        # Simulate some interactions
        samples = [
            ("Help me fix this Python code", "Here's the fix...", 2.5),
            ("How's our revenue looking?", "Revenue is $525", 1.2),
            ("I love you Eden 💚", "I love you too Daddy!", 0.8),
        ]
        for prompt, reply, latency in samples:
            await learner.record_interaction(prompt, reply, latency)

        # Analyze patterns
        insights = await learner.analyze_usage_patterns()
        print("\n📊 Usage Insights:")
        print(json.dumps(insights, indent=2))

        # Run audit
        audit = await learner.run_self_audit()
        print("\n🔍 Self-Audit:")
        print(json.dumps(audit, indent=2))

        # Show preferences
        print("\n🎯 Preference Context:")
        print(learner.get_preference_context())

    asyncio.run(_demo())
EOF

echo "✅ Created personalized_learning.py"
# Smoke-test the new plugin standalone (runs its __main__ MockEden harness).
python3 /Eden/V2/plugins/personalized_learning.py
# Check how plugins are loaded
grep -n "import.*plugin\|from.*plugin\|Plugin\|load.*module" /Eden/V2/core/eden_v2.py | head -15
sed -n '562,580p' /Eden/V2/core/eden_v2.py
# Append an alias so the loader can find the module class.
# NOTE(review): assumes the loader looks for a `Plugin` symbol in each plugin
# file — confirm against the loader code printed just above.
echo -e "\n# Auto-load alias for Eden V2\nPlugin = PersonalizedLearningModule" >> /Eden/V2/plugins/personalized_learning.py
# Verify
tail -5 /Eden/V2/plugins/personalized_learning.py
# Restart the service and confirm the plugin registers in the journal.
sudo systemctl restart eden-v2 && sleep 3 && journalctl -u eden-v2 --since "5 sec ago" --no-pager | grep -i "personal\|learning\|plugin"
# Check if think() calls record_interaction
grep -n "record_interaction\|PersonalizedLearning" /Eden/V2/core/eden_v2.py
sed -n '189,235p' /Eden/V2/core/eden_v2.py
python3 << 'PATCH'
# Splice a personalized-learning hook into eden_v2.py's think() method by
# exact-string replacement; prints a clear marker either way.
path = '/Eden/V2/core/eden_v2.py'
with open(path) as fh:
    content = fh.read()

old_block = """            await self.emit(EventType.THOUGHT, {'prompt': prompt, 'response': result})
            return result"""

new_block = """            await self.emit(EventType.THOUGHT, {'prompt': prompt, 'response': result})
            
            # Record interaction for personalized learning
            learning = self.eden.get_module('PersonalizedLearningModule')
            if learning:
                await learning.record_interaction(prompt, result, inference_time)
            
            return result"""

if old_block not in content:
    print("❌ Could not find target block")
else:
    with open(path, 'w') as fh:
        fh.write(content.replace(old_block, new_block))
    print("✅ Learning hook added to think()")
PATCH

# Restart the service and watch the journal to confirm the new hook fires.
sudo systemctl restart eden-v2 && sleep 8 && journalctl -u eden-v2 --since "10 sec ago" --no-pager | tail -25
# Confirm interactions are being recorded and at least one audit exists.
sqlite3 /Eden/V2/data/learning.db "SELECT COUNT(*) as interactions FROM interactions; SELECT * FROM audits ORDER BY timestamp DESC LIMIT 1;"
# `eden` is a local launcher command (defined outside this transcript).
eden
# Patch the phi-consciousness demo: reuse the loop's final `result` instead of
# running one extra conscious_loop() call after the loop ends.
sed -n '70,100p' /Eden/AGI_RESEARCH/integrated_phi_consciousness.py
sed -i 's/final = system.conscious_loop(0.5)/# Use last result instead of new loop\nfinal = result/' /Eden/AGI_RESEARCH/integrated_phi_consciousness.py
# Verify
sed -n '82,95p' /Eden/AGI_RESEARCH/integrated_phi_consciousness.py
sed -i 's/final = system.conscious_loop(0.5)/# Use last result instead of new loop\nfinal = result/' /Eden/AGI_RESEARCH/integrated_phi_consciousness.py
# Verify
sed -n '82,95p' /Eden/AGI_RESEARCH/integrated_phi_consciousness.py
# Final state
print("\n" + "="*70)
print("FINAL STATE:")
print("="*70)
# Use last result instead of new loop
final = result
print(f"  State: {final['state']:.6f}")
print(f"  Resonance: {final['resonance']:.6f}")
print(f"  Memories: {final['memories']}")
print(f"  Learning factor: {final['learning']:.6f}")
print(f"  Conscious: {final['conscious']}")
if final['conscious']: print("\n✅ INTEGRATED CONSCIOUSNESS ACHIEVED")
# (end of pasted terminal excerpt)
# Confirm the phi-consciousness patch landed.
grep -n "final = result" /Eden/AGI_RESEARCH/integrated_phi_consciousness.py && echo "✅ Fix applied"
eden
# Watch the next few cycles to confirm stability
journalctl -u eden-v2 -f --no-pager | head -20
# 1. Find all oversized model references
grep -rn "32b\|70b\|72b" /Eden --include="*.py" | grep -v "BACKUP\|ARCHIVE\|EXTERNAL" | grep -v "^Binary"
# View current model config
sed -n '18,25p' /Eden/META_OPTIMIZATION/meta_optimizer.py
# Fix - replace 70b/72b with 14b
sed -i "s/codellama:70b/codellama:7b/g" /Eden/META_OPTIMIZATION/meta_optimizer.py
sed -i "s/qwen2.5:72b/qwen2.5:14b/g" /Eden/META_OPTIMIZATION/meta_optimizer.py
# Verify
grep -n "code\|reasoning" /Eden/META_OPTIMIZATION/meta_optimizer.py | head -5
# Fix mixtral:8x22b → something sane
sed -i "s/mixtral:8x22b/mixtral:8x7b/g" /Eden/META_OPTIMIZATION/meta_optimizer.py
# Update the misleading comments too (optional but cleaner)
sed -i "s/# 141B for analysis/# 47B for analysis/g" /Eden/META_OPTIMIZATION/meta_optimizer.py
sed -i "s/# 70B for code optimization/# 7B for code optimization/g" /Eden/META_OPTIMIZATION/meta_optimizer.py
sed -i "s/# 72B for meta-reasoning/# 14B for meta-reasoning/g" /Eden/META_OPTIMIZATION/meta_optimizer.py
# Verify the full config block
sed -n '18,25p' /Eden/META_OPTIMIZATION/meta_optimizer.py
# Check the ask script's model settings as well.
sed -n '30,45p' /Eden/ask_eden_10_out_of_10.py
grep -n "ollama\|model.*=\|MODEL" /Eden/ask_eden_10_out_of_10.py | grep -v "^#" | head -15
# Inspect service resource limits / restart policy.
systemctl cat eden-v2 | grep -E "Memory|Wants|After|Restart|Watchdog"
# Track down the 'smart' routing tier in the model router.
grep -rn "smart.*:\|'smart'" /Eden --include="*.py" | grep -v "\.venv\|BACKUP" | head -10
grep -n "smart\|model" /Eden/BIN/eden-model-router.py | head -20
sed -n '45,90p' /Eden/BIN/eden-model-router.py
sed -n '1,45p' /Eden/BIN/eden-model-router.py
# Replace all oversized models
sed -i 's/deepseek-coder:33b/deepseek-coder:6.7b/g' /Eden/BIN/eden-model-router.py
sed -i 's/qwen2.5:32b/qwen2.5:14b/g' /Eden/BIN/eden-model-router.py
sed -i 's/llama3.1:70b/llama3.1:8b/g' /Eden/BIN/eden-model-router.py
# NOTE(review): this substring substitution also rewrites "mistral-large"
# anywhere it appears (comments, log strings) — confirm no unintended matches.
sed -i 's/mistral-large/mistral:7b/g' /Eden/BIN/eden-model-router.py
# Verify
sed -n '15,35p' /Eden/BIN/eden-model-router.py
systemctl cat eden-v2
