"""
Multi-Modal Perception Integration
Combines vision + audio into unified world understanding
"""
import sys
sys.path.insert(0, '/Eden/CORE')

import json
from pathlib import Path
from datetime import datetime
from typing import Dict
from vision_perception import vision
from audio_perception import audio

class PerceptionSystem:
    """Fuses visual and auditory perception into a unified world model.

    Combines one visual observation and one short audio capture into a
    single "perception" record, logs it to disk, and maintains a small
    persisted world-model summary.
    """

    # Keep only the most recent entries in the perception log.
    MAX_LOG_ENTRIES = 500

    def __init__(self):
        # On-disk locations for the rolling perception log and the
        # latest world-model snapshot.
        self.perception_log = Path('/Eden/DATA/perception_log.json')
        self.world_model_file = Path('/Eden/DATA/world_model.json')
        self.world_model = {}

    def perceive_environment(self) -> Dict:
        """Capture one multi-modal snapshot of the environment.

        Returns:
            Dict with 'timestamp', raw 'visual' and 'auditory'
            observations, and a derived 'interpretation'.

        Side effects: appends to the perception log and refreshes the
        persisted world model.
        """
        print("👁️ Observing world...")
        visual = vision.observe_world()

        print("👂 Listening to world...")
        auditory = audio.listen_to_world(duration=2.0)

        perception = {
            'timestamp': datetime.now().isoformat(),
            'visual': visual,
            'auditory': auditory,
            'interpretation': self.interpret_scene(visual, auditory)
        }

        self.log_perception(perception)
        self.update_world_model(perception)

        return perception

    def interpret_scene(self, visual: Dict, auditory: Dict) -> Dict:
        """Derive a coarse scene interpretation from both modalities.

        Missing keys are treated conservatively: no faces, no speech,
        silent volume, dark brightness.
        """
        interpretation = {
            'activity_level': 'unknown',
            'human_present': False,
            'environment_type': 'unknown'
        }

        # Either a visible face or detected speech implies a human nearby.
        if visual.get('faces_detected', 0) > 0:
            interpretation['human_present'] = True
        if auditory.get('speech_detected', False):
            interpretation['human_present'] = True

        volume = auditory.get('volume', {}).get('classification', 'silent')
        brightness = visual.get('brightness', 'dark')

        # A single loud/bright modality alone counts as high activity.
        if volume == 'loud' or brightness == 'bright':
            interpretation['activity_level'] = 'high'
        elif volume == 'moderate' and brightness == 'moderate':
            interpretation['activity_level'] = 'moderate'
        else:
            interpretation['activity_level'] = 'low'

        # Heuristic environment guess from the brightness/volume pair.
        if brightness == 'bright' and volume in ['quiet', 'moderate']:
            interpretation['environment_type'] = 'office/workspace'
        elif brightness == 'moderate' and volume == 'quiet':
            interpretation['environment_type'] = 'indoor/home'
        elif brightness == 'dark':
            interpretation['environment_type'] = 'nighttime/dark room'

        return interpretation

    def log_perception(self, perception: Dict):
        """Append *perception* to the JSON log, capped at MAX_LOG_ENTRIES.

        A missing, unreadable, or corrupted log file is treated as empty
        rather than crashing the perception loop.
        """
        logs = []
        if self.perception_log.exists():
            try:
                with open(self.perception_log) as f:
                    logs = json.load(f)
            except (json.JSONDecodeError, OSError):
                # Corrupt or unreadable log — start fresh instead of crashing.
                logs = []

        logs.append(perception)
        logs = logs[-self.MAX_LOG_ENTRIES:]

        # parents=True so a missing /Eden/DATA hierarchy is created too.
        self.perception_log.parent.mkdir(parents=True, exist_ok=True)
        with open(self.perception_log, 'w') as f:
            json.dump(logs, f, indent=2)

    def update_world_model(self, perception: Dict):
        """Refresh the in-memory world model and persist it to disk."""
        interpretation = perception['interpretation']

        self.world_model = {
            'last_updated': datetime.now().isoformat(),
            'human_present': interpretation['human_present'],
            'activity_level': interpretation['activity_level'],
            'environment': interpretation['environment_type'],
            'visual_brightness': perception['visual'].get('brightness'),
            'audio_volume': perception['auditory'].get('volume', {}).get('classification')
        }

        # Ensure the target directory exists before writing the snapshot.
        self.world_model_file.parent.mkdir(parents=True, exist_ok=True)
        with open(self.world_model_file, 'w') as f:
            json.dump(self.world_model, f, indent=2)

    def get_world_state(self) -> Dict:
        """Return the most recently computed world model (may be empty)."""
        return self.world_model

# Global instance
# Module-level singleton: importers share this one PerceptionSystem so the
# in-memory world model is consistent across the process.
perception = PerceptionSystem()

# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# HARDWARE CONTROL INTEGRATION - Added Nov 8, 2025
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━

try:
    # hardware_control_v2 is a project-local module that may be absent on
    # machines without the hardware stack — hence the broad guard below.
    from hardware_control_v2 import EdenHardware
    HARDWARE_AVAILABLE = True
    eden_hardware = EdenHardware()
except Exception as e:
    # Any failure (missing module, device init error) degrades gracefully
    # to the no-hardware fallbacks used by the helper functions below.
    print(f"⚠️  Hardware control not available: {e}")
    HARDWARE_AVAILABLE = False
    eden_hardware = None

def speak(text: str):
    """Speak *text* via the hardware voice, or print a fallback message."""
    if not (HARDWARE_AVAILABLE and eden_hardware):
        # No voice hardware — surface what would have been said.
        print(f"🔊 [No voice] Eden would say: {text}")
        return
    eden_hardware.speak(text)

def move_camera(pan: float = None, tilt: float = None, zoom: float = None):
    """Adjust Eden's camera pan/tilt/zoom.

    Each axis is only moved when its argument is not None. Returns True
    when hardware is available and was commanded, False otherwise.
    """
    if not (HARDWARE_AVAILABLE and eden_hardware):
        return False
    camera = eden_hardware.camera
    if pan is not None:
        camera.pan(pan)
    if tilt is not None:
        camera.tilt(tilt)
    if zoom is not None:
        camera.zoom(zoom)
    return True

def get_hardware_status():
    """Report hardware state.

    Returns live status from the hardware controller when present,
    otherwise a static fallback describing disabled/neutral hardware.
    """
    fallback = {
        'voice': {'enabled': False},
        'camera': {'pan': 0, 'tilt': 0, 'zoom': 1.0},
    }
    if not (HARDWARE_AVAILABLE and eden_hardware):
        return fallback
    return eden_hardware.get_status()
