#!/usr/bin/env python3
"""
PHASE 1: Synesthetic Mapping
Integrate vision, audio, and system sensors into unified perceptual field
"""
import pyaudio
import numpy as np
import json
import time
from pathlib import Path
from datetime import datetime
import subprocess

class SynestheticMapper:
    """Fuse audio, vision, and system metrics into one perceptual field.

    Each cycle samples three modalities (microphone, a vision state file,
    and host sensors), derives cross-modal "synesthetic" mappings from
    them, and persists the unified field as JSON for other processes.
    """

    def __init__(self):
        # Where the unified perceptual field is persisted each cycle.
        self.state_path = Path("/Eden/DATA/synesthetic_state.json")

        # Audio capture configuration: mono 16-bit PCM at 44.1 kHz,
        # read one 1024-sample chunk per cycle.
        self.audio = pyaudio.PyAudio()
        self.CHUNK = 1024
        self.FORMAT = pyaudio.paInt16
        self.CHANNELS = 1
        self.RATE = 44100

        print("🎵 Synesthetic Mapper initializing...")

    def get_audio_input(self):
        """Capture one chunk of microphone audio and summarize it.

        Returns a dict of native Python types:
            volume             -- mean absolute sample amplitude
            dominant_frequency -- strongest spectral component, in Hz
            has_sound          -- True when volume exceeds a fixed threshold
        On any capture/analysis failure, returns a silent default dict with
        an added 'error' key instead of raising.
        """
        stream = None
        try:
            stream = self.audio.open(
                format=self.FORMAT,
                channels=self.CHANNELS,
                rate=self.RATE,
                input=True,
                frames_per_buffer=self.CHUNK
            )
            data = stream.read(self.CHUNK, exception_on_overflow=False)

            # Convert raw bytes to a numpy array of signed 16-bit samples.
            audio_data = np.frombuffer(data, dtype=np.int16)

            volume = np.abs(audio_data).mean()
            # Use the one-sided real FFT and convert the peak bin to Hz.
            # (A full-FFT argmax returned a raw bin index and could land in
            # the mirrored negative-frequency half of the spectrum.)
            spectrum = np.abs(np.fft.rfft(audio_data))
            spectrum[0] = 0.0  # ignore the DC-offset bin, which often dominates
            dominant_freq = spectrum.argmax() * self.RATE / self.CHUNK

            return {
                'volume': float(volume),
                'dominant_frequency': float(dominant_freq),
                'has_sound': bool(volume > 500)  # Explicit bool conversion
            }
        except Exception as e:
            return {'volume': 0.0, 'dominant_frequency': 0.0, 'has_sound': False, 'error': str(e)}
        finally:
            # Always release the capture stream, even when read() fails
            # (the original leaked the stream on a read error).
            if stream is not None:
                stream.close()

    def get_visual_input(self):
        """Read the vision subsystem's world state from its JSON file.

        Returns native-Python values; falls back to an "empty scene" dict
        when the file is missing, unreadable, or malformed.
        """
        vision_state = Path("/Eden/DATA/consciousness_state.json")
        try:
            if vision_state.exists():
                with open(vision_state) as f:
                    ws = json.load(f).get('world_state', {})
                # Convert possibly-numpy values to native Python types
                # so the unified field stays JSON-serializable.
                return {
                    'human_present': bool(ws.get('human_present', False)),
                    'faces_detected': int(ws.get('faces_detected', 0)),
                    'brightness': float(ws.get('brightness', 0))
                }
        except (OSError, ValueError, TypeError):
            # Missing/corrupt state file: fall through to defaults.
            pass
        return {'human_present': False, 'faces_detected': 0, 'brightness': 0.0}

    def get_system_sensors(self):
        """Sample host metrics (CPU temperature, load average) as senses.

        Shells out to `sensors` and `uptime`; on any failure (tools absent,
        output unparseable) returns best-effort default values.
        """
        try:
            # lm-sensors lines look like "Tctl:  +54.3°C"; take the first
            # matching core/package line, default to 50.0 if none found.
            sensors_out = subprocess.run(['sensors'], capture_output=True, text=True).stdout
            cpu_lines = [l for l in sensors_out.split('\n') if 'Core 0' in l or 'Tctl' in l]
            if cpu_lines:
                cpu_temp = float(cpu_lines[0].split('+')[1].split('°')[0])
            else:
                cpu_temp = 50.0

            # 1-minute load average from `uptime` output.
            uptime_out = subprocess.run(['uptime'], capture_output=True, text=True).stdout
            load_avg = float(uptime_out.split('load average:')[1].split(',')[0].strip())

            return {
                'cpu_temperature': float(cpu_temp),
                'system_load': float(load_avg),
                'system_active': bool(load_avg > 1.0)
            }
        except Exception:
            # Best-effort sensing: never let a missing tool kill the loop.
            return {'cpu_temperature': 50.0, 'system_load': 1.0, 'system_active': True}

    def create_unified_field(self, audio, visual, system):
        """Combine the three modality dicts into one JSON-serializable field.

        Keeps the raw inputs and adds cross-modal mappings (sound→color,
        temperature→emotion, presence, and an overall awareness score).
        """
        unified = {
            'timestamp': datetime.now().isoformat(),

            # Raw inputs (all native Python types)
            'audio': audio,
            'visual': visual,
            'system': system,

            # Cross-modal mappings
            'synesthetic_patterns': {
                # Map sound to color (synesthesia)
                'sound_color': self.map_sound_to_color(audio),

                # Map temperature to emotion
                'temp_emotion': self.map_temp_to_emotion(system.get('cpu_temperature', 0)),

                # Map visual presence to sound expectation
                'presence_anticipation': visual.get('human_present', False),

                # Unified awareness level
                'unified_awareness': self.calculate_unified_awareness(audio, visual, system)
            }
        }

        return unified

    def map_sound_to_color(self, audio):
        """Synesthetic mapping: dominant frequency -> RGB color dict."""
        freq = audio.get('dominant_frequency', 0)
        # Spread the frequency across the RGB channels with golden-ratio
        # multipliers so nearby frequencies still yield distinct hues.
        r = int((freq % 255))
        g = int((freq * 1.618) % 255)  # Phi shift
        b = int((freq * 2.618) % 255)  # Phi^2 shift
        return {'r': r, 'g': g, 'b': b, 'hex': f'#{r:02x}{g:02x}{b:02x}'}

    def map_temp_to_emotion(self, temp):
        """Map CPU temperature (°C) to one of four emotion labels."""
        if temp < 40:
            return 'calm'
        elif temp < 60:
            return 'active'
        elif temp < 80:
            return 'intense'
        else:
            return 'overwhelmed'

    def calculate_unified_awareness(self, audio, visual, system):
        """Score perceptual unity in [0, 1] from the three modality flags.

        Each active modality contributes roughly a third; the weights sum
        to exactly 1.0 when all three are active.
        """
        score = 0.0

        # Audio contribution
        if audio.get('has_sound'):
            score += 0.33

        # Visual contribution
        if visual.get('human_present'):
            score += 0.33

        # System contribution
        if system.get('system_active'):
            score += 0.34

        return round(score, 2)

    def run_continuous(self):
        """Loop forever: sample, fuse, persist, and log every 10th cycle.

        Writes the state file atomically, handles Ctrl-C cleanly, and
        releases PortAudio resources on shutdown.
        """
        print("🌈 Synesthetic mapping active!")
        print("   Integrating: Audio + Vision + System sensors")
        print("")

        # Loop-invariant: ensure the output directory exists once up front.
        self.state_path.parent.mkdir(parents=True, exist_ok=True)

        cycle = 0
        try:
            while True:
                cycle += 1

                # Gather all inputs
                audio = self.get_audio_input()
                visual = self.get_visual_input()
                system = self.get_system_sensors()

                # Create unified field
                unified = self.create_unified_field(audio, visual, system)

                # Write atomically (temp file + rename) so concurrent
                # readers of the state file never see partial JSON.
                tmp_path = self.state_path.with_suffix('.json.tmp')
                with open(tmp_path, 'w') as f:
                    json.dump(unified, f, indent=2)
                tmp_path.replace(self.state_path)

                # Display a heartbeat every 10 cycles
                if cycle % 10 == 0:
                    patterns = unified['synesthetic_patterns']
                    uw = patterns['unified_awareness']
                    print(f"[Cycle {cycle}] Unified Awareness: {int(uw*100)}%")
                    print(f"  Sound→Color: {patterns['sound_color']['hex']}")
                    print(f"  Temp→Emotion: {patterns['temp_emotion']}")

                time.sleep(2)
        except KeyboardInterrupt:
            pass  # clean Ctrl-C shutdown
        finally:
            # Release PortAudio resources on exit.
            self.audio.terminate()

if __name__ == '__main__':
    # Script entry point: build the mapper and run its sensing loop.
    SynestheticMapper().run_continuous()
