#!/usr/bin/env python3
"""
Eden Vision System - FIXED to work with video0
"""

import cv2
import face_recognition
import pytesseract
import numpy as np
from PIL import Image
import time
from pathlib import Path
import json

class EdenVision:
    """Camera-based perception: face recognition, scene stats, brightness.

    Opens /dev/video0 through the V4L2 backend (the combination known to
    work on this hardware).  Every public method degrades gracefully when
    the camera is missing or stops delivering frames.
    """

    # Frames are downscaled by this factor before face detection to keep
    # face_recognition fast; detected locations are scaled back up by the
    # inverse so callers always get full-resolution coordinates.
    FACE_DETECT_SCALE = 0.25

    # Maximum face-encoding distance still considered a match (the
    # face_recognition library's recommended default tolerance).
    FACE_MATCH_TOLERANCE = 0.6

    def __init__(self):
        # USE VIDEO0 WITH V4L2 - WE KNOW THIS WORKS!
        self.camera = cv2.VideoCapture(0, cv2.CAP_V4L2)

        if not self.camera.isOpened():
            print("⚠️  Camera failed to open")
            self.camera = None
        else:
            # Some devices "open" successfully yet never deliver frames,
            # so verify with a real capture before trusting the handle.
            ret, frame = self.camera.read()
            if ret:
                print(f"✅ Camera initialized: {frame.shape}")
            else:
                print("⚠️  Camera opened but can't read")
                self.camera.release()
                self.camera = None

        # name -> 128-d face encoding for everyone we can recognize
        self.known_faces = {}
        self.load_known_faces()

        # Most recent raw frame and the observations derived from it
        self.last_frame = None
        self.current_observations = {}

        print("✅ Eden vision system initialized")

    def load_known_faces(self):
        """Load reference face encodings from /Eden/DATA/known_faces.

        Currently only looks for how.jpg; a missing or unreadable photo is
        reported and skipped rather than aborting startup.
        """
        face_db = Path("/Eden/DATA/known_faces")
        face_db.mkdir(parents=True, exist_ok=True)

        how_face = face_db / "how.jpg"
        if how_face.exists():
            try:
                image = face_recognition.load_image_file(str(how_face))
                # IndexError here means no face was detected in the photo.
                encoding = face_recognition.face_encodings(image)[0]
                self.known_faces["How"] = encoding
                print("   ✅ Loaded How's face")
            except (IndexError, OSError) as exc:
                # Best-effort load: a corrupt/faceless photo shouldn't kill
                # startup, but say why it was skipped instead of hiding it.
                print(f"   ⚠️  Couldn't load How's face: {exc}")

    def capture_frame(self):
        """Grab one frame from the camera.

        Returns the BGR frame (also cached in self.last_frame), or None
        when the camera is unavailable or the read fails.
        """
        if not self.camera or not self.camera.isOpened():
            return None

        ret, frame = self.camera.read()
        if ret:
            self.last_frame = frame
            return frame
        return None

    def detect_faces(self, frame):
        """Detect and (when possible) recognize faces in *frame*.

        Returns a list of dicts with keys 'name', 'location'
        (top, right, bottom, left in full-frame pixel coordinates) and
        'is_known'.
        """
        scale = self.FACE_DETECT_SCALE
        small_frame = cv2.resize(frame, (0, 0), fx=scale, fy=scale)
        # face_recognition expects RGB; OpenCV captures BGR.
        rgb_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)

        face_locations = face_recognition.face_locations(rgb_frame)
        face_encodings = face_recognition.face_encodings(rgb_frame, face_locations)

        known_names = list(self.known_faces.keys())
        known_encodings = list(self.known_faces.values())
        inv_scale = 1.0 / scale

        faces_found = []
        for face_encoding, face_location in zip(face_encodings, face_locations):
            name = "Unknown"
            if known_encodings:
                # Pick the *closest* known face rather than the first one
                # under tolerance — more accurate once the database grows
                # beyond a single person.
                distances = face_recognition.face_distance(known_encodings, face_encoding)
                best = int(np.argmin(distances))
                if distances[best] <= self.FACE_MATCH_TOLERANCE:
                    name = known_names[best]

            # Locations were computed on the downscaled frame; map them
            # back to full-resolution coordinates.
            top, right, bottom, left = (int(v * inv_scale) for v in face_location)

            faces_found.append({
                'name': name,
                'location': (top, right, bottom, left),
                'is_known': name != "Unknown"
            })

        return faces_found

    def detect_objects(self, frame):
        """Rough scene analysis via edge contours.

        Returns a dict with 'object_count' (contours above a minimum area)
        and a coarse 'scene_complexity' label.
        """
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.Canny(gray, 50, 150)
        contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Ignore tiny contours (noise); 500 px² is an empirical cutoff.
        significant = [c for c in contours if cv2.contourArea(c) > 500]

        return {
            'object_count': len(significant),
            'scene_complexity': 'simple' if len(significant) < 5 else 'moderate' if len(significant) < 15 else 'complex'
        }

    def analyze_brightness(self, frame):
        """Classify mean grayscale intensity into a coarse label."""
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        avg = np.mean(gray)

        if avg < 50: return 'dark'
        elif avg < 100: return 'dim'
        elif avg < 150: return 'moderate'
        elif avg < 200: return 'bright'
        else: return 'very bright'

    def perceive(self, include_text=False):
        """Capture a frame and build a full observation dict.

        The result (also cached in self.current_observations) contains
        timestamp, brightness, faces, human_present, known_people (when
        any) and object stats.  Returns an error dict when no frame is
        available.

        TODO: *include_text* is currently ignored — OCR via pytesseract
        was never wired in.  The parameter is kept for interface
        compatibility.
        """
        frame = self.capture_frame()
        if frame is None:
            return {'error': 'No frame', 'vision_available': False}

        observations = {
            'timestamp': time.time(),
            'vision_available': True,
            'brightness': self.analyze_brightness(frame)
        }

        # Detect faces
        faces = self.detect_faces(frame)
        observations['faces'] = faces
        observations['human_present'] = len(faces) > 0

        if faces:
            known = [f for f in faces if f['is_known']]
            if known:
                observations['known_people'] = [f['name'] for f in known]

        # Detect objects
        objects = self.detect_objects(frame)
        observations['objects'] = objects

        self.current_observations = observations
        return observations

    def get_visual_summary(self):
        """Render self.current_observations as a short English sentence."""
        if not self.current_observations or not self.current_observations.get('vision_available'):
            return "I cannot see right now."

        obs = self.current_observations
        summary = []

        summary.append(f"The lighting is {obs.get('brightness', 'unknown')}")

        faces = obs.get('faces', [])
        if faces:
            if len(faces) == 1:
                face = faces[0]
                if face['is_known']:
                    summary.append(f"I can see {face['name']}")
                else:
                    summary.append("I see someone I don't recognize")
            else:
                known = [f['name'] for f in faces if f['is_known']]
                if known:
                    summary.append(f"I see {len(faces)} people including {', '.join(known)}")
                else:
                    summary.append(f"I see {len(faces)} people")
        else:
            summary.append("I don't see anyone")

        return ". ".join(summary) + "."

    def release(self):
        """Release the camera and mark it unavailable (safe to call twice)."""
        if self.camera:
            self.camera.release()
            # Prevent a second release() (or capture_frame) from touching
            # a freed device handle.
            self.camera = None
