import numpy as np
from PIL import Image

class VisualProcessingAGI:
    """Extract visual features (edges, colors, shapes, objects) from an image.

    Heavy third-party dependencies (OpenCV, TensorFlow) are imported lazily
    inside the methods that need them, so the class can be constructed and
    used for basic preprocessing without them installed.
    """

    def __init__(self):
        # Source image (a PIL.Image); stays None until load_image() is called.
        self.image = None
        # Cache slots for the most recently extracted features;
        # filled in by display_results().
        self.features = {
            'edges': None,
            'colors': None,
            'shapes': None,
            'objects': None
        }

    def _require_image(self):
        """Raise ValueError if no image has been loaded yet."""
        if self.image is None:
            raise ValueError("No image loaded. Please use load_image first.")

    def load_image(self, image_path: str):
        """
        Load an image and set it to the internal state.

        :param image_path: Path to the image file.
        """
        self.image = Image.open(image_path)

    def preprocess_image(self):
        """
        Preprocess the loaded image for feature extraction.

        :return: Preprocessed image data as a numpy array.
        :raises ValueError: If no image has been loaded.
        """
        self._require_image()
        return np.array(self.image)

    def extract_edges(self, threshold=50):
        """
        Extract edges from the loaded image using Canny edge detection.

        :param threshold: Lower hysteresis threshold for Canny (upper is 150).
        :return: Binary edge map as a numpy array.
        :raises ValueError: If no image has been loaded.
        """
        self._require_image()
        # Lazy import: the original referenced cv2 here without importing it.
        import cv2
        gray = np.array(self.image.convert('L'))
        return cv2.Canny(gray, threshold, 150)

    def classify_colors(self, n_colors=5):
        """
        Classify dominant colors in the image.

        Pixels are quantized to a coarse palette (32 levels per channel) so
        near-identical colors are grouped, then counted.

        :param n_colors: Maximum number of dominant colors to return.
        :return: List of (hex_color, pixel_count) tuples, most frequent first.
        :raises ValueError: If no image has been loaded.
        """
        self._require_image()
        rgb = np.array(self.image.convert('RGB')).reshape(-1, 3)
        # Quantize each channel to 32 levels to merge near-identical colors.
        quantized = (rgb // 32) * 32
        palette, counts = np.unique(quantized, axis=0, return_counts=True)
        top = np.argsort(counts)[::-1][:n_colors]
        # {:02x} zero-pads each channel, unlike the original hex() slicing
        # which produced malformed hex strings.
        return [
            ("#{:02x}{:02x}{:02x}".format(*palette[i]), int(counts[i]))
            for i in top
        ]

    def detect_shapes(self):
        """
        Detect shapes like circles and rectangles using Hough transforms.

        :return: List of shape tuples: ('circle', x, y, r) for circles and
                 ('rectangle', box) for rectangles, where box is a 4x2 int
                 array of corner coordinates.
        :raises ValueError: If no image has been loaded.
        """
        self._require_image()
        import cv2
        gray = np.array(self.image.convert('L'))
        # Initialize up-front: the original only defined `shapes` inside the
        # circle branch and crashed with UnboundLocalError otherwise.
        shapes = []

        # Detect circles using HoughCircles.
        circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1.2, 100)
        if circles is not None:
            for (x, y, r) in np.round(circles[0, :]).astype("int"):
                shapes.append(('circle', int(x), int(y), int(r)))

        # Threshold first: findContours works on a binary image, not raw gray.
        _, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for cnt in contours:
            # astype(int) replaces np.int0, which was removed in NumPy 2.0.
            box = cv2.boxPoints(cv2.minAreaRect(cnt)).astype(int)
            shapes.append(('rectangle', box))
        return shapes

    def detect_objects(self):
        """
        Detect objects in the image using a pre-trained object detection model.

        :return: List of (class_label, confidence) tuples for the top-3
                 ImageNet predictions.
        :raises ValueError: If no image has been loaded.
        """
        self._require_image()
        import tensorflow as tf
        from tensorflow.keras.applications.inception_v3 import preprocess_input
        from tensorflow.keras.applications.imagenet_utils import decode_predictions

        # Load a pre-trained model, for example, InceptionV3.
        model = tf.keras.applications.InceptionV3(weights='imagenet')

        # Resize with PIL (the original called cv2.resize without importing
        # cv2) and use the model's own preprocessing ([-1, 1] scaling) rather
        # than a plain /255 normalization.
        resized = self.image.convert('RGB').resize((299, 299))
        batch = np.expand_dims(np.array(resized, dtype=np.float32), axis=0)
        batch = preprocess_input(batch)

        # Predict and decode predictions.
        preds = model.predict(batch)
        return [(label, float(score))
                for (_, label, score) in decode_predictions(preds, top=3)[0]]

    def display_results(self):
        """
        Display the results of visual processing on the image.

        Runs every extractor, caches the results in self.features, and draws
        shape outlines plus object labels onto a copy of the image.

        :return: Annotated image as a numpy array.
        :raises ValueError: If no image has been loaded.
        """
        self._require_image()
        import cv2

        result = np.copy(self.preprocess_image())
        self.features['edges'] = self.extract_edges()
        self.features['colors'] = self.classify_colors()
        self.features['shapes'] = shapes = self.detect_shapes()
        self.features['objects'] = objects = self.detect_objects()

        for shape in shapes:
            if shape[0] == 'circle':
                _, x, y, r = shape
                cv2.circle(result, (x, y), r, (0, 255, 0), 2)
            else:
                _, box = shape
                cv2.drawContours(result, [box], 0, (0, 255, 0), 2)

        # Stack labels vertically so successive predictions do not
        # overwrite each other at a fixed position.
        for i, (class_name, confidence) in enumerate(objects):
            cv2.putText(result, f"{class_name}: {confidence:.2f}",
                        (10, 30 + 30 * i),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 2)

        return result

# Example usage: guard under __main__ so importing this module does not
# trigger file I/O and a TensorFlow inference as a side effect.
if __name__ == "__main__":
    vp_agi = VisualProcessingAGI()
    vp_agi.load_image('path_to_your_image.jpg')
    result_image = vp_agi.display_results()