import numpy as np
from scipy.signal import butter, lfilter

class AuditoryProcessingAGI:
    """Simple auditory front-end.

    Bandpass-filters a raw 1-D audio signal to the roughly speech-relevant
    band (200 Hz - 8 kHz), splits it into 20 ms frames with 50% overlap,
    and extracts the magnitude spectrum of each frame via a real FFT.
    """

    def __init__(self):
        # Audio is assumed to be sampled at CD rate (44.1 kHz) —
        # TODO confirm against the actual input source.
        self.sampling_rate = 44100
        self.filter_order = 5
        self.freq_low = 200
        self.freq_high = 8000
        # BUG FIX: the configured filter_order was previously ignored
        # (the helper's default was used); pass it through explicitly.
        self.b, self.a = self._butter_bandpass(
            self.freq_low, self.freq_high, self.sampling_rate,
            order=self.filter_order,
        )

    def _butter_bandpass(self, lowcut, highcut, fs, order=5):
        """Design a Butterworth bandpass filter.

        Args:
            lowcut: low cutoff frequency in Hz.
            highcut: high cutoff frequency in Hz.
            fs: sampling rate in Hz.
            order: filter order (a bandpass doubles the effective order).

        Returns:
            (b, a) transfer-function coefficient arrays for `lfilter`.
        """
        nyquist = 0.5 * fs
        # scipy.signal.butter expects cutoffs normalized to the Nyquist rate.
        low = lowcut / nyquist
        high = highcut / nyquist
        b, a = butter(order, [low, high], btype='band')
        return b, a

    def _butter_bandpass_filter(self, data):
        """Apply the precomputed bandpass filter to a 1-D signal."""
        y = lfilter(self.b, self.a, data)
        return y

    def process_audio(self, audio_signal):
        """Filter `audio_signal` and return per-frame magnitude spectra.

        Args:
            audio_signal: 1-D array-like of raw audio samples.

        Returns:
            List of 1-D numpy arrays, one per 20 ms frame (50% overlap);
            each has frame_length // 2 + 1 frequency bins. The trailing
            short frame(s) are zero-padded to a full window.
        """
        # Apply bandpass filter to the raw signal.
        filtered_signal = self._butter_bandpass_filter(audio_signal)

        # Split into frames: 20 ms windows with 50% overlap.
        frame_length = int(0.02 * self.sampling_rate)  # 882 samples at 44.1 kHz
        hop_length = frame_length // 2  # 50% overlap

        frames = [
            filtered_signal[i:i + frame_length]
            for i in range(0, len(filtered_signal), hop_length)
        ]

        spectral_features = []

        for frame in frames:
            # Pad short trailing windows with zeros to a full frame.
            if len(frame) < frame_length:
                frame = np.pad(frame, (0, frame_length - len(frame)), mode='constant')

            # Real-input FFT: frame_length // 2 + 1 bins.
            stft = np.fft.rfft(frame)

            # Extract magnitude spectrum as the frame feature.
            magnitude_spectrum = np.abs(stft)

            # NOTE: other features (zero-crossing rate, spectral centroid,
            # ...) could be appended here.
            spectral_features.append(magnitude_spectrum)

        return spectral_features

    def analyze_spectral_features(self, features):
        """Print each frame's magnitude spectrum and its threshold crossings.

        "Peaks" here are indices where the spectrum rises above an arbitrary
        0.5 threshold (upward crossings), not local maxima.
        """
        for idx, feature in enumerate(features):
            print(f"Frame {idx}: Magnitude Spectrum = {feature}")

            # BUG FIX: a rising crossing of the 0.5 threshold appears in
            # np.diff(np.sign(...)) as a positive step (+2, or +1 only when
            # a sample equals the threshold exactly). The old test
            # `[1:] == 1` both shifted the indices and missed the common
            # +2 case, so it essentially never detected anything. `+ 1`
            # converts diff positions to the index of the sample that is
            # above the threshold.
            peaks = np.where(np.diff(np.sign(feature - 0.5)) > 0)[0] + 1
            print(f"Peaks detected in frame {idx}: {peaks}")
        
# Example usage
if __name__ == "__main__":
    agi = AuditoryProcessingAGI()

    # Simulate audio data (replace with an actual audio signal).
    np.random.seed(42)  # For reproducibility
    # BUG FIX (comment): 10240 samples at 44.1 kHz is ~0.23 s, not 10 s.
    raw_audio_signal = np.random.randn(1024 * 10)  # ~0.23 s of random noise

    processed_features = agi.process_audio(raw_audio_signal)

    # BUG FIX: pass all frames in a single call so the frame indices print
    # as 0..N-1; the old per-frame loop labelled every frame "Frame 0".
    agi.analyze_spectral_features(processed_features)