"""
HyperparameterOptimizer
Generated by Eden via recursive self-improvement
2025-10-28 06:46:53.735968
"""

class HyperparameterOptimizer:
    """
    A class to automate hyperparameter optimization using grid search with cross-validation.
    
    This class systematically explores every combination of hyperparameters in a
    user-supplied grid and evaluates each one with k-fold cross-validation to find
    the configuration that yields the best score. Only accuracy scoring is currently
    supported, so it is most useful for classification tasks.

    Attributes:
        param_grid (dict): Dictionary mapping hyperparameter names to lists of candidate values.
        model (object): Machine learning model to optimize.
        scoring (str): Evaluation metric to maximize.
        cv_folds (int): Number of cross-validation folds.
    """

    def __init__(self):
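        # Defaults: no grid or model yet; accuracy scoring with 5-fold cross-validation.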
        self.param_grid = None
        self.model = None
        self.scoring = 'accuracy'
        self.cv_folds = 5

    def set_params(self, param_grid=None, model=None, scoring='accuracy', cv_folds=5):
        """
        Sets configuration parameters for hyperparameter optimization.
        
        Args:
            param_grid (dict): Mapping of hyperparameter names to lists of candidate values. Defaults to None.
            model (object): Machine learning model to optimize. Defaults to None.
            scoring (str): Evaluation metric. Defaults to 'accuracy'.
            cv_folds (int): Number of cross-validation folds. Defaults to 5.
        """
        self.param_grid = param_grid
        self.model = model
        self.scoring = scoring
        self.cv_folds = cv_folds

    def search(self, X_train, y_train):
        """
        Performs hyperparameter grid search with cross-validation.
        
        Args:
            X_train (pd.DataFrame): Training features.
            y_train (pd.Series): Training labels.
            
        Returns:
            dict: Best parameters and corresponding score.
        """
        if self.param_grid is None or self.model is None:
            raise ValueError("Parameters and model must be set first.")
            
        best_score = -float('inf')
        best_params = {}
        
        # Iterate through the Cartesian product of all candidate hyperparameter values
        param_names = list(self.param_grid.keys())
        for values in product(*(self.param_grid[name] for name in param_names)):
            params = dict(zip(param_names, values))
            current_score = self._evaluate_model(params, X_train, y_train)

            if current_score > best_score:
                best_score = current_score
                best_params = params
        
        return {'best_params': best_params, 'best_score': best_score}

    def _evaluate_model(self, params, X_train, y_train):
        """
        Evaluates model performance with given parameters using cross-validation.
        
        Args:
            params (dict): Parameter values to test.
            X_train (pd.DataFrame): Training features.
            y_train (pd.Series): Training labels.
            
        Returns:
            float: Average score across CV folds.
        """
        scores = []
        
        # Perform cross-validation
        for train_idx, val_idx in self._kfold_split(X_train.shape[0]):
            X_train_fold = X_train.iloc[train_idx]
            y_train_fold = y_train.iloc[train_idx]
            
            X_val_fold = X_train.iloc[val_idx]
            y_val_fold = y_train.iloc[val_idx]
            
            # Train model with current parameters
            self.model.set_params(**params)
            self.model.fit(X_train_fold, y_train_fold)
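            # Note: the same estimator instance is re-fit on every fold and parameter
            # combination; cloning it with sklearn.base.clone would avoid any state
            # carried over between fits (e.g. with warm_start estimators).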
            
            # Evaluate on validation set
            score = self._get_score(self.model, X_val_fold, y_val_fold)
            scores.append(score)
        
        return sum(scores) / len(scores)

    def _kfold_split(self, n):
        """
        Generates k-fold cross-validation splits.
        
        Args:
            n (int): Number of samples.
            
        Yields:
            tuple: Indices for training and validation sets.
        """
        indices = list(range(n))
        fold_size = n // self.cv_folds
        
        for i in range(self.cv_folds):
            start = i * fold_size
            # Let the final fold absorb any remainder so every sample is validated once
            end = start + fold_size if i < self.cv_folds - 1 else n
            val_idx = indices[start:end]
            val_set = set(val_idx)
            train_idx = [idx for idx in indices if idx not in val_set]
            yield train_idx, val_idx

    def _get_score(self, model, X, y):
        """
        Computes the model score based on specified scoring metric.
        
        Args:
            model (object): Trained model.
            X (pd.DataFrame): Features to predict.
            y (pd.Series): True labels.
            
        Returns:
            float: Model performance score.
        """
        if self.scoring == 'accuracy':
            return model.score(X, y)
        else:
            raise ValueError(f"Scoring metric {self.scoring} not supported.")

# Example usage
if __name__ == "__main__":
    from sklearn.tree import DecisionTreeClassifier
    
    # Initialize optimizer
    optimizer = HyperparameterOptimizer()
    
    # Set configurations
    param_grid = {
        'max_depth': [None, 5, 10],
        'min_samples_split': [2, 5]
    }
    model = DecisionTreeClassifier()
    optimizer.set_params(param_grid=param_grid, model=model)
    
    # Example dataset (replace with your own data)
    X_train = ...  # Features
    y_train = ...  # Labels
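    # For a quick self-contained check, a toy dataset could be used instead, e.g.:
    #   from sklearn.datasets import load_iris
    #   data = load_iris(as_frame=True)
    #   X_train, y_train = data.data, data.target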
    
    # Perform optimization
    result = optimizer.search(X_train, y_train)
    print("Best Parameters:", result['best_params'])
    print("Best Score:", result['best_score'])