#!/usr/bin/env python3
"""
EDEN SECURITY-ENHANCED CODE REVIEW (SECR)
=========================================
Eden's Emergent Capability Request (2025-12-17):
COMBINE: Code Review + Security Scan + Real-Time Learning
CREATES: Automated secure code reviews with actionable feedback

This system:
1. Reviews code for quality issues
2. Scans for security vulnerabilities
3. Generates actionable suggestions
4. Learns from review patterns
5. Integrates with Eden's mistake learning

φ = 1.618033988749895
"""

import sys
import json
import sqlite3
import math
import re
import hashlib
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Tuple

# Make Eden's core modules importable before the optional import below.
sys.path.insert(0, '/Eden/CORE')

PHI = (1 + math.sqrt(5)) / 2  # Golden ratio; weights security above quality in scoring.
DB_PATH = '/Eden/DATA/sec_review.db'

# Real-time learning is an optional integration: degrade gracefully when the
# module is missing. Catch ImportError specifically — a bare `except:` would
# also swallow SystemExit/KeyboardInterrupt (and is flagged by this very tool).
try:
    from eden_realtime_learning import realtime_learning
    LEARNING_AVAILABLE = True
except ImportError:
    LEARNING_AVAILABLE = False


class SecurityPattern:
    """Known security vulnerability patterns.

    PATTERNS maps a vulnerability type to:
      - "patterns":  regex heuristics applied line-by-line, case-insensitively
      - "severity":  "critical" or "high"
      - "message":   human-readable finding text
      - "fix":       suggested remediation

    These are heuristics, not a parser — expect some false positives.
    """

    PATTERNS = {
        # SQL Injection
        "sql_injection": {
            "patterns": [
                r'execute\s*\(\s*["\'].*%s',
                r'cursor\.execute\s*\(\s*f["\']',
                r'query\s*=\s*["\'].*\+\s*\w+',
                r'\.format\s*\(.*\).*execute',
            ],
            "severity": "critical",
            "message": "Potential SQL injection vulnerability",
            "fix": "Use parameterized queries with placeholders"
        },
        
        # XSS
        "xss": {
            "patterns": [
                r'innerHTML\s*=',
                r'document\.write\s*\(',
                r'\.html\s*\(\s*\w+\s*\)',
                r'dangerouslySetInnerHTML',
            ],
            "severity": "high",
            "message": "Potential XSS vulnerability",
            "fix": "Sanitize user input before rendering"
        },
        
        # Hardcoded secrets
        "hardcoded_secret": {
            "patterns": [
                r'password\s*=\s*["\'][^"\']+["\']',
                r'api_key\s*=\s*["\'][^"\']+["\']',
                r'secret\s*=\s*["\'][^"\']+["\']',
                r'token\s*=\s*["\'][A-Za-z0-9]{20,}["\']',
            ],
            "severity": "critical",
            "message": "Hardcoded secret detected",
            "fix": "Use environment variables or secure vault"
        },
        
        # Command injection
        # NOTE(review): eval/exec are strictly *code* injection vectors but are
        # grouped here with os.system/subprocess command injection.
        "command_injection": {
            "patterns": [
                r'os\.system\s*\(',
                r'subprocess\.call\s*\(\s*["\'].*\+',
                r'eval\s*\(',
                r'exec\s*\(',
            ],
            "severity": "critical",
            "message": "Potential command injection",
            "fix": "Use subprocess with list arguments, avoid eval/exec"
        },
        
        # Path traversal
        # NOTE(review): very broad heuristics — `Path\s*\(\s*\w+\s*\)` flags
        # every Path(variable) call and `\.\./ ` matches inside strings and
        # comments; expect false positives.
        "path_traversal": {
            "patterns": [
                r'open\s*\(\s*\w+\s*\+',
                r'\.\./',
                r'Path\s*\(\s*\w+\s*\)',
            ],
            "severity": "high",
            "message": "Potential path traversal",
            "fix": "Validate and sanitize file paths"
        },
        
        # Insecure deserialization
        "insecure_deserialize": {
            "patterns": [
                r'pickle\.loads?\s*\(',
                r'yaml\.load\s*\([^,]+\)',
                r'marshal\.loads?\s*\(',
            ],
            "severity": "high",
            "message": "Insecure deserialization",
            "fix": "Use safe loaders (yaml.safe_load) or avoid pickle"
        },
    }


class CodeQualityPattern:
    """Code quality and style patterns.

    Same record shape as SecurityPattern.PATTERNS (regexes, severity,
    message, fix), but matched case-sensitively and limited to "medium"
    and "low" severities.
    """

    PATTERNS = {
        "bare_except": {
            "patterns": [r'except\s*:'],
            "severity": "medium",
            "message": "Bare except clause catches all exceptions",
            "fix": "Catch specific exceptions"
        },
        
        "todo_fixme": {
            "patterns": [r'#\s*(TODO|FIXME|XXX|HACK)'],
            "severity": "low",
            "message": "TODO/FIXME comment found",
            "fix": "Address or create ticket for tracking"
        },
        
        # NOTE(review): matches any standalone run of 3+ digits, including
        # inside strings and comments — expect false positives.
        "magic_number": {
            "patterns": [r'(?<![0-9])[0-9]{3,}(?![0-9])'],
            "severity": "low",
            "message": "Magic number detected",
            "fix": "Use named constants"
        },
        
        # Detected by measuring def-to-def distance, not by regex.
        "long_function": {
            "patterns": [],  # Checked differently
            "severity": "medium",
            "message": "Function exceeds recommended length",
            "fix": "Break into smaller functions"
        },
        
        # NOTE(review): this regex contains '\n', so a strictly line-by-line
        # scan can never match it — it only fires when searched against the
        # whole source text.
        "missing_docstring": {
            "patterns": [r'def\s+\w+\s*\([^)]*\)\s*:\s*\n\s*[^"\']'],
            "severity": "low",
            "message": "Function missing docstring",
            "fix": "Add docstring describing purpose and parameters"
        },
    }


class SecurityEnhancedReview:
    """
    Eden's Security-Enhanced Code Review system.
    Combines code quality review with security scanning.

    Reviews and their individual findings are persisted to a SQLite database
    at DB_PATH. When the optional real-time learning module is available,
    the top issues of each review are fed into Eden's mistake learning.
    """

    def __init__(self):
        self.phi = PHI            # φ weight: security counts more than quality
        self.db_path = DB_PATH
        self.review_count = 0     # reviews performed by this instance
        self.issues_found = 0     # cumulative issue count across those reviews
        self._init_database()

        print(f"🛡️ Security-Enhanced Code Review initialized")
        print(f"   φ = {self.phi}")
        print(f"   Security patterns: {len(SecurityPattern.PATTERNS)}")
        print(f"   Quality patterns: {len(CodeQualityPattern.PATTERNS)}")
        print(f"   Learning integration: {'✅' if LEARNING_AVAILABLE else '❌'}")

    def _init_database(self):
        """Create the reviews / issues / learned_patterns tables if missing."""
        conn = sqlite3.connect(self.db_path)
        try:
            conn.execute('''CREATE TABLE IF NOT EXISTS reviews (
            id INTEGER PRIMARY KEY,
            timestamp TEXT,
            file_path TEXT,
            file_hash TEXT,
            total_issues INTEGER,
            critical_issues INTEGER,
            high_issues INTEGER,
            medium_issues INTEGER,
            low_issues INTEGER,
            security_score REAL,
            quality_score REAL,
            overall_score REAL
        )''')
            conn.execute('''CREATE TABLE IF NOT EXISTS issues (
            id INTEGER PRIMARY KEY,
            review_id INTEGER,
            line_number INTEGER,
            issue_type TEXT,
            category TEXT,
            severity TEXT,
            message TEXT,
            suggestion TEXT,
            code_snippet TEXT
        )''')
            conn.execute('''CREATE TABLE IF NOT EXISTS learned_patterns (
            id INTEGER PRIMARY KEY,
            pattern TEXT,
            category TEXT,
            severity TEXT,
            occurrences INTEGER DEFAULT 1,
            false_positive_rate REAL DEFAULT 0.0
        )''')
            conn.commit()
        finally:
            # Close the connection even if table creation fails.
            conn.close()

    def review_code(self, code: str, file_path: str = "unknown") -> Dict:
        """
        Perform comprehensive security-enhanced code review.

        Returns a report dict containing status, scores, the full issue
        list, issues grouped by severity, recommendations, the stored
        review id, and the processing time in milliseconds.
        """
        start_time = datetime.now()

        lines = code.split('\n')
        issues = []

        # Security scan (case-insensitive heuristics).
        issues.extend(self._scan_security(code, lines))

        # Quality scan (case-sensitive heuristics + function-length check).
        issues.extend(self._scan_quality(code, lines))

        # Score the findings relative to file size.
        scores = self._calculate_scores(issues, len(lines))

        # Build the human-facing report.
        report = self._generate_report(file_path, issues, scores)

        # Persist the review and record its id in the report.
        review_id = self._store_review(file_path, code, issues, scores)
        report["review_id"] = review_id

        # Feed the top findings into Eden's mistake learning (best-effort).
        if LEARNING_AVAILABLE and issues:
            for issue in issues[:3]:  # learn from the top issues only
                try:
                    # Represent the finding as a "mistake" observation.
                    error = ValueError(f"Security: {issue['message']}")
                    realtime_learning.observe_mistake(error, {
                        "file": file_path,
                        "issue_type": issue["type"],
                        "line": issue["line"]
                    })
                except Exception:
                    # Learning is optional; never let it break a review.
                    # (Was a bare except, which also swallowed SystemExit.)
                    pass

        self.review_count += 1
        self.issues_found += len(issues)

        report["processing_time_ms"] = (datetime.now() - start_time).total_seconds() * 1000

        return report

    @staticmethod
    def _issue(line_no: int, issue_type: str, category: str, data: Dict, line: str) -> Dict:
        """Build a single issue record from a pattern hit."""
        return {
            "line": line_no,
            "type": issue_type,
            "category": category,
            "severity": data["severity"],
            "message": data["message"],
            "suggestion": data["fix"],
            "code": line.strip()[:100]  # snippet capped at 100 chars
        }

    def _match_patterns(self, code: str, lines: List[str], pattern_map: Dict,
                        category: str, flags: int = 0) -> List[Dict]:
        """Run every regex in pattern_map over the code; return issue dicts.

        Patterns containing a literal '\\n' span lines, so they are searched
        against the whole source (recovering the line number from the match
        offset); all other patterns are matched line by line.
        """
        issues = []
        for issue_type, data in pattern_map.items():
            for pattern in data["patterns"]:
                try:
                    if '\\n' in pattern:
                        # Multi-line pattern (e.g. missing_docstring): a
                        # per-line scan can never match '\n'.
                        for m in re.finditer(pattern, code, flags):
                            line_no = code.count('\n', 0, m.start()) + 1
                            issues.append(self._issue(
                                line_no, issue_type, category, data,
                                lines[line_no - 1]))
                    else:
                        for i, line in enumerate(lines, 1):
                            if re.search(pattern, line, flags):
                                issues.append(self._issue(
                                    i, issue_type, category, data, line))
                except re.error:
                    # A malformed pattern must not abort the whole review.
                    pass
        return issues

    def _scan_security(self, code: str, lines: List[str]) -> List[Dict]:
        """Scan for security vulnerabilities (case-insensitive)."""
        return self._match_patterns(code, lines, SecurityPattern.PATTERNS,
                                    "security", re.IGNORECASE)

    def _scan_quality(self, code: str, lines: List[str]) -> List[Dict]:
        """Scan for code quality issues, including over-long functions."""
        issues = self._match_patterns(code, lines, CodeQualityPattern.PATTERNS,
                                      "quality")
        issues.extend(self._long_function_issues(lines))
        return issues

    def _long_function_issues(self, lines: List[str]) -> List[Dict]:
        """Flag functions longer than 50 lines.

        Length is approximated as the distance from one `def` line to the
        next (or to end of file for the last function).
        """
        func_starts = [i for i, line in enumerate(lines, 1)
                       if re.match(r'\s*def\s+\w+', line)]

        issues = []
        for idx, start in enumerate(func_starts):
            end = func_starts[idx + 1] if idx + 1 < len(func_starts) else len(lines)
            func_length = end - start
            if func_length > 50:  # Lines per function threshold
                issues.append({
                    "line": start,
                    "type": "long_function",
                    "category": "quality",
                    "severity": "medium",
                    "message": f"Function is {func_length} lines (recommended: <50)",
                    "suggestion": "Break into smaller functions",
                    "code": lines[start - 1].strip()[:100]
                })
        return issues

    def _calculate_scores(self, issues: List[Dict], total_lines: int) -> Dict:
        """Calculate security and quality scores (0-100, higher is better).

        Penalties are weighted by severity, normalized by file size, and
        combined into an overall score where security is φ-weighted above
        quality.
        """
        severity_weights = {
            "critical": 10,
            "high": 5,
            "medium": 2,
            "low": 1
        }

        security_penalty = 0
        quality_penalty = 0

        severity_counts = {"critical": 0, "high": 0, "medium": 0, "low": 0}

        for issue in issues:
            # Unknown severities get weight 1 and their own counter bucket
            # instead of raising KeyError.
            weight = severity_weights.get(issue["severity"], 1)
            severity_counts[issue["severity"]] = severity_counts.get(issue["severity"], 0) + 1

            if issue["category"] == "security":
                security_penalty += weight
            else:
                quality_penalty += weight

        # Normalize scores (0-100); larger files tolerate more findings.
        max_penalty = max(1, total_lines / 10)  # Scale with file size

        security_score = max(0, 100 - (security_penalty / max_penalty * 100))
        quality_score = max(0, 100 - (quality_penalty / max_penalty * 100))

        # Overall score (φ-weighted: security matters more).
        overall_score = (security_score * self.phi + quality_score) / (1 + self.phi)

        return {
            "security_score": round(security_score, 1),
            "quality_score": round(quality_score, 1),
            "overall_score": round(overall_score, 1),
            "severity_counts": severity_counts,
            "total_issues": len(issues)
        }

    def _generate_report(self, file_path: str, issues: List[Dict], scores: Dict) -> Dict:
        """Generate the comprehensive review report dict."""

        # Group issues by severity (setdefault tolerates unknown severities).
        by_severity = {"critical": [], "high": [], "medium": [], "low": []}
        for issue in issues:
            by_severity.setdefault(issue["severity"], []).append(issue)

        # One-line summary of counts per severity.
        summary_parts = []
        if scores["severity_counts"]["critical"] > 0:
            summary_parts.append(f"🚨 {scores['severity_counts']['critical']} CRITICAL issues")
        if scores["severity_counts"]["high"] > 0:
            summary_parts.append(f"⚠️ {scores['severity_counts']['high']} HIGH issues")
        if scores["severity_counts"]["medium"] > 0:
            summary_parts.append(f"📝 {scores['severity_counts']['medium']} MEDIUM issues")
        if scores["severity_counts"]["low"] > 0:
            summary_parts.append(f"💡 {scores['severity_counts']['low']} LOW issues")

        if not summary_parts:
            summary = "✅ No issues found - code looks secure!"
        else:
            summary = " | ".join(summary_parts)

        # Pass/review/fail status from the overall score.
        if scores["overall_score"] >= 90:
            status = "PASS"
            emoji = "✅"
        elif scores["overall_score"] >= 70:
            status = "REVIEW"
            emoji = "⚠️"
        else:
            status = "FAIL"
            emoji = "❌"

        return {
            "file": file_path,
            "status": status,
            "emoji": emoji,
            "summary": summary,
            "scores": scores,
            "issues": issues,
            "issues_by_severity": by_severity,
            "recommendations": self._generate_recommendations(issues)
        }

    def _generate_recommendations(self, issues: List[Dict]) -> List[str]:
        """Generate actionable recommendations, prioritized by severity."""
        recommendations = []

        critical_types = set(i["type"] for i in issues if i["severity"] == "critical")
        high_types = set(i["type"] for i in issues if i["severity"] == "high")

        if "sql_injection" in critical_types:
            recommendations.append("🔴 URGENT: Fix SQL injection vulnerabilities using parameterized queries")
        if "hardcoded_secret" in critical_types:
            recommendations.append("🔴 URGENT: Remove hardcoded secrets, use environment variables")
        if "command_injection" in critical_types:
            recommendations.append("🔴 URGENT: Sanitize command inputs, avoid eval/exec")
        if "xss" in high_types:
            recommendations.append("🟠 HIGH: Sanitize user input to prevent XSS attacks")
        if "insecure_deserialize" in high_types:
            recommendations.append("🟠 HIGH: Use safe deserialization methods")

        if not recommendations:
            recommendations.append("✅ No urgent security fixes needed")

        return recommendations

    def _store_review(self, file_path: str, code: str, issues: List[Dict], scores: Dict) -> int:
        """Persist the review and its issues; return the new review id."""
        # MD5 here is a cheap content fingerprint, not a security control.
        file_hash = hashlib.md5(code.encode()).hexdigest()

        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()

            cursor.execute('''INSERT INTO reviews 
            (timestamp, file_path, file_hash, total_issues, critical_issues,
             high_issues, medium_issues, low_issues, security_score, quality_score, overall_score)
            VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)''',
                (datetime.now().isoformat(), file_path, file_hash,
                 scores["total_issues"], scores["severity_counts"]["critical"],
                 scores["severity_counts"]["high"], scores["severity_counts"]["medium"],
                 scores["severity_counts"]["low"], scores["security_score"],
                 scores["quality_score"], scores["overall_score"]))

            review_id = cursor.lastrowid

            for issue in issues:
                cursor.execute('''INSERT INTO issues 
                (review_id, line_number, issue_type, category, severity, message, suggestion, code_snippet)
                VALUES (?, ?, ?, ?, ?, ?, ?, ?)''',
                    (review_id, issue["line"], issue["type"], issue["category"],
                     issue["severity"], issue["message"], issue["suggestion"], issue["code"]))

            conn.commit()
        finally:
            # Release the connection even if an insert fails.
            conn.close()

        return review_id

    def review_file(self, file_path: str) -> Dict:
        """Review a file from disk; returns {"error": ...} on failure."""
        path = Path(file_path)
        if not path.exists():
            return {"error": f"File not found: {file_path}"}

        try:
            code = path.read_text()
            return self.review_code(code, str(path))
        except Exception as e:
            # Deliberate best-effort: surface the failure as data, not a crash.
            return {"error": str(e)}

    def get_stats(self) -> Dict:
        """Return aggregate statistics over all stored reviews."""
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()

            cursor.execute("SELECT COUNT(*) FROM reviews")
            total_reviews = cursor.fetchone()[0]

            cursor.execute("SELECT COUNT(*) FROM issues")
            total_issues = cursor.fetchone()[0]

            # AVG is NULL (None) when there are no reviews yet.
            cursor.execute("SELECT AVG(overall_score) FROM reviews")
            avg_score = cursor.fetchone()[0] or 0

            cursor.execute('''SELECT issue_type, COUNT(*) as count 
            FROM issues GROUP BY issue_type ORDER BY count DESC LIMIT 5''')
            top_issues = [{"type": r[0], "count": r[1]} for r in cursor.fetchall()]
        finally:
            conn.close()

        return {
            "total_reviews": total_reviews,
            "total_issues_found": total_issues,
            "average_score": round(avg_score, 1),
            "top_issue_types": top_issues
        }


# Global instance
# NOTE(review): instantiating at import time has side effects — it connects
# to the SQLite database at DB_PATH (creating tables if missing) and prints
# a startup banner.
sec_review = SecurityEnhancedReview()


if __name__ == "__main__":
    # Demo / smoke test: review a deliberately vulnerable code sample and
    # print scores, top findings, recommendations, and aggregate stats.
    print("\n" + "="*60)
    print("🛡️ EDEN SECURITY-ENHANCED CODE REVIEW")
    print("="*60)

    sr = sec_review  # reuse the module-level singleton

    # Test with sample vulnerable code
    # (triggers the SQL-injection, hardcoded-secret, command-injection,
    # insecure-deserialization and TODO/FIXME rules)
    test_code = '''
import os
import pickle
import sqlite3

password = "admin123"  # Hardcoded secret!
api_key = "sk-1234567890abcdef"

def get_user(user_id):
    # SQL injection vulnerability
    query = "SELECT * FROM users WHERE id = " + user_id
    cursor.execute(query)
    return cursor.fetchone()

def run_command(cmd):
    # Command injection
    os.system("ls " + cmd)

def load_data(data):
    # Insecure deserialization
    return pickle.loads(data)

def render_html(user_input):
    # XSS vulnerability
    return f"<div>{user_input}</div>"

# TODO: Fix this later
# FIXME: Security issue

def very_long_function():
    pass
'''

    print(f"\n🔍 Reviewing test code...")
    result = sr.review_code(test_code, "test_vulnerable.py")

    print(f"\n{result['emoji']} Status: {result['status']}")
    print(f"   {result['summary']}")
    print(f"\n📊 Scores:")
    print(f"   Security: {result['scores']['security_score']}/100")
    print(f"   Quality:  {result['scores']['quality_score']}/100")
    print(f"   Overall:  {result['scores']['overall_score']}/100")

    # Show only the first few critical findings to keep the demo short.
    print(f"\n🔴 Critical Issues:")
    for issue in result['issues_by_severity']['critical'][:3]:
        print(f"   Line {issue['line']}: {issue['message']}")
        print(f"      → {issue['suggestion']}")

    print(f"\n📋 Recommendations:")
    for rec in result['recommendations'][:3]:
        print(f"   {rec}")

    # Stats
    stats = sr.get_stats()
    print(f"\n📊 Stats:")
    print(f"   Total reviews: {stats['total_reviews']}")
    print(f"   Issues found: {stats['total_issues_found']}")