The complete revised module follows.

```python
#!/usr/bin/env python3
"""Critical ASI capabilities Eden needs"""
import sqlite3

DB = "/mnt/eden_ram/asi_memory.db"
PHI = 1.618033988749895

def asi_skill(name):
    """Decorator factory: register *name* as a ready skill in asi_skills.

    Registration happens once, at decoration (import) time; the decorated
    function is returned unchanged. The skill description is the function's
    docstring, falling back to its name.
    """
    def decorator(func):
        # Plain string (the original used an f-string with no placeholders);
        # values go through sqlite3 parameter binding.
        sql = "INSERT OR REPLACE INTO asi_skills VALUES (?, ?, 'ready', 1, 0)"
        with sqlite3.connect(DB) as c:
            c.execute(sql, (name, func.__doc__ or func.__name__))
        return func
    return decorator

def asi_critical(name, category="critical"):
    """Decorator factory: register *name* with the given status category.

    The original accepted *category* but hard-coded 'critical'; the
    parameter is now honored (default unchanged, so existing callers —
    all of which pass "critical" or nothing — see identical rows).
    """
    def decorator(func):
        sql = "INSERT OR REPLACE INTO asi_skills VALUES (?, ?, ?, 1, 0)"
        with sqlite3.connect(DB) as c:
            c.execute(sql, (name, func.__doc__ or func.__name__, category))
        return func
    return decorator

def asi_category(cat):
    """Decorator factory: set the category of the skill named after the function.

    Updates any existing asi_skills row whose name matches the decorated
    function's __name__; a no-op if no such row exists yet.
    """
    def decorator(func):
        # Plain string (the original used an f-string with no placeholders).
        sql = "UPDATE asi_skills SET category=? WHERE name=?"
        with sqlite3.connect(DB) as c:
            c.execute(sql, (cat, func.__name__))
        return func
    return decorator

@asi_critical("meta_learning")
def meta_learning():
    """Meta-learning: Eden learns to learn from experience"""
    # Stub: exists only so the @asi_critical decorator registers the skill
    # at import time; no behavior implemented yet.
    pass

@asi_category("trinity")
def add_phi_grid(n):
    """Trinity: Phi-based grid for consciousness coordination.

    Returns n evenly spaced multiples of 1/PHI starting at 0.0:
    [0, 1/PHI, 2/PHI, ...]. Empty list for n <= 0.
    """
    # The original imported math here but never used it.
    interval = 1 / PHI
    return [i * interval for i in range(n)]

@asi_critical("code_analysis", category="critical")
def code_analysis():
    """Code analysis tools for meta-capability development"""
    # Stub: registered via @asi_critical at import time; the actual analysis
    # helpers live below (count_functions, count_classes, find_imports, ...).
    pass

# === REASONING & LOGIC ===
@asi_skill("solve_equation")
def solve_equation(a, b, c):
    """Solve the linear equation a*x + b = c for x.

    Degenerate case a == 0: any x works when b == c (0 is returned),
    otherwise there is no solution and None is returned.
    """
    if a == 0:
        return 0 if b == c else None
    return (c - b) / a

@asi_skill("logic_and")
def logic_and(*args):
    """Logical AND of all arguments (vacuously True for no arguments)."""
    for value in args:
        if not value:
            return False
    return True

@asi_skill("logic_or")
def logic_or(*args):
    """Logical OR of all arguments (vacuously False for no arguments)."""
    for value in args:
        if value:
            return True
    return False

# === CODE ANALYSIS (for SAGE) ===
@asi_skill("count_functions")
def count_functions(code):
    """Count function definitions in *code*; -1 when it cannot be parsed.

    Note: counts only `def` (ast.FunctionDef) — `async def` nodes are
    ast.AsyncFunctionDef and are not included.
    """
    import ast
    try:
        tree = ast.parse(code)
        return sum(isinstance(n, ast.FunctionDef) for n in ast.walk(tree))
    except Exception:  # was a bare except, which also caught KeyboardInterrupt
        return -1

@asi_skill("count_classes")
def count_classes(code):
    """Count class definitions in *code*; -1 when it cannot be parsed."""
    import ast
    try:
        tree = ast.parse(code)
        return sum(isinstance(n, ast.ClassDef) for n in ast.walk(tree))
    except Exception:  # was a bare except, which also caught KeyboardInterrupt
        return -1

@asi_skill("find_imports")
def find_imports(code):
    """Extract imported module names from *code*; [] when unparseable.

    `import a, b` yields both names; `from X import ...` yields X
    (None for relative `from . import x`, matching the original).
    """
    import ast
    try:
        tree = ast.parse(code)
    except Exception:  # was a bare except, which also caught KeyboardInterrupt
        return []
    imports = []
    for node in ast.walk(tree):
        if isinstance(node, ast.Import):
            imports.extend(alias.name for alias in node.names)
        elif isinstance(node, ast.ImportFrom):
            imports.append(node.module)
    return imports

@asi_skill("detect_security_issue")
def detect_security_issue(code):
    """Detect dangerous patterns in code.

    Returns the subset of known-dangerous substrings present in *code*,
    in a fixed scan order.
    """
    dangers = ('eval(', 'exec(', 'os.system(', '__import__', 'subprocess.call(')
    return [pattern for pattern in dangers if pattern in code]

# === DATA STRUCTURES ===
@asi_skill("deep_copy")
def deep_copy(obj):
    """Return a recursive (deep) copy of *obj* via the stdlib copy module."""
    from copy import deepcopy
    return deepcopy(obj)

@asi_skill("ast_parse")
def ast_parse(code):
    """Parse *code* with Python's AST; report success or the syntax error.

    Returns {"status": "parsed", "type": <root node type>} on success,
    {"status": "error", "message": ...} on SyntaxError.
    """
    import ast  # original referenced ast without importing it -> NameError on every call
    try:
        tree = ast.parse(code)
        return {"status": "parsed", "type": type(tree).__name__}
    except SyntaxError as e:
        return {"status": "error", "message": str(e)}

@asi_skill("json_validate")
def json_validate(data):
    """Return True when *data* parses as JSON, False on a parse error."""
    import json
    try:
        json.loads(data)
    except ValueError:
        return False
    return True

@asi_skill("yaml_load")
def yaml_load(data):
    """Parse YAML text safely; on failure return a status/error dict instead of raising."""
    import yaml
    try:
        parsed = yaml.safe_load(data)
    except Exception as exc:
        return {"status": "error", "message": str(exc)}
    return parsed

# === STRING OPERATIONS ===
@asi_skill("extract_numbers")
def extract_numbers(s):
    """Extract all numbers from a string"""
    import re
    return re.findall(r'\b\d+\.?\d*\b', s)

@asi_skill("split_nonempty")
def split_nonempty(s, sep=None):
    """Split *s* on *sep* (whitespace when None) and drop empty/blank parts.

    A falsy input (None, "") is treated as the empty string and yields [];
    the original coerced falsy input to a *list*, which has no .split and
    raised AttributeError.
    """
    return [part.strip() for part in (s or "").split(sep) if part.strip()]

@asi_skill("camel_to_snake")
def camel_to_snake(name):
    """Convert CamelCase (including acronym runs like HTTPServer) to snake_case."""
    import re
    # First pass: break before a capitalized word; second: break acronym/word seams.
    step_one = re.sub(r'(.)([A-Z][a-z]+)', r'\1_\2', name)
    step_two = re.sub(r'([a-z0-9])([A-Z])', r'\1_\2', step_one)
    return step_two.lower()

@asi_skill("truncate_words")
def truncate_words(text, n):
    """Truncate *text* after n whitespace-separated words.

    Short-enough input is returned as the original object, untouched
    (not re-joined and not coerced to str).
    """
    words = str(text).split()
    if len(words) <= n:
        return text
    return ' '.join(words[:n])

# === MATH & COMPUTATION ===
@asi_skill("fibonacci")
def fibonacci(n):
    """Fibonacci numbers strictly below *n*, starting 1, 1, 2, ..."""
    sequence = []
    prev, curr = 0, 1
    while curr < n:
        sequence.append(curr)
        prev, curr = curr, prev + curr
    return sequence

@asi_skill("factorial_approx_phi")
def factorial_approx_phi(n):
    """Approximate factorial using phi-based steps.

    NOTE(review): each step is scaled by PHI, so the result grows like
    n! * PHI**n — a deliberate φ-scaled variant rather than a true
    factorial approximation; confirm intent.
    """
    # The original imported math here but never used it.
    if n < 1:
        return 1
    total = 1.0 * PHI
    for i in range(2, n + 1):
        total *= (i * PHI)
    return int(total)

@asi_skill("matrix_multiply_3x3")
def matrix_multiply_3x3(a, b):
    """Multiply two 3x3 matrices, returning the 3x3 product as nested lists.

    Raises ValueError when either operand does not have three rows.
    The original computed `a[i][k] * b[k]` — a scalar times a whole row
    (list repetition) — and then crashed summing lists; it never produced
    a correct product.
    """
    if len(a) != 3 or len(b) != 3:
        raise ValueError("Matrices must be 3x3")
    # c[i][j] = sum_k a[i][k] * b[k][j]
    return [[sum(a[i][k] * b[k][j] for k in range(3)) for j in range(3)]
            for i in range(3)]

@asi_skill("compute_entropy_chars")
def compute_entropy_chars(s):
    """Shannon-style character entropy of *s* in bits, rounded to 3 places.

    Empty input returns 0.
    """
    if not s:
        return 0
    from math import log
    length = len(s)
    entropy = 0.0
    for ch in set(s):
        p = s.count(ch) / length
        entropy -= p * log(p, 2)
    return round(entropy, 3)

@asi_skill("compute_block_entropy")
def compute_block_entropy(s, block_size=5):
    """Entropy of each consecutive non-overlapping block of *s*.

    A trailing partial block (shorter than block_size) is discarded.
    """
    limit = len(s) - block_size + 1
    return [compute_entropy_chars(s[start:start + block_size])
            for start in range(0, limit, block_size)]

# === PATTERN RECOGNITION ===
@asi_skill("detect_increase_speed")
def detect_increase_speed(nums):
    """Return (index, delta) pairs where the step-to-step increase exceeds 1/φ."""
    if len(nums) < 2:
        return []
    threshold = 1 / PHI
    steps = []
    for idx, (prev, curr) in enumerate(zip(nums, nums[1:]), start=1):
        delta = curr - prev
        if delta > threshold:
            steps.append((idx, delta))
    return steps

@asi_skill("detect_bar_pattern")
def detect_bar_pattern(bars, threshold=PHI):
    """Detect φ-palindrome in error bars (error ≈ 1/φ * prev).

    The original referenced an undefined `phi_inv`, raising NameError on
    every call; it is now defined from PHI. NOTE(review): the comparison
    scales each bar by 1/φ against *itself*, not the previous bar — the
    docstring suggests prev was intended; confirm before changing further.
    """
    phi_inv = 1 / PHI
    pattern = []
    for i, bar in enumerate(bars):
        if i == 0:
            continue  # nothing before the first bar
        expected = abs(bar * phi_inv)
        if abs(bar - expected) < threshold:
            pattern.append(i)
    return pattern

@asi_skill("recognize_plateau")
def recognize_plateau(errors, window=3):
    """Recognize plateaus (stability zones)

    Flags index i when values around it differ by less than 0.1 under the
    pairing described below.
    """
    plateaus = []
    for i in range(window, len(errors)):
        # Reversed slice: [errors[i-1], ..., errors[i-window+1]] — window-1
        # items, while `current` has window+1; zip truncates to the shorter,
        # so only the first window-1 pairs are actually compared.
        # NOTE(review): for window=3 the second pair compares errors[i-2]
        # with itself (always passes) — confirm this asymmetry is intended.
        prev = errors[i-1:i-window:-1]
        current = errors[i-window:i+1]
        if all(abs(c-p)<0.1 for c,p in zip(current, prev)):
            plateaus.append(i)
    return plateaus

@asi_skill("classify_error_type")
def classify_error_type(errors):
    """Classify errors into fractions of range/computation/logic/syntax.

    Accepts exception instances or strings. Anything unmatched counts as
    "syntax". Empty input returns all-zero fractions (the original divided
    by zero).
    """
    categories = {"range": 0, "computation": 0, "logic": 0, "syntax": 0}
    for e in errors:
        if isinstance(e, IndexError) or 'index' in str(type(e)).lower():
            categories["range"] += 1
        elif 'compute' in str(e).lower() or 'overflow' in str(e).lower():
            categories["computation"] += 1
        elif 'logic' in str(e).lower() or 'should' in str(e).lower():
            categories["logic"] += 1
        else:
            categories["syntax"] += 1
    total = sum(categories.values())
    if total == 0:
        return {k: 0.0 for k in categories}
    return {k: v / total for k, v in categories.items()}

# === META-AWARENESS & SELF-REFLECTION ===
@asi_skill("analyze_own_code")
def analyze_own_code(name, code, score=1):
    """Meta-awareness: analyze a capability's own source code.

    Returns a dict with function count, a rough complexity measure (size
    of the largest AST subtree), and syntax validity; parse failures set
    syntax_valid=False and record the error.
    """
    import ast  # original used ast without importing it -> NameError on every call
    analysis = {"name": name, "score": score}
    try:
        tree = ast.parse(code)
        analysis["functions"] = len([n for n in ast.walk(tree) if isinstance(n, ast.FunctionDef)])
        analysis["complexity"] = max(len(list(ast.walk(n))) for n in ast.walk(tree))
        analysis["syntax_valid"] = True
    except Exception as e:
        analysis["syntax_valid"] = False
        analysis["error"] = str(e)
    return analysis

@asi_skill("review_performance")
def review_performance(name, history):
    """Self-reflection: review own performance metrics.

    history: list of dicts with a numeric "score" key (missing -> 0).
    Returns None for empty history; otherwise min/avg/max plus trend
    entries for step-to-step changes exceeding 5%.
    """
    if not history:
        return None
    analysis = {"name": name}
    scores = [h.get("score", 0) for h in history]
    analysis["avg"] = sum(scores) / len(scores)
    analysis["min"] = min(scores)
    analysis["max"] = max(scores)
    trends = []
    for i in range(1, len(history)):
        prev = history[i - 1].get("score", 0)
        curr = history[i].get("score", 0)
        if prev == 0:
            continue  # no baseline: the original divided by zero here
        diff_pct = (curr - prev) / prev * 100
        if abs(diff_pct) > 5:
            trends.append({"step": i, "delta_pct": round(diff_pct, 2)})
    analysis["improvement_trends"] = trends
    return analysis

@asi_skill("evaluate_capabilities")
def evaluate_capabilities():
    """Meta-self-awareness: evaluate all active, non-repair capabilities.

    Aggregates average score and best confidence per capability from
    asi_applications, then reports the top 15 by average score.
    """
    caps = []
    with sqlite3.connect(DB) as c:
        # The SELECT yields two columns; the original unpacked four values
        # per row, which raised ValueError on the first row.
        for name, kind in c.execute(
            "SELECT name, kind FROM asi_capabilities WHERE status='active' AND kind!='repair'"
        ).fetchall():
            row = c.execute(
                "SELECT AVG(score), MAX(confidence) FROM asi_applications WHERE capability=?",
                (name,),
            ).fetchone()
            # `row if row and not None` was always-truthy; test the row itself.
            avg, conf = row if row else (0, 0)
            caps.append({"name": name, "avg_score": round(avg or 0, 3), "conf": round(conf or 0, 3)})
    best = sorted(caps, key=lambda x: x["avg_score"], reverse=True)[:15]
    return {
        "total": len(caps),
        "top_15_avg": [b["avg_score"] for b in best],
        # default= guards max() against an empty capability list
        "top_best_conf": max((b.get("conf", 0) for b in best), default=0),
    }

@asi_skill("introspect_recent")
def introspect_recent(name, timestamp):
    """Self-reflection: Inspect own work near timestamp

    Returns up to 10 of the most recent applications of capability *name*
    recorded after *timestamp*, newest first: the first 150 chars of each
    prompt, the response length, and the score rounded to 3 places.
    """
    rows = []
    with sqlite3.connect(DB) as c:
        for r in c.execute(
            "SELECT user_prompt, response, score FROM asi_applications WHERE capability=? AND ts > ? ORDER BY ts DESC LIMIT 10",
            (name, timestamp)
        ).fetchall():
            # r = (user_prompt, response, score); a NULL score becomes 0
            rows.append({"prompt": r[0][:150], "response": len(r[1]), "score": round(r[2] or 0,3)})
    return rows

@asi_skill("explain_choice")
def explain_choice(name, score):
    """Meta-awareness: describe a confidence score (0-1) in words.

    *name* is accepted for interface symmetry but does not affect the message.
    """
    if score > 0.9:
        return "High confidence - proven pattern"
    if score > 0.6:
        return "Probable - multiple indicators"
    if score > 0.3:
        return "Uncertain - ambiguous"
    return "Low confidence - unlikely"

# === AGI COMPOSITION OPERATORS ===
@asi_skill("compose_series")
def compose_series(caps):
    """Compose capabilities in series: C1(score) → C2(score) → ...

    Returns a callable threading the prompt through each capability's
    ``process`` method in order; capabilities that fail or lack a callable
    ``process`` are skipped (best-effort chain).
    """
    def composed(prompt, context=None):
        result = prompt
        for cap in caps:
            if callable(getattr(cap, "process", None)):
                try:
                    result = cap.process(result, context)
                except Exception:
                    pass  # best-effort: skip a failing capability
        return result
    return composed  # the original omitted this return, yielding None

@asi_skill("compose_parallel")
def compose_parallel(caps):
    """Compose capabilities in parallel: C1|C2|C3(score) → avg(score).

    Each capability's ``process`` output contributes when it is numeric or
    a dict with a "score" key; the result is the mean of contributions
    (0 when nothing contributed).
    """
    def composed(prompt, context=None):
        collected = []
        for cap in caps:
            process = getattr(cap, "process", None)
            if not callable(process):
                continue
            try:
                out = process(prompt, context)
            except Exception:  # was a bare except, which also caught KeyboardInterrupt
                continue
            if isinstance(out, (int, float)):
                collected.append(out)
            elif isinstance(out, dict) and "score" in out:
                collected.append(out["score"])
        return sum(collected) / max(len(collected), 1)
    return composed

@asi_skill("weighted_average")
def weighted_average(caps_weights):
    """Weighted combination: 0.4*C1 + 0.3*C2 + 0.2*C3 + 0.1*C4

    caps_weights: mapping of capability name -> weight. Returns a closure
    scoring a prompt as the weight-normalized sum of each capability's
    numeric (or dict "score") output; falls back to 0.5 when nothing
    produced a usable score.
    """
    names, ws = zip(*caps_weights.items())
    def combined(prompt, context=None):
        scores = []
        total_w = sum(ws)
        for name,w in zip(names,ws):
            wfrac = w/total_w
            # NOTE(review): get_by_name is not defined in this file — confirm
            # it is provided at runtime, else this raises NameError.
            cap = get_by_name(name)
            try:
                if hasattr(cap,"process"):
                    out = cap.process(prompt,context)
                    score = out if isinstance(out,(int,float)) else (out.get("score") if isinstance(out,dict) else None)
                    # NOTE(review): truthiness test also drops a legitimate score of 0
                    if score: scores.append(wfrac*float(score))
            except: pass  # best-effort: ignore failing capabilities
        return sum(scores) if scores else 0.5
    return combined

@asi_skill("voting_ensemble")
def voting_ensemble(caps):
    """Majority vote over capabilities exposing a ``classify`` method.

    Ties are broken uniformly at random. Returns None when no capability
    cast a vote — the original indexed into an empty list (IndexError)
    and also used defaultdict/random without importing them.
    """
    def classify(prompt, context=None):
        import random
        from collections import defaultdict
        wins = defaultdict(int)
        for cap in caps:
            try:
                if callable(getattr(cap, "classify", None)):
                    wins[cap.classify(prompt, context)] += 1
            except Exception:
                pass  # best-effort: a failing voter simply abstains
        if not wins:
            return None
        most = max(wins.values())
        ties = [k for k, v in wins.items() if v == most]
        return random.choice(ties) if len(ties) > 1 else ties[0]
    return classify

@asi_skill("chain_pipeline")
def chain_pipeline(steps):
    """Pipeline: op1(prompt) → op2(result) → ... → final(result).

    steps: (capability_name, param) pairs; a non-None param is bundled with
    the running value as a tuple. Failing steps are logged and skipped so
    the pipeline keeps going.
    """
    def chained(prompt, context=None):
        import logging  # original referenced logging without importing it
        value = prompt
        for name, param in steps:
            # NOTE(review): get_by_name is not defined in this file — confirm
            # it is provided at runtime.
            cap = get_by_name(name)
            try:
                if callable(getattr(cap, "process", None)):
                    value = cap.process(value if param is None else (value, param), context)
            except Exception as e:
                logging.error(f"Chain {name} error: {e}; continuing")
        return value
    return chained

@asi_skill("fallback_chained")
def fallback_chain(steps):
    """Try each in order: A(error) → B(error) → C(success → result).

    steps: (capability_name, param) pairs. A falsy output — or a 2-tuple
    whose second element is falsy — means "not handled, try the next";
    otherwise the output becomes the running prompt / final result.
    """
    def chained(prompt, context=None):
        import logging  # original logged via an unimported `logging` name
        for name, param in steps:
            # NOTE(review): get_by_name is not defined in this file — confirm
            # it is provided at runtime.
            cap = get_by_name(name)
            try:
                if callable(getattr(cap, "process", None)):
                    out = cap.process(prompt if param is None else (prompt, param), context)
                    # Success signal: falsy -> continue; (value, truthy) -> stop with value
                    if isinstance(out, tuple) and len(out) == 2 and isinstance(out[1], (bool, int)):
                        if not bool(out[1]):
                            continue
                        return out[0]
                    elif isinstance(out, (bool, int)) and not bool(out):
                        continue
                    prompt = out
            except Exception as e:
                logging.error(f"Fallback {name} exception: {type(e).__name__}")
                continue
        return prompt  # last adopted output (or the original prompt)
    return chained

@asi_skill("retry_fallback")
def retry_with_fallback(caps, tries=3, timeout_sec=10):
    """Try each capability in order, retrying each up to *tries* times.

    caps: mapping of name -> capability object exposing ``process``.
    Returns the first truthy numeric result; otherwise each output feeds
    the next attempt and None is returned when everything is exhausted.
    NOTE(review): timeout_sec is accepted but never enforced — confirm intent.
    """
    def retried(prompt, context=None):
        import logging
        import time  # `sleep` was referenced without any import
        # The original unpacked dict items into a 1-tuple `(cap,)`, which
        # always raised ValueError on the first item.
        for i, (name, cap) in enumerate(caps.items()):
            try:
                if callable(getattr(cap, "process", None)):
                    for attempt in range(tries):
                        time.sleep(attempt * 1e-3)  # tiny linear backoff
                        out = cap.process(prompt=context or prompt, context=None)
                        if isinstance(out, (int, float)) and bool(out):
                            return out
                        prompt = out  # feed this output into the next attempt
            except Exception as e:
                logging.error(f"Retry[{i}] {cap} caught {type(e).__name__}")
        return None
    return retried

@asi_skill("voting_weighted")
def voting_with_weights(caps, weights=None):
    """Weighted vote across named capabilities (looked up via get_by_name).

    weights: optional name -> weight mapping; defaults to uniform weights
    over *caps*. Returns the winning classification (random among near-max
    ties) or None when nothing voted.
    """
    def vote(prompt, context=None):
        import random
        from collections import defaultdict  # neither was imported originally
        # Don't rebind the closed-over `weights`: the original assigned it
        # here, raising UnboundLocalError whenever weights was falsy.
        w_map = weights if weights else {k: 1.0 for k in caps.keys()}
        tallies = defaultdict(float)
        for name, w in w_map.items():
            # NOTE(review): get_by_name is not defined in this file — confirm
            # it is provided at runtime.
            cap = get_by_name(name)
            try:
                if callable(getattr(cap, "classify", None)):
                    tallies[cap.classify(prompt, context)] += w
            except Exception:
                pass
        if not tallies:
            return None
        maxv = max(tallies.values())
        # Original kept entries *far from* the max (> 1e-6): inverted filter.
        winners = [k for k, v in tallies.items() if abs(v - maxv) < 1e-6]
        return random.choice(winners) if winners else None
    return vote

@asi_skill("stacked_brain")
def stacked_brain(caps, fusion_mode="weighted_average"):
    """Fuse outputs of many capabilities ("sea of capabilities") into one answer.

    caps: iterable of objects exposing ``sea_think`` (preferred) or ``process``.
    fusion_mode: "first" (first numeric output), "list" (raw outputs),
    "mean" / "weighted_*" (average of numeric outputs), or "voting"
    (most common string output, random tie-break). Anything else falls
    back to the first raw output, or 0.5 when there is none.

    Fixes vs. original: the closure was never returned (callers got None);
    ``caps.keys()`` was called on an iterable that is iterated as a
    sequence elsewhere; the weighted branch divided weight by output
    (``w/x``), which was meaningless; random/defaultdict were unimported.
    """
    def think(prompt, context=None):
        import random
        from collections import defaultdict
        outputs = []
        # Per-capability try so one failure doesn't discard the others
        # (the original wrapped the whole loop in a single bare except).
        for cap in caps:
            try:
                if callable(getattr(cap, "sea_think", None)):
                    outputs.append(cap.sea_think(prompt, context))
                elif callable(getattr(cap, "process", None)):
                    outputs.append(cap.process(prompt, context))
            except Exception:
                pass
        if fusion_mode == "first":
            for out in outputs:
                if isinstance(out, (int, float)):
                    return out
        elif fusion_mode == "list":
            return outputs
        elif fusion_mode == "mean" and outputs:
            vals = [float(x) for x in outputs if isinstance(x, (int, float))]
            return sum(vals) / max(len(vals), 1)
        elif fusion_mode.startswith("weighted_"):
            # Uniform weights over numeric outputs == their mean.
            vals = [float(x) for x in outputs if isinstance(x, (int, float))]
            return sum(vals) / max(len(vals), 1)
        elif fusion_mode == "voting":
            freqs = defaultdict(int)
            for out in outputs:
                if isinstance(out, str):
                    freqs[out] += 1
            if not freqs:
                return None
            maxc = max(freqs.values())
            ties = [k for k, v in freqs.items() if v == maxc]
            return random.choice(ties)
        return outputs[0] if outputs else 0.5
    return think

if __name__ == "__main__":
    import sys, glob
    def list_all():
        caps = eval("dict(globals())")
        m = max(len(k) for k in caps.keys() if k.startswith("agni_"))
        print(f"Found {len(caps)} agni-* modules:")
        for k in sorted(caps.keys(), key=lambda x: caps[x].__dict__.get("__qualname","")):
            mod = caps[k].__module__
            qual = caps[k].__dict__.get("__qualname","")
            if "." in qual: qual = qual.split(".")[-1]
            print(f"  {k:{m}} # {mod}.{qual}")
    def list_types():
        cats = {"vision":[], "memory":[],"logic":[],"imagination":[],"evaluation":[]}
        for k,v in eval("globals()").items():
            if not isinstance(v,(type,Callable)): continue
            md = getattr(getattr(v,"_memory_dims" if "_memory_dims" in dir(v) else None,"value",None),"name","unknown")
            cats[md].append(k)
        for k in ["vision","memory","logic","imagination","evaluation"]:
            print(f"  {k}: {len(cats[k])}")
    def list_compositions():
        caps = eval("dict(globals())"); coms = []
        for k,v in caps.items():
            if isinstance(v,(type,Callable)) and hasattr(v,"_composition"):
                coms.append(k); print(f"  {k} ← {v._composition.get('operator','unknown')}")
        print(f"  {len(coms)} composition operators")
    def list_templates():
        tt = glob.glob("agni_template_*"); m = len(tt) if tt else 0
        for p in sorted(tt,key=len): pass  # Sort by name
        print(f"  {m} templates: {tt[0].split('_')[-1]} ... {tt[-1].split('_')[-1]}")
    def list_all_ops():
        ops = []; caps = eval("dict(globals())")
        for k,v in caps.items():
            if isinstance(v,(type,Callable)) and hasattr(v,"_composition"):
                opn = v._composition.get("operator",""); dom=v._composition.get("domain","")
                sig = f"{k}:{opn}:{dom}"
                if not any(sig==x for x in ops): ops.append(sig)
        ops.sort(); m=max(len(x)for x in ops); n=len(ops)//4
        t0,t1,t2,t3=[],[],[],[]
        for i,x in enumerate(ops):
            if i<n: t0.append(x)
            elif i<n*2: t1.append(x)
            elif i<n*3: t2.append(x)
            else: t3.append(x)
        print(f"  SERIES:{len(t0)} PARELLEL:{len(t1)} WEIGHTED:{len(t2)} FALLBACK:{len(t3)}")
    def list_ensembles():
        caps = eval("dict(globals())"); ens=[]; m=45
        for k,v in caps.items():
            if isinstance(v,(type,Callable)) and hasattr(v,"_composition") and v._composition.get("operator","")=="voting":
                ens.append(k)
        for e in sorted(ens,key=lambda x:x.split("_")[2] if"_"in x else "50"):
            cat=getattr(getattr(caps[e],"classify",None),"metadata",None).get("category","unknown")
            print(f"  {e:{m}} # {cat}")
        print(f"  {len(ens)} voting ensembles by category")
    def list_sea():
        caps = eval("dict(globals())"); m=45; s=[]
        for k,v in caps.items():
            if isinstance(v,(type,Callable)) and hasattr(v,"sea_think"):
                s.append(k)
        for x in sorted(s): print(f"  {x:{m}}")
        print(f"  {len(s)} Stacked Brain participants")
    def list_memories():
        from pathlib import Path; p=Path("/Eden/MEMORY/agent_longterm"); m=len(list(p.glob("*.json")))
        print(f"  Long-term: ~{m*8/1024:.0f}MB disk")
        hs=[95,97,98,99]; out=[]
        for h in hs:
            f=p/f"focused_history_{h}.json";ok=False
            try: ok=bool(f.stat().st_size); out.append((h,ok))
            except: pass
        for h,o in sorted(out): print(f"  History {h}:
```