#!/usr/bin/env python3
"""
EDEN TECH INTELLIGENCE FEEDS
Real-time monitoring of sources that MATTER:
- GitHub Trending (what devs are building)
- Reddit r/programming, r/devops, r/machinelearning
- Lobsters (high-quality tech discussions)
- Dev.to (developer community)
- Product Hunt (new tools launching)
- TechCrunch (funding/startups) via RSS
- Security advisories (CVEs, GitHub Security)
"""
# NOTE: this import previously sat above the docstring, which demoted the
# string to a no-op expression (module.__doc__ was None). Docstring first.
import eden_brain_inject

import json
import time
import requests
import xml.etree.ElementTree as ET
from datetime import datetime
from pathlib import Path

# All persistent artifacts live under the Eden data root.
DATA_DIR = "/Eden/DATA"
FEEDS_PATH = f"{DATA_DIR}/tech_feeds.json"  # aggregated feed snapshot (JSON)
LOG_PATH = f"{DATA_DIR}/tech_feeds.log"     # append-only activity log

class TechFeeds:
    """Aggregate tech intelligence from public feeds.

    Sources: Reddit, Lobste.rs, Dev.to, Product Hunt (RSS), TechCrunch
    (RSS), GitHub security advisories, and HN "Launch HN" posts.  Each
    ``fetch_*`` method returns a list of plain dicts tagged with a
    ``'source'`` key; :meth:`run_cycle` gathers them all into
    ``self.data`` and persists the snapshot to ``FEEDS_PATH``.
    """

    def __init__(self):
        # One shared session so every request carries the same UA header
        # and reuses connections.
        self.session = requests.Session()
        self.session.headers.update({
            'User-Agent': 'Eden-Intelligence/1.0 (Tech Feed Monitor)'
        })
        # One list per source plus the timestamp of the last refresh.
        self.data = {
            'reddit': [],
            'lobsters': [],
            'devto': [],
            'producthunt': [],
            'techcrunch': [],
            'github_security': [],
            'ycombinator': [],
            'last_update': None
        }

    def log(self, msg):
        """Echo *msg* to stdout with a HH:MM:SS prefix and append to LOG_PATH."""
        timestamp = datetime.now().strftime("%H:%M:%S")
        line = f"[{timestamp}] {msg}"
        print(line)
        # Fix: ensure the data directory exists before appending, so the
        # first run on a clean host doesn't die with FileNotFoundError.
        Path(DATA_DIR).mkdir(parents=True, exist_ok=True)
        with open(LOG_PATH, 'a') as f:
            f.write(line + "\n")

    def fetch_reddit(self) -> list:
        """Fetch hot posts from key programming subreddits.

        Only the first five subreddits are polled (rate limits) and only
        posts scoring above 50 are kept; at most 20 items are returned.
        """
        items = []
        subreddits = [
            'programming',
            'devops',
            'machinelearning',
            'netsec',
            'golang',
            'rust',
            'python',
            'javascript',
            'kubernetes',
            'aws'
        ]

        for sub in subreddits[:5]:  # Top 5 to avoid rate limits
            try:
                resp = self.session.get(
                    f"https://www.reddit.com/r/{sub}/hot.json?limit=10",
                    timeout=10
                )
                if resp.status_code == 200:
                    posts = resp.json().get('data', {}).get('children', [])
                    for post in posts[:5]:
                        p = post.get('data', {})
                        if p.get('score', 0) > 50:  # Only popular posts
                            items.append({
                                'title': p.get('title', '')[:100],
                                'url': p.get('url', ''),
                                'score': p.get('score', 0),
                                'subreddit': sub,
                                'source': 'reddit'
                            })
                time.sleep(1)  # Rate limit
            except Exception as e:
                # Fix: log the failure (was silently swallowed) and keep
                # going with the remaining subreddits, matching the
                # error-handling style of the other fetchers.
                self.log(f"❌ Reddit r/{sub} error: {e}")

        self.log(f"📱 Reddit: {len(items)} hot posts")
        return items[:20]

    def fetch_lobsters(self) -> list:
        """Fetch the hottest stories from Lobste.rs (up to 15)."""
        items = []
        try:
            resp = self.session.get(
                "https://lobste.rs/hottest.json",
                timeout=10
            )
            if resp.status_code == 200:
                for story in resp.json()[:15]:
                    items.append({
                        'title': story.get('title', ''),
                        'url': story.get('url', ''),
                        'score': story.get('score', 0),
                        'tags': story.get('tags', []),
                        'source': 'lobsters'
                    })
            self.log(f"🦞 Lobsters: {len(items)} stories")
        except Exception as e:
            self.log(f"❌ Lobsters error: {e}")
        return items

    def fetch_devto(self) -> list:
        """Fetch today's top articles from the Dev.to public API (up to 15)."""
        items = []
        try:
            resp = self.session.get(
                "https://dev.to/api/articles?top=1&per_page=15",
                timeout=10
            )
            if resp.status_code == 200:
                for article in resp.json():
                    items.append({
                        'title': article.get('title', ''),
                        'url': article.get('url', ''),
                        'reactions': article.get('public_reactions_count', 0),
                        'tags': article.get('tag_list', []),
                        'source': 'devto'
                    })
            self.log(f"📝 Dev.to: {len(items)} articles")
        except Exception as e:
            self.log(f"❌ Dev.to error: {e}")
        return items

    def fetch_producthunt(self) -> list:
        """Fetch new launches from the Product Hunt RSS feed (up to 15)."""
        items = []
        try:
            # Product Hunt doesn't have a simple API, use their RSS
            resp = self.session.get(
                "https://www.producthunt.com/feed",
                timeout=10
            )
            if resp.status_code == 200:
                root = ET.fromstring(resp.content)
                for item in root.findall('.//item')[:15]:
                    title = item.find('title')
                    link = item.find('link')
                    if title is not None and link is not None:
                        items.append({
                            'title': title.text[:100] if title.text else '',
                            'url': link.text if link.text else '',
                            'source': 'producthunt'
                        })
            self.log(f"🚀 Product Hunt: {len(items)} launches")
        except Exception as e:
            self.log(f"❌ Product Hunt error: {e}")
        return items

    def fetch_techcrunch(self) -> list:
        """Fetch TechCrunch RSS items filtered to funding/startup keywords."""
        items = []
        try:
            resp = self.session.get(
                "https://techcrunch.com/feed/",
                timeout=10
            )
            if resp.status_code == 200:
                root = ET.fromstring(resp.content)
                for item in root.findall('.//item')[:15]:
                    title = item.find('title')
                    link = item.find('link')
                    if title is not None:
                        title_text = title.text or ''
                        # Filter for funding/startup relevant
                        keywords = ['raises', 'funding', 'series', 'startup', 'launch', 'ai', 'acquisition']
                        if any(kw in title_text.lower() for kw in keywords):
                            items.append({
                                'title': title_text[:100],
                                'url': link.text if link is not None else '',
                                'source': 'techcrunch'
                            })
            self.log(f"💰 TechCrunch: {len(items)} funding/startup news")
        except Exception as e:
            self.log(f"❌ TechCrunch error: {e}")
        return items

    def fetch_github_security(self) -> list:
        """Fetch high/critical GitHub Security Advisories (up to 10)."""
        items = []
        try:
            resp = self.session.get(
                "https://api.github.com/advisories?per_page=20&severity=high,critical",
                headers={'Accept': 'application/vnd.github+json'},
                timeout=15
            )
            if resp.status_code == 200:
                for adv in resp.json()[:10]:
                    # Fix: `vulnerabilities` can be present but EMPTY, in
                    # which case .get(..., [{}])[0] raised IndexError and
                    # aborted the whole loop. Fall back to a dummy entry.
                    vulns = adv.get('vulnerabilities') or [{}]
                    items.append({
                        'title': adv.get('summary', '')[:100],
                        'severity': adv.get('severity', 'unknown'),
                        'package': vulns[0].get('package', {}).get('name', ''),
                        'url': adv.get('html_url', ''),
                        'source': 'github_security'
                    })
            self.log(f"🔒 GitHub Security: {len(items)} advisories")
        except Exception as e:
            self.log(f"❌ GitHub Security error: {e}")
        return items

    def fetch_ycombinator_launches(self) -> list:
        """Fetch recent "Launch HN" stories via the HN Algolia search API."""
        items = []
        try:
            # Check HN for "Launch HN" posts
            resp = self.session.get(
                "https://hn.algolia.com/api/v1/search?query=Launch%20HN&tags=story&hitsPerPage=10",
                timeout=10
            )
            if resp.status_code == 200:
                for hit in resp.json().get('hits', []):
                    items.append({
                        'title': hit.get('title', ''),
                        'url': hit.get('url', ''),
                        'points': hit.get('points', 0),
                        'source': 'yc_launch'
                    })
            self.log(f"🚀 YC Launches: {len(items)} startups")
        except Exception as e:
            self.log(f"❌ YC error: {e}")
        return items

    def save_data(self):
        """Stamp 'last_update' and write self.data to FEEDS_PATH as JSON."""
        self.data['last_update'] = datetime.now().isoformat()
        # Fix: ensure the target directory exists before writing.
        Path(DATA_DIR).mkdir(parents=True, exist_ok=True)
        with open(FEEDS_PATH, 'w') as f:
            json.dump(self.data, f, indent=2, default=str)

    def run_cycle(self):
        """Fetch every feed once, persist the snapshot, and log a summary.

        Returns the updated ``self.data`` dict.  Inter-fetch sleeps keep
        us polite with the upstream services.
        """
        self.log("=" * 50)
        self.log("📡 TECH FEEDS UPDATE")
        self.log("=" * 50)

        self.data['reddit'] = self.fetch_reddit()
        time.sleep(2)

        self.data['lobsters'] = self.fetch_lobsters()
        time.sleep(1)

        self.data['devto'] = self.fetch_devto()
        time.sleep(1)

        self.data['producthunt'] = self.fetch_producthunt()
        time.sleep(1)

        self.data['techcrunch'] = self.fetch_techcrunch()
        time.sleep(1)

        self.data['github_security'] = self.fetch_github_security()
        time.sleep(1)

        self.data['ycombinator'] = self.fetch_ycombinator_launches()

        self.save_data()

        # Summary. Fix: derive the source count from the data instead of
        # hard-coding "7", so the log stays correct if sources change.
        source_lists = [v for v in self.data.values() if isinstance(v, list)]
        total = sum(len(v) for v in source_lists)
        self.log(f"\n📊 TOTAL: {total} items from {len(source_lists)} sources")

        # Highlight important stuff
        funding = [t for t in self.data['techcrunch'] if 'raise' in t.get('title', '').lower()]
        security = self.data['github_security']

        if funding:
            self.log(f"💰 FUNDING NEWS: {len(funding)}")
        if security:
            self.log(f"🔒 SECURITY ALERTS: {len(security)}")

        return self.data

    def run_continuous(self, interval_minutes=5):
        """Run :meth:`run_cycle` forever, sleeping *interval_minutes* between runs.

        Cycle-level exceptions are logged and the loop continues.
        """
        self.log("📡 Tech Feeds Monitor Started")

        while True:
            try:
                self.run_cycle()
            except Exception as e:
                self.log(f"❌ Error: {e}")

            self.log(f"💤 Next update in {interval_minutes} min...")
            time.sleep(interval_minutes * 60)


def main():
    """CLI entry point: one fetch cycle with --once, else loop forever."""
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--once', action='store_true')
    parser.add_argument('--interval', type=int, default=5)
    opts = parser.parse_args()

    monitor = TechFeeds()
    if opts.once:
        monitor.run_cycle()
        return
    monitor.run_continuous(opts.interval)


# Run the CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()
