"""
NLP Processor - Natural language understanding
"""
import re
from collections import Counter

class NLPProcessor:
    """Lightweight, lexicon-based natural-language utilities.

    All methods are heuristic (no ML models): sentiment comes from small
    positive/negative word lists, keywords from word frequency, and
    summaries from evenly spaced sentence sampling.
    """

    # Sentiment lexicons, matched as whole words only. Class-level frozensets
    # so they are built once, not on every analyze_sentiment() call.
    _POSITIVE_WORDS = frozenset([
        'good', 'great', 'excellent', 'amazing', 'wonderful', 'fantastic',
        'love', 'best', 'perfect', 'happy', 'success',
    ])
    _NEGATIVE_WORDS = frozenset([
        'bad', 'terrible', 'awful', 'horrible', 'hate', 'worst',
        'fail', 'failure', 'sad', 'angry', 'poor',
    ])

    # Common English stop words excluded from keyword extraction.
    _STOP_WORDS = frozenset({
        'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at',
        'to', 'for', 'of', 'with', 'is', 'are', 'was', 'were',
    })

    def __init__(self):
        # Kept for backward compatibility; not currently written to by any
        # method in this class.
        self.processed_texts = []

    def analyze_sentiment(self, text):
        """Basic lexicon-based sentiment analysis.

        Args:
            text: Input text, any casing.

        Returns:
            dict with keys:
              "sentiment": "positive", "negative", or "neutral"
              "confidence": float score in (0, 1]
              "positive_signals"/"negative_signals": lexicon hit counts
        """
        # Tokenize into whole words so e.g. "sadly" does not match "sad".
        # (A plain substring test produces false positives on embedded words.)
        tokens = set(re.findall(r'\b\w+\b', text.lower()))
        positive_count = sum(1 for word in self._POSITIVE_WORDS if word in tokens)
        negative_count = sum(1 for word in self._NEGATIVE_WORDS if word in tokens)

        # The +1 in the denominator damps confidence for low signal counts.
        if positive_count > negative_count:
            sentiment = "positive"
            score = positive_count / (positive_count + negative_count + 1)
        elif negative_count > positive_count:
            sentiment = "negative"
            score = negative_count / (positive_count + negative_count + 1)
        else:
            sentiment = "neutral"
            score = 0.5

        return {
            "sentiment": sentiment,
            "confidence": score,
            "positive_signals": positive_count,
            "negative_signals": negative_count,
        }

    def extract_keywords(self, text, top_n=5):
        """Extract the most frequent non-trivial words from text.

        Args:
            text: Input text.
            top_n: Maximum number of keywords to return.

        Returns:
            dict with "keywords" (list of (word, count) pairs, most frequent
            first) and "total_words" (count before filtering).
        """
        words = re.findall(r'\b\w+\b', text.lower())
        # Drop stop words and very short words (len <= 3) as low-signal.
        filtered = [w for w in words if w not in self._STOP_WORDS and len(w) > 3]

        counter = Counter(filtered)
        keywords = counter.most_common(top_n)

        return {"keywords": keywords, "total_words": len(words)}

    def summarize_text(self, text, max_sentences=3):
        """Summarize text by sampling evenly spaced sentences.

        Args:
            text: Input text; sentences split on '.', '!' or '?'.
            max_sentences: Number of sentences to keep in the summary.

        Returns:
            dict with "summary", "original_sentences", and (when sampling
            occurred) "summary_sentences". Short texts are returned verbatim.
        """
        sentences = re.split(r'[.!?]+', text)
        sentences = [s.strip() for s in sentences if s.strip()]

        if len(sentences) <= max_sentences:
            return {"summary": text, "original_sentences": len(sentences)}

        # Pick max_sentences evenly spaced sentences, always including the
        # first and the last. (Previously exactly three sentences were taken
        # regardless of the max_sentences argument.)
        if max_sentences <= 1:
            indices = [0]
        else:
            step = (len(sentences) - 1) / (max_sentences - 1)
            indices = [round(i * step) for i in range(max_sentences)]

        summary = '. '.join(sentences[i] for i in indices) + '.'

        return {
            "summary": summary,
            "original_sentences": len(sentences),
            "summary_sentences": max_sentences,
        }

if __name__ == "__main__":
    # Quick interactive demo of the NLPProcessor capabilities.
    print("NLP PROCESSOR TEST")

    processor = NLPProcessor()

    # Sentiment demo
    print("\n😊 Sentiment Analysis:")
    sample = "This is an amazing and wonderful day! I love it!"
    verdict = processor.analyze_sentiment(sample)
    print(f"   Text: '{sample}'")
    print(f"   Sentiment: {verdict['sentiment']} ({verdict['confidence']:.0%} confidence)")

    # Keyword demo
    print("\n🔑 Keyword Extraction:")
    sample = "Machine learning and artificial intelligence are transforming technology"
    top_words = [word for word, _ in processor.extract_keywords(sample)["keywords"]]
    print(f"   Keywords: {top_words}")

    print("\n📝 Eden can now:")
    for capability in (
        "Analyze sentiment",
        "Extract keywords",
        "Summarize text",
        "Process natural language",
    ):
        print(f"   - {capability}")

    print("\n✅ NLP PROCESSOR OPERATIONAL")