#!/usr/bin/env python3
"""
Tesla V100 Lyrics Analyzer - Fixed DBSCAN Version
"""

import hashlib
import json
import logging
import re
import sqlite3
import threading
import time
from collections import defaultdict
from datetime import datetime, timezone

import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Logging setup: timestamped, level-tagged messages for the whole module.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

class SimpleEmotionAnalyzer:
    """Dictionary-driven emotion analyzer (no ML models required).

    Scores a text by counting occurrences of hand-picked positive,
    negative, high-energy and low-energy words in English and Russian.
    Matching is plain substring search on the lowercased text, so
    inflected forms that contain a lexicon word also count.
    """

    def __init__(self):
        # Sentiment lexicons, keyed by language (English + Russian).
        self.positive_words = {
            'english': ['happy', 'joy', 'love', 'good', 'great', 'amazing', 'wonderful', 'beautiful', 'perfect', 'awesome', 'fantastic', 'excellent', 'brilliant', 'fun', 'smile', 'laugh', 'dance', 'party', 'celebrate'],
            'russian': ['счастье', 'радость', 'любовь', 'хорошо', 'отлично', 'прекрасно', 'замечательно', 'красиво', 'идеально', 'весело', 'улыбка', 'смех', 'танец', 'праздник', 'праздновать', 'радостный', 'счастливый']
        }

        self.negative_words = {
            'english': ['sad', 'angry', 'hate', 'bad', 'terrible', 'awful', 'horrible', 'pain', 'hurt', 'cry', 'tears', 'broken', 'lost', 'alone', 'dark', 'death', 'fear', 'worry', 'stress'],
            'russian': ['грустно', 'злой', 'плохо', 'ужасно', 'боль', 'больно', 'плакать', 'слезы', 'сломанный', 'потерянный', 'одинокий', 'темно', 'смерть', 'страх', 'беспокойство', 'стресс', 'печаль', 'тоска']
        }

        # Energy lexicon, keyed by intensity rather than language.
        self.energy_words = {
            'high': ['dance', 'jump', 'run', 'fast', 'energy', 'power', 'strong', 'loud', 'rock', 'metal', 'party', 'танцуй', 'прыгай', 'быстро', 'энергия', 'сила', 'громко', 'рок', 'метал', 'вечеринка'],
            'low': ['slow', 'calm', 'quiet', 'sleep', 'rest', 'peaceful', 'soft', 'gentle', 'медленно', 'спокойно', 'тихо', 'сон', 'отдых', 'мирный', 'мягко', 'нежно']
        }

    def analyze(self, text):
        """Score *text* and return its emotional profile.

        Returns:
            Dict with keys ``primary`` ('positive'/'negative'/'neutral'),
            ``confidence``, ``energy`` and ``valence`` (all in [0, 1]).
        """
        haystack = text.lower()

        def hits(words):
            # Number of lexicon entries that occur somewhere in the text.
            return sum(1 for w in words if w in haystack)

        positive_count = sum(hits(ws) for ws in self.positive_words.values())
        negative_count = sum(hits(ws) for ws in self.negative_words.values())

        # Primary emotion: whichever polarity dominates; neutral when no
        # emotional words were found at all.
        emotional_total = positive_count + negative_count
        if emotional_total == 0:
            primary, confidence = 'neutral', 0.5
        elif positive_count > negative_count:
            primary, confidence = 'positive', positive_count / emotional_total
        else:
            primary, confidence = 'negative', negative_count / emotional_total

        # Energy: share of high-energy hits among all energy hits.
        high = hits(self.energy_words['high'])
        low = hits(self.energy_words['low'])
        energy = high / (high + low) if (high + low) else 0.5

        # Valence (positivity): mirrors confidence for polar emotions.
        if primary == 'positive':
            valence = confidence
        elif primary == 'negative':
            valence = 1 - confidence
        else:
            valence = 0.5

        return {
            'primary': primary,
            'confidence': min(confidence + 0.3, 1.0),  # small confidence boost
            'energy': energy,
            'valence': valence,
        }

class TeslaLyricsAnalyzer:
    """Annotate tracks in a SQLite database with lyric-derived metadata.

    For every unprocessed track that has lyrics, compute a content hash,
    an exact-duplicate group id, dictionary-based emotion scores, dominant
    tags, a coarse mood category and a heuristic quality score, then write
    the results back to the ``tracks`` table. Processing is idempotent:
    rows already at ``PROCESSING_VERSION`` or above are skipped.
    """

    # Version stamped on processed rows; rows at or above it are skipped
    # on subsequent runs (was hard-coded as 2 in three places).
    PROCESSING_VERSION = 2

    # Substring keywords used to prioritize genre-looking tags.
    _GENRE_KEYWORDS = ('rock', 'pop', 'jazz', 'blues', 'metal', 'electronic',
                       'folk', 'country', 'hip-hop', 'rap', 'classical',
                       'reggae', 'punk', 'indie', 'alternative', 'dance',
                       'house', 'techno', 'ambient')

    def __init__(self, db_path="tracks.sqlite"):
        """
        Args:
            db_path: Path to the SQLite database with a ``tracks`` table.
        """
        self.db_path = db_path
        self.start_time = time.time()
        self.processed_count = 0
        self.emotion_analyzer = SimpleEmotionAnalyzer()

        logger.info("🚀 Tesla V100 Fixed Analyzer starting...")

    def get_tracks_with_lyrics(self):
        """Return unprocessed tracks that have lyrics.

        Returns:
            List of ``(id, title, lyric, tags, processing_version)`` tuples
            for rows whose ``processing_version`` is NULL or below
            ``PROCESSING_VERSION``, ordered by id.
        """
        conn = sqlite3.connect(self.db_path)
        try:  # fix: close the connection even if a query raises
            cursor = conn.cursor()

            cursor.execute(
                "SELECT COUNT(*) FROM tracks WHERE lyric IS NOT NULL AND lyric != ''"
            )
            total_with_lyrics = cursor.fetchone()[0]

            cursor.execute(
                "SELECT COUNT(*) FROM tracks WHERE processing_version >= ?",
                (self.PROCESSING_VERSION,),
            )
            already_processed = cursor.fetchone()[0]

            logger.info(f"📊 Database: {total_with_lyrics} tracks with lyrics, {already_processed} already processed")

            cursor.execute(
                """
                SELECT id, title, lyric, tags, processing_version
                FROM tracks
                WHERE lyric IS NOT NULL AND lyric != ''
                AND (processing_version IS NULL OR processing_version < ?)
                ORDER BY id
                """,
                (self.PROCESSING_VERSION,),
            )
            tracks = cursor.fetchall()
        finally:
            conn.close()

        logger.info(f"🎯 Found {len(tracks)} tracks to process")
        return tracks

    def calculate_lyric_hash(self, lyric):
        """Return a stable MD5 hex digest of the normalized lyric text.

        Normalization lowercases, strips surrounding whitespace and removes
        punctuation so trivially different copies hash identically.
        MD5 is used only for deduplication here, not for security.
        """
        clean_lyric = re.sub(r'[^\w\s]', '', lyric.lower().strip())
        return hashlib.md5(clean_lyric.encode()).hexdigest()

    def find_similar_lyrics_simple(self, tracks, similarity_threshold=0.8):
        """Group tracks whose normalized lyrics are exact duplicates.

        Args:
            tracks: Rows as returned by :meth:`get_tracks_with_lyrics`.
            similarity_threshold: Unused; kept for interface compatibility
                with a fuzzy (cosine-similarity) implementation.

        Returns:
            List parallel to ``tracks``: a non-negative group id for tracks
            in a duplicate group, -1 for unique tracks.
        """
        logger.info("🔍 Computing similarities (simplified)...")
        start_time = time.time()

        # Bucket track indices by normalized-lyric hash.
        hash_groups = defaultdict(list)
        for i, track in enumerate(tracks):
            hash_groups[self.calculate_lyric_hash(track[2])].append(i)

        clusters = [-1] * len(tracks)  # -1 means "unique track"
        cluster_id = 0
        for indices in hash_groups.values():
            if len(indices) > 1:  # only buckets with duplicates get a group id
                for idx in indices:
                    clusters[idx] = cluster_id
                cluster_id += 1

        noise_points = clusters.count(-1)
        similarity_time = time.time() - start_time
        logger.info(f"✅ Found {cluster_id} duplicate groups, {noise_points} unique ({similarity_time:.1f}s)")

        return clusters

    def extract_dominant_tags(self, tags_str):
        """Return up to two genre tags plus one other tag from ``tags_str``.

        Only the first five comma-separated tags are considered; tags that
        contain a known genre keyword are listed first.
        """
        if not tags_str:
            return ""

        tags = [tag.strip() for tag in tags_str.split(',')]

        found_genres = []
        other_tags = []
        for tag in tags[:5]:  # consider the first 5 tags only
            tag_lower = tag.lower()
            if any(genre in tag_lower for genre in self._GENRE_KEYWORDS):
                found_genres.append(tag)
            else:
                other_tags.append(tag)

        # Genres first, then at most one remaining tag.
        dominant = found_genres[:2] + other_tags[:1]
        return ', '.join(dominant)

    def categorize_track(self, emotions, tags, title):
        """Map emotion scores to one of six coarse categories.

        Args:
            emotions: Dict with at least ``energy`` and ``valence`` in [0, 1].
            tags: Unused; kept for interface compatibility.
            title: Unused; kept for interface compatibility.
        """
        high_valence = emotions['valence'] > 0.6
        if emotions['energy'] > 0.7:
            return "energetic_positive" if high_valence else "energetic_aggressive"
        if emotions['energy'] < 0.3:
            return "calm_positive" if high_valence else "calm_melancholic"
        return "moderate_upbeat" if high_valence else "moderate_neutral"

    def process_batch(self, tracks_batch, clusters_batch):
        """Analyze one batch of tracks.

        Args:
            tracks_batch: Rows ``(id, title, lyric, tags, processing_version)``.
            clusters_batch: Group ids parallel to ``tracks_batch`` (-1 = unique).

        Returns:
            Tuple ``(results, batch_time, emotions_batch)`` where ``results``
            is a list of per-track dicts ready for :meth:`update_database`.
        """
        batch_start = time.time()
        results = []
        emotions_batch = []

        for track, cluster in zip(tracks_batch, clusters_batch):
            track_id, title, lyric, tags, _ = track

            emotions = self.emotion_analyzer.analyze(lyric)
            emotions_batch.append(emotions)

            # Heuristic quality score in [0, 1]: mid-length lyrics, confident
            # emotion detection and rich tagging each add points to a 0.5 base.
            quality_score = 0.5
            if 500 <= len(lyric) <= 3000:
                quality_score += 0.2
            quality_score += emotions['confidence'] * 0.2
            if tags and len(tags.split(',')) >= 3:
                quality_score += 0.1
            quality_score = min(max(quality_score, 0.0), 1.0)

            results.append({
                'id': track_id,
                'lyric_hash': self.calculate_lyric_hash(lyric),
                'lyric_group_id': int(cluster) if cluster != -1 else None,
                'emotion_primary': emotions['primary'],
                'emotion_confidence': emotions['confidence'],
                'mood_energy': emotions['energy'],
                'mood_valence': emotions['valence'],
                'dominant_tags': self.extract_dominant_tags(tags),
                'category': self.categorize_track(emotions, tags, title),
                # fix: timezone-aware timestamp (datetime.utcnow is deprecated)
                'processed_at': datetime.now(timezone.utc).isoformat(),
                'processing_version': self.PROCESSING_VERSION,
            })

        return results, time.time() - batch_start, emotions_batch

    def update_database(self, results):
        """Persist per-track analysis results to the ``tracks`` table.

        The whole batch is written in one ``executemany`` and committed
        atomically; the connection is always closed.
        """
        conn = sqlite3.connect(self.db_path)
        try:  # fix: always close; commit only after the whole batch succeeds
            conn.executemany(
                """
                UPDATE tracks SET
                    lyric_hash = :lyric_hash,
                    lyric_group_id = :lyric_group_id,
                    emotion_primary = :emotion_primary,
                    emotion_confidence = :emotion_confidence,
                    mood_energy = :mood_energy,
                    mood_valence = :mood_valence,
                    dominant_tags = :dominant_tags,
                    category = :category,
                    quality_score = :quality_score,
                    processed_at = :processed_at,
                    processing_version = :processing_version
                WHERE id = :id
                """,
                results,
            )
            conn.commit()
        finally:
            conn.close()

    def run_analysis(self, batch_size=100):
        """Run the full pipeline: load, deduplicate, analyze, persist.

        Args:
            batch_size: Number of tracks analyzed and committed per batch.
        """
        logger.info("🚀 Starting analysis...")

        tracks = self.get_tracks_with_lyrics()
        if not tracks:
            logger.info("No tracks to process")
            return

        # Simplified grouping (exact duplicates only).
        clusters = self.find_similar_lyrics_simple(tracks)

        emotion_stats = defaultdict(int)
        total = len(tracks)

        for i in range(0, total, batch_size):
            batch = tracks[i:i + batch_size]
            batch_clusters = clusters[i:i + batch_size]

            results, batch_time, emotions_batch = self.process_batch(batch, batch_clusters)
            self.update_database(results)

            for emotion_data in emotions_batch:
                emotion_stats[emotion_data['primary']] += 1

            self.processed_count += len(batch)

            # Progress / throughput report (all divisions guarded).
            elapsed = time.time() - self.start_time
            speed = self.processed_count / elapsed if elapsed > 0 else 0
            eta = (total - self.processed_count) / speed if speed > 0 else 0
            progress = (self.processed_count / total * 100) if total > 0 else 0

            logger.info(f"""
🚀 TESLA V100 PROGRESS 🚀
Progress: {progress:.1f}% ({self.processed_count}/{total})
Speed: {speed:.1f} tracks/sec | ETA: {eta/60:.1f} min
Batch time: {batch_time:.2f}s
Emotions: {dict(emotion_stats)}
            """)

        total_time = time.time() - self.start_time
        # fix: guard against ZeroDivisionError on an instantaneous run
        avg_speed = self.processed_count / total_time if total_time > 0 else 0
        logger.info(f"""
🎯 ANALYSIS COMPLETE! 🎯
Total time: {total_time/60:.1f} minutes
Total tracks: {self.processed_count}
Average speed: {avg_speed:.1f} tracks/sec

📊 FINAL EMOTION DISTRIBUTION:
{json.dumps(dict(emotion_stats), indent=2)}
        """)

if __name__ == "__main__":
    # Script entry point: analyze the default database end to end.
    TeslaLyricsAnalyzer().run_analysis()
