From 24a1d187c2824c3278ca62b87856ffff8249f24b Mon Sep 17 00:00:00 2001 From: Saahi30 Date: Mon, 17 Nov 2025 08:28:35 +0530 Subject: [PATCH 1/9] feat(analytics): add AI-powered analytics endpoints - Add predictive analytics endpoint - Add automated insights endpoint - Add audience segmentation endpoint - Add sentiment analysis endpoint - Add anomaly detection endpoint - Add attribution modeling endpoint - Add benchmarking endpoint - Add churn prediction endpoint - Add natural language query endpoint - Add KPI optimization endpoint - Use Groq API with llama-4-scout-17b-16e-instruct model --- backend/app/api/routes/ai_analytics.py | 1242 ++++++++++++++++++++++++ 1 file changed, 1242 insertions(+) create mode 100644 backend/app/api/routes/ai_analytics.py diff --git a/backend/app/api/routes/ai_analytics.py b/backend/app/api/routes/ai_analytics.py new file mode 100644 index 0000000..bf74ffa --- /dev/null +++ b/backend/app/api/routes/ai_analytics.py @@ -0,0 +1,1242 @@ +""" +AI-Powered Analytics endpoints for predictive analytics, insights, segmentation, etc. 
+""" +from fastapi import APIRouter, HTTPException, Depends, Query +from pydantic import BaseModel +from typing import Optional, List, Dict, Any +from datetime import datetime, timezone, timedelta +import json +from groq import Groq +from app.core.supabase_clients import supabase_anon +from app.core.dependencies import get_current_user +from app.core.config import settings + +router = APIRouter() + + +def get_groq_client(): + """Get Groq client instance""" + if not settings.groq_api_key: + raise HTTPException(status_code=500, detail="GROQ API key not configured") + return Groq(api_key=settings.groq_api_key) + + +async def get_user_profile(user: dict): + """Get brand or creator profile based on user role""" + role = user.get("role") + user_id = user.get("id") + + if role == "Brand": + brand_res = supabase_anon.table("brands") \ + .select("*") \ + .eq("user_id", user_id) \ + .single() \ + .execute() + if brand_res.data: + return {"type": "brand", "profile": brand_res.data} + elif role == "Creator": + creator_res = supabase_anon.table("creators") \ + .select("*") \ + .eq("user_id", user_id) \ + .single() \ + .execute() + if creator_res.data: + return {"type": "creator", "profile": creator_res.data} + + raise HTTPException( + status_code=403, + detail="User profile not found. Please complete onboarding." 
def get_historical_metrics(
    brand_id: Optional[str] = None,
    creator_id: Optional[str] = None,
    campaign_id: Optional[str] = None,
):
    """Fetch up to 1000 historical metric-update rows scoped to the given filters.

    Args:
        brand_id: restrict to metrics belonging to this brand's campaigns.
        creator_id: restrict to updates submitted by this creator.
        campaign_id: restrict to metrics of this campaign.

    Returns:
        A list of update dicts ordered by ``submitted_at`` ascending, each
        enriched with two extra keys: ``campaign_deliverable_metrics`` (the
        metric row) and ``campaign_deliverables`` (the deliverable row).
        Returns [] when the filters match nothing or the query fails.
    """

    def _metric_ids_for_campaigns(campaign_ids):
        # Resolve campaigns -> deliverables -> metric ids; empty at any hop
        # short-circuits to [].
        if not campaign_ids:
            return []
        deliverables_res = supabase_anon.table("campaign_deliverables") \
            .select("id") \
            .in_("campaign_id", campaign_ids) \
            .execute()
        deliverable_ids = [d["id"] for d in deliverables_res.data or []]
        if not deliverable_ids:
            return []
        metrics_res = supabase_anon.table("campaign_deliverable_metrics") \
            .select("id") \
            .in_("campaign_deliverable_id", deliverable_ids) \
            .execute()
        return [m["id"] for m in metrics_res.data or []]

    # One allowed-metric-id set per active filter; the final scope is their
    # intersection. Previously, a brand filter that matched no metrics was
    # silently replaced by the campaign filter's ids (so a brand user could
    # read another brand's campaign data), and a campaign with no
    # deliverables silently widened the scope back to the whole brand.
    filter_sets = []

    if brand_id:
        campaigns_res = supabase_anon.table("campaigns") \
            .select("id") \
            .eq("brand_id", brand_id) \
            .execute()
        brand_campaign_ids = [c["id"] for c in campaigns_res.data or []]
        filter_sets.append(set(_metric_ids_for_campaigns(brand_campaign_ids)))

    if campaign_id:
        filter_sets.append(set(_metric_ids_for_campaigns([campaign_id])))

    query = supabase_anon.table("campaign_deliverable_metric_updates").select("*")

    if filter_sets:
        allowed_ids = set.intersection(*filter_sets)
        if not allowed_ids:
            # Filters were given but matched no metrics: nothing to return.
            return []
        # sorted() keeps the query deterministic across calls.
        query = query.in_("campaign_deliverable_metric_id", sorted(allowed_ids))

    if creator_id:
        query = query.eq("submitted_by", creator_id)

    try:
        result = query.order("submitted_at", desc=False).limit(1000).execute()
        updates = result.data or []
    except Exception:
        # Deliberate best-effort: analytics endpoints degrade to "no data"
        # rather than failing the whole request on a transient query error.
        return []

    if not updates:
        return []

    # Enrich each update with its metric row ...
    metric_ids_from_updates = list({
        u["campaign_deliverable_metric_id"]
        for u in updates
        if u.get("campaign_deliverable_metric_id")
    })
    metrics = {}
    if metric_ids_from_updates:
        metrics_res = supabase_anon.table("campaign_deliverable_metrics") \
            .select("id, name, display_name, campaign_deliverable_id") \
            .in_("id", metric_ids_from_updates) \
            .execute()
        metrics = {m["id"]: m for m in (metrics_res.data or [])}

    # ... and the metric's deliverable row.
    deliverable_ids = list({
        m.get("campaign_deliverable_id")
        for m in metrics.values()
        if m.get("campaign_deliverable_id")
    })
    deliverables = {}
    if deliverable_ids:
        deliverables_res = supabase_anon.table("campaign_deliverables") \
            .select("id, campaign_id, platform, content_type") \
            .in_("id", deliverable_ids) \
            .execute()
        deliverables = {d["id"]: d for d in (deliverables_res.data or [])}

    enriched_updates = []
    for update in updates:
        metric = metrics.get(update.get("campaign_deliverable_metric_id"), {})
        deliverable = deliverables.get(metric.get("campaign_deliverable_id"), {})
        enriched_updates.append({
            **update,
            "campaign_deliverable_metrics": metric,
            "campaign_deliverables": deliverable,
        })

    return enriched_updates
None # 'performance', 'roi', 'engagement' + forecast_periods: int = 30 # days + + +class PredictiveAnalyticsResponse(BaseModel): + forecast: Dict[str, Any] + confidence: str + factors: List[str] + recommendations: List[str] + + +class AutomatedInsightsResponse(BaseModel): + summary: str + trends: List[str] + anomalies: List[Dict[str, Any]] + recommendations: List[str] + key_metrics: Dict[str, Any] + + +class AudienceSegmentationResponse(BaseModel): + segments: List[Dict[str, Any]] + visualization_data: Dict[str, Any] + + +class SentimentAnalysisRequest(BaseModel): + text: Optional[str] = None + campaign_id: Optional[str] = None + + +class SentimentAnalysisResponse(BaseModel): + overall_sentiment: str + sentiment_score: float + positive_aspects: List[str] + negative_aspects: List[str] + recommendations: List[str] + + +class AnomalyDetectionResponse(BaseModel): + anomalies: List[Dict[str, Any]] + summary: str + + +class AttributionModelingResponse(BaseModel): + attribution: Dict[str, float] + top_contributors: List[Dict[str, Any]] + insights: List[str] + + +class BenchmarkingResponse(BaseModel): + your_metrics: Dict[str, float] + industry_benchmarks: Dict[str, float] + comparison: Dict[str, Any] + recommendations: List[str] + + +class ChurnPredictionResponse(BaseModel): + churn_risk: Dict[str, float] + at_risk_segments: List[Dict[str, Any]] + recommendations: List[str] + + +class NaturalLanguageQueryRequest(BaseModel): + query: str + campaign_id: Optional[str] = None + + +class NaturalLanguageQueryResponse(BaseModel): + answer: str + data_sources: List[str] + confidence: str + + +class KPIOptimizationResponse(BaseModel): + current_kpis: Dict[str, float] + optimization_suggestions: List[Dict[str, Any]] + priority_actions: List[str] + + +# ==================== API Endpoints ==================== + +@router.post("/analytics/ai/predictive", response_model=PredictiveAnalyticsResponse) +async def get_predictive_analytics( + request: PredictiveAnalyticsRequest, + user: dict 
= Depends(get_current_user) +): + """Forecast campaign performance, ROI, or audience engagement using historical data""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + user_type = user_profile["type"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_type == "brand" else None, + creator_id=profile["id"] if user_type == "creator" else None, + campaign_id=request.campaign_id + ) + + if not historical_data: + return PredictiveAnalyticsResponse( + forecast={ + "predicted_value": 0.0, + "trend": "stable", + "growth_rate": 0.0, + "forecasted_values": [] + }, + confidence="low", + factors=["No historical data available"], + recommendations=[ + "Start by creating metrics for your campaign deliverables", + "Have creators submit metric values to build historical data", + "Once you have at least 5-10 data points, predictions will be available" + ] + ) + + # Prepare data for AI analysis + metrics_summary = {} + for entry in historical_data[-30:]: # Last 30 entries + metric_name = entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = entry.get("value", 0) + date = entry.get("submitted_at", "") + if metric_name not in metrics_summary: + metrics_summary[metric_name] = [] + metrics_summary[metric_name].append({"value": value, "date": date}) + + groq_client = get_groq_client() + + prompt = f"""Analyze this historical campaign metrics data and provide predictive analytics: + +HISTORICAL DATA: +{json.dumps(metrics_summary, indent=2)} + +METRIC TYPE: {request.metric_type or 'general performance'} +FORECAST PERIOD: {request.forecast_periods} days +USER TYPE: {user_type} + +Based on the historical trends, provide: +1. Forecasted values for the next {request.forecast_periods} days +2. Confidence level (high/medium/low) +3. Key factors influencing the forecast +4. 
Actionable recommendations + +Return your response as JSON with this exact structure: +{{ + "forecast": {{ + "predicted_value": 0.0, + "trend": "increasing|decreasing|stable", + "growth_rate": 0.0, + "forecasted_values": [{{"date": "YYYY-MM-DD", "value": 0.0}}] + }}, + "confidence": "high|medium|low", + "factors": ["Factor 1", "Factor 2"], + "recommendations": ["Recommendation 1", "Recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert data analyst specializing in predictive analytics for marketing campaigns. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1500, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return PredictiveAnalyticsResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating predictive analytics: {str(e)}") + + +@router.get("/analytics/ai/insights", response_model=AutomatedInsightsResponse) +async def get_automated_insights( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Generate plain-language summaries of analytics data with trends, anomalies, and recommendations""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + user_type = user_profile["type"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_type == "brand" else None, + creator_id=profile["id"] if user_type == "creator" else None, + 
campaign_id=campaign_id + ) + + if not historical_data: + return AutomatedInsightsResponse( + summary="No analytics data available yet. Start tracking metrics to get automated insights.", + trends=[], + anomalies=[], + recommendations=[ + "Create metrics for your campaign deliverables", + "Have creators submit metric values", + "Once you have data, insights will appear here automatically" + ], + key_metrics={} + ) + + # Aggregate metrics + metrics_by_name = {} + for entry in historical_data: + metric_name = entry.get("campaign_deliverable_metrics", {}).get("display_name") or entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = float(entry.get("value", 0)) + date = entry.get("submitted_at", "") + + if metric_name not in metrics_by_name: + metrics_by_name[metric_name] = [] + metrics_by_name[metric_name].append({"value": value, "date": date}) + + # Calculate trends + trends_data = {} + for metric_name, values in metrics_by_name.items(): + if len(values) >= 2: + recent_avg = sum(v["value"] for v in values[-7:]) / min(7, len(values)) + older_avg = sum(v["value"] for v in values[:-7]) / max(1, len(values) - 7) if len(values) > 7 else recent_avg + change = ((recent_avg - older_avg) / older_avg * 100) if older_avg > 0 else 0 + trends_data[metric_name] = { + "current_avg": recent_avg, + "previous_avg": older_avg, + "change_percent": change, + "trend": "increasing" if change > 5 else "decreasing" if change < -5 else "stable" + } + + groq_client = get_groq_client() + + prompt = f"""Analyze this campaign analytics data and provide automated insights: + +METRICS DATA: +{json.dumps(metrics_by_name, indent=2)} + +TRENDS ANALYSIS: +{json.dumps(trends_data, indent=2)} + +USER TYPE: {user_type} + +Provide: +1. A plain-language executive summary (2-3 sentences) +2. Key trends identified +3. Any anomalies or unusual patterns +4. 
Actionable recommendations + +Return your response as JSON with this exact structure: +{{ + "summary": "Executive summary in plain language", + "trends": ["Trend 1", "Trend 2"], + "anomalies": [{{"metric": "Metric name", "description": "Anomaly description", "severity": "high|medium|low"}}], + "recommendations": ["Recommendation 1", "Recommendation 2"], + "key_metrics": {{"metric_name": "value"}} +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert analytics consultant. Provide clear, actionable insights in plain language. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.4, + max_completion_tokens=1200, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return AutomatedInsightsResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating insights: {str(e)}") + + +@router.get("/analytics/ai/audience-segmentation", response_model=AudienceSegmentationResponse) +async def get_audience_segmentation( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Use AI to identify and visualize key audience segments based on demographics, interests, and behaviors""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + 
campaign_id=campaign_id + ) + + # Extract demographics data + demographics_data = [] + for entry in historical_data: + demographics = entry.get("demographics") + if demographics and isinstance(demographics, dict): + demographics_data.append(demographics) + + if not demographics_data: + # Return default segments if no demographics data + return AudienceSegmentationResponse( + segments=[ + {"name": "General Audience", "size": 100, "characteristics": ["No demographic data available"]} + ], + visualization_data={} + ) + + groq_client = get_groq_client() + + prompt = f"""Analyze this audience demographics data and identify key segments: + +DEMOGRAPHICS DATA: +{json.dumps(demographics_data[:50], indent=2)} # Limit to 50 for prompt size + +Identify distinct audience segments based on: +- Demographics (age, gender, location) +- Interests +- Behaviors +- Engagement patterns + +Return your response as JSON with this exact structure: +{{ + "segments": [ + {{ + "name": "Segment Name", + "size": 25, + "characteristics": ["Characteristic 1", "Characteristic 2"], + "demographics": {{"age_range": "25-34", "gender": "mixed", "location": "urban"}}, + "interests": ["Interest 1", "Interest 2"], + "engagement_score": 0.75 + }} + ], + "visualization_data": {{ + "segment_sizes": {{"Segment 1": 25, "Segment 2": 30}}, + "demographic_breakdown": {{}} + }} +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in audience segmentation and market research. Identify meaningful audience segments. Always respond with valid JSON only." 
@router.post("/analytics/ai/sentiment", response_model=SentimentAnalysisResponse)
async def analyze_sentiment(
    request: SentimentAnalysisRequest,
    user: dict = Depends(get_current_user)
):
    """Analyze social media and campaign feedback to gauge public sentiment.

    Accepts free-form text and/or a campaign_id; feedback stored for that
    campaign's deliverable metrics is collected and scored by the LLM.

    Raises:
        HTTPException 400: neither text nor any campaign feedback available.
        HTTPException 500: Groq call or response parsing failed.
    """
    try:
        feedback_texts = []
        if request.campaign_id:
            # Scope feedback to the requested campaign instead of fetching the
            # whole table (the previous query returned feedback from EVERY
            # campaign): campaign -> deliverables -> metrics -> feedback.
            deliverables_res = supabase_anon.table("campaign_deliverables") \
                .select("id") \
                .eq("campaign_id", request.campaign_id) \
                .execute()
            deliverable_ids = [d["id"] for d in deliverables_res.data or []]
            metric_ids = []
            if deliverable_ids:
                metrics_res = supabase_anon.table("campaign_deliverable_metrics") \
                    .select("id") \
                    .in_("campaign_deliverable_id", deliverable_ids) \
                    .execute()
                metric_ids = [m["id"] for m in metrics_res.data or []]
            if metric_ids:
                # NOTE(review): assumes the feedback table references metrics
                # via campaign_deliverable_metric_id, matching the naming
                # convention of the *_metric_updates table — confirm against
                # the schema.
                feedback_res = supabase_anon.table("campaign_deliverable_metric_feedback") \
                    .select("feedback_text") \
                    .in_("campaign_deliverable_metric_id", metric_ids) \
                    .execute()
                feedback_texts = [
                    f["feedback_text"]
                    for f in feedback_res.data or []
                    if f.get("feedback_text")
                ]

        if request.text:
            feedback_texts.append(request.text)

        if not feedback_texts:
            raise HTTPException(status_code=400, detail="No text data provided for sentiment analysis")

        combined_text = "\n\n".join(feedback_texts[:20])  # Limit to 20 feedback items

        groq_client = get_groq_client()

        prompt = f"""Analyze the sentiment of this campaign feedback and social media data:

FEEDBACK DATA:
{combined_text}

Provide a comprehensive sentiment analysis including:
1. Overall sentiment (positive, neutral, negative, mixed)
2. Sentiment score from -1 (very negative) to 1 (very positive)
3. Positive aspects mentioned
4. Negative aspects mentioned
5. Recommendations for improvement

Return your response as JSON with this exact structure:
{{
    "overall_sentiment": "positive|neutral|negative|mixed",
    "sentiment_score": 0.75,
    "positive_aspects": ["Aspect 1", "Aspect 2"],
    "negative_aspects": ["Aspect 1", "Aspect 2"],
    "recommendations": ["Recommendation 1", "Recommendation 2"]
}}"""

        completion = groq_client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert sentiment analyst specializing in brand and campaign feedback. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_completion_tokens=1000,
            response_format={"type": "json_object"}
        )

        # Defensive strip of markdown fences even though json_object mode is
        # requested.
        content = completion.choices[0].message.content if completion.choices else "{}"
        content = content.strip()
        if content.startswith("```json"):
            content = content[7:]
        if content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        result = json.loads(content)
        return SentimentAnalysisResponse(**result)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error analyzing sentiment: {str(e)}")
anomaly detection (need at least 5 data points)" + ) + + # Organize by metric + metrics_data = {} + for entry in historical_data: + metric_name = entry.get("campaign_deliverable_metrics", {}).get("display_name") or entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = float(entry.get("value", 0)) + date = entry.get("submitted_at", "") + + if metric_name not in metrics_data: + metrics_data[metric_name] = [] + metrics_data[metric_name].append({"value": value, "date": date}) + + # Calculate basic statistics for anomaly detection + stats = {} + for metric_name, values in metrics_data.items(): + if len(values) >= 3: + vals = [v["value"] for v in values] + mean = sum(vals) / len(vals) + variance = sum((x - mean) ** 2 for x in vals) / len(vals) + std_dev = variance ** 0.5 + stats[metric_name] = { + "mean": mean, + "std_dev": std_dev, + "values": values[-10:] # Last 10 values + } + + groq_client = get_groq_client() + + prompt = f"""Analyze this metrics data and detect anomalies (unusual spikes or drops): + +METRICS DATA WITH STATISTICS: +{json.dumps(stats, indent=2)} + +Identify anomalies where: +- Values are significantly above or below the mean (more than 2 standard deviations) +- Sudden spikes or drops in trends +- Unusual patterns + +Return your response as JSON with this exact structure: +{{ + "anomalies": [ + {{ + "metric": "Metric name", + "date": "YYYY-MM-DD", + "value": 0.0, + "expected_value": 0.0, + "deviation": 0.0, + "severity": "high|medium|low", + "description": "Description of the anomaly" + }} + ], + "summary": "Summary of detected anomalies" +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert data analyst specializing in anomaly detection. Always respond with valid JSON only." 
@router.get("/analytics/ai/attribution", response_model=AttributionModelingResponse)
async def get_attribution_modeling(
    campaign_id: Optional[str] = Query(None),
    user: dict = Depends(get_current_user)
):
    """Use AI to determine which channels, creators, or content types contribute most to conversions.

    Aggregates metric values per (platform, content_type) pair and asks the
    LLM for attribution percentages, top contributors and insights.

    Raises:
        HTTPException 500: Groq call or response parsing failed.
    """
    try:
        user_profile = await get_user_profile(user)
        profile = user_profile["profile"]

        historical_data = get_historical_metrics(
            brand_id=profile["id"] if user_profile["type"] == "brand" else None,
            creator_id=profile["id"] if user_profile["type"] == "creator" else None,
            campaign_id=campaign_id
        )

        # Aggregate totals per platform/content-type channel.
        attribution_data = {}
        for entry in historical_data:
            # get_historical_metrics attaches the deliverable at the TOP level
            # of each enriched update ("campaign_deliverables"), not nested
            # under the metric dict — the previous lookup under the metric
            # dict always produced "unknown" platforms, collapsing everything
            # into a single attribution bucket.
            deliverable_data = entry.get("campaign_deliverables")
            if not isinstance(deliverable_data, dict):
                deliverable_data = {}
            platform = deliverable_data.get("platform", "unknown")
            content_type = deliverable_data.get("content_type", "unknown")
            value = float(entry.get("value", 0))

            key = f"{platform}_{content_type}"
            bucket = attribution_data.setdefault(key, {
                "platform": platform,
                "content_type": content_type,
                "total_value": 0,
                "count": 0,
                "avg_value": 0
            })
            bucket["total_value"] += value
            bucket["count"] += 1

        # Calculate averages once totals are complete.
        for data in attribution_data.values():
            data["avg_value"] = data["total_value"] / data["count"] if data["count"] > 0 else 0

        groq_client = get_groq_client()

        prompt = f"""Analyze this attribution data and determine which channels, creators, or content types contribute most:

ATTRIBUTION DATA:
{json.dumps(attribution_data, indent=2)}

Determine:
1. Attribution percentages for each channel/content type
2. Top contributors to conversions/engagement
3. Insights about what's working best

Return your response as JSON with this exact structure:
{{
    "attribution": {{
        "Channel/Content Type": 25.5
    }},
    "top_contributors": [
        {{
            "name": "Channel/Content Type",
            "contribution_percent": 25.5,
            "total_value": 1000,
            "insight": "Why this is effective"
        }}
    ],
    "insights": ["Insight 1", "Insight 2"]
}}"""

        completion = groq_client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert in marketing attribution modeling. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_completion_tokens=1000,
            response_format={"type": "json_object"}
        )

        # Defensive strip of markdown fences even though json_object mode is
        # requested.
        content = completion.choices[0].message.content if completion.choices else "{}"
        content = content.strip()
        if content.startswith("```json"):
            content = content[7:]
        if content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        result = json.loads(content)
        return AttributionModelingResponse(**result)
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error generating attribution model: {str(e)}")
Start tracking metrics to compare against industry benchmarks.", + "Create metrics for your deliverables and have creators submit values", + "Once you have data, benchmarking will be available" + ] + ) + + # Calculate your metrics + your_metrics = {} + total_value = 0 + count = 0 + for entry in historical_data: + value = float(entry.get("value", 0)) + total_value += value + count += 1 + + if count > 0: + your_metrics = { + "avg_engagement": total_value / count, + "total_engagement": total_value, + "data_points": count + } + + groq_client = get_groq_client() + + prompt = f"""Compare these campaign metrics against industry benchmarks: + +YOUR METRICS: +{json.dumps(your_metrics, indent=2)} + +Provide: +1. Industry benchmark values for similar campaigns +2. Comparison showing how you perform vs industry +3. Recommendations for improvement + +Return your response as JSON with this exact structure: +{{ + "your_metrics": {{ + "metric_name": 0.0 + }}, + "industry_benchmarks": {{ + "metric_name": 0.0 + }}, + "comparison": {{ + "metric_name": {{ + "your_value": 0.0, + "industry_avg": 0.0, + "percentile": 75, + "status": "above|below|at average" + }} + }}, + "recommendations": ["Recommendation 1", "Recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in marketing analytics and industry benchmarking. Provide realistic industry benchmarks. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.4, + max_completion_tokens=1200, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return BenchmarkingResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating benchmarks: {str(e)}") + + +@router.get("/analytics/ai/churn-prediction", response_model=ChurnPredictionResponse) +async def predict_churn( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Predict which audience segments or customers are likely to disengage or churn""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + if len(historical_data) < 10: + return ChurnPredictionResponse( + churn_risk={}, + at_risk_segments=[], + recommendations=["Insufficient data for churn prediction. Need at least 10 data points."] + ) + + # Analyze engagement trends + engagement_trends = {} + for entry in historical_data[-30:]: # Last 30 entries + date = entry.get("submitted_at", "") + value = float(entry.get("value", 0)) + if date: + engagement_trends[date] = value + + groq_client = get_groq_client() + + prompt = f"""Analyze this engagement data and predict churn risk: + +ENGAGEMENT TRENDS: +{json.dumps(engagement_trends, indent=2)} + +Identify: +1. Churn risk levels for different segments +2. At-risk audience segments +3. 
Recommendations to prevent churn + +Return your response as JSON with this exact structure: +{{ + "churn_risk": {{ + "segment_name": 0.75 + }}, + "at_risk_segments": [ + {{ + "segment": "Segment name", + "risk_score": 0.75, + "indicators": ["Indicator 1", "Indicator 2"], + "recommendations": ["Recommendation 1"] + }} + ], + "recommendations": ["General recommendation 1", "General recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in customer retention and churn prediction. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1000, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return ChurnPredictionResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error predicting churn: {str(e)}") + + +@router.post("/analytics/ai/natural-language-query", response_model=NaturalLanguageQueryResponse) +async def natural_language_query( + request: NaturalLanguageQueryRequest, + user: dict = Depends(get_current_user) +): + """Let users ask questions about their analytics data in plain English and get AI-generated answers""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=request.campaign_id + ) + + # Prepare 
summary of available data + data_summary = { + "total_data_points": len(historical_data), + "metrics": list(set([ + entry.get("campaign_deliverable_metrics", {}).get("display_name") or + entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + for entry in historical_data + ])), + "date_range": { + "earliest": historical_data[0].get("submitted_at") if historical_data else None, + "latest": historical_data[-1].get("submitted_at") if historical_data else None + }, + "recent_values": [ + { + "metric": entry.get("campaign_deliverable_metrics", {}).get("display_name") or "unknown", + "value": entry.get("value"), + "date": entry.get("submitted_at") + } + for entry in historical_data[-10:] + ] + } + + groq_client = get_groq_client() + + prompt = f"""Answer this question about campaign analytics data: + +USER QUESTION: {request.query} + +AVAILABLE DATA SUMMARY: +{json.dumps(data_summary, indent=2)} + +Provide a clear, helpful answer based on the available data. If the question cannot be answered with the available data, say so. + +Return your response as JSON with this exact structure: +{{ + "answer": "Clear answer to the user's question", + "data_sources": ["Data source 1", "Data source 2"], + "confidence": "high|medium|low" +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are a helpful analytics assistant. Answer questions about campaign data clearly and accurately. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.5, + max_completion_tokens=800, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return NaturalLanguageQueryResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error processing query: {str(e)}") + + +@router.get("/analytics/ai/kpi-optimization", response_model=KPIOptimizationResponse) +async def get_kpi_optimization( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Recommend actions to improve key metrics based on AI analysis""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + if not historical_data: + return KPIOptimizationResponse( + current_kpis={}, + optimization_suggestions=[], + priority_actions=[ + "No metric data available yet. Start by creating metrics for your deliverables.", + "Have creators submit metric values to enable KPI optimization suggestions.", + "Once you have data, AI-powered optimization recommendations will appear here." 
+ ] + ) + + # Calculate current KPIs + current_kpis = {} + metrics_summary = {} + for entry in historical_data[-30:]: # Last 30 entries + metric_name = entry.get("campaign_deliverable_metrics", {}).get("display_name") or entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = float(entry.get("value", 0)) + + if metric_name not in metrics_summary: + metrics_summary[metric_name] = [] + metrics_summary[metric_name].append(value) + + for metric_name, values in metrics_summary.items(): + current_kpis[metric_name] = { + "current_avg": sum(values) / len(values) if values else 0, + "trend": "increasing" if len(values) >= 2 and values[-1] > values[0] else "decreasing" if len(values) >= 2 and values[-1] < values[0] else "stable" + } + + groq_client = get_groq_client() + + prompt = f"""Analyze these KPIs and provide optimization recommendations: + +CURRENT KPIs: +{json.dumps(current_kpis, indent=2)} + +METRICS SUMMARY: +{json.dumps({k: {"values": v, "count": len(v)} for k, v in metrics_summary.items()}, indent=2)} + +Provide: +1. Optimization suggestions for each KPI +2. Priority actions to improve metrics +3. Specific, actionable recommendations + +Return your response as JSON with this exact structure: +{{ + "current_kpis": {{ + "KPI name": 0.0 + }}, + "optimization_suggestions": [ + {{ + "kpi": "KPI name", + "current_value": 0.0, + "target_value": 0.0, + "suggestions": ["Suggestion 1", "Suggestion 2"], + "expected_impact": "high|medium|low" + }} + ], + "priority_actions": ["Action 1", "Action 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in KPI optimization and performance improvement. Provide actionable, specific recommendations. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.4, + max_completion_tokens=1500, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return KPIOptimizationResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating KPI optimization: {str(e)}") + From c2f4e0ed074d4110e89dc0d9a064abe5a465d929 Mon Sep 17 00:00:00 2001 From: Saahi30 Date: Mon, 17 Nov 2025 08:28:36 +0530 Subject: [PATCH 2/9] feat(analytics): add AI analytics API client functions - Add TypeScript interfaces for all AI analytics features - Add API client functions for predictive analytics - Add API client functions for automated insights - Add API client functions for audience segmentation - Add API client functions for sentiment analysis - Add API client functions for anomaly detection - Add API client functions for attribution modeling - Add API client functions for benchmarking - Add API client functions for churn prediction - Add API client functions for natural language query - Add API client functions for KPI optimization --- frontend/lib/api/analytics.ts | 239 ++++++++++++++++++++++++++++++++++ 1 file changed, 239 insertions(+) diff --git a/frontend/lib/api/analytics.ts b/frontend/lib/api/analytics.ts index c0e7dc2..1368e54 100644 --- a/frontend/lib/api/analytics.ts +++ b/frontend/lib/api/analytics.ts @@ -416,3 +416,242 @@ export async function getCreatorDashboardStats(): Promise return parseJson(response); } +// ==================== AI-Powered Analytics Endpoints ==================== + +export interface PredictiveAnalyticsRequest { + campaign_id?: string; + 
metric_type?: string; // 'performance', 'roi', 'engagement' + forecast_periods?: number; // days +} + +export interface PredictiveAnalyticsResponse { + forecast: { + predicted_value: number; + trend: string; + growth_rate: number; + forecasted_values: Array<{ date: string; value: number }>; + }; + confidence: string; + factors: string[]; + recommendations: string[]; +} + +export async function getPredictiveAnalytics( + request: PredictiveAnalyticsRequest +): Promise { + const response = await authenticatedFetch( + `${API_BASE_URL}/analytics/ai/predictive`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(request), + } + ); + return parseJson(response); +} + +export interface AutomatedInsightsResponse { + summary: string; + trends: string[]; + anomalies: Array<{ + metric: string; + description: string; + severity: string; + }>; + recommendations: string[]; + key_metrics: Record; +} + +export async function getAutomatedInsights( + campaignId?: string +): Promise { + const url = campaignId + ? `${API_BASE_URL}/analytics/ai/insights?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/insights`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + +export interface AudienceSegmentationResponse { + segments: Array<{ + name: string; + size: number; + characteristics: string[]; + demographics?: Record; + interests?: string[]; + engagement_score?: number; + }>; + visualization_data: Record; +} + +export async function getAudienceSegmentation( + campaignId?: string +): Promise { + const url = campaignId + ? 
`${API_BASE_URL}/analytics/ai/audience-segmentation?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/audience-segmentation`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + +export interface SentimentAnalysisRequest { + text?: string; + campaign_id?: string; +} + +export interface SentimentAnalysisResponse { + overall_sentiment: string; + sentiment_score: number; + positive_aspects: string[]; + negative_aspects: string[]; + recommendations: string[]; +} + +export async function analyzeSentiment( + request: SentimentAnalysisRequest +): Promise { + const response = await authenticatedFetch( + `${API_BASE_URL}/analytics/ai/sentiment`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(request), + } + ); + return parseJson(response); +} + +export interface AnomalyDetectionResponse { + anomalies: Array<{ + metric: string; + date: string; + value: number; + expected_value: number; + deviation: number; + severity: string; + description: string; + }>; + summary: string; +} + +export async function detectAnomalies( + campaignId?: string +): Promise { + const url = campaignId + ? `${API_BASE_URL}/analytics/ai/anomaly-detection?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/anomaly-detection`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + +export interface AttributionModelingResponse { + attribution: Record; + top_contributors: Array<{ + name: string; + contribution_percent: number; + total_value: number; + insight: string; + }>; + insights: string[]; +} + +export async function getAttributionModeling( + campaignId?: string +): Promise { + const url = campaignId + ? 
`${API_BASE_URL}/analytics/ai/attribution?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/attribution`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + +export interface BenchmarkingResponse { + your_metrics: Record; + industry_benchmarks: Record; + comparison: Record; + recommendations: string[]; +} + +export async function getBenchmarking( + campaignId?: string +): Promise { + const url = campaignId + ? `${API_BASE_URL}/analytics/ai/benchmarking?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/benchmarking`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + +export interface ChurnPredictionResponse { + churn_risk: Record; + at_risk_segments: Array<{ + segment: string; + risk_score: number; + indicators: string[]; + recommendations: string[]; + }>; + recommendations: string[]; +} + +export async function predictChurn( + campaignId?: string +): Promise { + const url = campaignId + ? `${API_BASE_URL}/analytics/ai/churn-prediction?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/churn-prediction`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + +export interface NaturalLanguageQueryRequest { + query: string; + campaign_id?: string; +} + +export interface NaturalLanguageQueryResponse { + answer: string; + data_sources: string[]; + confidence: string; +} + +export async function naturalLanguageQuery( + request: NaturalLanguageQueryRequest +): Promise { + const response = await authenticatedFetch( + `${API_BASE_URL}/analytics/ai/natural-language-query`, + { + method: "POST", + headers: { "Content-Type": "application/json" }, + body: JSON.stringify(request), + } + ); + return parseJson(response); +} + +export interface KPIOptimizationResponse { + current_kpis: Record; + optimization_suggestions: Array<{ + kpi: string; + current_value: number; + target_value: number; + suggestions: string[]; + expected_impact: string; + }>; + 
priority_actions: string[]; +} + +export async function getKPIOptimization( + campaignId?: string +): Promise { + const url = campaignId + ? `${API_BASE_URL}/analytics/ai/kpi-optimization?campaign_id=${campaignId}` + : `${API_BASE_URL}/analytics/ai/kpi-optimization`; + const response = await authenticatedFetch(url); + return parseJson(response); +} + From 514aba0f830b4d0ebe5838d1d6a7a729e20fd641 Mon Sep 17 00:00:00 2001 From: Saahi30 Date: Mon, 17 Nov 2025 08:28:37 +0530 Subject: [PATCH 3/9] feat(analytics): add comprehensive AI analytics dashboard component - Add tabbed interface for all 10 AI analytics features - Add natural language query interface - Add sentiment analysis text input - Add visualizations for all analytics features - Add loading and error states - Add responsive UI components - Support both brand and creator roles --- .../analytics/AIAnalyticsDashboard.tsx | 1466 +++++++++++++++++ 1 file changed, 1466 insertions(+) create mode 100644 frontend/components/analytics/AIAnalyticsDashboard.tsx diff --git a/frontend/components/analytics/AIAnalyticsDashboard.tsx b/frontend/components/analytics/AIAnalyticsDashboard.tsx new file mode 100644 index 0000000..5e61f4d --- /dev/null +++ b/frontend/components/analytics/AIAnalyticsDashboard.tsx @@ -0,0 +1,1466 @@ +"use client"; + +import { useState, useEffect } from "react"; +import { + getPredictiveAnalytics, + getAutomatedInsights, + getAudienceSegmentation, + analyzeSentiment, + detectAnomalies, + getAttributionModeling, + getBenchmarking, + predictChurn, + naturalLanguageQuery, + getKPIOptimization, + type PredictiveAnalyticsResponse, + type AutomatedInsightsResponse, + type AudienceSegmentationResponse, + type SentimentAnalysisResponse, + type AnomalyDetectionResponse, + type AttributionModelingResponse, + type BenchmarkingResponse, + type ChurnPredictionResponse, + type NaturalLanguageQueryResponse, + type KPIOptimizationResponse, +} from "@/lib/api/analytics"; +import { + TrendingUp, + Brain, + Users, + 
MessageSquare, + AlertTriangle, + BarChart3, + Target, + TrendingDown, + Search, + Lightbulb, + RefreshCw, + Sparkles, + Zap, + Activity, + PieChart, +} from "lucide-react"; + +interface AIAnalyticsDashboardProps { + campaignId?: string; + role?: "brand" | "creator"; +} + +export default function AIAnalyticsDashboard({ + campaignId, + role = "brand", +}: AIAnalyticsDashboardProps) { + const [activeTab, setActiveTab] = useState("insights"); + const [loading, setLoading] = useState>({}); + const [error, setError] = useState>({}); + + // Data states + const [insights, setInsights] = useState(null); + const [predictive, setPredictive] = useState(null); + const [segmentation, setSegmentation] = useState(null); + const [sentiment, setSentiment] = useState(null); + const [sentimentText, setSentimentText] = useState(""); + const [anomalies, setAnomalies] = useState(null); + const [attribution, setAttribution] = useState(null); + const [benchmarking, setBenchmarking] = useState(null); + const [churn, setChurn] = useState(null); + const [kpiOptimization, setKPIOptimization] = useState(null); + + // Natural language query + const [nlQuery, setNlQuery] = useState(""); + const [nlResponse, setNlResponse] = useState(null); + const [nlLoading, setNlLoading] = useState(false); + + useEffect(() => { + loadInsights(); + }, [campaignId]); + + const setLoadingState = (key: string, value: boolean) => { + setLoading((prev) => ({ ...prev, [key]: value })); + }; + + const setErrorState = (key: string, value: string | null) => { + setError((prev) => ({ ...prev, [key]: value })); + }; + + const loadInsights = async () => { + setLoadingState("insights", true); + setErrorState("insights", null); + try { + const data = await getAutomatedInsights(campaignId); + setInsights(data); + } catch (err: any) { + setErrorState("insights", err.message || "Failed to load insights"); + } finally { + setLoadingState("insights", false); + } + }; + + const loadPredictive = async () => { + 
setLoadingState("predictive", true); + setErrorState("predictive", null); + try { + const data = await getPredictiveAnalytics({ + campaign_id: campaignId, + forecast_periods: 30, + }); + setPredictive(data); + } catch (err: any) { + setErrorState("predictive", err.message || "Failed to load predictive analytics"); + } finally { + setLoadingState("predictive", false); + } + }; + + const loadSegmentation = async () => { + setLoadingState("segmentation", true); + setErrorState("segmentation", null); + try { + const data = await getAudienceSegmentation(campaignId); + setSegmentation(data); + } catch (err: any) { + setErrorState("segmentation", err.message || "Failed to load audience segmentation"); + } finally { + setLoadingState("segmentation", false); + } + }; + + const loadSentiment = async (customText?: string) => { + setLoadingState("sentiment", true); + setErrorState("sentiment", null); + try { + const data = await analyzeSentiment({ + campaign_id: campaignId, + text: customText || sentimentText || undefined + }); + setSentiment(data); + if (customText || sentimentText) { + setSentimentText(""); // Clear after successful analysis + } + } catch (err: any) { + setErrorState("sentiment", err.message || "Failed to analyze sentiment"); + } finally { + setLoadingState("sentiment", false); + } + }; + + const loadAnomalies = async () => { + setLoadingState("anomalies", true); + setErrorState("anomalies", null); + try { + const data = await detectAnomalies(campaignId); + setAnomalies(data); + } catch (err: any) { + setErrorState("anomalies", err.message || "Failed to detect anomalies"); + } finally { + setLoadingState("anomalies", false); + } + }; + + const loadAttribution = async () => { + setLoadingState("attribution", true); + setErrorState("attribution", null); + try { + const data = await getAttributionModeling(campaignId); + setAttribution(data); + } catch (err: any) { + setErrorState("attribution", err.message || "Failed to load attribution modeling"); + } finally 
{ + setLoadingState("attribution", false); + } + }; + + const loadBenchmarking = async () => { + setLoadingState("benchmarking", true); + setErrorState("benchmarking", null); + try { + const data = await getBenchmarking(campaignId); + setBenchmarking(data); + } catch (err: any) { + setErrorState("benchmarking", err.message || "Failed to load benchmarking"); + } finally { + setLoadingState("benchmarking", false); + } + }; + + const loadChurn = async () => { + setLoadingState("churn", true); + setErrorState("churn", null); + try { + const data = await predictChurn(campaignId); + setChurn(data); + } catch (err: any) { + setErrorState("churn", err.message || "Failed to predict churn"); + } finally { + setLoadingState("churn", false); + } + }; + + const loadKPIOptimization = async () => { + setLoadingState("kpi", true); + setErrorState("kpi", null); + try { + const data = await getKPIOptimization(campaignId); + setKPIOptimization(data); + } catch (err: any) { + setErrorState("kpi", err.message || "Failed to load KPI optimization"); + } finally { + setLoadingState("kpi", false); + } + }; + + const handleNLQuery = async () => { + if (!nlQuery.trim()) return; + setNlLoading(true); + try { + const response = await naturalLanguageQuery({ + query: nlQuery, + campaign_id: campaignId, + }); + setNlResponse(response); + } catch (err: any) { + setErrorState("nl", err.message || "Failed to process query"); + } finally { + setNlLoading(false); + } + }; + + const handleTabChange = (tab: string) => { + setActiveTab(tab); + // Load data when tab is first accessed + if (tab === "predictive" && !predictive) loadPredictive(); + if (tab === "segmentation" && !segmentation) loadSegmentation(); + if (tab === "sentiment" && !sentiment) loadSentiment(); + if (tab === "anomalies" && !anomalies) loadAnomalies(); + if (tab === "attribution" && !attribution) loadAttribution(); + if (tab === "benchmarking" && !benchmarking) loadBenchmarking(); + if (tab === "churn" && !churn) loadChurn(); + if 
(tab === "kpi" && !kpiOptimization) loadKPIOptimization(); + }; + + const tabs = [ + { id: "insights", label: "Automated Insights", icon: Brain }, + { id: "predictive", label: "Predictive Analytics", icon: TrendingUp }, + { id: "segmentation", label: "Audience Segmentation", icon: Users }, + { id: "sentiment", label: "Sentiment Analysis", icon: MessageSquare }, + { id: "anomalies", label: "Anomaly Detection", icon: AlertTriangle }, + { id: "attribution", label: "Attribution Modeling", icon: BarChart3 }, + { id: "benchmarking", label: "Benchmarking", icon: Target }, + { id: "churn", label: "Churn Prediction", icon: TrendingDown }, + { id: "kpi", label: "KPI Optimization", icon: Zap }, + ]; + + return ( +
+ {/* Header */} +
+
+

AI-Powered Analytics

+

+ Advanced analytics powered by AI to help you make data-driven decisions +

+
+
+ + Powered by AI +
+
+ + {/* Natural Language Query */} +
+
+ +

Ask Your Data

+
+
+ setNlQuery(e.target.value)} + onKeyPress={(e) => e.key === "Enter" && handleNLQuery()} + placeholder="Ask a question about your analytics data... (e.g., 'What's my average engagement rate?')" + className="flex-1 px-4 py-2 border border-gray-300 rounded-lg focus:ring-2 focus:ring-purple-500 focus:border-transparent" + /> + +
+ {nlResponse && ( +
+

{nlResponse.answer}

+
+ Confidence: {nlResponse.confidence} + {nlResponse.data_sources.length > 0 && ( + Sources: {nlResponse.data_sources.join(", ")} + )} +
+
+ )} +
+ + {/* Tabs */} +
+ +
+ + {/* Tab Content */} +
+ {activeTab === "insights" && ( + + )} + {activeTab === "predictive" && ( + + )} + {activeTab === "segmentation" && ( + + )} + {activeTab === "sentiment" && ( + + )} + {activeTab === "anomalies" && ( + + )} + {activeTab === "attribution" && ( + + )} + {activeTab === "benchmarking" && ( + + )} + {activeTab === "churn" && ( + + )} + {activeTab === "kpi" && ( + + )} +
+
+ ); +} + +// Tab Components +function InsightsTab({ + data, + loading, + error, + onRefresh, +}: { + data: AutomatedInsightsResponse | null; + loading: boolean; + error: string | null; + onRefresh: () => void; +}) { + if (loading) + return ( +
+ +
+ ); + if (error) + return ( +
+

{error}

+
+ ); + if (!data) return null; + + return ( +
+
+

Automated Insights

+ +
+ +
+

Executive Summary

+

{data.summary}

+
+ +
+
+

+ + Key Trends +

+
    + {data.trends.map((trend, idx) => ( +
  • + • + {trend} +
  • + ))} +
+
+ +
+

+ + Recommendations +

+
    + {data.recommendations.map((rec, idx) => ( +
  • + • + {rec} +
  • + ))} +
+
+
+ + {data.anomalies.length > 0 && ( +
+

+ + Detected Anomalies +

+
+ {data.anomalies.map((anomaly, idx) => ( +
+
{anomaly.metric}
+
{anomaly.description}
+
+ ))} +
+
+ )} +
+ ); +} + +function PredictiveTab({ + data, + loading, + error, + onRefresh, +}: { + data: PredictiveAnalyticsResponse | null; + loading: boolean; + error: string | null; + onRefresh: () => void; +}) { + if (loading) + return ( +
+ +
+ ); + if (error) + return ( +
+

{error}

+
+ ); + if (!data) return null; + + return ( +
+
+

Predictive Analytics

+ +
+ +
+
+
Predicted Value
+
+ {data.forecast.predicted_value.toFixed(2)} +
+
+
+
Growth Rate
+
+ {(data.forecast.growth_rate * 100).toFixed(1)}% +
+
+
+
Confidence
+
{data.confidence}
+
+
+ +
+

Forecasted Values

+
+ {data.forecast.forecasted_values.slice(0, 10).map((fv, idx) => ( +
+ {fv.date} + {fv.value.toFixed(2)} +
+ ))} +
+
+ +
+

Key Factors

+
    + {data.factors.map((factor, idx) => ( +
  • + • + {factor} +
  • + ))} +
+
+ +
+

Recommendations

+
    + {data.recommendations.map((rec, idx) => ( +
  • + • + {rec} +
  • + ))} +
+
+
+ ); +} + +function SegmentationTab({ + data, + loading, + error, + onRefresh, +}: { + data: AudienceSegmentationResponse | null; + loading: boolean; + error: string | null; + onRefresh: () => void; +}) { + if (loading) + return ( +
+ +
+ ); + if (error) + return ( +
+

{error}

+
+ ); + if (!data) return null; + + return ( +
+
+

Audience Segmentation

+ +
+ +
+ {data.segments.map((segment, idx) => ( +
+
+

{segment.name}

+ {segment.size}% +
+
+
Characteristics:
+
    + {segment.characteristics.map((char, charIdx) => ( +
  • + • + {char} +
  • + ))} +
+ {segment.engagement_score !== undefined && ( +
+
Engagement Score
+
+ {(segment.engagement_score * 100).toFixed(1)}% +
+
+ )} +
+
+ ))} +
+
+ ); +} + +function SentimentTab({ + data, + loading, + error, + onRefresh, + sentimentText, + setSentimentText, + onAnalyzeText, +}: { + data: SentimentAnalysisResponse | null; + loading: boolean; + error: string | null; + onRefresh: () => void; + sentimentText: string; + setSentimentText: (text: string) => void; + onAnalyzeText: (text?: string) => void; +}) { + if (loading) + return ( +
+
+

Sentiment Analysis

+
+ {/* Text Input Section - Show even while loading */} +
+

Analyze Custom Text

+

+ Paste text from social media comments, reviews, or feedback to analyze sentiment +

+
+