-- Pre-existing indexes (unchanged context from the consolidated schema file).
CREATE INDEX IF NOT EXISTS idx_metric_update_requests_status ON public.campaign_deliverable_metric_update_requests(status);
CREATE INDEX IF NOT EXISTS idx_metric_audit_metric_id ON public.campaign_deliverable_metric_audit(campaign_deliverable_metric_id);


-- ============================================================
-- Migration SQL for Campaign Wall Feature
-- Run this SQL in your Supabase SQL editor.
-- Every statement is idempotent (IF NOT EXISTS / guarded DO block),
-- so the script is safe to re-run.
-- ============================================================

-- 1. Add campaign-wall flags to campaigns.
ALTER TABLE public.campaigns
ADD COLUMN IF NOT EXISTS is_open_for_applications boolean DEFAULT false,
ADD COLUMN IF NOT EXISTS is_on_campaign_wall boolean DEFAULT false;

-- 2. Partial indexes: only rows with the flag set are indexed, keeping the
--    indexes small since most campaigns are expected to be closed/hidden.
CREATE INDEX IF NOT EXISTS idx_campaigns_is_open_for_applications ON public.campaigns(is_open_for_applications) WHERE is_open_for_applications = true;
CREATE INDEX IF NOT EXISTS idx_campaigns_is_on_campaign_wall ON public.campaigns(is_on_campaign_wall) WHERE is_on_campaign_wall = true;
CREATE INDEX IF NOT EXISTS idx_campaigns_open_and_wall ON public.campaigns(is_open_for_applications, is_on_campaign_wall) WHERE is_open_for_applications = true AND is_on_campaign_wall = true;

-- 3. Application detail columns (creator's payment offer, timeline and pitch).
ALTER TABLE public.campaign_applications
ADD COLUMN IF NOT EXISTS payment_min numeric,
ADD COLUMN IF NOT EXISTS payment_max numeric,
ADD COLUMN IF NOT EXISTS timeline_days integer,
ADD COLUMN IF NOT EXISTS timeline_weeks integer,
ADD COLUMN IF NOT EXISTS description text;

-- 4. Status-filtering indexes for campaign_applications.
CREATE INDEX IF NOT EXISTS idx_campaign_applications_status ON public.campaign_applications(status);
CREATE INDEX IF NOT EXISTS idx_campaign_applications_campaign_status ON public.campaign_applications(campaign_id, status);
CREATE INDEX IF NOT EXISTS idx_campaign_applications_creator_status ON public.campaign_applications(creator_id, status);

-- 5. Add 'reviewing' to the application_status enum if it doesn't exist.
--    NOTE(review): this step existed only in SQL_CAMPAIGN_WALL_MIGRATION.sql
--    and was missing from the consolidated schema file; it is included here
--    so both copies of the migration stay in sync.
--    ALTER TYPE ... ADD VALUE has no IF NOT EXISTS before PostgreSQL 12, so
--    we probe pg_enum first; the duplicate_object handler also covers a
--    concurrent add.
DO $$
BEGIN
    IF NOT EXISTS (
        SELECT 1
        FROM pg_enum
        WHERE enumlabel = 'reviewing'
          AND enumtypid = (SELECT oid FROM pg_type WHERE typname = 'application_status')
    ) THEN
        ALTER TYPE application_status ADD VALUE 'reviewing';
    END IF;
EXCEPTION
    WHEN duplicate_object THEN
        NULL; -- value already exists, ignore
END $$;

-- 6. Column documentation.
COMMENT ON COLUMN public.campaigns.is_open_for_applications IS 'Whether this campaign accepts applications from creators';
COMMENT ON COLUMN public.campaigns.is_on_campaign_wall IS 'Whether this campaign is visible on the public campaign wall';
COMMENT ON COLUMN public.campaign_applications.payment_min IS 'Minimum payment amount the creator is requesting';
COMMENT ON COLUMN public.campaign_applications.payment_max IS 'Maximum payment amount the creator is requesting';
COMMENT ON COLUMN public.campaign_applications.timeline_days IS 'Number of days the creator estimates to complete the campaign';
COMMENT ON COLUMN public.campaign_applications.timeline_weeks IS 'Number of weeks the creator estimates to complete the campaign';
COMMENT ON COLUMN public.campaign_applications.description IS 'Creator description explaining why they should be chosen for this campaign';
Add comment for documentation +COMMENT ON COLUMN public.campaigns.is_open_for_applications IS 'Whether this campaign accepts applications from creators'; +COMMENT ON COLUMN public.campaigns.is_on_campaign_wall IS 'Whether this campaign is visible on the public campaign wall'; +COMMENT ON COLUMN public.campaign_applications.payment_min IS 'Minimum payment amount the creator is requesting'; +COMMENT ON COLUMN public.campaign_applications.payment_max IS 'Maximum payment amount the creator is requesting'; +COMMENT ON COLUMN public.campaign_applications.timeline_days IS 'Number of days the creator estimates to complete the campaign'; +COMMENT ON COLUMN public.campaign_applications.timeline_weeks IS 'Number of weeks the creator estimates to complete the campaign'; +COMMENT ON COLUMN public.campaign_applications.description IS 'Creator description explaining why they should be chosen for this campaign'; + diff --git a/backend/app/api/routes/ai_analytics.py b/backend/app/api/routes/ai_analytics.py new file mode 100644 index 0000000..bf74ffa --- /dev/null +++ b/backend/app/api/routes/ai_analytics.py @@ -0,0 +1,1242 @@ +""" +AI-Powered Analytics endpoints for predictive analytics, insights, segmentation, etc. 
+""" +from fastapi import APIRouter, HTTPException, Depends, Query +from pydantic import BaseModel +from typing import Optional, List, Dict, Any +from datetime import datetime, timezone, timedelta +import json +from groq import Groq +from app.core.supabase_clients import supabase_anon +from app.core.dependencies import get_current_user +from app.core.config import settings + +router = APIRouter() + + +def get_groq_client(): + """Get Groq client instance""" + if not settings.groq_api_key: + raise HTTPException(status_code=500, detail="GROQ API key not configured") + return Groq(api_key=settings.groq_api_key) + + +async def get_user_profile(user: dict): + """Get brand or creator profile based on user role""" + role = user.get("role") + user_id = user.get("id") + + if role == "Brand": + brand_res = supabase_anon.table("brands") \ + .select("*") \ + .eq("user_id", user_id) \ + .single() \ + .execute() + if brand_res.data: + return {"type": "brand", "profile": brand_res.data} + elif role == "Creator": + creator_res = supabase_anon.table("creators") \ + .select("*") \ + .eq("user_id", user_id) \ + .single() \ + .execute() + if creator_res.data: + return {"type": "creator", "profile": creator_res.data} + + raise HTTPException( + status_code=403, + detail="User profile not found. Please complete onboarding." 
+ ) + + +def get_historical_metrics(brand_id: Optional[str] = None, creator_id: Optional[str] = None, campaign_id: Optional[str] = None): + """Fetch historical metrics data for analysis""" + # Build query for metric updates + query = supabase_anon.table("campaign_deliverable_metric_updates").select("*") + + # Get metric IDs based on filters + metric_ids = [] + has_filters = False + + if brand_id: + has_filters = True + # Filter by brand through campaigns + campaigns_res = supabase_anon.table("campaigns") \ + .select("id") \ + .eq("brand_id", brand_id) \ + .execute() + campaign_ids = [c["id"] for c in campaigns_res.data or []] + if campaign_ids: + deliverables_res = supabase_anon.table("campaign_deliverables") \ + .select("id") \ + .in_("campaign_id", campaign_ids) \ + .execute() + deliverable_ids = [d["id"] for d in deliverables_res.data or []] + if deliverable_ids: + metrics_res = supabase_anon.table("campaign_deliverable_metrics") \ + .select("id") \ + .in_("campaign_deliverable_id", deliverable_ids) \ + .execute() + metric_ids = [m["id"] for m in metrics_res.data or []] + + if campaign_id: + has_filters = True + deliverables_res = supabase_anon.table("campaign_deliverables") \ + .select("id") \ + .eq("campaign_id", campaign_id) \ + .execute() + deliverable_ids = [d["id"] for d in deliverables_res.data or []] + if deliverable_ids: + metrics_res = supabase_anon.table("campaign_deliverable_metrics") \ + .select("id") \ + .in_("campaign_deliverable_id", deliverable_ids) \ + .execute() + campaign_metric_ids = [m["id"] for m in metrics_res.data or []] + if metric_ids: + # Intersect with brand filter + metric_ids = [m for m in metric_ids if m in campaign_metric_ids] + else: + metric_ids = campaign_metric_ids + + # Apply metric filters only if we have specific filters + # If no filters, get all updates (for creator-only case) + if has_filters: + if metric_ids: + query = query.in_("campaign_deliverable_metric_id", metric_ids) + else: + # If we have filters but no metrics 
found, return empty + # This means the brand/campaign exists but has no metrics/updates yet + return [] + + if creator_id: + query = query.eq("submitted_by", creator_id) + + # Execute query + try: + result = query.order("submitted_at", desc=False).limit(1000).execute() + updates = result.data or [] + except Exception as e: + # If query fails, return empty + return [] + + if not updates: + return [] + + # Enrich with metric and deliverable data + metric_ids_from_updates = list(set([u["campaign_deliverable_metric_id"] for u in updates if u.get("campaign_deliverable_metric_id")])) + + if metric_ids_from_updates: + metrics_res = supabase_anon.table("campaign_deliverable_metrics") \ + .select("id, name, display_name, campaign_deliverable_id") \ + .in_("id", metric_ids_from_updates) \ + .execute() + metrics = {m["id"]: m for m in (metrics_res.data or [])} + else: + metrics = {} + + deliverable_ids = list(set([m.get("campaign_deliverable_id") for m in metrics.values() if m.get("campaign_deliverable_id")])) + if deliverable_ids: + deliverables_res = supabase_anon.table("campaign_deliverables") \ + .select("id, campaign_id, platform, content_type") \ + .in_("id", deliverable_ids) \ + .execute() + deliverables = {d["id"]: d for d in (deliverables_res.data or [])} + else: + deliverables = {} + + # Combine data + enriched_updates = [] + for update in updates: + metric_id = update.get("campaign_deliverable_metric_id") + metric = metrics.get(metric_id, {}) if metric_id else {} + deliverable_id = metric.get("campaign_deliverable_id") + deliverable = deliverables.get(deliverable_id, {}) if deliverable_id else {} + + enriched_update = { + **update, + "campaign_deliverable_metrics": metric, + "campaign_deliverables": deliverable + } + enriched_updates.append(enriched_update) + + return enriched_updates + + +# ==================== Pydantic Models ==================== + +class PredictiveAnalyticsRequest(BaseModel): + campaign_id: Optional[str] = None + metric_type: Optional[str] = 
None # 'performance', 'roi', 'engagement' + forecast_periods: int = 30 # days + + +class PredictiveAnalyticsResponse(BaseModel): + forecast: Dict[str, Any] + confidence: str + factors: List[str] + recommendations: List[str] + + +class AutomatedInsightsResponse(BaseModel): + summary: str + trends: List[str] + anomalies: List[Dict[str, Any]] + recommendations: List[str] + key_metrics: Dict[str, Any] + + +class AudienceSegmentationResponse(BaseModel): + segments: List[Dict[str, Any]] + visualization_data: Dict[str, Any] + + +class SentimentAnalysisRequest(BaseModel): + text: Optional[str] = None + campaign_id: Optional[str] = None + + +class SentimentAnalysisResponse(BaseModel): + overall_sentiment: str + sentiment_score: float + positive_aspects: List[str] + negative_aspects: List[str] + recommendations: List[str] + + +class AnomalyDetectionResponse(BaseModel): + anomalies: List[Dict[str, Any]] + summary: str + + +class AttributionModelingResponse(BaseModel): + attribution: Dict[str, float] + top_contributors: List[Dict[str, Any]] + insights: List[str] + + +class BenchmarkingResponse(BaseModel): + your_metrics: Dict[str, float] + industry_benchmarks: Dict[str, float] + comparison: Dict[str, Any] + recommendations: List[str] + + +class ChurnPredictionResponse(BaseModel): + churn_risk: Dict[str, float] + at_risk_segments: List[Dict[str, Any]] + recommendations: List[str] + + +class NaturalLanguageQueryRequest(BaseModel): + query: str + campaign_id: Optional[str] = None + + +class NaturalLanguageQueryResponse(BaseModel): + answer: str + data_sources: List[str] + confidence: str + + +class KPIOptimizationResponse(BaseModel): + current_kpis: Dict[str, float] + optimization_suggestions: List[Dict[str, Any]] + priority_actions: List[str] + + +# ==================== API Endpoints ==================== + +@router.post("/analytics/ai/predictive", response_model=PredictiveAnalyticsResponse) +async def get_predictive_analytics( + request: PredictiveAnalyticsRequest, + user: dict 
= Depends(get_current_user) +): + """Forecast campaign performance, ROI, or audience engagement using historical data""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + user_type = user_profile["type"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_type == "brand" else None, + creator_id=profile["id"] if user_type == "creator" else None, + campaign_id=request.campaign_id + ) + + if not historical_data: + return PredictiveAnalyticsResponse( + forecast={ + "predicted_value": 0.0, + "trend": "stable", + "growth_rate": 0.0, + "forecasted_values": [] + }, + confidence="low", + factors=["No historical data available"], + recommendations=[ + "Start by creating metrics for your campaign deliverables", + "Have creators submit metric values to build historical data", + "Once you have at least 5-10 data points, predictions will be available" + ] + ) + + # Prepare data for AI analysis + metrics_summary = {} + for entry in historical_data[-30:]: # Last 30 entries + metric_name = entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = entry.get("value", 0) + date = entry.get("submitted_at", "") + if metric_name not in metrics_summary: + metrics_summary[metric_name] = [] + metrics_summary[metric_name].append({"value": value, "date": date}) + + groq_client = get_groq_client() + + prompt = f"""Analyze this historical campaign metrics data and provide predictive analytics: + +HISTORICAL DATA: +{json.dumps(metrics_summary, indent=2)} + +METRIC TYPE: {request.metric_type or 'general performance'} +FORECAST PERIOD: {request.forecast_periods} days +USER TYPE: {user_type} + +Based on the historical trends, provide: +1. Forecasted values for the next {request.forecast_periods} days +2. Confidence level (high/medium/low) +3. Key factors influencing the forecast +4. 
Actionable recommendations + +Return your response as JSON with this exact structure: +{{ + "forecast": {{ + "predicted_value": 0.0, + "trend": "increasing|decreasing|stable", + "growth_rate": 0.0, + "forecasted_values": [{{"date": "YYYY-MM-DD", "value": 0.0}}] + }}, + "confidence": "high|medium|low", + "factors": ["Factor 1", "Factor 2"], + "recommendations": ["Recommendation 1", "Recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert data analyst specializing in predictive analytics for marketing campaigns. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1500, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return PredictiveAnalyticsResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating predictive analytics: {str(e)}") + + +@router.get("/analytics/ai/insights", response_model=AutomatedInsightsResponse) +async def get_automated_insights( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Generate plain-language summaries of analytics data with trends, anomalies, and recommendations""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + user_type = user_profile["type"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_type == "brand" else None, + creator_id=profile["id"] if user_type == "creator" else None, + 
campaign_id=campaign_id + ) + + if not historical_data: + return AutomatedInsightsResponse( + summary="No analytics data available yet. Start tracking metrics to get automated insights.", + trends=[], + anomalies=[], + recommendations=[ + "Create metrics for your campaign deliverables", + "Have creators submit metric values", + "Once you have data, insights will appear here automatically" + ], + key_metrics={} + ) + + # Aggregate metrics + metrics_by_name = {} + for entry in historical_data: + metric_name = entry.get("campaign_deliverable_metrics", {}).get("display_name") or entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = float(entry.get("value", 0)) + date = entry.get("submitted_at", "") + + if metric_name not in metrics_by_name: + metrics_by_name[metric_name] = [] + metrics_by_name[metric_name].append({"value": value, "date": date}) + + # Calculate trends + trends_data = {} + for metric_name, values in metrics_by_name.items(): + if len(values) >= 2: + recent_avg = sum(v["value"] for v in values[-7:]) / min(7, len(values)) + older_avg = sum(v["value"] for v in values[:-7]) / max(1, len(values) - 7) if len(values) > 7 else recent_avg + change = ((recent_avg - older_avg) / older_avg * 100) if older_avg > 0 else 0 + trends_data[metric_name] = { + "current_avg": recent_avg, + "previous_avg": older_avg, + "change_percent": change, + "trend": "increasing" if change > 5 else "decreasing" if change < -5 else "stable" + } + + groq_client = get_groq_client() + + prompt = f"""Analyze this campaign analytics data and provide automated insights: + +METRICS DATA: +{json.dumps(metrics_by_name, indent=2)} + +TRENDS ANALYSIS: +{json.dumps(trends_data, indent=2)} + +USER TYPE: {user_type} + +Provide: +1. A plain-language executive summary (2-3 sentences) +2. Key trends identified +3. Any anomalies or unusual patterns +4. 
Actionable recommendations + +Return your response as JSON with this exact structure: +{{ + "summary": "Executive summary in plain language", + "trends": ["Trend 1", "Trend 2"], + "anomalies": [{{"metric": "Metric name", "description": "Anomaly description", "severity": "high|medium|low"}}], + "recommendations": ["Recommendation 1", "Recommendation 2"], + "key_metrics": {{"metric_name": "value"}} +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert analytics consultant. Provide clear, actionable insights in plain language. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.4, + max_completion_tokens=1200, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return AutomatedInsightsResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating insights: {str(e)}") + + +@router.get("/analytics/ai/audience-segmentation", response_model=AudienceSegmentationResponse) +async def get_audience_segmentation( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Use AI to identify and visualize key audience segments based on demographics, interests, and behaviors""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + 
campaign_id=campaign_id + ) + + # Extract demographics data + demographics_data = [] + for entry in historical_data: + demographics = entry.get("demographics") + if demographics and isinstance(demographics, dict): + demographics_data.append(demographics) + + if not demographics_data: + # Return default segments if no demographics data + return AudienceSegmentationResponse( + segments=[ + {"name": "General Audience", "size": 100, "characteristics": ["No demographic data available"]} + ], + visualization_data={} + ) + + groq_client = get_groq_client() + + prompt = f"""Analyze this audience demographics data and identify key segments: + +DEMOGRAPHICS DATA: +{json.dumps(demographics_data[:50], indent=2)} # Limit to 50 for prompt size + +Identify distinct audience segments based on: +- Demographics (age, gender, location) +- Interests +- Behaviors +- Engagement patterns + +Return your response as JSON with this exact structure: +{{ + "segments": [ + {{ + "name": "Segment Name", + "size": 25, + "characteristics": ["Characteristic 1", "Characteristic 2"], + "demographics": {{"age_range": "25-34", "gender": "mixed", "location": "urban"}}, + "interests": ["Interest 1", "Interest 2"], + "engagement_score": 0.75 + }} + ], + "visualization_data": {{ + "segment_sizes": {{"Segment 1": 25, "Segment 2": 30}}, + "demographic_breakdown": {{}} + }} +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in audience segmentation and market research. Identify meaningful audience segments. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.5, + max_completion_tokens=1500, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return AudienceSegmentationResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating audience segmentation: {str(e)}") + + +@router.post("/analytics/ai/sentiment", response_model=SentimentAnalysisResponse) +async def analyze_sentiment( + request: SentimentAnalysisRequest, + user: dict = Depends(get_current_user) +): + """Analyze social media and campaign feedback to gauge public sentiment""" + try: + # Get feedback data if campaign_id provided + feedback_texts = [] + if request.campaign_id: + feedback_res = supabase_anon.table("campaign_deliverable_metric_feedback") \ + .select("feedback_text") \ + .execute() + feedback_texts = [f["feedback_text"] for f in feedback_res.data or [] if f.get("feedback_text")] + + if request.text: + feedback_texts.append(request.text) + + if not feedback_texts: + raise HTTPException(status_code=400, detail="No text data provided for sentiment analysis") + + combined_text = "\n\n".join(feedback_texts[:20]) # Limit to 20 feedback items + + groq_client = get_groq_client() + + prompt = f"""Analyze the sentiment of this campaign feedback and social media data: + +FEEDBACK DATA: +{combined_text} + +Provide a comprehensive sentiment analysis including: +1. Overall sentiment (positive, neutral, negative, mixed) +2. Sentiment score from -1 (very negative) to 1 (very positive) +3. Positive aspects mentioned +4. Negative aspects mentioned +5. 
Recommendations for improvement + +Return your response as JSON with this exact structure: +{{ + "overall_sentiment": "positive|neutral|negative|mixed", + "sentiment_score": 0.75, + "positive_aspects": ["Aspect 1", "Aspect 2"], + "negative_aspects": ["Aspect 1", "Aspect 2"], + "recommendations": ["Recommendation 1", "Recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert sentiment analyst specializing in brand and campaign feedback. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1000, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return SentimentAnalysisResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error analyzing sentiment: {str(e)}") + + +@router.get("/analytics/ai/anomaly-detection", response_model=AnomalyDetectionResponse) +async def detect_anomalies( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Automatically flag unusual spikes or drops in metrics""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + if len(historical_data) < 5: + return AnomalyDetectionResponse( + anomalies=[], + summary="Insufficient data for 
anomaly detection (need at least 5 data points)" + ) + + # Organize by metric + metrics_data = {} + for entry in historical_data: + metric_name = entry.get("campaign_deliverable_metrics", {}).get("display_name") or entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = float(entry.get("value", 0)) + date = entry.get("submitted_at", "") + + if metric_name not in metrics_data: + metrics_data[metric_name] = [] + metrics_data[metric_name].append({"value": value, "date": date}) + + # Calculate basic statistics for anomaly detection + stats = {} + for metric_name, values in metrics_data.items(): + if len(values) >= 3: + vals = [v["value"] for v in values] + mean = sum(vals) / len(vals) + variance = sum((x - mean) ** 2 for x in vals) / len(vals) + std_dev = variance ** 0.5 + stats[metric_name] = { + "mean": mean, + "std_dev": std_dev, + "values": values[-10:] # Last 10 values + } + + groq_client = get_groq_client() + + prompt = f"""Analyze this metrics data and detect anomalies (unusual spikes or drops): + +METRICS DATA WITH STATISTICS: +{json.dumps(stats, indent=2)} + +Identify anomalies where: +- Values are significantly above or below the mean (more than 2 standard deviations) +- Sudden spikes or drops in trends +- Unusual patterns + +Return your response as JSON with this exact structure: +{{ + "anomalies": [ + {{ + "metric": "Metric name", + "date": "YYYY-MM-DD", + "value": 0.0, + "expected_value": 0.0, + "deviation": 0.0, + "severity": "high|medium|low", + "description": "Description of the anomaly" + }} + ], + "summary": "Summary of detected anomalies" +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert data analyst specializing in anomaly detection. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.2, + max_completion_tokens=1200, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return AnomalyDetectionResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error detecting anomalies: {str(e)}") + + +@router.get("/analytics/ai/attribution", response_model=AttributionModelingResponse) +async def get_attribution_modeling( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Use AI to determine which channels, creators, or content types contribute most to conversions""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + # Organize by platform/channel/creator + attribution_data = {} + for entry in historical_data: + metric_data = entry.get("campaign_deliverable_metrics", {}) + deliverable_data = metric_data.get("campaign_deliverables", {}) if isinstance(metric_data.get("campaign_deliverables"), dict) else {} + platform = deliverable_data.get("platform", "unknown") + content_type = deliverable_data.get("content_type", "unknown") + value = float(entry.get("value", 0)) + + key = f"{platform}_{content_type}" + if key not in attribution_data: + attribution_data[key] = { + "platform": platform, + "content_type": content_type, + "total_value": 0, + "count": 0, + "avg_value": 0 + } + 
attribution_data[key]["total_value"] += value + attribution_data[key]["count"] += 1 + + # Calculate averages + for key, data in attribution_data.items(): + data["avg_value"] = data["total_value"] / data["count"] if data["count"] > 0 else 0 + + groq_client = get_groq_client() + + prompt = f"""Analyze this attribution data and determine which channels, creators, or content types contribute most: + +ATTRIBUTION DATA: +{json.dumps(attribution_data, indent=2)} + +Determine: +1. Attribution percentages for each channel/content type +2. Top contributors to conversions/engagement +3. Insights about what's working best + +Return your response as JSON with this exact structure: +{{ + "attribution": {{ + "Channel/Content Type": 25.5 + }}, + "top_contributors": [ + {{ + "name": "Channel/Content Type", + "contribution_percent": 25.5, + "total_value": 1000, + "insight": "Why this is effective" + }} + ], + "insights": ["Insight 1", "Insight 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in marketing attribution modeling. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1000, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return AttributionModelingResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating attribution model: {str(e)}") + + +@router.get("/analytics/ai/benchmarking", response_model=BenchmarkingResponse) +async def get_benchmarking( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Compare brand's performance against industry standards using AI-driven benchmarks""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + if not historical_data: + return BenchmarkingResponse( + your_metrics={}, + industry_benchmarks={}, + comparison={}, + recommendations=[ + "No metric data available yet. 
Start tracking metrics to compare against industry benchmarks.", + "Create metrics for your deliverables and have creators submit values", + "Once you have data, benchmarking will be available" + ] + ) + + # Calculate your metrics + your_metrics = {} + total_value = 0 + count = 0 + for entry in historical_data: + value = float(entry.get("value", 0)) + total_value += value + count += 1 + + if count > 0: + your_metrics = { + "avg_engagement": total_value / count, + "total_engagement": total_value, + "data_points": count + } + + groq_client = get_groq_client() + + prompt = f"""Compare these campaign metrics against industry benchmarks: + +YOUR METRICS: +{json.dumps(your_metrics, indent=2)} + +Provide: +1. Industry benchmark values for similar campaigns +2. Comparison showing how you perform vs industry +3. Recommendations for improvement + +Return your response as JSON with this exact structure: +{{ + "your_metrics": {{ + "metric_name": 0.0 + }}, + "industry_benchmarks": {{ + "metric_name": 0.0 + }}, + "comparison": {{ + "metric_name": {{ + "your_value": 0.0, + "industry_avg": 0.0, + "percentile": 75, + "status": "above|below|at average" + }} + }}, + "recommendations": ["Recommendation 1", "Recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in marketing analytics and industry benchmarking. Provide realistic industry benchmarks. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.4, + max_completion_tokens=1200, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return BenchmarkingResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating benchmarks: {str(e)}") + + +@router.get("/analytics/ai/churn-prediction", response_model=ChurnPredictionResponse) +async def predict_churn( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Predict which audience segments or customers are likely to disengage or churn""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + if len(historical_data) < 10: + return ChurnPredictionResponse( + churn_risk={}, + at_risk_segments=[], + recommendations=["Insufficient data for churn prediction. Need at least 10 data points."] + ) + + # Analyze engagement trends + engagement_trends = {} + for entry in historical_data[-30:]: # Last 30 entries + date = entry.get("submitted_at", "") + value = float(entry.get("value", 0)) + if date: + engagement_trends[date] = value + + groq_client = get_groq_client() + + prompt = f"""Analyze this engagement data and predict churn risk: + +ENGAGEMENT TRENDS: +{json.dumps(engagement_trends, indent=2)} + +Identify: +1. Churn risk levels for different segments +2. At-risk audience segments +3. 
Recommendations to prevent churn + +Return your response as JSON with this exact structure: +{{ + "churn_risk": {{ + "segment_name": 0.75 + }}, + "at_risk_segments": [ + {{ + "segment": "Segment name", + "risk_score": 0.75, + "indicators": ["Indicator 1", "Indicator 2"], + "recommendations": ["Recommendation 1"] + }} + ], + "recommendations": ["General recommendation 1", "General recommendation 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in customer retention and churn prediction. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1000, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return ChurnPredictionResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error predicting churn: {str(e)}") + + +@router.post("/analytics/ai/natural-language-query", response_model=NaturalLanguageQueryResponse) +async def natural_language_query( + request: NaturalLanguageQueryRequest, + user: dict = Depends(get_current_user) +): + """Let users ask questions about their analytics data in plain English and get AI-generated answers""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=request.campaign_id + ) + + # Prepare 
summary of available data + data_summary = { + "total_data_points": len(historical_data), + "metrics": list(set([ + entry.get("campaign_deliverable_metrics", {}).get("display_name") or + entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + for entry in historical_data + ])), + "date_range": { + "earliest": historical_data[0].get("submitted_at") if historical_data else None, + "latest": historical_data[-1].get("submitted_at") if historical_data else None + }, + "recent_values": [ + { + "metric": entry.get("campaign_deliverable_metrics", {}).get("display_name") or "unknown", + "value": entry.get("value"), + "date": entry.get("submitted_at") + } + for entry in historical_data[-10:] + ] + } + + groq_client = get_groq_client() + + prompt = f"""Answer this question about campaign analytics data: + +USER QUESTION: {request.query} + +AVAILABLE DATA SUMMARY: +{json.dumps(data_summary, indent=2)} + +Provide a clear, helpful answer based on the available data. If the question cannot be answered with the available data, say so. + +Return your response as JSON with this exact structure: +{{ + "answer": "Clear answer to the user's question", + "data_sources": ["Data source 1", "Data source 2"], + "confidence": "high|medium|low" +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are a helpful analytics assistant. Answer questions about campaign data clearly and accurately. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.5, + max_completion_tokens=800, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return NaturalLanguageQueryResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error processing query: {str(e)}") + + +@router.get("/analytics/ai/kpi-optimization", response_model=KPIOptimizationResponse) +async def get_kpi_optimization( + campaign_id: Optional[str] = Query(None), + user: dict = Depends(get_current_user) +): + """Recommend actions to improve key metrics based on AI analysis""" + try: + user_profile = await get_user_profile(user) + profile = user_profile["profile"] + + historical_data = get_historical_metrics( + brand_id=profile["id"] if user_profile["type"] == "brand" else None, + creator_id=profile["id"] if user_profile["type"] == "creator" else None, + campaign_id=campaign_id + ) + + if not historical_data: + return KPIOptimizationResponse( + current_kpis={}, + optimization_suggestions=[], + priority_actions=[ + "No metric data available yet. Start by creating metrics for your deliverables.", + "Have creators submit metric values to enable KPI optimization suggestions.", + "Once you have data, AI-powered optimization recommendations will appear here." 
+ ] + ) + + # Calculate current KPIs + current_kpis = {} + metrics_summary = {} + for entry in historical_data[-30:]: # Last 30 entries + metric_name = entry.get("campaign_deliverable_metrics", {}).get("display_name") or entry.get("campaign_deliverable_metrics", {}).get("name", "unknown") + value = float(entry.get("value", 0)) + + if metric_name not in metrics_summary: + metrics_summary[metric_name] = [] + metrics_summary[metric_name].append(value) + + for metric_name, values in metrics_summary.items(): + current_kpis[metric_name] = { + "current_avg": sum(values) / len(values) if values else 0, + "trend": "increasing" if len(values) >= 2 and values[-1] > values[0] else "decreasing" if len(values) >= 2 and values[-1] < values[0] else "stable" + } + + groq_client = get_groq_client() + + prompt = f"""Analyze these KPIs and provide optimization recommendations: + +CURRENT KPIs: +{json.dumps(current_kpis, indent=2)} + +METRICS SUMMARY: +{json.dumps({k: {"values": v, "count": len(v)} for k, v in metrics_summary.items()}, indent=2)} + +Provide: +1. Optimization suggestions for each KPI +2. Priority actions to improve metrics +3. Specific, actionable recommendations + +Return your response as JSON with this exact structure: +{{ + "current_kpis": {{ + "KPI name": 0.0 + }}, + "optimization_suggestions": [ + {{ + "kpi": "KPI name", + "current_value": 0.0, + "target_value": 0.0, + "suggestions": ["Suggestion 1", "Suggestion 2"], + "expected_impact": "high|medium|low" + }} + ], + "priority_actions": ["Action 1", "Action 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert in KPI optimization and performance improvement. Provide actionable, specific recommendations. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.4, + max_completion_tokens=1500, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + return KPIOptimizationResponse(**result) + except HTTPException: + raise + except Exception as e: + raise HTTPException(status_code=500, detail=f"Error generating KPI optimization: {str(e)}") + diff --git a/backend/app/api/routes/analytics.py b/backend/app/api/routes/analytics.py index 76c5647..90a7b2e 100644 --- a/backend/app/api/routes/analytics.py +++ b/backend/app/api/routes/analytics.py @@ -5,10 +5,12 @@ from fastapi import APIRouter, HTTPException, Depends, UploadFile, File, Query from pydantic import BaseModel, Field from typing import Optional, List, Dict, Any -from datetime import datetime, timezone +from datetime import datetime, timezone, timedelta from uuid import UUID import base64 import httpx +import json +from groq import Groq from app.core.supabase_clients import supabase_anon from app.core.dependencies import get_current_brand, get_current_creator, get_current_user from app.core.config import settings diff --git a/backend/app/api/routes/campaigns.py b/backend/app/api/routes/campaigns.py index b28a287..fb6ff5d 100644 --- a/backend/app/api/routes/campaigns.py +++ b/backend/app/api/routes/campaigns.py @@ -6,7 +6,8 @@ from typing import Optional, List from datetime import datetime, timezone from app.core.supabase_clients import supabase_anon -from app.core.dependencies import get_current_brand +from app.core.dependencies import get_current_brand, get_current_creator, optional_security +from fastapi.security import HTTPAuthorizationCredentials from uuid import UUID router 
= APIRouter() @@ -29,6 +30,8 @@ class CampaignCreate(BaseModel): starts_at: Optional[datetime] = None ends_at: Optional[datetime] = None is_featured: bool = False + is_open_for_applications: bool = False + is_on_campaign_wall: bool = False class CampaignUpdate(BaseModel): @@ -48,6 +51,8 @@ class CampaignUpdate(BaseModel): starts_at: Optional[datetime] = None ends_at: Optional[datetime] = None is_featured: Optional[bool] = None + is_open_for_applications: Optional[bool] = None + is_on_campaign_wall: Optional[bool] = None class CampaignResponse(BaseModel): @@ -72,6 +77,8 @@ class CampaignResponse(BaseModel): starts_at: Optional[datetime] ends_at: Optional[datetime] is_featured: bool + is_open_for_applications: Optional[bool] = False + is_on_campaign_wall: Optional[bool] = False @router.post("/campaigns", response_model=CampaignResponse, status_code=201) @@ -123,6 +130,8 @@ async def create_campaign(campaign: CampaignCreate, brand: dict = Depends(get_cu "starts_at": campaign.starts_at.isoformat() if campaign.starts_at else None, "ends_at": campaign.ends_at.isoformat() if campaign.ends_at else None, "is_featured": campaign.is_featured, + "is_open_for_applications": campaign.is_open_for_applications, + "is_on_campaign_wall": campaign.is_on_campaign_wall, } # If status is active, set published_at @@ -248,6 +257,61 @@ async def get_campaigns( ) from e +# ============================================================================ +# PUBLIC CAMPAIGN WALL ENDPOINT (Must be before /campaigns/{campaign_id} to avoid route conflict) +# ============================================================================ + +@router.get("/campaigns/public", response_model=List[CampaignResponse]) +async def get_public_campaigns( + search: Optional[str] = Query(None, description="Search by title or description"), + platform: Optional[str] = Query(None, description="Filter by platform"), + niche: Optional[str] = Query(None, description="Filter by preferred niche"), + budget_min: 
Optional[float] = Query(None, description="Minimum budget"), + budget_max: Optional[float] = Query(None, description="Maximum budget"), + limit: int = Query(50, ge=1, le=100), + offset: int = Query(0, ge=0) +): + """ + Get all campaigns that are open for applications and on the campaign wall. + This endpoint is accessible to any authenticated user (creators and brands). + """ + from app.core.supabase_clients import supabase_anon + supabase = supabase_anon + + try: + # Build query for campaigns that are open and on wall + query = supabase.table("campaigns").select("*").eq("is_open_for_applications", True).eq("is_on_campaign_wall", True).eq("status", "active") + + # Apply filters + if search: + query = query.or_(f"title.ilike.%{search}%,description.ilike.%{search}%,short_description.ilike.%{search}%") + + if platform: + query = query.contains("platforms", [platform]) + + if niche: + query = query.contains("preferred_creator_niches", [niche]) + + if budget_min is not None: + query = query.gte("budget_min", budget_min) + + if budget_max is not None: + query = query.lte("budget_max", budget_max) + + # Apply pagination and ordering + query = query.order("created_at", desc=True).range(offset, offset + limit - 1) + + response = query.execute() + + return response.data if response.data else [] + + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error fetching public campaigns: {str(e)}" + ) from e + + @router.get("/campaigns/{campaign_id}", response_model=CampaignResponse) async def get_campaign( campaign_id: str, @@ -864,3 +928,613 @@ async def search_creator_by_id_or_name( status_code=500, detail=f"Error searching creator: {str(e)}" ) from e + + +# ============================================================================ +# CAMPAIGN WALL APPLICATION ROUTES (For Creators) +# ============================================================================ + +class CampaignApplicationCreate(BaseModel): + """Schema for creating a campaign 
application.""" + payment_min: Optional[float] = None + payment_max: Optional[float] = None + timeline_days: Optional[int] = None + timeline_weeks: Optional[int] = None + description: str = Field(..., min_length=1) + + +class CampaignApplicationResponse(BaseModel): + """Schema for campaign application response.""" + id: str + campaign_id: str + creator_id: str + payment_min: Optional[float] + payment_max: Optional[float] + timeline_days: Optional[int] + timeline_weeks: Optional[int] + description: Optional[str] + message: Optional[str] + proposed_amount: Optional[float] + status: str + created_at: datetime + updated_at: datetime + creator_name: Optional[str] = None + creator_profile_picture: Optional[str] = None + campaign_title: Optional[str] = None + + +# ============================================================================ +# CAMPAIGN WALL RECOMMENDATIONS (Moved here to keep all campaign wall routes together) +# ============================================================================ + +@router.get("/creators/campaign-wall/recommendations", response_model=List[CampaignResponse]) +async def get_campaign_recommendations( + creator: dict = Depends(get_current_creator), + limit: int = Query(10, ge=1, le=20), + use_ai: bool = Query(True, description="Use Gemini to recommend campaigns") +): + """ + Get AI-recommended campaigns for the current creator based on their profile. 
+ """ + from app.core.config import settings + import json + + supabase = supabase_anon + creator_id = creator['id'] + + try: + # Fetch creator profile + creator_resp = supabase.table("creators") \ + .select("*") \ + .eq("id", creator_id) \ + .eq("is_active", True) \ + .single() \ + .execute() + + if not creator_resp.data: + raise HTTPException(status_code=404, detail="Creator profile not found") + + creator_data = creator_resp.data + + # Fetch all open campaigns + campaigns_resp = supabase.table("campaigns") \ + .select("*") \ + .eq("is_open_for_applications", True) \ + .eq("is_on_campaign_wall", True) \ + .eq("status", "active") \ + .order("created_at", desc=True) \ + .limit(100) \ + .execute() + + campaigns = campaigns_resp.data or [] + + if not campaigns: + return [] + + # Use AI to rank campaigns if enabled + if use_ai and settings.gemini_api_key: + try: + import google.generativeai as genai + genai.configure(api_key=settings.gemini_api_key) + + creator_summary = f""" +Creator Profile: +- Name: {creator_data.get('display_name', 'N/A')} +- Primary Niche: {creator_data.get('primary_niche', 'N/A')} +- Secondary Niches: {', '.join(creator_data.get('secondary_niches', []) or [])} +- Total Followers: {creator_data.get('total_followers', 0)} +- Engagement Rate: {creator_data.get('engagement_rate', 0)}% +- Bio: {creator_data.get('bio', 'N/A')[:300]} +- Platforms: {', '.join([p for p in ['YouTube', 'Instagram', 'TikTok', 'Twitter'] if creator_data.get(f'{p.lower()}_handle')])} +- Content Types: {', '.join(creator_data.get('content_types', []) or [])} +""" + + campaigns_info = [] + for idx, campaign in enumerate(campaigns[:50]): # Limit to 50 for AI processing + campaigns_info.append(f""" +Campaign {idx + 1}: +- ID: {campaign.get('id')} +- Title: {campaign.get('title', 'N/A')} +- Description: {campaign.get('description', campaign.get('short_description', 'N/A'))[:200]} +- Platforms: {', '.join(campaign.get('platforms', []) or [])} +- Preferred Niches: {', 
'.join(campaign.get('preferred_creator_niches', []) or [])} +- Budget: {campaign.get('budget_min', 0)} - {campaign.get('budget_max', 0)} INR +- Follower Range: {campaign.get('preferred_creator_followers_range', 'Any')} +""") + + prompt = f"""You are an expert at matching content creators with marketing campaigns. + +{creator_summary} + +AVAILABLE CAMPAIGNS: +{''.join(campaigns_info)} + +Analyze which campaigns are the BEST matches for this creator. Consider: +1. Niche alignment +2. Platform compatibility +3. Audience fit +4. Budget alignment +5. Content style match + +Return a JSON array with campaign IDs ranked by match quality (best first): +[ + {{"id": "campaign_id_1", "match_score": 95, "reasoning": "Why this is a great match"}}, + {{"id": "campaign_id_2", "match_score": 88, "reasoning": "Why this is a good match"}}, + ... +] + +Return ONLY the JSON array, no additional text.""" + + model = genai.GenerativeModel('gemini-pro') + response = model.generate_content(prompt) + content = response.text.strip() + + # Clean JSON response + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + try: + ai_rankings = json.loads(content) + ai_map = {item.get("id"): (item.get("match_score", 0), item.get("reasoning", "")) for item in ai_rankings if item.get("id")} + + # Sort campaigns by AI score + ranked_campaigns = [] + for campaign in campaigns: + campaign_id = campaign.get("id") + if campaign_id in ai_map: + score, reasoning = ai_map[campaign_id] + ranked_campaigns.append((campaign, score, reasoning)) + + ranked_campaigns.sort(key=lambda x: x[1], reverse=True) + return [camp for camp, _, _ in ranked_campaigns[:limit]] + + except Exception: + # If AI parsing fails, return campaigns by date + pass + + except Exception: + # If AI fails, continue with date-based ordering + pass + + # Return campaigns ordered by creation date + return 
campaigns[:limit] + + except HTTPException: + raise + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error getting campaign recommendations: {str(e)}" + ) from e + + +@router.post("/campaigns/{campaign_id}/applications", response_model=CampaignApplicationResponse, status_code=201) +async def create_campaign_application( + campaign_id: str, + application: CampaignApplicationCreate, + creator: dict = Depends(get_current_creator) +): + """ + Create a new application for a campaign. + """ + supabase = supabase_anon + creator_id = creator['id'] + + try: + # Verify campaign exists and is open for applications + campaign_resp = supabase.table("campaigns") \ + .select("*") \ + .eq("id", campaign_id) \ + .eq("is_open_for_applications", True) \ + .eq("is_on_campaign_wall", True) \ + .eq("status", "active") \ + .single() \ + .execute() + + if not campaign_resp.data: + raise HTTPException(status_code=404, detail="Campaign not found or not open for applications") + + # Check if creator already applied + existing = supabase.table("campaign_applications") \ + .select("id") \ + .eq("campaign_id", campaign_id) \ + .eq("creator_id", creator_id) \ + .execute() + + if existing.data: + raise HTTPException( + status_code=400, + detail="You have already applied to this campaign" + ) + + # Get creator profile snapshot + creator_resp = supabase.table("creators") \ + .select("*") \ + .eq("id", creator_id) \ + .single() \ + .execute() + + creator_profile = creator_resp.data if creator_resp.data else {} + + # Create application + application_data = { + "campaign_id": campaign_id, + "creator_id": creator_id, + "payment_min": application.payment_min, + "payment_max": application.payment_max, + "timeline_days": application.timeline_days, + "timeline_weeks": application.timeline_weeks, + "description": application.description, + "message": application.description, # Use description as message + "proposed_amount": application.payment_max or application.payment_min, + 
"profile_snapshot": creator_profile, + "status": "applied" + } + + response = supabase.table("campaign_applications").insert(application_data).execute() + + if not response.data: + raise HTTPException(status_code=500, detail="Failed to create application") + + # Fetch with creator and campaign info + app = response.data[0] + app["creator_name"] = creator_profile.get("display_name") + app["creator_profile_picture"] = creator_profile.get("profile_picture_url") + app["campaign_title"] = campaign_resp.data.get("title") + + return app + + except HTTPException: + raise + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error creating application: {str(e)}" + ) from e + + +@router.get("/campaigns/{campaign_id}/applications", response_model=List[CampaignApplicationResponse]) +async def get_campaign_applications( + campaign_id: str, + brand: dict = Depends(get_current_brand), + status: Optional[str] = Query(None, description="Filter by application status") +): + """ + Get all applications for a campaign. Only the brand owner can view applications. 
+ """ + supabase = supabase_anon + brand_id = brand['id'] + + try: + # Verify campaign belongs to brand + campaign_resp = supabase.table("campaigns") \ + .select("id, title") \ + .eq("id", campaign_id) \ + .eq("brand_id", brand_id) \ + .single() \ + .execute() + + if not campaign_resp.data: + raise HTTPException(status_code=404, detail="Campaign not found") + + # Build query + query = supabase.table("campaign_applications") \ + .select("*, creators!campaign_applications_creator_id_fkey(display_name, profile_picture_url)") \ + .eq("campaign_id", campaign_id) \ + .order("created_at", desc=True) + + if status: + query = query.eq("status", status) + + response = query.execute() + + applications = response.data or [] + + # Format response + formatted = [] + for app in applications: + creator_info = app.get("creators", {}) if isinstance(app.get("creators"), dict) else {} + formatted.append({ + "id": app["id"], + "campaign_id": app["campaign_id"], + "creator_id": app["creator_id"], + "payment_min": app.get("payment_min"), + "payment_max": app.get("payment_max"), + "timeline_days": app.get("timeline_days"), + "timeline_weeks": app.get("timeline_weeks"), + "description": app.get("description"), + "message": app.get("message"), + "proposed_amount": app.get("proposed_amount"), + "status": app.get("status", "applied"), + "created_at": app["created_at"], + "updated_at": app["updated_at"], + "creator_name": creator_info.get("display_name") if creator_info else None, + "creator_profile_picture": creator_info.get("profile_picture_url") if creator_info else None, + "campaign_title": campaign_resp.data.get("title") + }) + + return formatted + + except HTTPException: + raise + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error fetching applications: {str(e)}" + ) from e + + +@router.put("/campaigns/{campaign_id}/applications/{application_id}/status") +async def update_application_status( + campaign_id: str, + application_id: str, + new_status: str = 
Query(..., description="New status: 'reviewing', 'accepted', 'rejected'"), + brand: dict = Depends(get_current_brand) +): + """ + Update the status of a campaign application. Only the brand owner can update. + """ + supabase = supabase_anon + brand_id = brand['id'] + + try: + # Verify campaign belongs to brand + campaign_resp = supabase.table("campaigns") \ + .select("id") \ + .eq("id", campaign_id) \ + .eq("brand_id", brand_id) \ + .single() \ + .execute() + + if not campaign_resp.data: + raise HTTPException(status_code=404, detail="Campaign not found") + + # Verify application exists and belongs to campaign + app_resp = supabase.table("campaign_applications") \ + .select("id, campaign_id, creator_id, status") \ + .eq("id", application_id) \ + .eq("campaign_id", campaign_id) \ + .single() \ + .execute() + + if not app_resp.data: + raise HTTPException(status_code=404, detail="Application not found") + + # Validate status + # The enum should have: 'applied', 'reviewing', 'accepted', 'rejected' + valid_statuses = ["reviewing", "accepted", "rejected"] + if new_status not in valid_statuses: + raise HTTPException( + status_code=400, + detail=f"Invalid status. 
Must be one of: {', '.join(valid_statuses)}" + ) + + # Update status + update_data = { + "status": new_status, + "updated_at": datetime.now(timezone.utc).isoformat() + } + + # Update the application + update_response = supabase.table("campaign_applications") \ + .update(update_data) \ + .eq("id", application_id) \ + .execute() + + if not update_response.data: + raise HTTPException(status_code=500, detail="Failed to update application status") + + # Fetch the updated application with only necessary fields to avoid JSON serialization issues + # Use a simple select without profile_snapshot or attachments + try: + response = supabase.table("campaign_applications") \ + .select("id, campaign_id, creator_id, payment_min, payment_max, timeline_days, timeline_weeks, description, message, proposed_amount, status, created_at, updated_at") \ + .eq("id", application_id) \ + .single() \ + .execute() + except Exception as e: + # If select fails, try to get minimal data + response = supabase.table("campaign_applications") \ + .select("id, campaign_id, creator_id, status, created_at, updated_at") \ + .eq("id", application_id) \ + .single() \ + .execute() + + if not response.data: + raise HTTPException(status_code=500, detail="Failed to fetch updated application") + + # Fetch creator info separately + creator_resp = supabase.table("creators") \ + .select("display_name, profile_picture_url") \ + .eq("id", response.data["creator_id"]) \ + .single() \ + .execute() + + creator_info = creator_resp.data if creator_resp.data else {} + campaign_title_resp = supabase.table("campaigns") \ + .select("title") \ + .eq("id", campaign_id) \ + .single() \ + .execute() + + # Build result manually to ensure clean JSON + result = { + "id": response.data.get("id"), + "campaign_id": response.data.get("campaign_id"), + "creator_id": response.data.get("creator_id"), + "payment_min": response.data.get("payment_min"), + "payment_max": response.data.get("payment_max"), + "timeline_days": 
@router.post("/campaigns/{campaign_id}/applications/{application_id}/create-proposal")
async def create_proposal_from_application(
    campaign_id: str,
    application_id: str,
    brand: dict = Depends(get_current_brand)
):
    """
    Create a proposal from an accepted application.

    The proposal starts in status 'pending' so the brand and creator can
    negotiate. The subject is derived from the campaign title, the message
    from the application's description, and the proposed amount from the
    application's payment range (max preferred over min).

    Raises:
        HTTPException 404: campaign not owned by this brand, or application
            missing / not in 'accepted' status.
        HTTPException 400: a proposal already exists for this
            campaign/creator/brand combination.
        HTTPException 500: insert failed or an unexpected error occurred.
    """
    supabase = supabase_anon
    brand_id = brand['id']

    try:
        # Verify campaign belongs to this brand. Only the title is used
        # downstream, so select just what is needed instead of "*".
        campaign_resp = supabase.table("campaigns") \
            .select("id, title") \
            .eq("id", campaign_id) \
            .eq("brand_id", brand_id) \
            .single() \
            .execute()

        if not campaign_resp.data:
            raise HTTPException(status_code=404, detail="Campaign not found")

        # Verify application exists, belongs to campaign, and is accepted.
        app_resp = supabase.table("campaign_applications") \
            .select("*") \
            .eq("id", application_id) \
            .eq("campaign_id", campaign_id) \
            .eq("status", "accepted") \
            .single() \
            .execute()

        if not app_resp.data:
            raise HTTPException(
                status_code=404,
                detail="Application not found or not accepted"
            )

        application = app_resp.data
        creator_id = application["creator_id"]

        # Reject duplicates. NOTE(review): check-then-insert is race-prone;
        # a unique constraint on (campaign_id, creator_id, brand_id) would
        # close the window — confirm against the schema.
        existing_proposal = supabase.table("proposals") \
            .select("id") \
            .eq("campaign_id", campaign_id) \
            .eq("creator_id", creator_id) \
            .eq("brand_id", brand_id) \
            .execute()

        if existing_proposal.data:
            raise HTTPException(
                status_code=400,
                detail="A proposal already exists for this application"
            )

        # Seed the proposal from the application. `or ""` guards against a
        # NULL description: dict.get's default only covers a *missing* key,
        # not a key whose stored value is None.
        proposal_data = {
            "campaign_id": campaign_id,
            "creator_id": creator_id,
            "brand_id": brand_id,
            "subject": f"Proposal for {campaign_resp.data.get('title', 'Campaign')}",
            "message": application.get("description") or "",
            "proposed_amount": application.get("payment_max") or application.get("payment_min"),
            "content_ideas": [],
            "status": "pending"
        }

        proposal_response = supabase.table("proposals").insert(proposal_data).execute()

        if not proposal_response.data:
            raise HTTPException(status_code=500, detail="Failed to create proposal")

        return {
            "proposal": proposal_response.data[0],
            "application_id": application_id,
            "message": "Proposal created successfully. You can now negotiate with the creator."
        }

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error creating proposal from application: {str(e)}"
        ) from e
@router.get("/creators/applications", response_model=List[CampaignApplicationResponse])
async def get_creator_applications(
    creator: dict = Depends(get_current_creator),
    status: Optional[str] = Query(None, description="Filter by application status")
):
    """
    Get all applications submitted by the current creator, newest first.

    Each row is joined with its campaign (and that campaign's brand) so the
    response can carry the campaign title. Creator name/picture fields are
    intentionally None — the caller is the creator themselves.

    Raises:
        HTTPException 500: on any unexpected query or formatting error.
    """
    supabase = supabase_anon
    creator_id = creator['id']

    try:
        query = supabase.table("campaign_applications") \
            .select("*, campaigns!campaign_applications_campaign_id_fkey(id, title, brand_id, brands!campaigns_brand_id_fkey(company_name))") \
            .eq("creator_id", creator_id) \
            .order("created_at", desc=True)

        if status:
            query = query.eq("status", status)

        response = query.execute()
        applications = response.data or []

        # Flatten each row into the CampaignApplicationResponse shape.
        # The joined "campaigns" value may be absent or non-dict, so guard it.
        formatted = []
        for app in applications:
            campaign_info = app.get("campaigns", {}) if isinstance(app.get("campaigns"), dict) else {}
            formatted.append({
                "id": app["id"],
                "campaign_id": app["campaign_id"],
                "creator_id": app["creator_id"],
                "payment_min": app.get("payment_min"),
                "payment_max": app.get("payment_max"),
                "timeline_days": app.get("timeline_days"),
                "timeline_weeks": app.get("timeline_weeks"),
                "description": app.get("description"),
                "message": app.get("message"),
                "proposed_amount": app.get("proposed_amount"),
                "status": app.get("status", "applied"),
                # .get, not [], for consistency with the other optional fields.
                "created_at": app.get("created_at"),
                "updated_at": app.get("updated_at"),
                "creator_name": None,  # not needed for creator's own applications
                "creator_profile_picture": None,
                "campaign_title": campaign_info.get("title")
            })

        return formatted

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error fetching creator applications: {str(e)}"
        ) from e
Can be created by either a brand (to a creator) or a creator (to a brand).""" supabase = supabase_anon - brand_id = brand['id'] + user_role = current_user.get('role') try: - # Verify campaign belongs to brand - campaign_resp = supabase.table("campaigns") \ - .select("id, title, brand_id") \ - .eq("id", proposal.campaign_id) \ - .eq("brand_id", brand_id) \ - .single() \ - .execute() + # Determine if this is a brand or creator creating the proposal + if user_role == 'Brand': + # Brand creating proposal: they specify creator_id, brand_id from session + brand_resp = supabase.table("brands") \ + .select("id, company_name") \ + .eq("user_id", current_user['id']) \ + .single() \ + .execute() - if not campaign_resp.data: - raise HTTPException(status_code=404, detail="Campaign not found") + if not brand_resp.data: + raise HTTPException(status_code=404, detail="Brand profile not found") - # Verify creator exists - creator_resp = supabase.table("creators") \ - .select("id, display_name") \ - .eq("id", proposal.creator_id) \ - .eq("is_active", True) \ - .single() \ - .execute() + brand_id = brand_resp.data['id'] + brand_name = brand_resp.data.get("company_name", "Unknown Brand") - if not creator_resp.data: - raise HTTPException(status_code=404, detail="Creator not found") + if not proposal.creator_id: + raise HTTPException(status_code=400, detail="creator_id is required when creating a proposal as a brand") + + creator_id = proposal.creator_id + + # Verify campaign belongs to brand + campaign_resp = supabase.table("campaigns") \ + .select("id, title, brand_id") \ + .eq("id", proposal.campaign_id) \ + .eq("brand_id", brand_id) \ + .single() \ + .execute() + + if not campaign_resp.data: + raise HTTPException(status_code=404, detail="Campaign not found or does not belong to you") + + elif user_role == 'Creator': + # Creator creating proposal: they specify campaign_id, creator_id from session, brand_id from campaign + creator_resp = supabase.table("creators") \ + .select("id, 
display_name") \ + .eq("user_id", current_user['id']) \ + .eq("is_active", True) \ + .single() \ + .execute() + + if not creator_resp.data: + raise HTTPException(status_code=404, detail="Creator profile not found or inactive") + + creator_id = creator_resp.data['id'] + creator_name = creator_resp.data.get("display_name", "Unknown Creator") + + # Get campaign to find brand_id + campaign_resp = supabase.table("campaigns") \ + .select("id, title, brand_id") \ + .eq("id", proposal.campaign_id) \ + .single() \ + .execute() + + if not campaign_resp.data: + raise HTTPException(status_code=404, detail="Campaign not found") + + brand_id = campaign_resp.data.get("brand_id") + if not brand_id: + raise HTTPException(status_code=404, detail="Campaign has no associated brand") + + # Get brand name + brand_resp = supabase.table("brands") \ + .select("company_name") \ + .eq("id", brand_id) \ + .single() \ + .execute() + + brand_name = brand_resp.data.get("company_name", "Unknown Brand") if brand_resp.data else "Unknown Brand" + + else: + raise HTTPException(status_code=403, detail="Only brands and creators can create proposals") + + # For brands, verify creator exists and is active + if user_role == 'Brand': + creator_resp = supabase.table("creators") \ + .select("id, display_name") \ + .eq("id", creator_id) \ + .eq("is_active", True) \ + .single() \ + .execute() + + if not creator_resp.data: + raise HTTPException(status_code=404, detail="Creator not found or inactive") + + creator_name = creator_resp.data.get("display_name", "Unknown Creator") # Check if proposal already exists existing = supabase.table("proposals") \ .select("id") \ .eq("campaign_id", proposal.campaign_id) \ - .eq("creator_id", proposal.creator_id) \ + .eq("creator_id", creator_id) \ .eq("brand_id", brand_id) \ .execute() @@ -707,7 +771,7 @@ async def create_proposal( proposal_data = { "campaign_id": proposal.campaign_id, "brand_id": brand_id, - "creator_id": proposal.creator_id, + "creator_id": creator_id, 
# ============================================================================
# NEGOTIATION AI FEATURES
# ============================================================================

class SentimentAnalysisRequest(BaseModel):
    """Request for sentiment analysis of negotiation messages.

    NOTE(review): not consumed by analyze_negotiation_sentiment, which reads
    messages from the stored negotiation thread; kept so any external
    importers keep working — confirm before removing.
    """
    messages: List[str] = Field(..., description="List of messages to analyze")


class SentimentAnalysisResponse(BaseModel):
    """Response for sentiment analysis."""
    overall_sentiment: str = Field(..., description="Overall sentiment: positive, neutral, negative, or mixed")
    sentiment_score: float = Field(..., description="Sentiment score from -1 (negative) to 1 (positive)")
    detected_tone: List[str] = Field(default_factory=list, description="Detected tones: e.g., 'hesitant', 'confident', 'conflict'")
    guidance: str = Field(..., description="Actionable guidance based on sentiment")
    alerts: List[str] = Field(default_factory=list, description="Alerts for concerning patterns")


@router.post("/proposals/{proposal_id}/negotiation/analyze-sentiment", response_model=SentimentAnalysisResponse)
async def analyze_negotiation_sentiment(
    proposal_id: str,
    user: dict = Depends(get_current_user)
):
    """Analyze sentiment of negotiation messages to detect tone and provide guidance.

    Reads the proposal's stored negotiation thread, sends the message bodies
    to the Groq LLM, and returns a structured sentiment summary. Returns a
    neutral placeholder response when the thread has no messages yet.

    Raises:
        HTTPException 403: caller is not the brand or creator on the proposal.
        HTTPException 500: Groq key missing, AI response unparsable, or any
            unexpected error.
    """
    proposal = fetch_proposal_by_id(proposal_id)

    # Only the brand or creator party to this proposal may analyze it.
    user_role = user.get("role")
    if user_role == "Brand":
        brand_profile = fetch_brand_profile_by_user_id(user["id"])
        if not brand_profile or brand_profile.get("id") != proposal["brand_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    elif user_role == "Creator":
        creator_profile = fetch_creator_profile_by_user_id(user["id"])
        if not creator_profile or creator_profile.get("id") != proposal["creator_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    else:
        raise HTTPException(status_code=403, detail="Access denied")

    thread = normalize_negotiation_thread(proposal.get("negotiation_thread"))
    messages = [entry.get("message", "") for entry in thread if entry.get("type") == "message" and entry.get("message")]

    if not messages:
        return SentimentAnalysisResponse(
            overall_sentiment="neutral",
            sentiment_score=0.0,
            detected_tone=[],
            guidance="No messages found in this negotiation yet.",
            alerts=[]
        )

    try:
        if not settings.groq_api_key:
            raise HTTPException(status_code=500, detail="GROQ API key not configured")

        groq_client = Groq(api_key=settings.groq_api_key)

        messages_text = "\n".join([f"Message {i+1}: {msg}" for i, msg in enumerate(messages)])

        prompt = f"""Analyze the sentiment and tone of these negotiation messages from a business collaboration context:

{messages_text}

Provide a comprehensive sentiment analysis including:
1. Overall sentiment (positive, neutral, negative, or mixed)
2. Sentiment score from -1 (very negative) to 1 (very positive)
3. Detected tones (e.g., hesitant, confident, conflict, enthusiastic, defensive, collaborative)
4. Actionable guidance for the user on how to proceed
5. Any alerts for concerning patterns (conflict, hesitation, negative signals)

Return your response as JSON with this exact structure:
{{
    "overall_sentiment": "positive|neutral|negative|mixed",
    "sentiment_score": 0.75,
    "detected_tone": ["confident", "collaborative"],
    "guidance": "The negotiation shows positive momentum. Consider...",
    "alerts": []
}}"""

        completion = groq_client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert business communication analyst. Analyze negotiation messages and provide actionable insights. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_completion_tokens=800,
            response_format={"type": "json_object"}
        )

        content = completion.choices[0].message.content if completion.choices else "{}"
        content = content.strip()

        # Strip markdown code fences the model sometimes wraps around JSON.
        if content.startswith("```json"):
            content = content[7:]
        if content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        result = json.loads(content)

        # Defensive parse: a non-numeric score from the model should degrade
        # to neutral instead of becoming a 500.
        try:
            score = float(result.get("sentiment_score", 0.0))
        except (TypeError, ValueError):
            score = 0.0

        return SentimentAnalysisResponse(
            overall_sentiment=result.get("overall_sentiment", "neutral"),
            sentiment_score=score,
            detected_tone=result.get("detected_tone", []),
            guidance=result.get("guidance", "Continue the negotiation with professional communication."),
            alerts=result.get("alerts", [])
        )

    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Failed to parse AI response")
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error analyzing sentiment: {str(e)}"
        ) from e
class MessageDraftRequest(BaseModel):
    """Request for AI message drafting assistance."""
    context: str = Field(..., description="Context or intent for the message")
    tone: Optional[str] = Field("professional", description="Desired tone: professional, polite, persuasive, friendly")
    current_negotiation_state: Optional[str] = Field(None, description="Current state of negotiation")


class MessageDraftResponse(BaseModel):
    """Response for message drafting."""
    draft: str = Field(..., description="AI-generated message draft")
    suggestions: List[str] = Field(default_factory=list, description="Additional suggestions or tips")


@router.post("/proposals/{proposal_id}/negotiation/draft-message", response_model=MessageDraftResponse)
async def draft_negotiation_message(
    proposal_id: str,
    payload: MessageDraftRequest,
    user: dict = Depends(get_current_user)
):
    """AI assistance for drafting negotiation messages.

    Uses the last few thread messages plus the caller's stated intent and
    desired tone to generate a message draft via the Groq LLM.

    Raises:
        HTTPException 403: caller is not the brand or creator on the proposal.
        HTTPException 500: Groq key missing, AI response unparsable, or any
            unexpected error.
    """
    proposal = fetch_proposal_by_id(proposal_id)

    # Only the brand or creator party to this proposal may use drafting;
    # the matched profile also supplies the sender's display name.
    user_role = user.get("role")
    sender_name = "User"
    if user_role == "Brand":
        brand_profile = fetch_brand_profile_by_user_id(user["id"])
        if not brand_profile or brand_profile.get("id") != proposal["brand_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
        sender_name = brand_profile.get("company_name", "Brand")
    elif user_role == "Creator":
        creator_profile = fetch_creator_profile_by_user_id(user["id"])
        if not creator_profile or creator_profile.get("id") != proposal["creator_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
        sender_name = creator_profile.get("display_name", "Creator")
    else:
        raise HTTPException(status_code=403, detail="Access denied")

    thread = normalize_negotiation_thread(proposal.get("negotiation_thread"))
    # Last five entries give the model enough context; slicing already
    # handles shorter threads, so no length check is needed.
    recent_messages = thread[-5:]
    conversation_context = "\n".join([
        f"{entry.get('sender_role')}: {entry.get('message', '')}"
        for entry in recent_messages
        if entry.get("type") == "message"
    ])

    try:
        if not settings.groq_api_key:
            raise HTTPException(status_code=500, detail="GROQ API key not configured")

        groq_client = Groq(api_key=settings.groq_api_key)

        prompt = f"""You are helping {sender_name} draft a negotiation message.

PROPOSAL CONTEXT:
- Subject: {proposal.get('subject', 'N/A')}
- Campaign: {proposal.get('campaign_title', 'N/A')}

RECENT CONVERSATION:
{conversation_context if conversation_context else 'This is the start of the negotiation.'}

USER'S INTENT:
{payload.context}

DESIRED TONE: {payload.tone}

CURRENT NEGOTIATION STATE: {payload.current_negotiation_state or 'Active negotiation'}

Draft a {payload.tone} negotiation message that:
1. Is clear and professional
2. Addresses the user's intent
3. Maintains a {payload.tone} tone
4. Is appropriate for the negotiation context
5. Moves the conversation forward constructively

Return your response as JSON with this structure:
{{
    "draft": "The complete message draft here",
    "suggestions": ["Tip 1", "Tip 2"]
}}"""

        completion = groq_client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert at writing professional business negotiation messages. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.7,
            max_completion_tokens=600,
            response_format={"type": "json_object"}
        )

        content = completion.choices[0].message.content if completion.choices else "{}"
        content = content.strip()

        # Strip markdown code fences the model sometimes wraps around JSON.
        if content.startswith("```json"):
            content = content[7:]
        if content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        result = json.loads(content)

        return MessageDraftResponse(
            draft=result.get("draft", "I would like to discuss the proposal further."),
            suggestions=result.get("suggestions", [])
        )

    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Failed to parse AI response")
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error drafting message: {str(e)}"
        ) from e
class DealProbabilityResponse(BaseModel):
    """Response for deal probability prediction."""
    probability: float = Field(..., description="Probability of successful deal (0.0 to 1.0)")
    confidence: str = Field(..., description="Confidence level: high, medium, low")
    factors: List[str] = Field(default_factory=list, description="Key factors influencing the prediction")
    recommendations: List[str] = Field(default_factory=list, description="Recommendations to improve deal probability")


@router.get("/proposals/{proposal_id}/negotiation/deal-probability", response_model=DealProbabilityResponse)
async def predict_deal_probability(
    proposal_id: str,
    user: dict = Depends(get_current_user)
):
    """Predict the likelihood of a negotiation resulting in a successful deal.

    Combines the proposal's thread, current terms, and a simple historical
    success rate for this brand/creator pair, then asks the Groq LLM for a
    probability estimate clamped to [0, 1].

    Raises:
        HTTPException 403: caller is not the brand or creator on the proposal.
        HTTPException 500: Groq key missing, AI response unparsable, or any
            unexpected error.
    """
    supabase = supabase_anon
    proposal = fetch_proposal_by_id(proposal_id)

    # Only the brand or creator party to this proposal may request this.
    user_role = user.get("role")
    if user_role == "Brand":
        brand_profile = fetch_brand_profile_by_user_id(user["id"])
        if not brand_profile or brand_profile.get("id") != proposal["brand_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    elif user_role == "Creator":
        creator_profile = fetch_creator_profile_by_user_id(user["id"])
        if not creator_profile or creator_profile.get("id") != proposal["creator_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    else:
        raise HTTPException(status_code=403, detail="Access denied")

    thread = normalize_negotiation_thread(proposal.get("negotiation_thread"))
    messages = [entry.get("message", "") for entry in thread if entry.get("type") == "message"]

    # Historical context (simplified): share of this pair's past negotiations
    # that reached "finalized". Best effort — falls back to 0.5 on any error.
    historical_success_rate = 0.5
    try:
        similar_proposals = supabase.table("proposals") \
            .select("id, status, negotiation_status") \
            .eq("brand_id", proposal["brand_id"]) \
            .eq("creator_id", proposal["creator_id"]) \
            .in_("negotiation_status", ["finalized", "open"]) \
            .execute()

        if similar_proposals.data:
            finalized = sum(1 for p in similar_proposals.data if p.get("negotiation_status") == "finalized")
            # The guard above already ensures a non-empty list, so the
            # division is safe without a redundant ternary.
            historical_success_rate = finalized / len(similar_proposals.data)
    except Exception:
        # `except Exception`, not bare `except:` — a bare clause would also
        # swallow KeyboardInterrupt/SystemExit.
        historical_success_rate = 0.5

    try:
        if not settings.groq_api_key:
            raise HTTPException(status_code=500, detail="GROQ API key not configured")

        groq_client = Groq(api_key=settings.groq_api_key)

        conversation_summary = "\n".join([f"Message {i+1}: {msg}" for i, msg in enumerate(messages)]) if messages else "No messages yet."

        prompt = f"""Analyze this business negotiation and predict the probability of a successful deal.

PROPOSAL DETAILS:
- Subject: {proposal.get('subject', 'N/A')}
- Status: {proposal.get('status', 'N/A')}
- Negotiation Status: {proposal.get('negotiation_status', 'N/A')}
- Proposed Amount: {proposal.get('proposed_amount', 'N/A')}
- Version: {proposal.get('version', 1)}

CONVERSATION HISTORY:
{conversation_summary}

HISTORICAL SUCCESS RATE: {historical_success_rate:.2%}

CURRENT TERMS:
{json.dumps(proposal.get('current_terms', {}), indent=2) if proposal.get('current_terms') else 'No terms set yet.'}

Based on:
1. Conversation tone and engagement
2. Progress in negotiation
3. Terms alignment
4. Historical patterns
5. Communication quality

Predict the probability (0.0 to 1.0) of this negotiation resulting in a successful deal.

Return your response as JSON with this structure:
{{
    "probability": 0.75,
    "confidence": "high|medium|low",
    "factors": ["Factor 1", "Factor 2"],
    "recommendations": ["Recommendation 1", "Recommendation 2"]
}}"""

        completion = groq_client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert business analyst specializing in deal prediction. Analyze negotiations and provide probability estimates. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.2,
            max_completion_tokens=600,
            response_format={"type": "json_object"}
        )

        content = completion.choices[0].message.content if completion.choices else "{}"
        content = content.strip()

        # Strip markdown code fences the model sometimes wraps around JSON.
        if content.startswith("```json"):
            content = content[7:]
        if content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        result = json.loads(content)

        # Defensive parse, then clamp the probability into [0, 1].
        try:
            probability = float(result.get("probability", 0.5))
        except (TypeError, ValueError):
            probability = 0.5
        probability = max(0.0, min(1.0, probability))

        return DealProbabilityResponse(
            probability=probability,
            confidence=result.get("confidence", "medium"),
            factors=result.get("factors", []),
            recommendations=result.get("recommendations", [])
        )

    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Failed to parse AI response")
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error predicting deal probability: {str(e)}"
        ) from e
class TranslationRequest(BaseModel):
    """Request for message translation."""
    text: str = Field(..., description="Text to translate")
    target_language: str = Field(..., description="Target language code (e.g., 'es', 'fr', 'de', 'zh')")
    source_language: Optional[str] = Field(None, description="Source language code (auto-detect if not provided)")


class TranslationResponse(BaseModel):
    """Response for translation."""
    translated_text: str = Field(..., description="Translated text")
    detected_language: Optional[str] = Field(None, description="Detected source language")
    confidence: Optional[float] = Field(None, description="Translation confidence score")


@router.post("/proposals/{proposal_id}/negotiation/translate", response_model=TranslationResponse)
async def translate_negotiation_message(
    proposal_id: str,
    payload: TranslationRequest,
    user: dict = Depends(get_current_user)
):
    """Translate negotiation messages for cross-border negotiations.

    Maps the target language code to a human-readable name and asks the Groq
    LLM for a business-appropriate translation, auto-detecting the source
    language when none is supplied.

    Raises:
        HTTPException 403: caller is not the brand or creator on the proposal.
        HTTPException 400: empty translation text.
        HTTPException 500: Groq key missing, AI response unparsable, or any
            unexpected error.
    """
    proposal = fetch_proposal_by_id(proposal_id)

    # Only the brand or creator party to this proposal may translate.
    user_role = user.get("role")
    if user_role == "Brand":
        brand_profile = fetch_brand_profile_by_user_id(user["id"])
        if not brand_profile or brand_profile.get("id") != proposal["brand_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    elif user_role == "Creator":
        creator_profile = fetch_creator_profile_by_user_id(user["id"])
        if not creator_profile or creator_profile.get("id") != proposal["creator_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    else:
        raise HTTPException(status_code=403, detail="Access denied")

    if not payload.text.strip():
        raise HTTPException(status_code=400, detail="Text to translate cannot be empty")

    # ISO code -> display name for the prompt; unknown codes pass through.
    language_names = {
        "es": "Spanish", "fr": "French", "de": "German", "zh": "Chinese",
        "ja": "Japanese", "ko": "Korean", "pt": "Portuguese", "it": "Italian",
        "ru": "Russian", "ar": "Arabic", "hi": "Hindi", "nl": "Dutch",
        "sv": "Swedish", "pl": "Polish", "tr": "Turkish"
    }

    target_language_name = language_names.get(payload.target_language.lower(), payload.target_language)

    try:
        if not settings.groq_api_key:
            raise HTTPException(status_code=500, detail="GROQ API key not configured")

        groq_client = Groq(api_key=settings.groq_api_key)

        prompt = f"""Translate the following business negotiation message to {target_language_name}.

Maintain:
- Professional tone
- Business context and meaning
- All numbers, dates, and technical terms accurately
- Cultural appropriateness for business communication

Source text:
{payload.text}

Provide the translation and detect the source language if not specified.

Return your response as JSON with this structure:
{{
    "translated_text": "Translated text here",
    "detected_language": "en",
    "confidence": 0.95
}}"""

        completion = groq_client.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert translator specializing in business and professional communication. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_completion_tokens=500,
            response_format={"type": "json_object"}
        )

        content = completion.choices[0].message.content if completion.choices else "{}"
        content = content.strip()

        # Strip markdown code fences the model sometimes wraps around JSON.
        if content.startswith("```json"):
            content = content[7:]
        if content.startswith("```"):
            content = content[3:]
        if content.endswith("```"):
            content = content[:-3]
        content = content.strip()

        result = json.loads(content)

        # `is not None`, not truthiness: a legitimate 0.0 confidence should
        # surface as 0.0 rather than being coerced to None.
        raw_confidence = result.get("confidence")

        return TranslationResponse(
            translated_text=result.get("translated_text", payload.text),
            detected_language=result.get("detected_language") or payload.source_language,
            confidence=float(raw_confidence) if raw_confidence is not None else None
        )

    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Failed to parse AI response")
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error translating message: {str(e)}"
        ) from e
# ============================================================================
# CONTRACT AI FEATURES
# ============================================================================

class ContractQuestionRequest(BaseModel):
    """Request for contract question answering."""
    question: str = Field(..., description="Question about the contract")


class ContractQuestionResponse(BaseModel):
    """Response for contract question."""
    answer: str = Field(..., description="AI-generated answer to the question")
    relevant_clauses: List[str] = Field(default_factory=list, description="Relevant contract clauses referenced")


@router.post("/contracts/{contract_id}/ask-question", response_model=ContractQuestionResponse)
async def ask_contract_question(
    contract_id: str,
    payload: ContractQuestionRequest,
    user: dict = Depends(get_current_user)
):
    """Allow users to ask questions about the contract and get AI-powered answers.

    Loads the contract (with its joined proposal), verifies the caller is a
    party to it, and asks the Groq LLM to answer the question strictly from
    the stored contract terms.
    """
    supabase = supabase_anon

    # Load the contract together with its joined proposal row.
    record = supabase.table("contracts") \
        .select("*, proposals(*)") \
        .eq("id", contract_id) \
        .single() \
        .execute()

    if not record.data:
        raise HTTPException(status_code=404, detail="Contract not found")

    contract = record.data
    requester_role = user.get("role")

    # Only the brand or creator named on the contract may ask questions.
    if requester_role == "Brand":
        profile = fetch_brand_profile_by_user_id(user["id"])
        if not profile or profile.get("id") != contract["brand_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    elif requester_role == "Creator":
        profile = fetch_creator_profile_by_user_id(user["id"])
        if not profile or profile.get("id") != contract["creator_id"]:
            raise HTTPException(status_code=403, detail="Access denied")
    else:
        raise HTTPException(status_code=403, detail="Access denied")

    try:
        if not settings.groq_api_key:
            raise HTTPException(status_code=500, detail="GROQ API key not configured")

        llm = Groq(api_key=settings.groq_api_key)

        terms_json = json.dumps(contract.get("terms", {}), indent=2)
        joined = contract.get("proposals")
        proposal = joined if isinstance(joined, dict) else {}

        prompt = f"""You are a contract analysis assistant. Answer the user's question about this contract.

CONTRACT TERMS:
{terms_json}

PROPOSAL CONTEXT:
- Subject: {proposal.get('subject', 'N/A')}
- Campaign: {proposal.get('campaign_title', 'N/A')}
- Proposed Amount: {proposal.get('proposed_amount', 'N/A')}

USER'S QUESTION:
{payload.question}

Provide a clear, accurate answer based on the contract terms. If the information is not in the contract, say so. Also identify which specific clauses or sections are relevant to the answer.

Return your response as JSON with this structure:
{{
    "answer": "Clear answer to the question",
    "relevant_clauses": ["Clause 1", "Clause 2"]
}}"""

        reply = llm.chat.completions.create(
            model="meta-llama/llama-4-scout-17b-16e-instruct",
            messages=[
                {
                    "role": "system",
                    "content": "You are an expert contract analyst. Answer questions accurately based on contract terms. Always respond with valid JSON only."
                },
                {"role": "user", "content": prompt}
            ],
            temperature=0.3,
            max_completion_tokens=800,
            response_format={"type": "json_object"}
        )

        raw = reply.choices[0].message.content if reply.choices else "{}"
        raw = raw.strip()

        # Drop any markdown fences wrapped around the JSON payload.
        for fence in ("```json", "```"):
            if raw.startswith(fence):
                raw = raw[len(fence):]
        if raw.endswith("```"):
            raw = raw[:-3]
        raw = raw.strip()

        parsed = json.loads(raw)

        return ContractQuestionResponse(
            answer=parsed.get("answer", "I couldn't find a clear answer to that question in the contract."),
            relevant_clauses=parsed.get("relevant_clauses", [])
        )

    except json.JSONDecodeError:
        raise HTTPException(status_code=500, detail="Failed to parse AI response")
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"Error answering question: {str(e)}"
        ) from e
+ +CONTRACT TERMS: +{contract_terms} + +PROPOSAL CONTEXT: +- Subject: {proposal.get('subject', 'N/A')} +- Campaign: {proposal.get('campaign_title', 'N/A')} +- Proposed Amount: {proposal.get('proposed_amount', 'N/A')} + +USER'S QUESTION: +{payload.question} + +Provide a clear, accurate answer based on the contract terms. If the information is not in the contract, say so. Also identify which specific clauses or sections are relevant to the answer. + +Return your response as JSON with this structure: +{{ + "answer": "Clear answer to the question", + "relevant_clauses": ["Clause 1", "Clause 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert contract analyst. Answer questions accurately based on contract terms. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=800, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + + # Clean JSON response + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + + return ContractQuestionResponse( + answer=result.get("answer", "I couldn't find a clear answer to that question in the contract."), + relevant_clauses=result.get("relevant_clauses", []) + ) + + except json.JSONDecodeError: + raise HTTPException(status_code=500, detail="Failed to parse AI response") + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error answering question: {str(e)}" + ) from e + + +class ContractTemplateRequest(BaseModel): + """Request for contract template generation.""" + deal_type: str = Field(..., description="Type of deal (e.g., 
'sponsored content', 'brand ambassadorship')") + deliverables: Optional[List[str]] = Field(default_factory=list, description="List of deliverables") + payment_amount: Optional[float] = Field(None, description="Payment amount") + duration: Optional[str] = Field(None, description="Contract duration") + additional_requirements: Optional[str] = Field(None, description="Additional requirements or notes") + + +class ContractTemplateResponse(BaseModel): + """Response for contract template.""" + template: Dict[str, Any] = Field(..., description="Generated contract template as JSON") + suggestions: List[str] = Field(default_factory=list, description="Suggestions for the contract") + + +@router.post("/contracts/generate-template", response_model=ContractTemplateResponse) +async def generate_contract_template( + payload: ContractTemplateRequest, + user: dict = Depends(get_current_user) +): + """Generate draft contract templates for new deals based on best practices.""" + if user.get("role") not in ("Brand", "Creator"): + raise HTTPException(status_code=403, detail="Only brands and creators can generate templates") + + try: + if not settings.groq_api_key: + raise HTTPException(status_code=500, detail="GROQ API key not configured") + + groq_client = Groq(api_key=settings.groq_api_key) + + # Get user's previous contracts for reference + supabase = supabase_anon + role = user.get("role") + previous_contracts = [] + + try: + if role == "Brand": + brand_profile = fetch_brand_profile_by_user_id(user["id"]) + if brand_profile: + contracts_resp = supabase.table("contracts") \ + .select("terms") \ + .eq("brand_id", brand_profile["id"]) \ + .limit(5) \ + .execute() + previous_contracts = [c.get("terms") for c in (contracts_resp.data or []) if c.get("terms")] + else: + creator_profile = fetch_creator_profile_by_user_id(user["id"]) + if creator_profile: + contracts_resp = supabase.table("contracts") \ + .select("terms") \ + .eq("creator_id", creator_profile["id"]) \ + .limit(5) \ + 
.execute() + previous_contracts = [c.get("terms") for c in (contracts_resp.data or []) if c.get("terms")] + except: + pass # Continue without previous contracts if fetch fails + + previous_examples = json.dumps(previous_contracts[:3], indent=2) if previous_contracts else "None available" + + prompt = f"""Generate a professional contract template for a brand-creator collaboration deal. + +DEAL TYPE: {payload.deal_type} +DELIVERABLES: {', '.join(payload.deliverables) if payload.deliverables else 'To be specified'} +PAYMENT AMOUNT: {payload.payment_amount or 'To be negotiated'} +DURATION: {payload.duration or 'To be specified'} +ADDITIONAL REQUIREMENTS: {payload.additional_requirements or 'None'} + +PREVIOUS CONTRACT EXAMPLES (for reference): +{previous_examples} + +Generate a comprehensive contract template that includes: +1. Parties involved +2. Scope of work and deliverables +3. Payment terms and schedule +4. Timeline and deadlines +5. Content usage rights +6. Exclusivity clauses (if applicable) +7. Termination conditions +8. Dispute resolution +9. Confidentiality +10. Any other relevant standard clauses + +Return your response as JSON with this structure: +{{ + "template": {{ + "parties": {{"brand": "...", "creator": "..."}}, + "scope_of_work": "...", + "deliverables": [...], + "payment_terms": {{"amount": ..., "schedule": "..."}}, + "timeline": "...", + "content_rights": "...", + "exclusivity": "...", + "termination": "...", + "dispute_resolution": "...", + "confidentiality": "..." + }}, + "suggestions": ["Suggestion 1", "Suggestion 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert contract lawyer specializing in influencer marketing agreements. Generate professional contract templates. Always respond with valid JSON only." 
+ }, + {"role": "user", "content": prompt} + ], + temperature=0.5, + max_completion_tokens=2000, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + + # Clean JSON response + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + + return ContractTemplateResponse( + template=result.get("template", {}), + suggestions=result.get("suggestions", []) + ) + + except json.JSONDecodeError: + raise HTTPException(status_code=500, detail="Failed to parse AI response") + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error generating template: {str(e)}" + ) from e + + +class ContractTranslationRequest(BaseModel): + """Request for contract translation.""" + target_language: str = Field(..., description="Target language code (e.g., 'es', 'fr', 'de')") + + +class ContractTranslationResponse(BaseModel): + """Response for contract translation.""" + translated_terms: Dict[str, Any] = Field(..., description="Translated contract terms") + detected_language: Optional[str] = Field(None, description="Detected source language") + + +@router.post("/contracts/{contract_id}/translate", response_model=ContractTranslationResponse) +async def translate_contract( + contract_id: str, + payload: ContractTranslationRequest, + user: dict = Depends(get_current_user) +): + """Translate contracts into the user's preferred language.""" + supabase = supabase_anon + + # Verify access + contract_resp = supabase.table("contracts") \ + .select("*") \ + .eq("id", contract_id) \ + .single() \ + .execute() + + if not contract_resp.data: + raise HTTPException(status_code=404, detail="Contract not found") + + contract = contract_resp.data + role = user.get("role") + + if role == "Brand": + brand_profile = 
fetch_brand_profile_by_user_id(user["id"]) + if not brand_profile or brand_profile.get("id") != contract["brand_id"]: + raise HTTPException(status_code=403, detail="Access denied") + elif role == "Creator": + creator_profile = fetch_creator_profile_by_user_id(user["id"]) + if not creator_profile or creator_profile.get("id") != contract["creator_id"]: + raise HTTPException(status_code=403, detail="Access denied") + else: + raise HTTPException(status_code=403, detail="Access denied") + + # Language code mapping + language_names = { + "es": "Spanish", "fr": "French", "de": "German", "zh": "Chinese", + "ja": "Japanese", "ko": "Korean", "pt": "Portuguese", "it": "Italian", + "ru": "Russian", "ar": "Arabic", "hi": "Hindi", "nl": "Dutch", + "sv": "Swedish", "pl": "Polish", "tr": "Turkish" + } + + target_language_name = language_names.get(payload.target_language.lower(), payload.target_language) + + try: + if not settings.groq_api_key: + raise HTTPException(status_code=500, detail="GROQ API key not configured") + + groq_client = Groq(api_key=settings.groq_api_key) + + contract_terms = json.dumps(contract.get("terms", {}), indent=2) + + prompt = f"""Translate the following contract terms to {target_language_name}. + +Maintain: +- Legal accuracy and precision +- Professional business tone +- All numbers, dates, and technical terms exactly as they are +- Contract structure and formatting +- Cultural appropriateness for business communication + +CONTRACT TERMS (JSON): +{contract_terms} + +Return the translated contract as JSON with the same structure, and detect the source language. + +Return your response as JSON with this structure: +{{ + "translated_terms": {{...translated contract JSON...}}, + "detected_language": "en" +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert legal translator specializing in business contracts. 
Translate contracts accurately while maintaining legal precision. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.2, + max_completion_tokens=3000, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + + # Clean JSON response + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + + return ContractTranslationResponse( + translated_terms=result.get("translated_terms", contract.get("terms", {})), + detected_language=result.get("detected_language", "en") + ) + + except json.JSONDecodeError: + raise HTTPException(status_code=500, detail="Failed to parse AI response") + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error translating contract: {str(e)}" + ) from e + + +class ClauseExplanationRequest(BaseModel): + """Request for clause explanation.""" + clause_text: str = Field(..., description="The clause text to explain") + clause_context: Optional[str] = Field(None, description="Context about where this clause appears in the contract") + + +class ClauseExplanationResponse(BaseModel): + """Response for clause explanation.""" + explanation: str = Field(..., description="Plain-language explanation of the clause") + key_points: List[str] = Field(default_factory=list, description="Key points to understand") + implications: List[str] = Field(default_factory=list, description="What this means for the user") + + +@router.post("/contracts/{contract_id}/explain-clause", response_model=ClauseExplanationResponse) +async def explain_contract_clause( + contract_id: str, + payload: ClauseExplanationRequest, + user: dict = Depends(get_current_user) +): + """Provide plain-language explanations for complex legal clauses.""" + 
supabase = supabase_anon + + # Verify access + contract_resp = supabase.table("contracts") \ + .select("*") \ + .eq("id", contract_id) \ + .single() \ + .execute() + + if not contract_resp.data: + raise HTTPException(status_code=404, detail="Contract not found") + + contract = contract_resp.data + role = user.get("role") + + if role == "Brand": + brand_profile = fetch_brand_profile_by_user_id(user["id"]) + if not brand_profile or brand_profile.get("id") != contract["brand_id"]: + raise HTTPException(status_code=403, detail="Access denied") + elif role == "Creator": + creator_profile = fetch_creator_profile_by_user_id(user["id"]) + if not creator_profile or creator_profile.get("id") != contract["creator_id"]: + raise HTTPException(status_code=403, detail="Access denied") + else: + raise HTTPException(status_code=403, detail="Access denied") + + try: + if not settings.groq_api_key: + raise HTTPException(status_code=500, detail="GROQ API key not configured") + + groq_client = Groq(api_key=settings.groq_api_key) + + contract_terms = json.dumps(contract.get("terms", {}), indent=2) + user_role_label = "creator" if role == "Creator" else "brand" + + prompt = f"""Explain this contract clause in plain, easy-to-understand language for a {user_role_label}. + +CONTRACT TERMS (for context): +{contract_terms} + +CLAUSE TO EXPLAIN: +{payload.clause_text} + +CONTEXT: {payload.clause_context or 'General contract clause'} + +Provide: +1. A clear, plain-language explanation of what this clause means +2. Key points the user should understand +3. What this means for their rights and responsibilities + +Use simple language, avoid legal jargon, and be specific about what the user needs to know. 
+ +Return your response as JSON with this structure: +{{ + "explanation": "Clear explanation in plain language", + "key_points": ["Point 1", "Point 2"], + "implications": ["Implication 1", "Implication 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are a legal educator who explains complex contract clauses in simple, understandable terms. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=800, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + + # Clean JSON response + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + + return ClauseExplanationResponse( + explanation=result.get("explanation", "Unable to explain this clause."), + key_points=result.get("key_points", []), + implications=result.get("implications", []) + ) + + except json.JSONDecodeError: + raise HTTPException(status_code=500, detail="Failed to parse AI response") + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error explaining clause: {str(e)}" + ) from e + + +class ContractSummaryResponse(BaseModel): + """Response for contract summarization.""" + summary: str = Field(..., description="Concise summary of the contract") + key_terms: Dict[str, Any] = Field(..., description="Key terms extracted (payment, timeline, deliverables, etc.)") + obligations: Dict[str, List[str]] = Field(..., description="Obligations for each party") + important_dates: List[str] = Field(default_factory=list, description="Important dates and deadlines") + + +@router.get("/contracts/{contract_id}/summarize", 
response_model=ContractSummaryResponse) +async def summarize_contract( + contract_id: str, + user: dict = Depends(get_current_user) +): + """AI can generate concise summaries of lengthy contracts, highlighting key terms, payment details, and obligations.""" + supabase = supabase_anon + + # Verify access + contract_resp = supabase.table("contracts") \ + .select("*, proposals(*)") \ + .eq("id", contract_id) \ + .single() \ + .execute() + + if not contract_resp.data: + raise HTTPException(status_code=404, detail="Contract not found") + + contract = contract_resp.data + role = user.get("role") + + if role == "Brand": + brand_profile = fetch_brand_profile_by_user_id(user["id"]) + if not brand_profile or brand_profile.get("id") != contract["brand_id"]: + raise HTTPException(status_code=403, detail="Access denied") + elif role == "Creator": + creator_profile = fetch_creator_profile_by_user_id(user["id"]) + if not creator_profile or creator_profile.get("id") != contract["creator_id"]: + raise HTTPException(status_code=403, detail="Access denied") + else: + raise HTTPException(status_code=403, detail="Access denied") + + try: + if not settings.groq_api_key: + raise HTTPException(status_code=500, detail="GROQ API key not configured") + + groq_client = Groq(api_key=settings.groq_api_key) + + contract_terms = json.dumps(contract.get("terms", {}), indent=2) + proposal = contract.get("proposals", {}) if isinstance(contract.get("proposals"), dict) else {} + + prompt = f"""Create a concise, easy-to-understand summary of this contract. + +CONTRACT TERMS: +{contract_terms} + +PROPOSAL CONTEXT: +- Subject: {proposal.get('subject', 'N/A')} +- Campaign: {proposal.get('campaign_title', 'N/A')} + +Generate a summary that highlights: +1. Overall purpose and scope of the agreement +2. Key terms (payment amount, schedule, deliverables, timeline) +3. Obligations for each party (brand and creator) +4. Important dates and deadlines +5. 
Key rights and responsibilities + +Return your response as JSON with this structure: +{{ + "summary": "Overall summary paragraph", + "key_terms": {{ + "payment": "...", + "timeline": "...", + "deliverables": [...], + "content_rights": "..." + }}, + "obligations": {{ + "brand": ["Obligation 1", "Obligation 2"], + "creator": ["Obligation 1", "Obligation 2"] + }}, + "important_dates": ["Date 1", "Date 2"] +}}""" + + completion = groq_client.chat.completions.create( + model="meta-llama/llama-4-scout-17b-16e-instruct", + messages=[ + { + "role": "system", + "content": "You are an expert contract analyst. Create clear, concise summaries of contracts. Always respond with valid JSON only." + }, + {"role": "user", "content": prompt} + ], + temperature=0.3, + max_completion_tokens=1200, + response_format={"type": "json_object"} + ) + + content = completion.choices[0].message.content if completion.choices else "{}" + content = content.strip() + + # Clean JSON response + if content.startswith("```json"): + content = content[7:] + if content.startswith("```"): + content = content[3:] + if content.endswith("```"): + content = content[:-3] + content = content.strip() + + result = json.loads(content) + + return ContractSummaryResponse( + summary=result.get("summary", "Contract summary unavailable."), + key_terms=result.get("key_terms", {}), + obligations=result.get("obligations", {"brand": [], "creator": []}), + important_dates=result.get("important_dates", []) + ) + + except json.JSONDecodeError: + raise HTTPException(status_code=500, detail="Failed to parse AI response") + except Exception as e: + raise HTTPException( + status_code=500, + detail=f"Error summarizing contract: {str(e)}" + ) from e diff --git a/backend/app/main.py b/backend/app/main.py index 64dd92d..6a6f434 100644 --- a/backend/app/main.py +++ b/backend/app/main.py @@ -11,6 +11,7 @@ from app.api.routes import creators from app.api.routes import proposals from app.api.routes import analytics +from app.api.routes 
import ai_analytics from app.api.routes import profiles app = FastAPI(title="Inpact Backend", version="0.1.0") @@ -43,6 +44,7 @@ app.include_router(creators.router) app.include_router(proposals.router) app.include_router(analytics.router) +app.include_router(ai_analytics.router) app.include_router(profiles.router) @app.get("/") diff --git a/frontend/app/brand/analytics/page.tsx b/frontend/app/brand/analytics/page.tsx index ec7ce43..17ee7e1 100644 --- a/frontend/app/brand/analytics/page.tsx +++ b/frontend/app/brand/analytics/page.tsx @@ -3,14 +3,44 @@ import AuthGuard from "@/components/auth/AuthGuard"; import SlidingMenu from "@/components/SlidingMenu"; import BrandAnalyticsDashboard from "@/components/analytics/BrandAnalyticsDashboard"; +import AIAnalyticsDashboard from "@/components/analytics/AIAnalyticsDashboard"; +import { useState } from "react"; export default function BrandAnalyticsPage() { + const [activeView, setActiveView] = useState<"standard" | "ai">("ai"); + return (
+
+ + +
+ {activeView === "ai" ? ( + + ) : ( + )}
diff --git a/frontend/app/brand/campaigns/page.tsx b/frontend/app/brand/campaigns/page.tsx index 0f61a0e..d2a5312 100644 --- a/frontend/app/brand/campaigns/page.tsx +++ b/frontend/app/brand/campaigns/page.tsx @@ -24,9 +24,19 @@ import { Search, Target, Users, + Check, + X, + Clock, + UserCheck, } from "lucide-react"; import { useRouter } from "next/navigation"; import { useEffect, useState } from "react"; +import { updateCampaign } from "@/lib/campaignApi"; +import { + fetchCampaignApplications, + updateApplicationStatus, + type CampaignApplication, +} from "@/lib/api/campaignWall"; export default function CampaignsPage() { const router = useRouter(); @@ -41,6 +51,10 @@ export default function CampaignsPage() { const [startsAfter, setStartsAfter] = useState(""); const [endsBefore, setEndsBefore] = useState(""); const [expandedCampaign, setExpandedCampaign] = useState(null); + const [openCampaignsFilter, setOpenCampaignsFilter] = useState(false); + const [applications, setApplications] = useState>({}); + const [loadingApplications, setLoadingApplications] = useState>({}); + const [updatingCampaign, setUpdatingCampaign] = useState(null); useEffect(() => { loadCampaigns(); @@ -80,12 +94,70 @@ export default function CampaignsPage() { loadCampaigns(); }; - const toggleExpand = (campaignId: string) => { - setExpandedCampaign(expandedCampaign === campaignId ? 
null : campaignId); + const toggleExpand = async (campaignId: string) => { + if (expandedCampaign === campaignId) { + setExpandedCampaign(null); + } else { + setExpandedCampaign(campaignId); + // Load applications when expanding + await loadApplications(campaignId); + } + }; + + const loadApplications = async (campaignId: string) => { + if (loadingApplications[campaignId]) return; + try { + setLoadingApplications((prev) => ({ ...prev, [campaignId]: true })); + const apps = await fetchCampaignApplications(campaignId); + setApplications((prev) => ({ ...prev, [campaignId]: apps })); + } catch (err: any) { + console.error("Failed to load applications:", err); + setError(err.message || "Failed to load applications"); + } finally { + setLoadingApplications((prev) => ({ ...prev, [campaignId]: false })); + } + }; + + const handleToggleCampaignWall = async (campaign: Campaign, field: "is_open_for_applications" | "is_on_campaign_wall") => { + try { + setUpdatingCampaign(campaign.id); + const newValue = !campaign[field]; + await updateCampaign(campaign.id, { [field]: newValue }); + // Reload campaigns to reflect changes + await loadCampaigns(); + } catch (err: any) { + console.error("Failed to update campaign:", err); + setError(err.message || "Failed to update campaign"); + } finally { + setUpdatingCampaign(null); + } + }; + + const handleApplicationStatusChange = async ( + campaignId: string, + applicationId: string, + newStatus: "reviewing" | "accepted" | "rejected" + ) => { + try { + await updateApplicationStatus(campaignId, applicationId, newStatus); + // Reload applications + await loadApplications(campaignId); + if (newStatus === "accepted") { + alert("Application accepted! 
You can now create a proposal."); + } + } catch (err: any) { + console.error("Failed to update application status:", err); + setError(err.message || "Failed to update application status"); + } }; const normalizedSearch = searchTerm.trim().toLowerCase(); const filteredCampaigns = campaigns.filter((campaign) => { + // Filter by open campaigns if enabled + if (openCampaignsFilter && !campaign.is_open_for_applications) { + return false; + } + // Filter by search term if (!normalizedSearch) return true; return ( campaign.title.toLowerCase().includes(normalizedSearch) || @@ -202,6 +274,15 @@ export default function CampaignsPage() { > Search + @@ -465,6 +546,169 @@ export default function CampaignsPage() { )} + {/* Campaign Wall Settings */} +
+

Campaign Wall Settings

+
+
+
+ +

Allow creators to apply to this campaign

+
+ +
+
+
+ +

Display this campaign on the public campaign wall

+
+ +
+
+
+ + {/* Applications Section */} + {campaign.is_open_for_applications && ( +
+
+

+ + Applications ({applications[campaign.id]?.length || 0}) +

+ +
+ {loadingApplications[campaign.id] ? ( +
+
+

Loading applications...

+
+ ) : applications[campaign.id]?.length > 0 ? ( +
+ {applications[campaign.id].map((app) => ( +
+
+
+ {app.creator_profile_picture ? ( + {app.creator_name + ) : ( +
+ )} +
+

{app.creator_name || "Unknown Creator"}

+

{new Date(app.created_at).toLocaleDateString()}

+
+
+ + {app.status} + +
+ {app.description && ( +

{app.description}

+ )} +
+ {app.payment_min && app.payment_max && ( +
+ Payment: + + ₹{app.payment_min.toLocaleString()} - ₹{app.payment_max.toLocaleString()} + +
+ )} + {app.timeline_days && ( +
+ Timeline: + {app.timeline_days} days +
+ )} + {app.timeline_weeks && ( +
+ Timeline: + {app.timeline_weeks} weeks +
+ )} +
+ {(app.status === "applied" || app.status === "reviewing") && ( +
+ + {app.status === "applied" && ( + + )} + +
+ )} +
+ ))} +
+ ) : ( +
+ +

No applications yet

+
+ )} +
+ )} + {/* Actions */}
- + +
+ {activeView === "ai" ? ( + + ) : ( + )} diff --git a/frontend/app/creator/campaign-wall/page.tsx b/frontend/app/creator/campaign-wall/page.tsx new file mode 100644 index 0000000..091cf17 --- /dev/null +++ b/frontend/app/creator/campaign-wall/page.tsx @@ -0,0 +1,731 @@ +"use client"; + +import AuthGuard from "@/components/auth/AuthGuard"; +import SlidingMenu from "@/components/SlidingMenu"; +import { + fetchPublicCampaigns, + fetchCampaignRecommendations, + createCampaignApplication, + fetchCreatorApplications, + type CampaignApplication, + type CampaignApplicationCreate, +} from "@/lib/api/campaignWall"; +import { createProposal } from "@/lib/api/proposals"; +import { Campaign, PLATFORM_OPTIONS, NICHE_OPTIONS } from "@/types/campaign"; +import { + Search, + Filter, + Sparkles, + DollarSign, + Calendar, + Users, + Target, + Check, + Clock, + X, + Send, + Loader2, +} from "lucide-react"; +import { useEffect, useState } from "react"; +import { formatCurrency, formatDate } from "@/lib/campaignApi"; + +export default function CampaignWallPage() { + const [activeTab, setActiveTab] = useState<"browse" | "my-applications">("browse"); + const [campaigns, setCampaigns] = useState([]); + const [myApplications, setMyApplications] = useState([]); + const [loading, setLoading] = useState(true); + const [loadingApplications, setLoadingApplications] = useState(false); + const [error, setError] = useState(null); + const [searchTerm, setSearchTerm] = useState(""); + const [platformFilter, setPlatformFilter] = useState(""); + const [nicheFilter, setNicheFilter] = useState(""); + const [budgetMin, setBudgetMin] = useState(""); + const [budgetMax, setBudgetMax] = useState(""); + const [showApplicationModal, setShowApplicationModal] = useState(false); + const [selectedCampaign, setSelectedCampaign] = useState(null); + const [submittingApplication, setSubmittingApplication] = useState(false); + const [applicationData, setApplicationData] = useState({ + payment_min: undefined, + 
payment_max: undefined, + timeline_days: undefined, + timeline_weeks: undefined, + description: "", + }); + const [loadingRecommendations, setLoadingRecommendations] = useState(false); + const [showProposalModal, setShowProposalModal] = useState(false); + const [selectedApplication, setSelectedApplication] = useState(null); + const [submittingProposal, setSubmittingProposal] = useState(false); + const [proposalData, setProposalData] = useState({ + subject: "", + message: "", + proposed_amount: "", + content_ideas: "", + ideal_pricing: "", + }); + + useEffect(() => { + if (activeTab === "browse") { + loadCampaigns(); + } else { + loadMyApplications(); + } + }, [activeTab]); + + const loadCampaigns = async () => { + try { + setLoading(true); + setError(null); + const data = await fetchPublicCampaigns({ + search: searchTerm || undefined, + platform: platformFilter || undefined, + niche: nicheFilter || undefined, + budget_min: budgetMin ? parseFloat(budgetMin) : undefined, + budget_max: budgetMax ? 
parseFloat(budgetMax) : undefined, + }); + setCampaigns(data); + } catch (err: any) { + setError(err.message || "Failed to load campaigns"); + } finally { + setLoading(false); + } + }; + + const loadMyApplications = async () => { + try { + setLoadingApplications(true); + setError(null); + const data = await fetchCreatorApplications(); + setMyApplications(data); + } catch (err: any) { + setError(err.message || "Failed to load applications"); + } finally { + setLoadingApplications(false); + } + }; + + const loadRecommendations = async () => { + try { + setLoadingRecommendations(true); + setError(null); + const data = await fetchCampaignRecommendations({ limit: 20, use_ai: true }); + setCampaigns(data); + } catch (err: any) { + setError(err.message || "Failed to load recommendations"); + } finally { + setLoadingRecommendations(false); + } + }; + + const handleSearch = () => { + loadCampaigns(); + }; + + const handleOpenApplicationModal = (campaign: Campaign) => { + setSelectedCampaign(campaign); + setApplicationData({ + payment_min: campaign.budget_min || undefined, + payment_max: campaign.budget_max || undefined, + timeline_days: undefined, + timeline_weeks: undefined, + description: "", + }); + setShowApplicationModal(true); + }; + + const handleSubmitApplication = async (e: React.FormEvent) => { + e.preventDefault(); + if (!selectedCampaign || !applicationData.description.trim()) { + alert("Please fill in all required fields"); + return; + } + + try { + setSubmittingApplication(true); + setError(null); + await createCampaignApplication(selectedCampaign.id, applicationData); + alert("Application submitted successfully!"); + setShowApplicationModal(false); + setSelectedCampaign(null); + setApplicationData({ + payment_min: undefined, + payment_max: undefined, + timeline_days: undefined, + timeline_weeks: undefined, + description: "", + }); + // Reload campaigns to update applied status + await loadCampaigns(); + await loadMyApplications(); + } catch (err: any) { + 
setError(err.message || "Failed to submit application"); + alert(err.message || "Failed to submit application"); + } finally { + setSubmittingApplication(false); + } + }; + + const handleOpenProposalModal = (app: CampaignApplication) => { + setSelectedApplication(app); + setProposalData({ + subject: `Proposal for ${app.campaign_title || "Campaign"}`, + message: app.description || "", + proposed_amount: app.payment_max?.toString() || app.payment_min?.toString() || "", + content_ideas: "", + ideal_pricing: "", + }); + setShowProposalModal(true); + }; + + const handleSubmitProposal = async (e: React.FormEvent) => { + e.preventDefault(); + if (!selectedApplication || !proposalData.message.trim()) { + alert("Please fill in all required fields"); + return; + } + + try { + setSubmittingProposal(true); + setError(null); + + // Use the standard proposal creation endpoint + await createProposal({ + campaign_id: selectedApplication.campaign_id, + subject: proposalData.subject || `Proposal for ${selectedApplication.campaign_title || "Campaign"}`, + message: proposalData.message, + proposed_amount: proposalData.proposed_amount ? parseFloat(proposalData.proposed_amount) : undefined, + content_ideas: proposalData.content_ideas ? 
[proposalData.content_ideas] : [], + ideal_pricing: proposalData.ideal_pricing || undefined, + }); + + alert("Proposal sent successfully!"); + setShowProposalModal(false); + setSelectedApplication(null); + setProposalData({ + subject: "", + message: "", + proposed_amount: "", + content_ideas: "", + ideal_pricing: "", + }); + // Reload applications + await loadMyApplications(); + } catch (err: any) { + setError(err.message || "Failed to send proposal"); + alert(err.message || "Failed to send proposal"); + } finally { + setSubmittingProposal(false); + } + }; + + const getStatusColor = (status: string) => { + switch (status) { + case "accepted": + return "bg-green-100 text-green-800"; + case "rejected": + return "bg-red-100 text-red-800"; + case "reviewing": + return "bg-yellow-100 text-yellow-800"; + default: + return "bg-blue-100 text-blue-800"; + } + }; + + const hasApplied = (campaignId: string) => { + return myApplications.some((app) => app.campaign_id === campaignId); + }; + + return ( + +
+ + +
+ {/* Header */} +
+

Campaign Wall

+

Browse and apply to open campaigns

+
+ + {/* Tabs */} +
+ + +
+ + {/* Browse Tab */} + {activeTab === "browse" && ( + <> + {/* Search and Filters */} +
+
+
+ + setSearchTerm(e.target.value)} + onKeyPress={(e) => e.key === "Enter" && handleSearch()} + className="w-full rounded-lg border border-gray-300 py-3 pr-4 pl-10 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + /> +
+
+
+
+ + +
+ + setBudgetMin(e.target.value)} + className="w-28 rounded-lg border border-gray-300 px-3 py-3 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + min={0} + /> + setBudgetMax(e.target.value)} + className="w-28 rounded-lg border border-gray-300 px-3 py-3 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + min={0} + /> + + +
+
+ + {/* Error State */} + {error && ( +
+ {error} +
+ )} + + {/* Loading State */} + {loading && ( +
+
+

Loading campaigns...

+
+ )} + + {/* Campaigns Grid */} + {!loading && campaigns.length > 0 && ( +
+ {campaigns.map((campaign) => { + const applied = hasApplied(campaign.id); + return ( +
+
+
+

{campaign.title}

+ {applied && ( + + Applied + + )} +
+ {campaign.short_description && ( +

{campaign.short_description}

+ )} +
+ {campaign.budget_min && campaign.budget_max && ( +
+ + + {formatCurrency(campaign.budget_min)} - {formatCurrency(campaign.budget_max)} + +
+ )} + {campaign.platforms.length > 0 && ( +
+ + {campaign.platforms.join(", ")} +
+ )} + {campaign.preferred_creator_niches.length > 0 && ( +
+ + {campaign.preferred_creator_niches.join(", ")} +
+ )} +
+ +
+
+ ); + })} +
+ )} + + {/* Empty State */} + {!loading && campaigns.length === 0 && ( +
+ +

No campaigns found

+

Try adjusting your filters or check back later

+
+ )} + + )} + + {/* My Applications Tab */} + {activeTab === "my-applications" && ( + <> + {loadingApplications ? ( +
+
+

Loading applications...

+
+ ) : myApplications.length > 0 ? ( +
+ {myApplications.map((app) => ( +
+
+
+

{app.campaign_title || "Campaign"}

+

+ Applied on {new Date(app.created_at).toLocaleDateString()} +

+
+ + {app.status} + +
+ {app.description && ( +

{app.description}

+ )} +
+ {app.payment_min && app.payment_max && ( +
+ Payment: + + ₹{app.payment_min.toLocaleString()} - ₹{app.payment_max.toLocaleString()} + +
+ )} + {app.timeline_days && ( +
+ Timeline: + {app.timeline_days} days +
+ )} + {app.timeline_weeks && ( +
+ Timeline: + {app.timeline_weeks} weeks +
+ )} +
+ {app.status === "accepted" && ( +
+ +
+ )} +
+ ))} +
+ ) : ( +
+ +

No applications yet

+

Start applying to campaigns to see them here

+
+ )} + + )} + + {/* Application Modal */} + {showApplicationModal && selectedCampaign && ( +
+
+
+

Apply to Campaign

+

{selectedCampaign.title}

+
+
+
+ +
+ + setApplicationData({ + ...applicationData, + payment_min: e.target.value ? parseFloat(e.target.value) : undefined, + }) + } + className="rounded-lg border border-gray-300 px-3 py-2 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + min={0} + /> + + setApplicationData({ + ...applicationData, + payment_max: e.target.value ? parseFloat(e.target.value) : undefined, + }) + } + className="rounded-lg border border-gray-300 px-3 py-2 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + min={0} + /> +
+
+
+ +
+ + setApplicationData({ + ...applicationData, + timeline_days: e.target.value ? parseInt(e.target.value) : undefined, + timeline_weeks: undefined, + }) + } + className="rounded-lg border border-gray-300 px-3 py-2 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + min={1} + /> + + setApplicationData({ + ...applicationData, + timeline_weeks: e.target.value ? parseInt(e.target.value) : undefined, + timeline_days: undefined, + }) + } + className="rounded-lg border border-gray-300 px-3 py-2 focus:border-purple-500 focus:ring-2 focus:ring-purple-200 focus:outline-none" + min={1} + /> +
+
+
+ +