Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 17 additions & 5 deletions backend/ai_service.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,10 +21,11 @@
)

# Configure Gemini
# Use provided key as fallback if env var is missing
api_key = os.environ.get("GEMINI_API_KEY", "AIzaSyB8_i3tbDE3GmX4CsQ8G3mD3pB2WrHi5C8")
api_key = os.environ.get("GEMINI_API_KEY")
if api_key:
genai.configure(api_key=api_key)
else:
logger.warning("GEMINI_API_KEY not set. AI services will be disabled.")


@lru_cache(maxsize=1)
Expand Down Expand Up @@ -98,10 +99,21 @@ async def generate_action_plan(issue_description: str, category: str, image_path

try:
plan = json.loads(text_response)
except json.JSONDecodeError:
except json.JSONDecodeError as e:
# Try to fix common JSON errors if possible, or fallback
logger.error(f"Gemini returned invalid JSON: {text_response}")
raise Exception("Invalid JSON from AI")
logger.error(f"Gemini returned invalid JSON: {text_response} | Error: {e}")

# Last ditch effort: Try to find the first { and last }
try:
start_idx = text_response.find('{')
end_idx = text_response.rfind('}')
if start_idx != -1 and end_idx != -1:
potential_json = text_response[start_idx:end_idx+1]
plan = json.loads(potential_json)
else:
raise Exception("No JSON object found in response")
except Exception:
raise Exception("Invalid JSON from AI")

if "x_post" not in plan or not plan.get("x_post"):
plan["x_post"] = x_post
Expand Down
5 changes: 3 additions & 2 deletions backend/gemini_summary.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,9 @@
# Configure Gemini (mandatory environment variable)
api_key = os.environ.get("GEMINI_API_KEY")
if not api_key:
raise ValueError("GEMINI_API_KEY environment variable is required but not set. Please set it in your environment variables.")
genai.configure(api_key=api_key)
logger.warning("GEMINI_API_KEY environment variable is not set. AI summaries will be disabled.")
else:
genai.configure(api_key=api_key)


def _get_fallback_summary(mla_name: str, assembly_constituency: str, district: str) -> str:
Expand Down
56 changes: 28 additions & 28 deletions backend/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@
load_maharashtra_pincode_data,
load_maharashtra_mla_data
)
from pydantic import BaseModel
import json
import os
import shutil
Expand Down Expand Up @@ -87,9 +86,9 @@ async def lifespan(app: FastAPI):
# These functions use lru_cache, so calling them once loads the data into memory
load_maharashtra_pincode_data()
load_maharashtra_mla_data()
print("Maharashtra data pre-loaded successfully.")
logger.info("Maharashtra data pre-loaded successfully.")
except Exception as e:
print(f"Error pre-loading Maharashtra data: {e}")
logger.error(f"Error pre-loading Maharashtra data: {e}", exc_info=True)

# Startup: Start Telegram Bot in background (non-blocking)
bot_task = None
Expand All @@ -101,7 +100,7 @@ async def start_bot_background():
try:
bot_app = await run_bot()
except Exception as e:
print(f"Error starting bot: {e}")
logger.error(f"Error starting bot: {e}", exc_info=True)

# Create background task for bot initialization
bot_task = asyncio.create_task(start_bot_background())
Expand Down Expand Up @@ -307,32 +306,36 @@ def get_recent_issues(db: Session = Depends(get_db)):
# Convert to Pydantic models for validation and serialization
data = []
for i in issues:
# Handle action_plan JSON string
action_plan_val = i.action_plan
if isinstance(action_plan_val, str) and action_plan_val:
try:
action_plan_val = json.loads(action_plan_val)
except json.JSONDecodeError:
pass # Keep as string if not valid JSON

data.append(IssueResponse(
id=i.id,
category=i.category,
description=i.description[:100] + "..." if len(i.description) > 100 else i.description,
created_at=i.created_at,
image_path=i.image_path,
status=i.status,
upvotes=i.upvotes if i.upvotes is not None else 0,
location=i.location,
latitude=i.latitude,
longitude=i.longitude,
action_plan=action_plan_val
).model_dump(mode='json')) # Store as JSON-compatible dict in cache
data.append(_serialize_issue(i))

recent_issues_cache.set(data)

return data

def _serialize_issue(i: Issue) -> dict:
    """Serialize an Issue ORM object into a JSON-compatible dict.

    The stored ``action_plan`` may be a JSON string (legacy rows), an
    already-decoded object, or ``None``; strings are decoded when possible
    and kept verbatim otherwise. The description is truncated to 100
    characters (with an ellipsis) for list views.
    """
    # Decode action_plan only when it is a non-empty string.
    plan = i.action_plan
    if plan and isinstance(plan, str):
        try:
            plan = json.loads(plan)
        except json.JSONDecodeError:
            pass  # Not valid JSON — keep the raw string as-is.

    # Truncate long descriptions for compact list responses.
    desc = i.description
    if len(desc) > 100:
        desc = desc[:100] + "..."

    response = IssueResponse(
        id=i.id,
        category=i.category,
        description=desc,
        created_at=i.created_at,
        image_path=i.image_path,
        status=i.status,
        upvotes=0 if i.upvotes is None else i.upvotes,
        location=i.location,
        latitude=i.latitude,
        longitude=i.longitude,
        action_plan=plan,
    )
    # mode='json' yields only JSON-serializable primitives, so the result
    # is safe to store directly in the response cache.
    return response.model_dump(mode='json')

@app.post("/api/detect-pothole")
async def detect_pothole_endpoint(image: UploadFile = File(...)):
# Convert to PIL Image directly from file object to save memory
Expand Down Expand Up @@ -361,7 +364,6 @@ async def detect_infrastructure_endpoint(request: Request, image: UploadFile = F

# Run detection using unified service (local ML by default)
try:
detections = await detect_infrastructure(pil_image)
# Use shared HTTP client from app state
client = request.app.state.http_client
detections = await detect_infrastructure_clip(image_bytes, client=client)
Expand All @@ -381,7 +383,6 @@ async def detect_flooding_endpoint(request: Request, image: UploadFile = File(..

# Run detection using unified service (local ML by default)
try:
detections = await detect_flooding(pil_image)
# Use shared HTTP client from app state
client = request.app.state.http_client
detections = await detect_flooding_clip(image_bytes, client=client)
Expand All @@ -401,7 +402,6 @@ async def detect_vandalism_endpoint(request: Request, image: UploadFile = File(.

# Run detection using unified service (local ML by default)
try:
detections = await detect_vandalism(pil_image)
# Use shared HTTP client from app state
client = request.app.state.http_client
detections = await detect_vandalism_clip(image_bytes, client=client)
Expand Down
2 changes: 1 addition & 1 deletion backend/schemas.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ class IssueResponse(BaseModel):
location: Optional[str] = None
latitude: Optional[float] = None
longitude: Optional[float] = None
action_plan: Optional[Any] = None
action_plan: Optional[ActionPlan | dict] = None

model_config = ConfigDict(from_attributes=True)

Expand Down