Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
75 changes: 75 additions & 0 deletions backend/.gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
firebase-debug.log*
firebase-debug.*.log*

# Firebase cache
.firebase/

# Firebase config

# Uncomment this if you'd like others to create their own Firebase project.
# For a team working on the same Firebase project(s), it is recommended to leave
# it commented so all members can deploy to the same project(s) in .firebaserc.
# .firebaserc

# Runtime data
pids
*.pid
*.seed
*.pid.lock

# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov

# Coverage directory used by tools like istanbul
coverage

# nyc test coverage
.nyc_output

# Grunt intermediate storage (http://gruntjs.com/creating-plugins#storing-task-files)
.grunt

# Bower dependency directory (https://bower.io/)
bower_components

# node-waf configuration
.lock-wscript

# Compiled binary addons (http://nodejs.org/api/addons.html)
build/Release

# Dependency directories
node_modules/

# Optional npm cache directory
.npm

# Optional eslint cache
.eslintcache

# Optional REPL history
.node_repl_history

# Output of 'npm pack'
*.tgz

# Yarn Integrity file
.yarn-integrity

# dotenv environment variables file
.env

# dataconnect generated files
.dataconnect

# firebase-config files
app/services/firebase

# quarantine files
app/services/quarantine/
24 changes: 22 additions & 2 deletions backend/app/config.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,15 @@
from logging.config import dictConfig
from typing import Optional

from dotenv import load_dotenv
from PIL import Image, ImageFile
from pydantic import Field
from pydantic_settings import BaseSettings
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.requests import Request

load_dotenv()


class Settings(BaseSettings):
# Database
Expand All @@ -20,8 +25,15 @@ class Settings(BaseSettings):
access_token_expire_minutes: int = 15
refresh_token_expire_days: int = 30
# Firebase
firebase_project_id: Optional[str] = None
firebase_service_account_path: str = "./firebase-service-account.json"
use_firebase_emulator: bool = Field(
default=False, env="USE_FIREBASE_EMULATOR"
) # type:ignore
firebase_project_id: Optional[str] = Field(
default=None, env="FIREBASE_PROJECT_ID"
) # type:ignore
firebase_service_account_path: str = Field(
default="./firebase-service-account.json", env="FIREBASE_SERVICE_ACCOUNT_PATH"
) # type:ignore
Comment on lines +28 to +36
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Ensure Firebase creds load from environment explicitly

These optional fields won’t map to uppercase env vars unless explicitly bound. Add env=... to avoid surprises in different deployments.

@@
-    firebase_type: Optional[str] = None
-    firebase_private_key_id: Optional[str] = None
-    firebase_private_key: Optional[str] = None
-    firebase_client_email: Optional[str] = None
-    firebase_client_id: Optional[str] = None
-    firebase_auth_uri: Optional[str] = None
-    firebase_token_uri: Optional[str] = None
-    firebase_auth_provider_x509_cert_url: Optional[str] = None
-    firebase_client_x509_cert_url: Optional[str] = None
+    firebase_type: Optional[str] = Field(default=None, env="FIREBASE_TYPE")
+    firebase_private_key_id: Optional[str] = Field(default=None, env="FIREBASE_PRIVATE_KEY_ID")
+    firebase_private_key: Optional[str] = Field(default=None, env="FIREBASE_PRIVATE_KEY")
+    firebase_client_email: Optional[str] = Field(default=None, env="FIREBASE_CLIENT_EMAIL")
+    firebase_client_id: Optional[str] = Field(default=None, env="FIREBASE_CLIENT_ID")
+    firebase_auth_uri: Optional[str] = Field(default=None, env="FIREBASE_AUTH_URI")
+    firebase_token_uri: Optional[str] = Field(default=None, env="FIREBASE_TOKEN_URI")
+    firebase_auth_provider_x509_cert_url: Optional[str] = Field(default=None, env="FIREBASE_AUTH_PROVIDER_X509_CERT_URL")
+    firebase_client_x509_cert_url: Optional[str] = Field(default=None, env="FIREBASE_CLIENT_X509_CERT_URL")

Also applies to: 38-47

πŸ€– Prompt for AI Agents
In backend/app/config.py around lines 28-36 (and also apply same fix to lines
38-47), the Optional/str Firebase fields lack explicit env bindings for the
uppercase variable names, which can cause them to not map to environment
variables in some deployments; update each Field to include the explicit env
parameter matching the intended uppercase names (e.g.,
env="USE_FIREBASE_EMULATOR", env="FIREBASE_PROJECT_ID",
env="FIREBASE_SERVICE_ACCOUNT_PATH" etc.), ensuring optional types remain
correct and no other behavior changes.

# Firebase service account credentials as environment variables
firebase_type: Optional[str] = None
firebase_private_key_id: Optional[str] = None
Expand All @@ -32,6 +44,14 @@ class Settings(BaseSettings):
firebase_token_uri: Optional[str] = None
firebase_auth_provider_x509_cert_url: Optional[str] = None
firebase_client_x509_cert_url: Optional[str] = None
# Image validation configs
LOAD_TRUNCATED_IMAGES: bool = False
MAX_IMAGE_PIXELS: int = 50_00_000
MAX_FILE_SIZE: int = 5 * 1024 * 1024
SIGNED_URL_EXPIRY_SECONDS: int = Field(
default=3600, env="SIGNED_URL_EXPIRY_SECONDS"
) # type:ignore
CLAMAV_ENABLED: bool = False

Comment on lines +47 to 55
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Apply Pillow limits from settings and drop unused imports

You import Image/ImageFile but don’t apply settings. Centralize this here to avoid duplication in services.

@@
-    LOAD_TRUNCATED_IMAGES: bool = False
-    MAX_IMAGE_PIXELS: int = 50_00_000
+    LOAD_TRUNCATED_IMAGES: bool = False
+    MAX_IMAGE_PIXELS: int = 50_00_000
@@
 settings = Settings()
 
+# Apply image limits globally for Pillow
+ImageFile.LOAD_TRUNCATED_IMAGES = settings.LOAD_TRUNCATED_IMAGES
+Image.MAX_IMAGE_PIXELS = settings.MAX_IMAGE_PIXELS

Also applies to: 69-70

πŸ€– Prompt for AI Agents
In backend/app/config.py around lines 47 to 55 (and also apply the same change
for lines 69-70), you need to apply the Pillow global settings from these config
values and remove the unnecessary Image/ImageFile imports elsewhere: after
defining LOAD_TRUNCATED_IMAGES and MAX_IMAGE_PIXELS, set
ImageFile.LOAD_TRUNCATED_IMAGES = LOAD_TRUNCATED_IMAGES and
Image.MAX_IMAGE_PIXELS = MAX_IMAGE_PIXELS so Pillow uses the centralized config;
then remove unused direct imports of Image/ImageFile from other modules and any
redundant local settings there, relying on this centralized application instead.

# App
debug: bool = False
Expand Down
73 changes: 72 additions & 1 deletion backend/app/groups/routes.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,17 @@
RemoveMemberResponse,
)
from app.groups.service import group_service
from fastapi import APIRouter, Depends, HTTPException, status
from app.services.schemas import ImageUploadResponse
from app.services.storage import storage_service
from fastapi import (
APIRouter,
BackgroundTasks,
Depends,
File,
HTTPException,
UploadFile,
status,
)

router = APIRouter(prefix="/groups", tags=["Groups"])

Expand Down Expand Up @@ -145,3 +155,64 @@ async def remove_group_member(
if not removed:
raise HTTPException(status_code=400, detail="Failed to remove member")
return RemoveMemberResponse(success=True, message="Member removed successfully")


@router.post("/{group_id}/image", response_model=ImageUploadResponse)
async def upload_group_image(
group_id: str,
file: UploadFile = File(...),
background_tasks: BackgroundTasks = BackgroundTasks(),
current_user: dict = Depends(get_current_user),
):
Comment on lines +160 to +166
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Fix FastAPI parameter injection: don’t instantiate BackgroundTasks in defaults; keep Depends/File with noqa for B008

FastAPI injects BackgroundTasks without a default. Instantiating it in the signature creates a global object at import time. Also, Ruff’s B008 on File/Depends is a false-positive for FastAPI; add noqa if you keep it.

Apply:

-async def upload_group_image(
-    group_id: str,
-    file: UploadFile = File(...),
-    background_tasks: BackgroundTasks = BackgroundTasks(),
-    current_user: dict = Depends(get_current_user),
-):
+async def upload_group_image(
+    group_id: str,
+    file: UploadFile = File(...),  # noqa: B008 - FastAPI dependency
+    background_tasks: BackgroundTasks,
+    current_user: Dict[str, Any] = Depends(get_current_user),  # noqa: B008
+):
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
@router.post("/{group_id}/image", response_model=ImageUploadResponse)
async def upload_group_image(
group_id: str,
file: UploadFile = File(...),
background_tasks: BackgroundTasks = BackgroundTasks(),
current_user: dict = Depends(get_current_user),
):
@router.post("/{group_id}/image", response_model=ImageUploadResponse)
async def upload_group_image(
group_id: str,
file: UploadFile = File(...), # noqa: B008 - FastAPI dependency
background_tasks: BackgroundTasks,
current_user: Dict[str, Any] = Depends(get_current_user), # noqa: B008
):
...
🧰 Tools
πŸͺ› Ruff (0.12.2)

163-163: Do not perform function call File in argument defaults; instead, perform the call within the function, or read the default from a module-level singleton variable

(B008)


164-164: Do not perform function call BackgroundTasks in argument defaults; instead, perform the call within the function, or read the default from a module-level singleton variable

(B008)


165-165: Do not perform function call Depends in argument defaults; instead, perform the call within the function, or read the default from a module-level singleton variable

(B008)

πŸ€– Prompt for AI Agents
In backend/app/groups/routes.py around lines 160-166, remove the default
instantiation of BackgroundTasks in the route signature (do not use
BackgroundTasks() as a default) so FastAPI can inject a fresh BackgroundTasks
per request; change the parameter to background_tasks: BackgroundTasks (no
default). Also keep the File(...) and Depends(...) usages but add a # noqa: B008
comment on those parameters to silence Ruff false-positives if you choose to
keep explicit defaults, or simply ensure no instantiated defaults remain; update
the function signature accordingly.


await group_service.ensure_user_in_group(group_id, current_user["_id"])

try:
urls = await storage_service.upload_image_workflow(
file=file, folder="groups", entity_id=group_id
)

except ValueError as ve:
raise HTTPException(status_code=400, detail=str(ve))
except Exception:
raise HTTPException(status_code=500, detail="Group image upload failed")

background_tasks.add_task(
group_service.update_group_image_url, group_id, urls.get("full")
)

return ImageUploadResponse(
success=True, urls=urls, message="Group image uploaded successfully."
)
Comment on lines +168 to +186
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Ensure invalid files return 400; add pre-validation and proper exception chaining

upload_image_workflow wraps ValueError into RuntimeError, so this route currently returns 500 for invalid files. Guard with a pre-check and chain exceptions.

Apply:

     await group_service.ensure_user_in_group(group_id, current_user["_id"])

-    try:
+    # Pre-validate to ensure 400 on bad input
+    if not await storage_service.validate_file(file):
+        raise HTTPException(status_code=400, detail="Invalid file type or size.")
+    try:
         urls = await storage_service.upload_image_workflow(
             file=file, folder="groups", entity_id=group_id
         )
 
-    except ValueError as ve:
-        raise HTTPException(status_code=400, detail=str(ve))
-    except Exception:
-        raise HTTPException(status_code=500, detail="Group image upload failed")
+    except ValueError as ve:
+        # Defensive: in case workflow preserves ValueError later
+        raise HTTPException(status_code=400, detail=str(ve)) from ve
+    except Exception as e:
+        raise HTTPException(status_code=500, detail="Group image upload failed") from e
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
await group_service.ensure_user_in_group(group_id, current_user["_id"])
try:
urls = await storage_service.upload_image_workflow(
file=file, folder="groups", entity_id=group_id
)
except ValueError as ve:
raise HTTPException(status_code=400, detail=str(ve))
except Exception:
raise HTTPException(status_code=500, detail="Group image upload failed")
background_tasks.add_task(
group_service.update_group_image_url, group_id, urls.get("full")
)
return ImageUploadResponse(
success=True, urls=urls, message="Group image uploaded successfully."
)
await group_service.ensure_user_in_group(group_id, current_user["_id"])
# Pre-validate to ensure 400 on bad input
if not await storage_service.validate_file(file):
raise HTTPException(status_code=400, detail="Invalid file type or size.")
try:
urls = await storage_service.upload_image_workflow(
file=file, folder="groups", entity_id=group_id
)
except ValueError as ve:
# Defensive: in case workflow preserves ValueError later
raise HTTPException(status_code=400, detail=str(ve)) from ve
except Exception as e:
raise HTTPException(status_code=500, detail="Group image upload failed") from e
background_tasks.add_task(
group_service.update_group_image_url, group_id, urls.get("full")
)
return ImageUploadResponse(
success=True, urls=urls, message="Group image uploaded successfully."
)
🧰 Tools
πŸͺ› Ruff (0.12.2)

176-176: Within an except clause, raise exceptions with raise ... from err or raise ... from None to distinguish them from errors in exception handling

(B904)


177-177: Do not catch blind exception: Exception

(BLE001)


178-178: Within an except clause, raise exceptions with raise ... from err or raise ... from None to distinguish them from errors in exception handling

(B904)

πŸ€– Prompt for AI Agents
In backend/app/groups/routes.py around lines 168 to 186, the route currently
returns 500 for invalid image files because upload_image_workflow wraps
ValueError in a RuntimeError; add a pre-validation for the uploaded file (e.g.,
check mime type and allowed extensions and file size) and raise
HTTPException(status_code=400) immediately for invalid inputs, then call
storage_service.upload_image_workflow in a try/except that also catches
RuntimeError and inspects its __cause__ (or the message) to detect an underlying
ValueError and re-raise HTTPException(status_code=400, detail=...) using
exception chaining (raise ... from runtime_error) for proper traceback; leave
other exceptions as 500.

Comment on lines +180 to +186
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Avoid persisting expiring signed URLs; store a stable storage key instead

urls.get("full") is a signed URL (expires). Persisting it in DB will break avatars when it expires. Persist a stable storage key (e.g., the base file_path returned by storage) and generate fresh signed URLs at read time, or make objects public (less preferred).

Minimal approach:

  • Extend upload_image_workflow to also return storage_key (e.g., folder/entity/uuid).
  • Store storage_key in DB.
  • Expose a small endpoint to return fresh signed URLs for a given storage_key when needed.
    Do you want a patch for storage.py + schemas and the router updates?
πŸ€– Prompt for AI Agents
In backend/app/groups/routes.py around lines 180-186, the code currently
persists a signed URL (urls.get("full")) which expires; instead modify the
upload flow to return and persist a stable storage_key (e.g.,
folder/entity/uuid) from storage.upload, update the background task call to pass
and save that storage_key (not the signed URL), update the
ImageUploadResponse/schema to include storage_key (or keep it internal) and
change database column to store storage_key, and add a small endpoint that,
given a storage_key, generates and returns a fresh signed URL using
storage.sign_url at read time; also update storage.py and related schemas to
return storage_key alongside any signed URLs.



@router.delete("/{group_id}/image", response_model=DeleteGroupResponse)
async def delete_group_avatar(
    group_id: str,
    background_tasks: BackgroundTasks,
    current_user: dict = Depends(get_current_user),
):
    """Delete a group's avatar from storage and clear its stored imageUrl.

    Args:
        group_id: Identifier of the group whose avatar is removed.
        background_tasks: FastAPI-injected per-request task queue; used to
            clear the stored imageUrl after the response is sent.
            (No default: instantiating ``BackgroundTasks()`` in the signature
            would create one shared object at import time.)
        current_user: Authenticated user document; must be a group member.

    Raises:
        HTTPException: 404 if the group or its avatar does not exist,
            403 (via ``ensure_user_in_group``) if the caller is not a member,
            500 if the storage backend fails to delete the file.
    """
    group = await group_service.get_group_by_id(group_id, current_user["_id"])
    if not group:
        raise HTTPException(status_code=404, detail="Group not found")

    await group_service.ensure_user_in_group(group_id, current_user["_id"])

    image_url = group.get("imageUrl")
    if not image_url:
        raise HTTPException(status_code=404, detail="Group avatar not found")

    try:
        file_path = storage_service.extract_path_from_url(image_url)
        deleted = await storage_service.delete_image(file_path)
    except Exception as err:
        # Chain the cause so the underlying storage error stays in the trace.
        raise HTTPException(
            status_code=500, detail="Failed to delete group avatar"
        ) from err

    if not deleted:
        raise HTTPException(status_code=500, detail="Failed to delete group avatar")

    # Clear the stored URL after responding; the file itself is already gone.
    background_tasks.add_task(group_service.update_group_image_url, group_id, None)

    return DeleteGroupResponse(
        success=True, message="Group image deleted successfully."
    )
40 changes: 40 additions & 0 deletions backend/app/groups/service.py
Original file line number Diff line number Diff line change
Expand Up @@ -473,5 +473,45 @@ async def remove_member(self, group_id: str, member_id: str, user_id: str) -> bo
)
return result.modified_count == 1

async def ensure_user_in_group(self, group_id: str, user_id: str) -> dict:
"""Ensure that the user is a member of the group. Raises HTTPException if not."""
db = self.get_db()

try:
obj_id = ObjectId(group_id)

except errors.InvalidId:
logger.warning(f"Invalid group_id: {group_id}")
raise HTTPException(status_code=400, detail="Invalid group ID format")
except Exception as e:
logger.error(f"Unexpected error converting group_id to ObjectId: {e}")
raise HTTPException(status_code=500, detail="Internal server error")

Comment on lines +480 to +489
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Exception handling: drop broad catch and use exception chaining

Catching bare Exception trips linters (BLE001) and obscures root causes. Chain InvalidId for clearer traces (B904).

Apply:

-        try:
-            obj_id = ObjectId(group_id)
-        except errors.InvalidId:
-            logger.warning(f"Invalid group_id: {group_id}")
-            raise HTTPException(status_code=400, detail="Invalid group ID format")
-        except Exception as e:
-            logger.error(f"Unexpected error converting group_id to ObjectId: {e}")
-            raise HTTPException(status_code=500, detail="Internal server error")
+        try:
+            obj_id = ObjectId(group_id)
+        except errors.InvalidId as err:
+            logger.warning(f"Invalid group_id: {group_id}")
+            raise HTTPException(status_code=400, detail="Invalid group ID format") from err
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
try:
obj_id = ObjectId(group_id)
except errors.InvalidId:
logger.warning(f"Invalid group_id: {group_id}")
raise HTTPException(status_code=400, detail="Invalid group ID format")
except Exception as e:
logger.error(f"Unexpected error converting group_id to ObjectId: {e}")
raise HTTPException(status_code=500, detail="Internal server error")
try:
obj_id = ObjectId(group_id)
except errors.InvalidId as err:
logger.warning(f"Invalid group_id: {group_id}")
raise HTTPException(status_code=400, detail="Invalid group ID format") from err
🧰 Tools
πŸͺ› Ruff (0.12.2)

485-485: Within an except clause, raise exceptions with raise ... from err or raise ... from None to distinguish them from errors in exception handling

(B904)


486-486: Do not catch blind exception: Exception

(BLE001)


488-488: Within an except clause, raise exceptions with raise ... from err or raise ... from None to distinguish them from errors in exception handling

(B904)

πŸ€– Prompt for AI Agents
In backend/app/groups/service.py around lines 480 to 489, remove the broad
"except Exception" handler and instead chain the specific InvalidId exception
when converting group_id: change the InvalidId clause to "except
errors.InvalidId as e", keep the warning log, and re-raise the HTTPException
using exception chaining (raise HTTPException(status_code=400, detail="Invalid
group ID format") from e); delete the generic catch-all so other unexpected
errors bubble up (or, if you must convert them, catch specific exceptions only
and re-raise HTTPException using "from e" to preserve the original traceback).

group = await db.groups.find_one(
{"_id": obj_id, "members": {"$elemMatch": {"userId": user_id}}}
)

if not group:
raise HTTPException(
status_code=403, detail="You are not a member of this group"
)

return group # Optional return if route needs to read group data

async def update_group_image_url(self, group_id: str, image_url: str) -> bool:
    """Set or clear the group's image URL in the database.

    Args:
        group_id: String form of the group's ObjectId.
        image_url: URL to store, or None to clear the avatar (the delete
            route relies on None being accepted here).

    Returns:
        True if a group with the given id was matched.  Uses
        ``matched_count`` rather than ``modified_count`` so an idempotent
        re-write of the same URL (e.g. a retried background task) still
        counts as success; False for an invalid id or a missing group.
    """
    db = self.get_db()

    try:
        obj_id = ObjectId(group_id)
    except errors.InvalidId:
        logger.warning(f"Invalid group_id: {group_id}")
        return False

    result = await db.groups.update_one(
        {"_id": obj_id}, {"$set": {"imageUrl": image_url}}
    )
    return result.matched_count == 1

Comment on lines +501 to +515
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Validate image URL and return matched_count

  • Guard against empty/None values being written.
  • Use matched_count to consider idempotent updates successful.

Apply:

     async def update_group_image_url(self, group_id: str, image_url: str) -> bool:
         """Update the group's image URL in the database."""
         db = self.get_db()
 
-        try:
-            obj_id = ObjectId(group_id)
-        except errors.InvalidId:
-            logger.warning(f"Invalid group_id: {group_id}")
-            return False
-
-        result = await db.groups.update_one(
-            {"_id": obj_id}, {"$set": {"imageUrl": image_url}}
-        )
-        return result.modified_count == 1
+        # Basic input validation
+        if not isinstance(image_url, str) or not image_url.strip():
+            return False
+
+        try:
+            obj_id = ObjectId(group_id)
+        except errors.InvalidId:
+            logger.warning(f"Invalid group_id: {group_id}")
+            return False
+
+        result = await db.groups.update_one(
+            {"_id": obj_id}, {"$set": {"imageUrl": image_url.strip()}}
+        )
+        return result.matched_count == 1
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
async def update_group_image_url(self, group_id: str, image_url: str) -> bool:
"""Update the group's image URL in the database."""
db = self.get_db()
try:
obj_id = ObjectId(group_id)
except errors.InvalidId:
logger.warning(f"Invalid group_id: {group_id}")
return False
result = await db.groups.update_one(
{"_id": obj_id}, {"$set": {"imageUrl": image_url}}
)
return result.modified_count == 1
async def update_group_image_url(self, group_id: str, image_url: str) -> bool:
"""Update the group's image URL in the database."""
db = self.get_db()
# Basic input validation
if not isinstance(image_url, str) or not image_url.strip():
return False
try:
obj_id = ObjectId(group_id)
except errors.InvalidId:
logger.warning(f"Invalid group_id: {group_id}")
return False
result = await db.groups.update_one(
{"_id": obj_id}, {"$set": {"imageUrl": image_url.strip()}}
)
return result.matched_count == 1
πŸ€– Prompt for AI Agents
In backend/app/groups/service.py around lines 501 to 515, validate that
image_url is a non-empty string (e.g., image_url and image_url.strip()) and
return False early to avoid writing empty/None values; then perform the update
and treat a matched document as success (use result.matched_count to consider
idempotent updates successful) instead of only checking modified_count. Ensure
the guard happens before creating ObjectId or the DB update, and change the
final return to use matched_count (or matched_count == 1) so
unchanged-but-matching updates return True.


group_service = GroupService()
Empty file.
122 changes: 122 additions & 0 deletions backend/app/services/image_processor.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
import imghdr
from io import BytesIO
from typing import Dict, Tuple

from app.config import logger
from PIL import Image, ImageFile, UnidentifiedImageError

# Optional watermark path; set to e.g. "app/assets/watermark.png" to enable.
WATERMARK_PATH = None

# Resize targets as (width, height): square thumbnail plus larger bounded sizes.
RESIZE_CONFIG = {
    "thumbnail": (150, 150),
    "medium": (300, 300),
    "full": (800, 800),
}

# Pillow hardening: reject truncated files and cap decoded image size.
# MAX_IMAGE_PIXELS is a *pixel count* (5,000,000 pixels), not a byte size;
# Pillow raises DecompressionBombError above twice this value.
ImageFile.LOAD_TRUNCATED_IMAGES = False
Image.MAX_IMAGE_PIXELS = 5_000_000

Comment on lines +19 to +21
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Incorrect comment and hard-coded Pillow limits; bind to settings

  • Image.MAX_IMAGE_PIXELS counts pixels, not β€œ50MB”. The comment is misleading.
  • Values should come from config to keep a single source of truth (backend/app/config.py already exposes these).
-from PIL import Image, ImageFile, UnidentifiedImageError
+from PIL import Image, ImageFile, UnidentifiedImageError
+from app.config import settings
@@
-ImageFile.LOAD_TRUNCATED_IMAGES = False
-Image.MAX_IMAGE_PIXELS = 50_00_000  # 50MB in worst case
+ImageFile.LOAD_TRUNCATED_IMAGES = settings.LOAD_TRUNCATED_IMAGES
+Image.MAX_IMAGE_PIXELS = settings.MAX_IMAGE_PIXELS  # max safe pixels
πŸ“ Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
ImageFile.LOAD_TRUNCATED_IMAGES = False
Image.MAX_IMAGE_PIXELS = 50_00_000 # 50MB in worst case
from PIL import Image, ImageFile, UnidentifiedImageError
from app.config import settings
-ImageFile.LOAD_TRUNCATED_IMAGES = False
ImageFile.LOAD_TRUNCATED_IMAGES = settings.LOAD_TRUNCATED_IMAGES
Image.MAX_IMAGE_PIXELS = settings.MAX_IMAGE_PIXELS # max safe pixels
πŸ€– Prompt for AI Agents
In backend/app/services/image_processor.py around lines 19 to 21, the comment
wrongly states "50MB" (Image.MAX_IMAGE_PIXELS counts pixels) and the Pillow
limits are hard-coded; instead import the appropriate settings from
backend/app/config.py and assign them (e.g., Image.MAX_IMAGE_PIXELS =
config.MAX_IMAGE_PIXELS and, if exposed, ImageFile.LOAD_TRUNCATED_IMAGES =
config.LOAD_TRUNCATED_IMAGES), and update the comment to explain that
MAX_IMAGE_PIXELS is a pixel count and is sourced from config to keep a single
source of truth.


def strip_exif(image: Image.Image) -> Image.Image:
    """Return a copy of *image* with all metadata (EXIF etc.) dropped.

    Only pixel data is copied into a fresh image, so nothing from the
    original's ``info`` dictionary survives.  For palette-mode ("P")
    images the palette is copied as well — without it the indexed pixels
    would render with wrong colours.
    """
    clean_image = Image.new(image.mode, image.size)
    # putdata accepts any sequence; avoid materialising a full list first.
    clean_image.putdata(image.getdata())
    palette = image.getpalette()
    if palette is not None:
        clean_image.putpalette(palette)
    return clean_image


def validate_magic_bytes(file_content: bytes) -> None:
    """Validate that *file_content* carries JPEG, PNG or WebP magic bytes.

    Checks the real on-disk signature instead of trusting the filename or
    the client-supplied content type.  ``imghdr`` was deprecated in Python
    3.11 and removed in 3.13 (PEP 594), so the signatures are matched
    directly against the published format headers.

    Raises:
        ValueError: If the content is not a JPEG, PNG or WebP image.
    """
    is_jpeg = file_content.startswith(b"\xff\xd8\xff")
    is_png = file_content.startswith(b"\x89PNG\r\n\x1a\n")
    # WebP is a RIFF container: "RIFF" <4-byte size> "WEBP".
    is_webp = file_content.startswith(b"RIFF") and file_content[8:12] == b"WEBP"
    if not (is_jpeg or is_png or is_webp):
        raise ValueError("Invalid or unsupported image type.")
Comment on lines +32 to +38
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

πŸ› οΈ Refactor suggestion

Replace deprecated imghdr with a safer detector

imghdr is deprecated and unreliable for some formats. You already depend on Pillow; optionally use python-magic if available.

-import imghdr
+from typing import Optional
+try:
+    import magic  # python-magic
+except Exception:  # library may be unavailable in some environments
+    magic = None
@@
-def validate_magic_bytes(file_content: bytes):
-    """
-    Validates the actual file type of image
-    """
-    fmt = imghdr.what(None, h=file_content)
-    if fmt not in ["jpeg", "png", "webp"]:
-        raise ValueError("Invalid or unsupported image type.")
+def validate_magic_bytes(file_content: bytes) -> None:
+    """
+    Validate the real file type from magic bytes.
+    Prefer python-magic; fall back to Pillow sniff.
+    """
+    allowed = {"jpeg", "png", "webp"}
+    detected: Optional[str] = None
+    if magic:
+        mime = magic.from_buffer(file_content, mime=True)  # e.g., "image/png"
+        if isinstance(mime, str) and mime.startswith("image/"):
+            detected = mime.split("/", 1)[1]
+    if detected is None:
+        try:
+            with Image.open(BytesIO(file_content)) as im:
+                detected = (im.format or "").lower()
+        except UnidentifiedImageError:
+            detected = None
+    if detected not in allowed:
+        raise ValueError("Invalid or unsupported image type.")

Committable suggestion skipped: line range outside the PR's diff.

🧰 Tools
πŸͺ› Ruff (0.12.2)

38-38: Avoid specifying long messages outside the exception class

(TRY003)

πŸ€– Prompt for AI Agents
In backend/app/services/image_processor.py around lines 32 to 38, replace the
deprecated imghdr check with a safe detector using Pillow: open the bytes with
PIL.Image via BytesIO, call Image.open(...) and read image.format (normalize to
lowercase) to verify it is one of "jpeg","png","webp"; ensure you catch and
handle PIL.UnidentifiedImageError and other exceptions to raise the same
ValueError message on invalid/unsupported images, and close the image/file
resources; optionally, if python-magic is installed, prefer using
magic.from_buffer(...) to get MIME/type and fall back to Pillow if magic is
unavailable.



def add_watermark(image: Image.Image, watermark: Image.Image) -> Image.Image:
    """Composite *watermark* onto the bottom-right corner of *image*.

    Both inputs are converted to RGBA.  The watermark is scaled down
    (never enlarged beyond its own size) so its width is at most 30% of
    the image width, then alpha-composited with a 10px margin from the
    bottom-right edge.
    """
    base = image.convert("RGBA")
    mark = watermark.convert("RGBA")

    # Cap the watermark at 30% of the base width, preserving aspect ratio.
    target_w = min(mark.width, int(base.width * 0.3))
    target_h = int(mark.height * (target_w / mark.width))
    mark = mark.resize((target_w, target_h), Image.Resampling.LANCZOS)

    # 10px margin from the bottom-right corner.
    x = base.width - target_w - 10
    y = base.height - target_h - 10
    base.alpha_composite(mark, dest=(x, y))
    return base


def resize_image(image: Image.Image, size: Tuple[int, int]) -> Image.Image:
    """Shrink *image* to fit within *size*, padding square targets.

    Aspect ratio is preserved via ``Image.thumbnail``.  When the target is
    square (e.g. thumbnails) the shrunken image is centred on a white RGB
    canvas of exactly *size*; otherwise it is returned as-is.
    """
    image.thumbnail(size, Image.Resampling.LANCZOS)

    width, height = size
    if width != height:
        return image

    # Centre on a white square canvas so all thumbnails share one footprint.
    canvas = Image.new("RGB", size, (255, 255, 255))
    offset_x = (width - image.width) // 2
    offset_y = (height - image.height) // 2
    canvas.paste(image, (offset_x, offset_y))
    return canvas


async def process_image(file_content: bytes) -> Dict[str, bytes]:
    """Validate, sanitise and resize an uploaded image.

    The content is checked against real magic bytes, decoded, stripped of
    EXIF metadata, optionally watermarked, and re-encoded as WebP at each
    size defined in ``RESIZE_CONFIG``.

    Args:
        file_content: Raw bytes of the uploaded file.

    Returns:
        Mapping of size label ("thumbnail" / "medium" / "full") to WebP bytes.

    Raises:
        ValueError: If the content is not a supported image.  Propagated
            untouched so routes can map it to HTTP 400 — previously it was
            swallowed by the generic handler and surfaced as a 500.
        RuntimeError: For any unexpected processing failure (HTTP 500).
    """
    try:
        validate_magic_bytes(file_content)

        img = Image.open(BytesIO(file_content))
        img_format = img.format.upper()

        # Defence in depth: Pillow's decoder must agree with the magic bytes.
        if img_format not in ["JPEG", "PNG", "WEBP"]:
            raise ValueError(f"Unsupported image format: {img_format}")

        img = strip_exif(img)

        watermark = Image.open(WATERMARK_PATH) if WATERMARK_PATH else None

        results = {}
        for label, size in RESIZE_CONFIG.items():
            resized = resize_image(img.copy(), size)

            if watermark:
                resized = add_watermark(resized, watermark)

            # Re-encode as WebP: quality 85 with the slowest/best compression
            # method (6); re-encoding also discards any remaining metadata.
            buffer = BytesIO()
            resized.save(buffer, format="WEBP", quality=85, method=6)
            results[label] = buffer.getvalue()

        return results

    except ValueError:
        # Validation errors pass through so callers can return 400.
        raise
    except UnidentifiedImageError as err:
        logger.exception("Uploaded file is not a valid image.")
        raise ValueError("Invalid image content.") from err
    except Exception as err:
        logger.exception(f"Image processing error: {err}")
        raise RuntimeError("Image processing failed.") from err
10 changes: 10 additions & 0 deletions backend/app/services/schemas.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
from typing import Dict, Optional

from pydantic import BaseModel


class ImageUploadResponse(BaseModel):
    """Response body returned after a successful image upload."""

    # Whether the upload workflow completed successfully.
    success: bool
    # URLs keyed by size label: {"thumbnail": "url", "medium": "url", "full": "url"}
    urls: Dict[str, str]
    # Human-readable status message for the client.
    message: str
    # Identifier of an asynchronous post-processing job, when one exists.
    processing_id: Optional[str] = None
Loading
Loading