Skip to content

Commit 8b9a897

Browse files
style: apply ruff formatting to entire codebase
1 parent a85e43a commit 8b9a897

37 files changed

+502
-356
lines changed

s3proxy/app.py

Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -50,9 +50,7 @@ def load_credentials() -> dict[str, str]:
5050
return credentials_store
5151

5252

53-
def create_lifespan(
54-
settings: Settings, credentials_store: dict[str, str]
55-
) -> AsyncIterator[None]:
53+
def create_lifespan(settings: Settings, credentials_store: dict[str, str]) -> AsyncIterator[None]:
5654
"""Create lifespan context manager for FastAPI app.
5755
5856
Args:

s3proxy/client/s3.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -36,8 +36,6 @@ def _add_optional_kwargs(kwargs: dict[str, Any], **optional: Any) -> None:
3636
kwargs[key] = value
3737

3838

39-
40-
4139
class S3Client:
4240
"""Async S3 client wrapper with async context manager lifecycle.
4341

s3proxy/client/verifier.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def _parse_v4_credential(
5151
try:
5252
parts = credential.split("/")
5353
access_key, date_stamp, region, service = parts[0], parts[1], parts[2], parts[3]
54-
except (IndexError, ValueError):
54+
except (IndexError, ValueError):
5555
return None, "", "", "", "Invalid credential format"
5656

5757
secret_key = self.credentials_store.get(access_key)
@@ -188,8 +188,10 @@ def _verify_presigned_v4(
188188
host_header = request.headers.get("host", "")
189189
if "host" in signed_headers_list:
190190
alternate_host = (
191-
host_header[:-3] if host_header.endswith(":80")
192-
else host_header + ":80" if ":" not in host_header
191+
host_header[:-3]
192+
if host_header.endswith(":80")
193+
else host_header + ":80"
194+
if ":" not in host_header
193195
else None
194196
)
195197
if alternate_host:
@@ -208,7 +210,11 @@ def _verify_presigned_v4(
208210
modified_request, path, signed_headers_list, query_for_signing
209211
)
210212
calculated_sig_alt = self._compute_v4_signature(
211-
canonical_request_alt, amz_date, date_stamp, region, service,
213+
canonical_request_alt,
214+
amz_date,
215+
date_stamp,
216+
region,
217+
service,
212218
credentials.secret_key,
213219
)
214220
if hmac.compare_digest(calculated_sig_alt, signature):

s3proxy/concurrency.py

Lines changed: 8 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ def _create_malloc_release() -> Callable[[], int] | None:
3535
libc.malloc_trim.argtypes = [ctypes.c_size_t]
3636
libc.malloc_trim.restype = ctypes.c_int
3737
return lambda: libc.malloc_trim(0)
38-
except (OSError, AttributeError):
38+
except (OSError, AttributeError):
3939
return None
4040

4141

@@ -87,8 +87,12 @@ async def try_acquire(self, bytes_needed: int) -> int:
8787
active_mb = self._active_bytes / 1024 / 1024
8888
request_mb = to_reserve / 1024 / 1024
8989
limit_mb = self._limit_bytes / 1024 / 1024
90-
logger.warning("MEMORY_REJECTED", active_mb=round(active_mb, 2),
91-
requested_mb=round(request_mb, 2), limit_mb=round(limit_mb, 2))
90+
logger.warning(
91+
"MEMORY_REJECTED",
92+
active_mb=round(active_mb, 2),
93+
requested_mb=round(request_mb, 2),
94+
limit_mb=round(limit_mb, 2),
95+
)
9296
MEMORY_REJECTIONS.inc()
9397
raise S3Error.slow_down(
9498
f"Memory limit: {active_mb:.0f}MB + {request_mb:.0f}MB > {limit_mb:.0f}MB"
@@ -120,9 +124,7 @@ async def release(self, bytes_reserved: int) -> None:
120124

121125

122126
# Default instance used by module-level functions
123-
_default = ConcurrencyLimiter(
124-
limit_mb=int(os.environ.get("S3PROXY_MEMORY_LIMIT_MB", "128"))
125-
)
127+
_default = ConcurrencyLimiter(limit_mb=int(os.environ.get("S3PROXY_MEMORY_LIMIT_MB", "128")))
126128

127129

128130
def estimate_memory_footprint(method: str, content_length: int) -> int:

s3proxy/errors.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -161,6 +161,7 @@ def not_implemented(cls, message: str = "Not Implemented") -> S3Error:
161161
def slow_down(cls, message: str = "Please reduce your request rate.") -> S3Error:
162162
return cls(503, "SlowDown", message)
163163

164+
164165
def get_s3_error_code(status_code: int, detail: str | None = None) -> str:
165166
"""Get S3 error code from HTTP status code and message.
166167

s3proxy/handlers/base.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -159,9 +159,7 @@ def _extract_conditional_headers(
159159
request.headers.get("if-unmodified-since"),
160160
)
161161

162-
async def _safe_abort(
163-
self, client: S3Client, bucket: str, key: str, upload_id: str
164-
) -> None:
162+
async def _safe_abort(self, client: S3Client, bucket: str, key: str, upload_id: str) -> None:
165163
try:
166164
await client.abort_multipart_upload(bucket, key, upload_id)
167165
logger.info(
@@ -170,7 +168,9 @@ async def _safe_abort(
170168
except Exception as e:
171169
logger.warning(
172170
"MULTIPART_ABORT_FAILED",
173-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
171+
bucket=bucket,
172+
key=key,
173+
upload_id=upload_id[:20] + "...",
174174
error=str(e),
175175
)
176176

s3proxy/handlers/buckets.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,6 @@ def _strip_minio_cache_suffix(value: str | None) -> str | None:
3939

4040

4141
class BucketHandlerMixin(BaseHandler):
42-
4342
async def handle_list_buckets(self, request: Request, creds: S3Credentials) -> Response:
4443
async with self._client(creds) as client:
4544
try:
@@ -358,6 +357,7 @@ async def handle_delete_objects(self, request: Request, creds: S3Credentials) ->
358357

359358
# Clean up multipart metadata for all deleted objects in parallel
360359
if deleted_items:
360+
361361
async def safe_delete_metadata(key: str) -> None:
362362
with contextlib.suppress(Exception):
363363
await delete_multipart_metadata(client, bucket, key)

s3proxy/handlers/multipart/copy.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -22,7 +22,6 @@
2222

2323

2424
class CopyPartMixin(BaseHandler):
25-
2625
async def handle_upload_part_copy(self, request: Request, creds: S3Credentials) -> Response:
2726
bucket, key = self._parse_path(request.url.path)
2827
async with self._client(creds) as client:
@@ -52,10 +51,11 @@ async def handle_upload_part_copy(self, request: Request, creds: S3Credentials)
5251

5352
body_md5 = hashlib.md5(plaintext, usedforsecurity=False).hexdigest()
5453
await self.multipart_manager.add_part(
55-
bucket, key, upload_id,
54+
bucket,
55+
key,
56+
upload_id,
5657
PartMetadata(
57-
part_num, len(plaintext), len(ciphertext),
58-
resp["ETag"].strip('"'), body_md5
58+
part_num, len(plaintext), len(ciphertext), resp["ETag"].strip('"'), body_md5
5959
),
6060
)
6161

s3proxy/handlers/multipart/lifecycle.py

Lines changed: 73 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -34,15 +34,17 @@
3434

3535

3636
class LifecycleMixin(BaseHandler):
37-
3837
async def _recover_upload_state(
3938
self, client: S3Client, bucket: str, key: str, upload_id: str, context: str = ""
4039
) -> MultipartUploadState:
4140
from s3proxy.state import reconstruct_upload_state_from_s3
4241

4342
logger.warning(
4443
"RECOVER_STATE_FROM_S3",
45-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...", context=context,
44+
bucket=bucket,
45+
key=key,
46+
upload_id=upload_id[:20] + "...",
47+
context=context,
4648
)
4749

4850
state = await reconstruct_upload_state_from_s3(
@@ -54,7 +56,9 @@ async def _recover_upload_state(
5456
await self.multipart_manager.store_reconstructed_state(bucket, key, upload_id, state)
5557
logger.info(
5658
"RECOVER_STATE_SUCCESS",
57-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
59+
bucket=bucket,
60+
key=key,
61+
upload_id=upload_id[:20] + "...",
5862
parts_recovered=len(state.parts),
5963
)
6064
return state
@@ -83,7 +87,8 @@ async def handle_create_multipart_upload(
8387
upload_metadata[hdr[11:]] = val
8488

8589
resp = await client.create_multipart_upload(
86-
bucket, key,
90+
bucket,
91+
key,
8792
content_type=content_type,
8893
metadata=upload_metadata,
8994
tagging=tagging,
@@ -104,19 +109,25 @@ async def handle_create_multipart_upload(
104109
if attempt == 0:
105110
logger.warning(
106111
"PERSIST_STATE_RETRY",
107-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
112+
bucket=bucket,
113+
key=key,
114+
upload_id=upload_id[:20] + "...",
108115
error=str(e),
109116
)
110117
else:
111118
logger.error(
112119
"PERSIST_STATE_FAILED",
113-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
120+
bucket=bucket,
121+
key=key,
122+
upload_id=upload_id[:20] + "...",
114123
error=str(e),
115124
)
116125

117126
logger.info(
118127
"CREATE_MULTIPART_COMPLETE",
119-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
128+
bucket=bucket,
129+
key=key,
130+
upload_id=upload_id[:20] + "...",
120131
)
121132

122133
return Response(
@@ -146,7 +157,9 @@ async def handle_complete_multipart_upload(
146157

147158
logger.info(
148159
"COMPLETE_MULTIPART",
149-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
160+
bucket=bucket,
161+
key=key,
162+
upload_id=upload_id[:20] + "...",
150163
client_parts=len(completed_parts),
151164
s3_parts=len(s3_parts),
152165
total_mb=f"{total_plaintext / 1024 / 1024:.2f}MB",
@@ -166,7 +179,9 @@ async def handle_complete_multipart_upload(
166179
# lose the DEK, making the object permanently undecryptable.
167180
wrapped_dek = crypto.wrap_key(state.dek, self.settings.kek)
168181
await save_multipart_metadata(
169-
client, bucket, key,
182+
client,
183+
bucket,
184+
key,
170185
MultipartMetadata(
171186
version=1,
172187
part_count=len(completed_parts),
@@ -179,7 +194,9 @@ async def handle_complete_multipart_upload(
179194

180195
logger.info(
181196
"COMPLETE_MULTIPART_SUCCESS",
182-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
197+
bucket=bucket,
198+
key=key,
199+
upload_id=upload_id[:20] + "...",
183200
total_parts=len(completed_parts),
184201
total_mb=f"{total_plaintext / 1024 / 1024:.2f}MB",
185202
)
@@ -214,7 +231,9 @@ def internal_to_client_part(internal_part_number: int) -> int:
214231
# Upload exists but DEK is missing - internal state corruption
215232
logger.error(
216233
"RECOVER_DEK_MISSING",
217-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
234+
bucket=bucket,
235+
key=key,
236+
upload_id=upload_id[:20] + "...",
218237
message="Upload exists in S3 but DEK state is missing",
219238
)
220239
except Exception:
@@ -236,7 +255,9 @@ def internal_to_client_part(internal_part_number: int) -> int:
236255

237256
logger.debug(
238257
"RECOVER_STATE_GROUPING",
239-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
258+
bucket=bucket,
259+
key=key,
260+
upload_id=upload_id[:20] + "...",
240261
s3_parts=len(parts_resp.get("Parts", [])),
241262
client_parts=sorted(client_parts.keys()),
242263
)
@@ -255,22 +276,29 @@ def internal_to_client_part(internal_part_number: int) -> int:
255276
plaintext_size = crypto.plaintext_size(ciphertext_size)
256277
etag = s3_part.get("ETag", "").strip('"')
257278

258-
internal_parts_meta.append(InternalPartMetadata(
259-
internal_part_number=internal_num,
260-
plaintext_size=plaintext_size,
261-
ciphertext_size=ciphertext_size,
262-
etag=etag,
263-
))
279+
internal_parts_meta.append(
280+
InternalPartMetadata(
281+
internal_part_number=internal_num,
282+
plaintext_size=plaintext_size,
283+
ciphertext_size=ciphertext_size,
284+
etag=etag,
285+
)
286+
)
264287
part_plaintext_size += plaintext_size
265288
part_ciphertext_size += ciphertext_size
266289

267290
first_etag = internal_s3_parts[0].get("ETag", "").strip('"')
268291

269292
await self.multipart_manager.add_part(
270-
bucket, key, upload_id,
293+
bucket,
294+
key,
295+
upload_id,
271296
PartMetadata(
272-
client_part_num, part_plaintext_size, part_ciphertext_size,
273-
first_etag, "",
297+
client_part_num,
298+
part_plaintext_size,
299+
part_ciphertext_size,
300+
first_etag,
301+
"",
274302
internal_parts=internal_parts_meta,
275303
),
276304
)
@@ -279,7 +307,9 @@ def internal_to_client_part(internal_part_number: int) -> int:
279307
except Exception as e:
280308
logger.error(
281309
"RECOVER_STATE_FOR_COMPLETE_FAILED",
282-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
310+
bucket=bucket,
311+
key=key,
312+
upload_id=upload_id[:20] + "...",
283313
error=str(e),
284314
)
285315
return state
@@ -320,22 +350,24 @@ def _build_s3_parts(
320350
)
321351
for ip in sorted_internal:
322352
etag = f'"{ip.etag}"' if not ip.etag.startswith('"') else ip.etag
323-
s3_parts.append({
324-
"PartNumber": ip.internal_part_number,
325-
"ETag": etag,
326-
})
353+
s3_parts.append(
354+
{
355+
"PartNumber": ip.internal_part_number,
356+
"ETag": etag,
357+
}
358+
)
327359
else:
328-
s3_parts.append({
329-
"PartNumber": client_part_num,
330-
"ETag": cp["ETag"],
331-
})
360+
s3_parts.append(
361+
{
362+
"PartNumber": client_part_num,
363+
"ETag": cp["ETag"],
364+
}
365+
)
332366
else:
333367
missing_parts.append(client_part_num)
334368

335369
if missing_parts:
336-
raise S3Error.invalid_part(
337-
f"Parts {missing_parts} were never uploaded"
338-
)
370+
raise S3Error.invalid_part(f"Parts {missing_parts} were never uploaded")
339371
if not s3_parts:
340372
raise S3Error.invalid_part("No valid parts found")
341373

@@ -357,8 +389,11 @@ async def _handle_complete_error(
357389
if error_code == "EntityTooSmall":
358390
logger.warning(
359391
"ENTITY_TOO_SMALL",
360-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
361-
parts=len(s3_parts), total_plaintext=total_plaintext,
392+
bucket=bucket,
393+
key=key,
394+
upload_id=upload_id[:20] + "...",
395+
parts=len(s3_parts),
396+
total_plaintext=total_plaintext,
362397
)
363398
with contextlib.suppress(Exception):
364399
await client.abort_multipart_upload(bucket, key, upload_id)
@@ -380,7 +415,9 @@ async def handle_abort_multipart_upload(
380415

381416
logger.info(
382417
"ABORT_MULTIPART",
383-
bucket=bucket, key=key, upload_id=upload_id[:20] + "...",
418+
bucket=bucket,
419+
key=key,
420+
upload_id=upload_id[:20] + "...",
384421
)
385422

386423
await asyncio.gather(

0 commit comments

Comments
 (0)