From 0645bc14e946f3b2808b4adbed29066b5269a458 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Mon, 19 Jan 2026 17:21:32 +0000
Subject: [PATCH 1/2] ci(pre-commit): autoupdate
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/psf/black-pre-commit-mirror: 25.9.0 → 26.1.0](https://github.com/psf/black-pre-commit-mirror/compare/25.9.0...26.1.0)
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0768dff6f..e1f3b5e2e 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -19,7 +19,7 @@ repos:
         args: [--markdown-linebreak-ext=md]
 
   - repo: https://github.com/psf/black-pre-commit-mirror
-    rev: 25.9.0
+    rev: 26.1.0
     hooks:
       - id: black
         language_version: python3.11
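The bump above only changes which black release the pre-commit hook pins. A quick way to see what the new release does to a snippet is black's Python API; the sketch below assumes black 26.1.0 is installed locally, and SRC is a made-up example of the redundant-parentheses pattern the autofix patch below removes:

    # Sketch: check how the pinned black release formats a snippet.
    # Assumes `pip install black==26.1.0`; SRC is a hypothetical input.
    import black

    SRC = "(a, b) = fn()\n"  # redundant parens around tuple unpacking

    mode = black.Mode(target_versions={black.TargetVersion.PY311})
    print(black.format_str(SRC, mode=mode), end="")  # expect: a, b = fn()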
""" results = dynamic_point_to_voxel_forward(feats, coors, reduce_type) - (voxel_feats, voxel_coors, point2voxel_map, voxel_points_count) = results + voxel_feats, voxel_coors, point2voxel_map, voxel_points_count = results ctx.reduce_type = reduce_type ctx.save_for_backward(feats, voxel_feats, point2voxel_map, voxel_points_count) ctx.mark_non_differentiable(voxel_coors) @@ -32,7 +32,7 @@ def forward(ctx, feats, coors, reduce_type="max"): @staticmethod def backward(ctx, grad_voxel_feats, grad_voxel_coors=None): - (feats, voxel_feats, point2voxel_map, voxel_points_count) = ctx.saved_tensors + feats, voxel_feats, point2voxel_map, voxel_points_count = ctx.saved_tensors grad_feats = torch.zeros_like(feats) # TODO: whether to use index put or use cuda_backward # To use index put, need point to voxel index diff --git a/projects/PTv3/engines/defaults.py b/projects/PTv3/engines/defaults.py index b1af27ecc..0cdcb2e38 100644 --- a/projects/PTv3/engines/defaults.py +++ b/projects/PTv3/engines/defaults.py @@ -60,8 +60,7 @@ def worker_init_fn(worker_id, num_workers, rank, seed): def default_argument_parser(epilog=None): parser = argparse.ArgumentParser( - epilog=epilog - or f""" + epilog=epilog or f""" Examples: Run on single machine: $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml diff --git a/projects/PTv3/models/utils/serialization/hilbert.py b/projects/PTv3/models/utils/serialization/hilbert.py index f8d62e050..b697c325f 100644 --- a/projects/PTv3/models/utils/serialization/hilbert.py +++ b/projects/PTv3/models/utils/serialization/hilbert.py @@ -118,23 +118,17 @@ def encode(locs, num_dims, num_bits): bitpack_mask_rev = bitpack_mask.flip(-1) if orig_shape[-1] != num_dims: - raise ValueError( - """ + raise ValueError(""" The shape of locs was surprising in that the last dimension was of size %d, but num_dims=%d. These need to be equal. - """ - % (orig_shape[-1], num_dims) - ) + """ % (orig_shape[-1], num_dims)) if num_dims * num_bits > 63: - raise ValueError( - """ + raise ValueError(""" num_dims=%d and num_bits=%d for %d bits total, which can't be encoded into a int64. Are you sure you need that many points on your Hilbert curve? - """ - % (num_dims, num_bits, num_dims * num_bits) - ) + """ % (num_dims, num_bits, num_dims * num_bits)) # Treat the location integers as 64-bit unsigned and then split them up into # a sequence of uint8s. Preserve the association by dimension. @@ -206,14 +200,11 @@ def decode(hilberts, num_dims, num_bits): """ if num_dims * num_bits > 64: - raise ValueError( - """ + raise ValueError(""" num_dims=%d and num_bits=%d for %d bits total, which can't be encoded into a uint64. Are you sure you need that many points on your Hilbert curve? - """ - % (num_dims, num_bits) - ) + """ % (num_dims, num_bits)) # Handle the case where we got handed a naked integer. 
diff --git a/projects/PTv3/engines/defaults.py b/projects/PTv3/engines/defaults.py
index b1af27ecc..0cdcb2e38 100644
--- a/projects/PTv3/engines/defaults.py
+++ b/projects/PTv3/engines/defaults.py
@@ -60,8 +60,7 @@ def worker_init_fn(worker_id, num_workers, rank, seed):
 
 def default_argument_parser(epilog=None):
     parser = argparse.ArgumentParser(
-        epilog=epilog
-        or f"""
+        epilog=epilog or f"""
 Examples:
 Run on single machine:
 $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
diff --git a/projects/PTv3/models/utils/serialization/hilbert.py b/projects/PTv3/models/utils/serialization/hilbert.py
index f8d62e050..b697c325f 100644
--- a/projects/PTv3/models/utils/serialization/hilbert.py
+++ b/projects/PTv3/models/utils/serialization/hilbert.py
@@ -118,23 +118,17 @@ def encode(locs, num_dims, num_bits):
     bitpack_mask_rev = bitpack_mask.flip(-1)
 
     if orig_shape[-1] != num_dims:
-        raise ValueError(
-            """
+        raise ValueError("""
 The shape of locs was surprising in that the last dimension was of size
 %d, but num_dims=%d. These need to be equal.
-            """
-            % (orig_shape[-1], num_dims)
-        )
+            """ % (orig_shape[-1], num_dims))
 
     if num_dims * num_bits > 63:
-        raise ValueError(
-            """
+        raise ValueError("""
 num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
 into a int64. Are you sure you need that many points on your Hilbert
 curve?
-            """
-            % (num_dims, num_bits, num_dims * num_bits)
-        )
+            """ % (num_dims, num_bits, num_dims * num_bits))
 
     # Treat the location integers as 64-bit unsigned and then split them up into
     # a sequence of uint8s. Preserve the association by dimension.
@@ -206,14 +200,11 @@ def decode(hilberts, num_dims, num_bits):
     """
 
     if num_dims * num_bits > 64:
-        raise ValueError(
-            """
+        raise ValueError("""
 num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
 into a uint64. Are you sure you need that many points on your Hilbert
 curve?
-            """
-            % (num_dims, num_bits)
-        )
+            """ % (num_dims, num_bits, num_dims * num_bits))
 
     # Handle the case where we got handed a naked integer.
     hilberts = torch.atleast_1d(hilberts)
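The hilbert.py guards that black reflows above encode a simple budget: a Hilbert index packs num_dims * num_bits bits into a single signed 64-bit integer, so at most 63 bits are usable. A standalone sketch of the same check, with hypothetical inputs:

    # Sketch of the bit-budget guard reformatted in hilbert.py above.
    def check_budget(num_dims: int, num_bits: int) -> None:
        total = num_dims * num_bits
        if total > 63:
            raise ValueError(
                f"num_dims={num_dims} and num_bits={num_bits} give {total} "
                "bits, which cannot be packed into an int64."
            )

    check_budget(3, 16)    # 48 bits: fine
    # check_budget(3, 32)  # 96 bits: would raise ValueError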
diff --git a/projects/StreamPETR/stream_petr/models/dense_heads/petr_head_dn.py b/projects/StreamPETR/stream_petr/models/dense_heads/petr_head_dn.py
index 03a0e22a5..96ba294b7 100644
--- a/projects/StreamPETR/stream_petr/models/dense_heads/petr_head_dn.py
+++ b/projects/StreamPETR/stream_petr/models/dense_heads/petr_head_dn.py
@@ -615,7 +615,7 @@ def get_targets(
         num_imgs = len(cls_scores_list)
         gt_bboxes_ignore_list = [gt_bboxes_ignore_list for _ in range(num_imgs)]
 
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = (
+        labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list = (
             multi_apply(
                 self._get_target_single,
                 cls_scores_list,
@@ -654,7 +654,7 @@ def loss_single(self, cls_scores, bbox_preds, gt_bboxes_list, gt_labels_list, gt
         cls_reg_targets = self.get_targets(
             cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list
         )
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = (
+        labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg = (
             cls_reg_targets
         )
         labels = torch.cat(labels_list, 0)
diff --git a/projects/StreamPETR/stream_petr/models/dense_heads/sparse_head.py b/projects/StreamPETR/stream_petr/models/dense_heads/sparse_head.py
index 00ae1cfbf..b7b5d0d26 100644
--- a/projects/StreamPETR/stream_petr/models/dense_heads/sparse_head.py
+++ b/projects/StreamPETR/stream_petr/models/dense_heads/sparse_head.py
@@ -676,7 +676,7 @@ def get_targets(
         num_imgs = len(cls_scores_list)
         gt_bboxes_ignore_list = [gt_bboxes_ignore_list for _ in range(num_imgs)]
 
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = (
+        labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list = (
             multi_apply(
                 self._get_target_single,
                 cls_scores_list,
@@ -715,7 +715,7 @@ def loss_single(self, cls_scores, bbox_preds, gt_bboxes_list, gt_labels_list, gt
         cls_reg_targets = self.get_targets(
             cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list
         )
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = (
+        labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg = (
             cls_reg_targets
         )
         labels = torch.cat(labels_list, 0)
diff --git a/projects/StreamPETR/stream_petr/models/dense_heads/streampetr_head.py b/projects/StreamPETR/stream_petr/models/dense_heads/streampetr_head.py
index ec600fe70..9c5d591fc 100644
--- a/projects/StreamPETR/stream_petr/models/dense_heads/streampetr_head.py
+++ b/projects/StreamPETR/stream_petr/models/dense_heads/streampetr_head.py
@@ -827,7 +827,7 @@ def get_targets(
         num_imgs = len(cls_scores_list)
         gt_bboxes_ignore_list = [gt_bboxes_ignore_list for _ in range(num_imgs)]
 
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list) = (
+        labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, pos_inds_list, neg_inds_list = (
             multi_apply(
                 self._get_target_single,
                 cls_scores_list,
@@ -866,7 +866,7 @@ def loss_single(self, cls_scores, bbox_preds, gt_bboxes_list, gt_labels_list, gt
         cls_reg_targets = self.get_targets(
             cls_scores_list, bbox_preds_list, gt_bboxes_list, gt_labels_list, gt_bboxes_ignore_list
         )
-        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg) = (
+        labels_list, label_weights_list, bbox_targets_list, bbox_weights_list, num_total_pos, num_total_neg = (
             cls_reg_targets
         )
         labels = torch.cat(labels_list, 0)
diff --git a/projects/StreamPETR/stream_petr/models/dense_heads/yolox_head.py b/projects/StreamPETR/stream_petr/models/dense_heads/yolox_head.py
index 8feb3d1ac..8532c4fc8 100644
--- a/projects/StreamPETR/stream_petr/models/dense_heads/yolox_head.py
+++ b/projects/StreamPETR/stream_petr/models/dense_heads/yolox_head.py
@@ -310,7 +310,7 @@ def loss(
         gt_labels = [labels2d for i in gt_labels2d_list for labels2d in i]
         centers2d = [center2d for i in centers2d for center2d in i]
 
-        (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, centers2d_target, num_fg_imgs) = multi_apply(
+        pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets, centers2d_target, num_fg_imgs = multi_apply(
            self._get_target_single,
            flatten_cls_preds.detach(),
            flatten_objectness.detach(),
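Every dense-head hunk above unpacks the tuple returned by multi_apply, which maps a per-image function over parallel lists and transposes the per-image result tuples into per-field lists. A minimal re-implementation in the mmdet style (a sketch for reference, not necessarily the project's exact code):

    from functools import partial

    def multi_apply(func, *args, **kwargs):
        # Apply func element-wise over the zipped argument lists, then
        # transpose [(a1, b1), (a2, b2), ...] into ([a1, a2], [b1, b2]).
        pfunc = partial(func, **kwargs) if kwargs else func
        map_results = map(pfunc, *args)
        return tuple(map(list, zip(*map_results)))

    def per_image(score, box):  # hypothetical per-image target function
        return score * 2, box + 1

    scores, boxes = multi_apply(per_image, [1, 2], [10, 20])
    print(scores, boxes)  # [2, 4] [11, 21]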
diff --git a/tools/detection3d/dataset_converters/kitti_converter.py b/tools/detection3d/dataset_converters/kitti_converter.py
index 2e124f99a..838860984 100644
--- a/tools/detection3d/dataset_converters/kitti_converter.py
+++ b/tools/detection3d/dataset_converters/kitti_converter.py
@@ -391,7 +391,7 @@ def export_2d_annotation(root_path, info_path, mono3d=True):
 
     for info in mmengine.track_iter_progress(kitti_infos):
         coco_infos = get_2d_boxes(info, occluded=[0, 1, 2, 3], mono3d=mono3d)
-        (height, width, _) = mmcv.imread(osp.join(root_path, info["image"]["image_path"])).shape
+        height, width, _ = mmcv.imread(osp.join(root_path, info["image"]["image_path"])).shape
         coco_2d_dict["images"].append(
             dict(
                 file_name=info["image"]["image_path"],
diff --git a/tools/detection3d/dataset_converters/lyft_converter.py b/tools/detection3d/dataset_converters/lyft_converter.py
index 168ccf2ce..5bc608a88 100644
--- a/tools/detection3d/dataset_converters/lyft_converter.py
+++ b/tools/detection3d/dataset_converters/lyft_converter.py
@@ -62,10 +62,8 @@ def create_lyft_infos(root_path, info_prefix, version="v1.01-train", max_sweeps=
     if test:
         print(f"test scene: {len(train_scenes)}")
     else:
-        print(
-            f"train scene: {len(train_scenes)}, \
-            val scene: {len(val_scenes)}"
-        )
+        print(f"train scene: {len(train_scenes)}, \
+            val scene: {len(val_scenes)}")
     train_lyft_infos, val_lyft_infos = _fill_trainval_infos(
         lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps
     )
@@ -78,10 +76,8 @@ def create_lyft_infos(root_path, info_prefix, version="v1.01-train", max_sweeps=
         info_path = osp.join(root_path, f"{info_name}.pkl")
         mmengine.dump(data, info_path)
     else:
-        print(
-            f"train sample: {len(train_lyft_infos)}, \
-            val sample: {len(val_lyft_infos)}"
-        )
+        print(f"train sample: {len(train_lyft_infos)}, \
+            val sample: {len(val_lyft_infos)}")
         data = dict(infos=train_lyft_infos, metadata=metadata)
         train_info_name = f"{info_prefix}_infos_train"
         info_path = osp.join(root_path, f"{train_info_name}.pkl")
@@ -234,7 +230,7 @@ def export_2d_annotation(root_path, info_path, version):
         for cam in camera_types:
             cam_info = info["cams"][cam]
             coco_infos = get_2d_boxes(lyft, cam_info["sample_data_token"], visibilities=["", "1", "2", "3", "4"])
-            (height, width, _) = mmcv.imread(cam_info["data_path"]).shape
+            height, width, _ = mmcv.imread(cam_info["data_path"]).shape
             coco_2d_dict["images"].append(
                 dict(file_name=cam_info["data_path"], id=cam_info["sample_data_token"], width=width, height=height)
             )
diff --git a/tools/detection3d/dataset_converters/nuscenes_converter.py b/tools/detection3d/dataset_converters/nuscenes_converter.py
index 9056115aa..6688c735d 100644
--- a/tools/detection3d/dataset_converters/nuscenes_converter.py
+++ b/tools/detection3d/dataset_converters/nuscenes_converter.py
@@ -346,7 +346,7 @@ def export_2d_annotation(root_path, info_path, version, mono3d=True):
         coco_infos = get_2d_boxes(
             nusc, cam_info["sample_data_token"], visibilities=["", "1", "2", "3", "4"], mono3d=mono3d
         )
-        (height, width, _) = mmcv.imread(cam_info["data_path"]).shape
+        height, width, _ = mmcv.imread(cam_info["data_path"]).shape
         coco_2d_dict["images"].append(
             dict(
                 file_name=cam_info["data_path"].split("data/nuscenes/")[-1],
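All three converter hunks above reduce to the same idiom: mmcv.imread returns an (H, W, C) numpy array, so tuple assignment on .shape reads off the image dimensions directly. A sketch with a placeholder path, assuming mmcv is installed:

    # Sketch of the shape-unpacking idiom in the converter hunks above.
    import mmcv

    height, width, _ = mmcv.imread("example.jpg").shape  # placeholder path
    print(height, width)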