From a5e1e13a19d8076ea22f197b692ada5188b8b32b Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Wed, 24 Jan 2024 17:54:26 +0100 Subject: [PATCH 01/14] WIP upgrade to pymotmetrics 1.4.0 --- python-sdk/nuscenes/eval/tracking/mot.py | 13 ++++++++++++- setup/requirements/requirements_tracking.txt | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/python-sdk/nuscenes/eval/tracking/mot.py b/python-sdk/nuscenes/eval/tracking/mot.py index aa18421b0..f8015f377 100644 --- a/python-sdk/nuscenes/eval/tracking/mot.py +++ b/python-sdk/nuscenes/eval/tracking/mot.py @@ -6,6 +6,16 @@ py-motmetrics at: https://github.com/cheind/py-motmetrics + +Notes by Michael Hoss: +This code is mainly copy-pasted from the original motmetrics repo, likely from version 1.1.3. +TODO: upgrade this code to version 1.4.0, or even better, see if I can just use the origial code. +-> looks like I can pretty much use the original code, as the changes here were mostly done for speed. +But: the motmetrics 1.4.0 code seems to use floats as object ids, but nuscenes-devkit has strings. +-> TODO overwrite the motmetrics code to use strings (or `object`) as object ids again, but otherwise, +use the newer code. + +It looks """ from collections import OrderedDict from itertools import count @@ -15,7 +25,7 @@ import pandas as pd -class MOTAccumulatorCustom(motmetrics.mot.MOTAccumulator): +class MOTAccumulatorCustom(motmetrics.MOTAccumulator): def __init__(self): super().__init__() @@ -57,6 +67,7 @@ def new_event_dataframe(): @property def events(self): + """This is needed to call the custom new_event_dataframe_with_data with the speedup.""" if self.dirty_events: self.cached_events_df = MOTAccumulatorCustom.new_event_dataframe_with_data(self._indices, self._events) self.dirty_events = False diff --git a/setup/requirements/requirements_tracking.txt b/setup/requirements/requirements_tracking.txt index abcc4d75d..50ff8142c 100644 --- a/setup/requirements/requirements_tracking.txt +++ b/setup/requirements/requirements_tracking.txt @@ -1,2 +1,2 @@ -motmetrics<=1.1.3 +motmetrics==1.4.0 pandas>=0.24 From 939d36ef54f4199588c87dc194869fe4ea515d2a Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Fri, 26 Jan 2024 15:22:25 +0100 Subject: [PATCH 02/14] replace MOTAccumulatorCustom by 1.4.0 implementation --- python-sdk/nuscenes/eval/tracking/mot.py | 82 ++++++++++++++---------- 1 file changed, 49 insertions(+), 33 deletions(-) diff --git a/python-sdk/nuscenes/eval/tracking/mot.py b/python-sdk/nuscenes/eval/tracking/mot.py index f8015f377..639919831 100644 --- a/python-sdk/nuscenes/eval/tracking/mot.py +++ b/python-sdk/nuscenes/eval/tracking/mot.py @@ -8,58 +8,72 @@ https://github.com/cheind/py-motmetrics Notes by Michael Hoss: -This code is mainly copy-pasted from the original motmetrics repo, likely from version 1.1.3. -TODO: upgrade this code to version 1.4.0, or even better, see if I can just use the origial code. --> looks like I can pretty much use the original code, as the changes here were mostly done for speed. -But: the motmetrics 1.4.0 code seems to use floats as object ids, but nuscenes-devkit has strings. --> TODO overwrite the motmetrics code to use strings (or `object`) as object ids again, but otherwise, -use the newer code. - -It looks +For Python 3.10, we need to update the version of py-motmetrics to 1.4.0. +Then, to keep this code working, we need to change back the types of OId HId to object because they are +strings in nuscenes-devkit, whereas motmetrics changed these types to float from 1.1.3 to 1.4.0. 
""" from collections import OrderedDict from itertools import count -import motmetrics import numpy as np import pandas as pd +from motmetrics import MOTAccumulator +_INDEX_FIELDS = ['FrameId', 'Event'] -class MOTAccumulatorCustom(motmetrics.MOTAccumulator): +class MOTAccumulatorCustom(MOTAccumulator): + """This custom class was created by nuscenes-devkit to use a faster implementation of + `new_event_dataframe_with_data` under compatibility with motmetrics<=1.1.3. + Now that we use motmetrics==1.4.0, we need to use this custom implementation to use + objects instead of strings for OId and HId. + """ def __init__(self): super().__init__() @staticmethod def new_event_dataframe_with_data(indices, events): - """ - Create a new DataFrame filled with data. - This version overwrites the original in MOTAccumulator achieves about 2x speedups. + """Create a new DataFrame filled with data. Params ------ - indices: list - list of tuples (frameid, eventid) - events: list - list of events where each event is a list containing - 'Type', 'OId', HId', 'D' + indices: dict + dict of lists with fields 'FrameId' and 'Event' + events: dict + dict of lists with fields 'Type', 'OId', 'HId', 'D' """ - idx = pd.MultiIndex.from_tuples(indices, names=['FrameId', 'Event']) - df = pd.DataFrame(events, index=idx, columns=['Type', 'OId', 'HId', 'D']) + + if len(events) == 0: + return MOTAccumulatorCustom.new_event_dataframe() + + raw_type = pd.Categorical( + events['Type'], + categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH', 'TRANSFER', 'ASCEND', 'MIGRATE'], + ordered=False) + series = [ + pd.Series(raw_type, name='Type'), + pd.Series(events['OId'], dtype=object, name='OId'), # OId is string in nuscenes-devkit + pd.Series(events['HId'], dtype=object, name='HId'), # HId is string in nuscenes-devkit + pd.Series(events['D'], dtype=float, name='D') + ] + + idx = pd.MultiIndex.from_arrays( + [indices[field] for field in _INDEX_FIELDS], + names=_INDEX_FIELDS) + df = pd.concat(series, axis=1) + df.index = idx return df @staticmethod def new_event_dataframe(): - """ Create a new DataFrame for event tracking. """ + """Create a new DataFrame for event tracking.""" idx = pd.MultiIndex(levels=[[], []], codes=[[], []], names=['FrameId', 'Event']) - cats = pd.Categorical([], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH']) + cats = pd.Categorical([], categories=['RAW', 'FP', 'MISS', 'SWITCH', 'MATCH', 'TRANSFER', 'ASCEND', 'MIGRATE']) df = pd.DataFrame( OrderedDict([ - ('Type', pd.Series(cats)), # Type of event. One of FP (false positive), MISS, SWITCH, MATCH - ('OId', pd.Series(dtype=object)), - # Object ID or -1 if FP. Using float as missing values will be converted to NaN anyways. - ('HId', pd.Series(dtype=object)), - # Hypothesis ID or NaN if MISS. Using float as missing values will be converted to NaN anyways. - ('D', pd.Series(dtype=float)), # Distance or NaN when FP or MISS + ('Type', pd.Series(cats)), # Type of event. One of FP (false positive), MISS, SWITCH, MATCH + ('OId', pd.Series(dtype=object)), # Object ID or -1 if FP. Using float as missing values will be converted to NaN anyways. + ('HId', pd.Series(dtype=object)), # Hypothesis ID or NaN if MISS. Using float as missing values will be converted to NaN anyways. 
+ ('D', pd.Series(dtype=float)), # Distance or NaN when FP or MISS ]), index=idx ) @@ -67,15 +81,13 @@ def new_event_dataframe(): @property def events(self): - """This is needed to call the custom new_event_dataframe_with_data with the speedup.""" if self.dirty_events: self.cached_events_df = MOTAccumulatorCustom.new_event_dataframe_with_data(self._indices, self._events) self.dirty_events = False return self.cached_events_df @staticmethod - def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, update_hids=True, - return_mappings=False): + def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, update_hids=True, return_mappings=False): """Merge dataframes. Params @@ -115,8 +127,8 @@ def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, upd # Update index if update_frame_indices: - next_frame_id = max(r.index.get_level_values(0).max() + 1, - r.index.get_level_values(0).unique().shape[0]) + # pylint: disable=cell-var-from-loop + next_frame_id = max(r.index.get_level_values(0).max() + 1, r.index.get_level_values(0).unique().shape[0]) if np.isnan(next_frame_id): next_frame_id = 0 copy.index = copy.index.map(lambda x: (x[0] + next_frame_id, x[1])) @@ -124,15 +136,19 @@ def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, upd # Update object / hypothesis ids if update_oids: + # pylint: disable=cell-var-from-loop oid_map = dict([oid, str(next(new_oid))] for oid in copy['OId'].dropna().unique()) copy['OId'] = copy['OId'].map(lambda x: oid_map[x], na_action='ignore') infos['oid_map'] = oid_map if update_hids: + # pylint: disable=cell-var-from-loop hid_map = dict([hid, str(next(new_hid))] for hid in copy['HId'].dropna().unique()) copy['HId'] = copy['HId'].map(lambda x: hid_map[x], na_action='ignore') infos['hid_map'] = hid_map + # Avoid pandas warning. But is this legit/do we need such a column later on again? + # copy = copy.dropna(axis=1, how='all') r = pd.concat((r, copy)) mapping_infos.append(infos) From d83fd248846cb3aeefc57706f057167558fc986f Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Fri, 26 Jan 2024 15:25:06 +0100 Subject: [PATCH 03/14] add pred_frequencies, as this is now required --- .../nuscenes/eval/tracking/constants.py | 1 + python-sdk/nuscenes/eval/tracking/evaluate.py | 14 ++++++++----- python-sdk/nuscenes/eval/tracking/utils.py | 20 +++++++++++++------ 3 files changed, 24 insertions(+), 11 deletions(-) diff --git a/python-sdk/nuscenes/eval/tracking/constants.py b/python-sdk/nuscenes/eval/tracking/constants.py index c90fc63d1..c3c75bb32 100644 --- a/python-sdk/nuscenes/eval/tracking/constants.py +++ b/python-sdk/nuscenes/eval/tracking/constants.py @@ -17,6 +17,7 @@ MOT_METRIC_MAP = { # Mapping from motmetrics names to metric names used here. 'num_frames': '', # Used in FAF. 'num_objects': 'gt', # Used in MOTAR computation. + 'pred_frequencies': '', # Only printed out. 'num_predictions': '', # Only printed out. 'num_matches': 'tp', # Used in MOTAR computation and printed out. 'motar': 'motar', # Only used in AMOTA. 
diff --git a/python-sdk/nuscenes/eval/tracking/evaluate.py b/python-sdk/nuscenes/eval/tracking/evaluate.py index 971f7c64a..bc0d59eeb 100644 --- a/python-sdk/nuscenes/eval/tracking/evaluate.py +++ b/python-sdk/nuscenes/eval/tracking/evaluate.py @@ -5,10 +5,9 @@ import json import os import time -from typing import Tuple, List, Dict, Any +from typing import Any, Dict, List, Tuple import numpy as np - from nuscenes import NuScenes from nuscenes.eval.common.config import config_factory from nuscenes.eval.common.loaders import ( @@ -21,9 +20,14 @@ load_prediction_of_sample_tokens, ) from nuscenes.eval.tracking.algo import TrackingEvaluation -from nuscenes.eval.tracking.constants import AVG_METRIC_MAP, MOT_METRIC_MAP, LEGACY_METRICS -from nuscenes.eval.tracking.data_classes import TrackingMetrics, TrackingMetricDataList, TrackingConfig, TrackingBox, \ - TrackingMetricData +from nuscenes.eval.tracking.constants import AVG_METRIC_MAP, LEGACY_METRICS, MOT_METRIC_MAP +from nuscenes.eval.tracking.data_classes import ( + TrackingBox, + TrackingConfig, + TrackingMetricData, + TrackingMetricDataList, + TrackingMetrics, +) from nuscenes.eval.tracking.loaders import create_tracks from nuscenes.eval.tracking.render import recall_metric_curve, summary_plot from nuscenes.eval.tracking.utils import print_final_metrics diff --git a/python-sdk/nuscenes/eval/tracking/utils.py b/python-sdk/nuscenes/eval/tracking/utils.py index da078f290..4f2a335e0 100644 --- a/python-sdk/nuscenes/eval/tracking/utils.py +++ b/python-sdk/nuscenes/eval/tracking/utils.py @@ -3,7 +3,7 @@ import unittest import warnings -from typing import Optional, Dict +from typing import Dict, Optional import numpy as np @@ -14,8 +14,15 @@ raise unittest.SkipTest('Skipping test as motmetrics was not found!') from nuscenes.eval.tracking.data_classes import TrackingMetrics -from nuscenes.eval.tracking.metrics import motar, mota_custom, motp_custom, faf, track_initialization_duration, \ - longest_gap_duration, num_fragmentations_custom +from nuscenes.eval.tracking.metrics import ( + faf, + longest_gap_duration, + mota_custom, + motar, + motp_custom, + num_fragmentations_custom, + track_initialization_duration, +) def category_to_tracking_name(category_name: str) -> Optional[str]: @@ -111,6 +118,7 @@ def print_threshold_metrics(metrics: Dict[str, Dict[str, float]]) -> None: recall = metrics['recall'][threshold_str] num_frames = metrics['num_frames'][threshold_str] num_objects = metrics['num_objects'][threshold_str] + pred_frequencies = metrics['pred_frequencies'][threshold_str] num_predictions = metrics['num_predictions'][threshold_str] num_false_positives = metrics['num_false_positives'][threshold_str] num_misses = metrics['num_misses'][threshold_str] @@ -124,7 +132,7 @@ def print_threshold_metrics(metrics: Dict[str, Dict[str, float]]) -> None: 'Pred', 'Pred-TP', 'Pred-FP', 'Pred-IDS',)) print('%s\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d' % (threshold_str, motar_val, motp, recall, num_frames, - num_objects, num_matches, num_misses, num_switches, + num_objects, num_matches, num_misses, num_switches, pred_frequencies, num_predictions, num_matches, num_false_positives, num_switches)) print() @@ -148,8 +156,8 @@ def create_motmetrics() -> MetricsHost: # Register standard metrics. 
fields = [ 'num_frames', 'obj_frequencies', 'num_matches', 'num_switches', 'num_false_positives', 'num_misses', - 'num_detections', 'num_objects', 'num_predictions', 'mostly_tracked', 'mostly_lost', 'num_fragmentations', - 'motp', 'mota', 'precision', 'recall', 'track_ratios' + 'num_detections', 'num_objects', 'pred_frequencies', 'num_predictions', 'mostly_tracked', 'mostly_lost', + 'num_fragmentations', 'motp', 'mota', 'precision', 'recall', 'track_ratios' ] for field in fields: mh.register(getattr(motmetrics.metrics, field), formatter='{:d}'.format) From e6708eb0888c361132843f575e7b9a56a405b851 Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Fri, 26 Jan 2024 16:29:51 +0100 Subject: [PATCH 04/14] fix printing of pred_frequencies --- python-sdk/nuscenes/eval/tracking/constants.py | 2 +- python-sdk/nuscenes/eval/tracking/utils.py | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/python-sdk/nuscenes/eval/tracking/constants.py b/python-sdk/nuscenes/eval/tracking/constants.py index c3c75bb32..5fde8db41 100644 --- a/python-sdk/nuscenes/eval/tracking/constants.py +++ b/python-sdk/nuscenes/eval/tracking/constants.py @@ -17,7 +17,7 @@ MOT_METRIC_MAP = { # Mapping from motmetrics names to metric names used here. 'num_frames': '', # Used in FAF. 'num_objects': 'gt', # Used in MOTAR computation. - 'pred_frequencies': '', # Only printed out. + 'pred_frequencies': '', # Only needed in background. 'num_predictions': '', # Only printed out. 'num_matches': 'tp', # Used in MOTAR computation and printed out. 'motar': 'motar', # Only used in AMOTA. diff --git a/python-sdk/nuscenes/eval/tracking/utils.py b/python-sdk/nuscenes/eval/tracking/utils.py index 4f2a335e0..3bbd59e96 100644 --- a/python-sdk/nuscenes/eval/tracking/utils.py +++ b/python-sdk/nuscenes/eval/tracking/utils.py @@ -118,7 +118,6 @@ def print_threshold_metrics(metrics: Dict[str, Dict[str, float]]) -> None: recall = metrics['recall'][threshold_str] num_frames = metrics['num_frames'][threshold_str] num_objects = metrics['num_objects'][threshold_str] - pred_frequencies = metrics['pred_frequencies'][threshold_str] num_predictions = metrics['num_predictions'][threshold_str] num_false_positives = metrics['num_false_positives'][threshold_str] num_misses = metrics['num_misses'][threshold_str] @@ -132,7 +131,7 @@ def print_threshold_metrics(metrics: Dict[str, Dict[str, float]]) -> None: 'Pred', 'Pred-TP', 'Pred-FP', 'Pred-IDS',)) print('%s\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d' % (threshold_str, motar_val, motp, recall, num_frames, - num_objects, num_matches, num_misses, num_switches, pred_frequencies, + num_objects, num_matches, num_misses, num_switches, num_predictions, num_matches, num_false_positives, num_switches)) print() From 5fa280f317aa59e809cb528e31fb031a5235ed8c Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Fri, 26 Jan 2024 16:23:02 +0100 Subject: [PATCH 05/14] fix ana in signature --- python-sdk/nuscenes/eval/tracking/metrics.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python-sdk/nuscenes/eval/tracking/metrics.py b/python-sdk/nuscenes/eval/tracking/metrics.py index fad933056..b4215b187 100644 --- a/python-sdk/nuscenes/eval/tracking/metrics.py +++ b/python-sdk/nuscenes/eval/tracking/metrics.py @@ -7,7 +7,7 @@ py-motmetrics at: https://github.com/cheind/py-motmetrics """ -from typing import Any +from typing import Any, Optional import numpy as np @@ -109,7 +109,7 @@ def longest_gap_duration(df: DataFrame, obj_frequencies: DataFrame) -> float: def motar(df: DataFrame, 
num_matches: int, num_misses: int, num_switches: int, num_false_positives: int, - num_objects: int, alpha: float = 1.0) -> float: + num_objects: int, alpha: float = 1.0, ana: Optional[dict] = None) -> float: """ Initializes a MOTAR class which refers to the modified MOTA metric at https://www.nuscenes.org/tracking. Note that we use the measured recall, which is not identical to the hypothetical recall of the @@ -121,6 +121,7 @@ def motar(df: DataFrame, num_matches: int, num_misses: int, num_switches: int, n :param num_false_positives: The number of false positives. :param num_objects: The total number of objects of this class in the GT. :param alpha: MOTAR weighting factor (previously 0.2). + :param ana: something for caching, introduced by motmetrics 1.4.0 :return: The MOTAR or nan if there are no GT objects. """ recall = num_matches / num_objects From 74496225d37a69c54f2a47965f744455dbea1ed0 Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Fri, 26 Jan 2024 17:53:15 +0100 Subject: [PATCH 06/14] leave notes for weird pandas issue. to be debugged further --- python-sdk/nuscenes/eval/tracking/mot.py | 37 ++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/python-sdk/nuscenes/eval/tracking/mot.py b/python-sdk/nuscenes/eval/tracking/mot.py index 639919831..b2d39ebda 100644 --- a/python-sdk/nuscenes/eval/tracking/mot.py +++ b/python-sdk/nuscenes/eval/tracking/mot.py @@ -57,6 +57,7 @@ def new_event_dataframe_with_data(indices, events): ] idx = pd.MultiIndex.from_arrays( + # TODO What types are the indices FrameId and Event? string or int? [indices[field] for field in _INDEX_FIELDS], names=_INDEX_FIELDS) df = pd.concat(series, axis=1) @@ -128,6 +129,42 @@ def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, upd # Update index if update_frame_indices: # pylint: disable=cell-var-from-loop + # TODO TypeError: can only concatenate tuple (not "int") to tuple + # This is likely because we have a multi-index dataframe r here (see new_event_dataframe()) + # See also https://stackoverflow.com/questions/39080555/pandas-get-level-values-for-multiple-columns + # Playground code: https://onecompiler.com/python/422kn8tev + """ + + import pandas as pd + + a={"gk":[15,12,13,22,32,12],"mk":[12,21,23,22,56,12], "sf": [1,2,3,4,5,5]} + df=pd.DataFrame(a) + + # B=df[df["mk"]>=21] + + # print(df) + # print(B) + + df = df.set_index(["gk", "sf"]) + + print(df) + + print("Experiment") + print(df.index.get_level_values(1)) + print("First argument of max") + print(df.index.get_level_values(0).max()) + print(df.index.get_level_values(0).max() +1) # the maximum value of the 0th index column incremented by 1 + print(df.index.get_level_values(1).max()) + print(df.index.get_level_values(1).max() +1) + print("Second argument of max") + print(df.index.get_level_values(0)) + print(df.index.get_level_values(0).unique()) + print(df.index.get_level_values(0).unique().shape) + print(df.index.get_level_values(0).unique().shape[0]) # number of unique values in the 0th index column + print("Final max evaluation") + print(max(df.index.get_level_values(0).max() +1,df.index.get_level_values(0).unique().shape[0])) + """ + next_frame_id = max(r.index.get_level_values(0).max() + 1, r.index.get_level_values(0).unique().shape[0]) if np.isnan(next_frame_id): next_frame_id = 0 From 928428880e0fccca74378954b9d0ec04cedab390 Mon Sep 17 00:00:00 2001 From: Michael Hoss Date: Tue, 30 Jan 2024 18:10:02 +0100 Subject: [PATCH 07/14] tidy up and fix bug of messing up an empty MultiIndex --- 
python-sdk/nuscenes/eval/tracking/mot.py | 40 ++---------------------- 1 file changed, 2 insertions(+), 38 deletions(-) diff --git a/python-sdk/nuscenes/eval/tracking/mot.py b/python-sdk/nuscenes/eval/tracking/mot.py index b2d39ebda..53b813f3c 100644 --- a/python-sdk/nuscenes/eval/tracking/mot.py +++ b/python-sdk/nuscenes/eval/tracking/mot.py @@ -57,7 +57,6 @@ def new_event_dataframe_with_data(indices, events): ] idx = pd.MultiIndex.from_arrays( - # TODO What types are the indices FrameId and Event? string or int? [indices[field] for field in _INDEX_FIELDS], names=_INDEX_FIELDS) df = pd.concat(series, axis=1) @@ -129,46 +128,11 @@ def merge_event_dataframes(dfs, update_frame_indices=True, update_oids=True, upd # Update index if update_frame_indices: # pylint: disable=cell-var-from-loop - # TODO TypeError: can only concatenate tuple (not "int") to tuple - # This is likely because we have a multi-index dataframe r here (see new_event_dataframe()) - # See also https://stackoverflow.com/questions/39080555/pandas-get-level-values-for-multiple-columns - # Playground code: https://onecompiler.com/python/422kn8tev - """ - - import pandas as pd - - a={"gk":[15,12,13,22,32,12],"mk":[12,21,23,22,56,12], "sf": [1,2,3,4,5,5]} - df=pd.DataFrame(a) - - # B=df[df["mk"]>=21] - - # print(df) - # print(B) - - df = df.set_index(["gk", "sf"]) - - print(df) - - print("Experiment") - print(df.index.get_level_values(1)) - print("First argument of max") - print(df.index.get_level_values(0).max()) - print(df.index.get_level_values(0).max() +1) # the maximum value of the 0th index column incremented by 1 - print(df.index.get_level_values(1).max()) - print(df.index.get_level_values(1).max() +1) - print("Second argument of max") - print(df.index.get_level_values(0)) - print(df.index.get_level_values(0).unique()) - print(df.index.get_level_values(0).unique().shape) - print(df.index.get_level_values(0).unique().shape[0]) # number of unique values in the 0th index column - print("Final max evaluation") - print(max(df.index.get_level_values(0).max() +1,df.index.get_level_values(0).unique().shape[0])) - """ - next_frame_id = max(r.index.get_level_values(0).max() + 1, r.index.get_level_values(0).unique().shape[0]) if np.isnan(next_frame_id): next_frame_id = 0 - copy.index = copy.index.map(lambda x: (x[0] + next_frame_id, x[1])) + if not copy.index.empty: + copy.index = copy.index.map(lambda x: (x[0] + next_frame_id, x[1])) infos['frame_offset'] = next_frame_id # Update object / hypothesis ids From 15e1eced9c7654ec6b8ebdf7dfc97e515bb90797 Mon Sep 17 00:00:00 2001 From: Dominik Dienlin Date: Fri, 22 Mar 2024 12:39:25 +0100 Subject: [PATCH 08/14] fix requirements specification for compatibility with python3.12 --- setup/requirements/requirements_base.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setup/requirements/requirements_base.txt b/setup/requirements/requirements_base.txt index 7674640e7..8e10cf441 100644 --- a/setup/requirements/requirements_base.txt +++ b/setup/requirements/requirements_base.txt @@ -1,13 +1,13 @@ cachetools descartes fire -matplotlib<3.6.0 -numpy>=1.22.0,<2.0.0 +matplotlib +numpy>=1.22.0,<2.0 opencv-python>=4.5.4.58 Pillow>6.2.1 pyquaternion>=0.9.5 scikit-learn scipy -Shapely<2.0.0 +Shapely~=2.0.3 tqdm parameterized From e5940c82ccba994c19590ba97d89dd4dd33070d8 Mon Sep 17 00:00:00 2001 From: Dominik Dienlin Date: Fri, 22 Mar 2024 12:39:52 +0100 Subject: [PATCH 09/14] fix issue with setting title via matplotlib when using newer versions of matplotlib --- 
python-sdk/nuscenes/nuscenes.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python-sdk/nuscenes/nuscenes.py b/python-sdk/nuscenes/nuscenes.py index f37676637..e64270870 100644 --- a/python-sdk/nuscenes/nuscenes.py +++ b/python-sdk/nuscenes/nuscenes.py @@ -1026,9 +1026,9 @@ def render_pointcloud_in_image(self, if ax is None: fig, ax = plt.subplots(1, 1, figsize=(9, 16)) if lidarseg_preds_bin_path: - fig.canvas.set_window_title(sample_token + '(predictions)') + fig.canvas.manager.set_window_title(sample_token + '(predictions)') else: - fig.canvas.set_window_title(sample_token) + fig.canvas.manager.set_window_title(sample_token) else: # Set title on if rendering as part of render_sample. ax.set_title(camera_channel) ax.imshow(im) From e1db722360ff7ad769e83355f9f94a15433767b7 Mon Sep 17 00:00:00 2001 From: ruoning Date: Mon, 25 Aug 2025 16:19:56 +0800 Subject: [PATCH 10/14] Support CI in GitHub Actions --- .github/workflows/pipeline.yml | 52 ++++++++ setup/requirements/requirements_base.txt | 6 +- setup/requirements_3_12_lock.txt | 154 +++++++++++++++++++++++ setup/test_tutorial.sh | 25 +++- 4 files changed, 232 insertions(+), 5 deletions(-) create mode 100644 .github/workflows/pipeline.yml create mode 100644 setup/requirements_3_12_lock.txt diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml new file mode 100644 index 000000000..fcfc9328b --- /dev/null +++ b/.github/workflows/pipeline.yml @@ -0,0 +1,52 @@ +name: nuscenes-devkit CI pipeline +on: [pull_request] +env: + NUSCENES: data/sets/nuscenes + NUIMAGES: data/sets/nuimages +jobs: + Test: + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v4 + - name: Set up Python 3.12 + uses: actions/setup-python@v3 + with: + python-version: "3.12" + - name: Install datasets + run: | + mkdir -p ${NUSCENES} && mkdir -p ${NUIMAGES} + + echo "Installing: v1.0-mini.tgz" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/v1.0-mini.tgz | tar -xzf - -C ${NUSCENES} --exclude sweeps + + echo "Installing: nuimages-v1.0-mini.tgz" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuimages-v1.0/nuimages-v1.0-mini.tgz | tar -xzf - -C ${NUIMAGES} + + echo "Installing: nuScenes-lidarseg-mini-v1.0.tar.bz2" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuscenes-lidarseg-v1.0/nuScenes-lidarseg-mini-v1.0.tar.bz2 | tar -xjf - -C ${NUSCENES} + + echo "Installing: nuScenes-panoptic-v1.0-mini.tar.gz" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuscenes-panoptic-v1.0/nuScenes-panoptic-v1.0-mini.tar.gz | tar -xzf - --strip-components=1 -C ${NUSCENES} + + echo "Installing: nuScenes-map-expansion-v1.3.zip" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/nuScenes-map-expansion-v1.3.zip -o nuScenes-map-expansion-v1.3.zip + unzip -q nuScenes-map-expansion-v1.3.zip -d ${NUSCENES}/maps/ + + echo "Installing: can_bus.zip" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/can_bus.zip -o can_bus.zip + unzip -q can_bus.zip -d ${NUSCENES} can_bus/scene-0001_* + + echo "Removing zip files . . ." 
+ rm nuScenes-map-expansion-v1.3.zip can_bus.zip + - name: Install dependencies + run: | + pip install -r setup/requirements_3_12_lock.txt + - name: Run Python unit tests + run: | + python -m unittest discover python-sdk + - name: Run Jupyter notebook tests + run: | + pip install jupyter -q + export PYTHONPATH="${PYTHONPATH}:$(pwd)/python-sdk" + ./setup/test_tutorial.sh --ci diff --git a/setup/requirements/requirements_base.txt b/setup/requirements/requirements_base.txt index 8e10cf441..e40e161dd 100644 --- a/setup/requirements/requirements_base.txt +++ b/setup/requirements/requirements_base.txt @@ -1,9 +1,9 @@ cachetools descartes fire -matplotlib -numpy>=1.22.0,<2.0 -opencv-python>=4.5.4.58 +matplotlib>=3.6.0 +numpy>=1.22.0,<2.0.0 +opencv-python-headless>=4.5.4.58 Pillow>6.2.1 pyquaternion>=0.9.5 scikit-learn diff --git a/setup/requirements_3_12_lock.txt b/setup/requirements_3_12_lock.txt new file mode 100644 index 000000000..260c17f65 --- /dev/null +++ b/setup/requirements_3_12_lock.txt @@ -0,0 +1,154 @@ +# +# This file is autogenerated by pip-compile with Python 3.12 +# by the following command: +# +# pip-compile --output-file=setup/requirements_3_12_lock.txt setup/requirements.txt +# +cachetools==6.1.0 + # via -r setup/requirements/requirements_base.txt +contourpy==1.3.3 + # via matplotlib +cycler==0.12.1 + # via matplotlib +descartes==1.1.0 + # via -r setup/requirements/requirements_base.txt +filelock==3.19.1 + # via torch +fire==0.7.1 + # via -r setup/requirements/requirements_base.txt +fonttools==4.59.1 + # via matplotlib +fsspec==2025.7.0 + # via torch +jinja2==3.1.6 + # via torch +joblib==1.5.1 + # via scikit-learn +kiwisolver==1.4.9 + # via matplotlib +markupsafe==3.0.2 + # via jinja2 +matplotlib==3.10.5 + # via + # -r setup/requirements/requirements_base.txt + # descartes +motmetrics==1.4.0 + # via -r setup/requirements/requirements_tracking.txt +mpmath==1.3.0 + # via sympy +networkx==3.5 + # via torch +numpy==1.26.4 + # via + # -r setup/requirements/requirements_base.txt + # contourpy + # matplotlib + # motmetrics + # opencv-python-headless + # pandas + # pycocotools + # pyquaternion + # scikit-learn + # scipy + # shapely + # torchvision +nvidia-cublas-cu12==12.8.4.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.8.90 + # via torch +nvidia-cuda-nvrtc-cu12==12.8.93 + # via torch +nvidia-cuda-runtime-cu12==12.8.90 + # via torch +nvidia-cudnn-cu12==9.10.2.21 + # via torch +nvidia-cufft-cu12==11.3.3.83 + # via torch +nvidia-cufile-cu12==1.13.1.3 + # via torch +nvidia-curand-cu12==10.3.9.90 + # via torch +nvidia-cusolver-cu12==11.7.3.90 + # via torch +nvidia-cusparse-cu12==12.5.8.93 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-cusparselt-cu12==0.7.1 + # via torch +nvidia-nccl-cu12==2.27.3 + # via torch +nvidia-nvjitlink-cu12==12.8.93 + # via + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 + # torch +nvidia-nvtx-cu12==12.8.90 + # via torch +opencv-python-headless==4.11.0.86 + # via -r setup/requirements/requirements_base.txt +packaging==25.0 + # via matplotlib +pandas==2.3.2 + # via + # -r setup/requirements/requirements_tracking.txt + # motmetrics +parameterized==0.9.0 + # via -r setup/requirements/requirements_base.txt +pillow==11.3.0 + # via + # -r setup/requirements/requirements_base.txt + # matplotlib + # torchvision +pycocotools==2.0.10 + # via -r setup/requirements/requirements_nuimages.txt +pyparsing==3.2.3 + # via matplotlib +pyquaternion==0.9.9 + # via -r setup/requirements/requirements_base.txt 
+python-dateutil==2.9.0.post0 + # via + # matplotlib + # pandas +pytz==2025.2 + # via pandas +scikit-learn==1.7.1 + # via -r setup/requirements/requirements_base.txt +scipy==1.16.1 + # via + # -r setup/requirements/requirements_base.txt + # motmetrics + # scikit-learn +shapely==2.0.7 + # via -r setup/requirements/requirements_base.txt +six==1.17.0 + # via python-dateutil +sympy==1.14.0 + # via torch +termcolor==3.1.0 + # via fire +threadpoolctl==3.6.0 + # via scikit-learn +torch==2.8.0 + # via + # -r setup/requirements/requirements_prediction.txt + # torchvision +torchvision==0.23.0 + # via -r setup/requirements/requirements_prediction.txt +tqdm==4.67.1 + # via -r setup/requirements/requirements_base.txt +triton==3.4.0 + # via torch +typing-extensions==4.14.1 + # via torch +tzdata==2025.2 + # via pandas +xmltodict==0.14.2 + # via motmetrics + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/setup/test_tutorial.sh b/setup/test_tutorial.sh index da236fce7..d6b9c1d69 100755 --- a/setup/test_tutorial.sh +++ b/setup/test_tutorial.sh @@ -1,8 +1,16 @@ #!/bin/bash set -ex -# This script is to be executed inside a Docker container -source activate nuscenes +RUNNING_IN_CI=False + +# Parse arguments +for arg in "$@"; do + case $arg in + --ci) + RUNNING_IN_CI=True + ;; + esac +done # Generate python script from Jupyter notebook and then copy into Docker image. jupyter nbconvert --to python python-sdk/tutorials/nuscenes_tutorial.ipynb || { echo "Failed to convert nuscenes_tutorial notebook to python script"; exit 1; } @@ -21,6 +29,19 @@ sed -i.bak "/get_ipython.*/d; s/\(nusc_map.render.*\)/#\1/" python-sdk/tutorial sed -i.bak "/get_ipython.*/d; s/\(ego_poses = .*\)/#\1/" python-sdk/tutorials/map_expansion_tutorial.py || { echo "error in sed command"; exit 1; } sed -i.bak "/get_ipython.*/d; s/\(plt.imshow.*\)/#\1/" python-sdk/tutorials/prediction_tutorial.py || { echo "error in sed command"; exit 1; } +# Replace dataset path for running on CI +# Use "data/sets/nuscenes" instead of "/data/sets/nuscenes" +# Use "data/sets/nuimages" instead of "/data/sets/nuimages" +if [[ ${RUNNING_IN_CI} == "True" ]]; then + echo "Running in CI. 
Replacing path to dataroot as: data/sets/nuimages" + sed -i 's/\/data\/sets\/nuscenes/data\/sets\/nuscenes/g' python-sdk/tutorials/nuscenes_tutorial.py + sed -i 's/\/data\/sets\/nuscenes/data\/sets\/nuscenes/g' python-sdk/tutorials/nuimages_tutorial.py + sed -i 's/\/data\/sets\/nuimages/data\/sets\/nuimages/g' python-sdk/tutorials/nuimages_tutorial.py + sed -i 's/\/data\/sets\/nuscenes/data\/sets\/nuscenes/g' python-sdk/tutorials/can_bus_tutorial.py + sed -i 's/\/data\/sets\/nuscenes/data\/sets\/nuscenes/g' python-sdk/tutorials/map_expansion_tutorial.py + sed -i 's/\/data\/sets\/nuscenes/data\/sets\/nuscenes/g' python-sdk/tutorials/prediction_tutorial.py +fi + # Run tutorial xvfb-run python python-sdk/tutorials/nuscenes_tutorial.py # xvfb-run python python-sdk/tutorials/nuimages_tutorial.py # skip until PR-440 merged From fff495f774efa7f68c276dbc0bf8bc598cdc0ba5 Mon Sep 17 00:00:00 2001 From: ruoning Date: Mon, 25 Aug 2025 16:21:00 +0800 Subject: [PATCH 11/14] Modifications to support Python 3.12 --- python-sdk/nuscenes/map_expansion/map_api.py | 16 ++++++++-------- .../prediction/input_representation/agents.py | 2 +- .../input_representation/tests/test_agents.py | 8 ++++---- .../tests/test_combinators.py | 8 ++++---- .../tests/test_static_layers.py | 8 ++++---- .../scripts/export_pointclouds_as_obj.py | 2 +- python-sdk/nuscenes/utils/geometry_utils.py | 2 +- python-sdk/nuscenes/utils/map_mask.py | 4 ++-- .../tutorials/map_expansion_tutorial.ipynb | 4 ++-- 9 files changed, 27 insertions(+), 27 deletions(-) diff --git a/python-sdk/nuscenes/map_expansion/map_api.py b/python-sdk/nuscenes/map_expansion/map_api.py index 53a14a0c9..b055f7aa5 100644 --- a/python-sdk/nuscenes/map_expansion/map_api.py +++ b/python-sdk/nuscenes/map_expansion/map_api.py @@ -29,7 +29,7 @@ from nuscenes.utils.geometry_utils import view_points # Recommended style to use as the plots will show grids. -plt.style.use('seaborn-whitegrid') +plt.style.use('seaborn-v0_8-whitegrid') # Define a map geometry type for polygons and lines. 
Geometry = Union[Polygon, LineString] @@ -1820,8 +1820,8 @@ def mask_for_polygons(polygons: MultiPolygon, mask: np.ndarray) -> np.ndarray: def int_coords(x): # function to round and convert to int return np.array(x).round().astype(np.int32) - exteriors = [int_coords(poly.exterior.coords) for poly in polygons] - interiors = [int_coords(pi.coords) for poly in polygons for pi in poly.interiors] + exteriors = [int_coords(poly.exterior.coords) for poly in polygons.geoms] + interiors = [int_coords(pi.coords) for poly in polygons.geoms for pi in poly.interiors] cv2.fillPoly(mask, exteriors, 1) cv2.fillPoly(mask, interiors, 0) return mask @@ -1885,7 +1885,7 @@ def _polygon_geom_to_mask(self, [1.0, 0.0, 0.0, 1.0, trans_x, trans_y]) new_polygon = affinity.scale(new_polygon, xfact=scale_width, yfact=scale_height, origin=(0, 0)) - if new_polygon.geom_type is 'Polygon': + if new_polygon.geom_type == 'Polygon': new_polygon = MultiPolygon([new_polygon]) map_mask = self.mask_for_polygons(new_polygon, map_mask) @@ -1922,7 +1922,7 @@ def _line_geom_to_mask(self, map_mask = np.zeros(canvas_size, np.uint8) - if layer_name is 'traffic_light': + if layer_name == 'traffic_light': return None for line in layer_geom: @@ -1968,7 +1968,7 @@ def _get_layer_polygon(self, origin=(patch_x, patch_y), use_radians=False) new_polygon = affinity.affine_transform(new_polygon, [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) - if new_polygon.geom_type is 'Polygon': + if new_polygon.geom_type == 'Polygon': new_polygon = MultiPolygon([new_polygon]) polygon_list.append(new_polygon) @@ -1983,7 +1983,7 @@ def _get_layer_polygon(self, origin=(patch_x, patch_y), use_radians=False) new_polygon = affinity.affine_transform(new_polygon, [1.0, 0.0, 0.0, 1.0, -patch_x, -patch_y]) - if new_polygon.geom_type is 'Polygon': + if new_polygon.geom_type == 'Polygon': new_polygon = MultiPolygon([new_polygon]) polygon_list.append(new_polygon) @@ -2003,7 +2003,7 @@ def _get_layer_line(self, if layer_name not in self.map_api.non_geometric_line_layers: raise ValueError("{} is not a line layer".format(layer_name)) - if layer_name is 'traffic_light': + if layer_name == 'traffic_light': return None patch_x = patch_box[0] diff --git a/python-sdk/nuscenes/prediction/input_representation/agents.py b/python-sdk/nuscenes/prediction/input_representation/agents.py index a19894c0f..c727d083c 100644 --- a/python-sdk/nuscenes/prediction/input_representation/agents.py +++ b/python-sdk/nuscenes/prediction/input_representation/agents.py @@ -193,7 +193,7 @@ def draw_agent_boxes(center_agent_annotation: Dict[str, Any], if num_points > 1: color = fade_color(color, i, num_points - 1) - cv2.fillPoly(base_image, pts=[np.int0(box)], color=color) + cv2.fillPoly(base_image, pts=[np.intp(box)], color=color) class AgentBoxesWithFadedHistory(AgentRepresentation): diff --git a/python-sdk/nuscenes/prediction/input_representation/tests/test_agents.py b/python-sdk/nuscenes/prediction/input_representation/tests/test_agents.py index 7bd17dc05..44a7616a4 100644 --- a/python-sdk/nuscenes/prediction/input_representation/tests/test_agents.py +++ b/python-sdk/nuscenes/prediction/input_representation/tests/test_agents.py @@ -153,9 +153,9 @@ def get_colors(name): agent_1_ts_0 = cv2.boxPoints(((300, 350), (30, 30), -90)) agent_1_ts_1 = cv2.boxPoints(((300, 300), (30, 30), -90)) - answer = cv2.fillPoly(answer, pts=[np.int0(agent_0_ts_0)], color=(102, 0, 0)) - answer = cv2.fillPoly(answer, pts=[np.int0(angent_0_ts_1)], color=(255, 0, 0)) - answer = cv2.fillPoly(answer, pts=[np.int0(agent_1_ts_0)], 
color=(102, 102, 0)) - answer = cv2.fillPoly(answer, pts=[np.int0(agent_1_ts_1)], color=(255, 255, 0)) + answer = cv2.fillPoly(answer, pts=[np.intp(agent_0_ts_0)], color=(102, 0, 0)) + answer = cv2.fillPoly(answer, pts=[np.intp(angent_0_ts_1)], color=(255, 0, 0)) + answer = cv2.fillPoly(answer, pts=[np.intp(agent_1_ts_0)], color=(102, 102, 0)) + answer = cv2.fillPoly(answer, pts=[np.intp(agent_1_ts_1)], color=(255, 255, 0)) np.testing.assert_allclose(answer, img) diff --git a/python-sdk/nuscenes/prediction/input_representation/tests/test_combinators.py b/python-sdk/nuscenes/prediction/input_representation/tests/test_combinators.py index bc81be4df..9171653c0 100644 --- a/python-sdk/nuscenes/prediction/input_representation/tests/test_combinators.py +++ b/python-sdk/nuscenes/prediction/input_representation/tests/test_combinators.py @@ -15,18 +15,18 @@ def test(self): layer_1 = np.zeros((100, 100, 3)) box_1 = cv2.boxPoints(((50, 50), (20, 20), 0)) - layer_1 = cv2.fillPoly(layer_1, pts=[np.int0(box_1)], color=(255, 255, 255)) + layer_1 = cv2.fillPoly(layer_1, pts=[np.intp(box_1)], color=(255, 255, 255)) layer_2 = np.zeros((100, 100, 3)) box_2 = cv2.boxPoints(((70, 30), (10, 10), 0)) - layer_2 = cv2.fillPoly(layer_2, pts=[np.int0(box_2)], color=(0, 0, 255)) + layer_2 = cv2.fillPoly(layer_2, pts=[np.intp(box_2)], color=(0, 0, 255)) rasterizer = Rasterizer() image = rasterizer.combine([layer_1.astype('uint8'), layer_2.astype('uint8')]) answer = np.zeros((100, 100, 3)) - answer = cv2.fillPoly(answer, pts=[np.int0(box_1)], color=(255, 255, 255)) - answer = cv2.fillPoly(answer, pts=[np.int0(box_2)], color=(0, 0, 255)) + answer = cv2.fillPoly(answer, pts=[np.intp(box_1)], color=(255, 255, 255)) + answer = cv2.fillPoly(answer, pts=[np.intp(box_2)], color=(0, 0, 255)) answer = answer.astype('uint8') np.testing.assert_allclose(answer, image) diff --git a/python-sdk/nuscenes/prediction/input_representation/tests/test_static_layers.py b/python-sdk/nuscenes/prediction/input_representation/tests/test_static_layers.py index 1ef17add0..a2ec0c53f 100644 --- a/python-sdk/nuscenes/prediction/input_representation/tests/test_static_layers.py +++ b/python-sdk/nuscenes/prediction/input_representation/tests/test_static_layers.py @@ -20,7 +20,7 @@ def get_layer_mocks(): layer_1 = np.zeros((100, 100, 3)) box = cv2.boxPoints(((50, 50), (20, 10), -90)) - layer_1 = cv2.fillPoly(layer_1, pts=[np.int0(box)], color=(1, 1, 1)) + layer_1 = cv2.fillPoly(layer_1, pts=[np.intp(box)], color=(1, 1, 1)) layer_1 = layer_1[::-1, :, 0] layer_2 = np.zeros((100, 100, 3)) @@ -56,7 +56,7 @@ def test_make_rasterization(self, mock_draw_lanes, mock_load_maps): lanes = np.zeros((100, 100, 3)).astype('uint8') lane_box = cv2.boxPoints(((25, 75), (5, 5), -90)) - lanes = cv2.fillPoly(lanes, pts=[np.int0(lane_box)], color=(255, 0, 0)) + lanes = cv2.fillPoly(lanes, pts=[np.intp(lane_box)], color=(255, 0, 0)) mock_draw_lanes.return_value = lanes layers = self.get_layer_mocks() @@ -81,8 +81,8 @@ def test_make_rasterization(self, mock_draw_lanes, mock_load_maps): answer = np.zeros((100, 100, 3)) box = cv2.boxPoints(((50, 50), (20, 10), -90)) - answer = cv2.fillPoly(answer, pts=[np.int0(box)], color=(255, 255, 255)) + answer = cv2.fillPoly(answer, pts=[np.intp(box)], color=(255, 255, 255)) answer = cv2.line(answer, (50, 50), (50, 40), color=(255, 0, 0), thickness=2) - answer = cv2.fillPoly(answer, pts=[np.int0(lane_box)], color=(255, 0, 0)) + answer = cv2.fillPoly(answer, pts=[np.intp(lane_box)], color=(255, 0, 0)) np.testing.assert_allclose(answer, 
image) diff --git a/python-sdk/nuscenes/scripts/export_pointclouds_as_obj.py b/python-sdk/nuscenes/scripts/export_pointclouds_as_obj.py index ef85697e4..b7cc05025 100644 --- a/python-sdk/nuscenes/scripts/export_pointclouds_as_obj.py +++ b/python-sdk/nuscenes/scripts/export_pointclouds_as_obj.py @@ -119,7 +119,7 @@ def pointcloud_color_from_image(nusc: NuScenes, :param nusc: NuScenes instance. :param pointsensor_token: Lidar/radar sample_data token. :param camera_token: Camera sample data token. - :return (coloring , mask ). Returns the colors for n points that reproject into the + :return (coloring , mask ). Returns the colors for n points that reproject into the image out of m total points. The mask indicates which points are selected. """ diff --git a/python-sdk/nuscenes/utils/geometry_utils.py b/python-sdk/nuscenes/utils/geometry_utils.py index 4a54cd979..17ec37719 100644 --- a/python-sdk/nuscenes/utils/geometry_utils.py +++ b/python-sdk/nuscenes/utils/geometry_utils.py @@ -118,7 +118,7 @@ def points_in_box(box: 'Box', points: np.ndarray, wlh_factor: float = 1.0): :param box: . :param points: . :param wlh_factor: Inflates or deflates the box. - :return: . + :return: . """ corners = box.corners(wlh_factor=wlh_factor) diff --git a/python-sdk/nuscenes/utils/map_mask.py b/python-sdk/nuscenes/utils/map_mask.py index 0042e73db..2229cc7d4 100644 --- a/python-sdk/nuscenes/utils/map_mask.py +++ b/python-sdk/nuscenes/utils/map_mask.py @@ -57,11 +57,11 @@ def is_on_mask(self, x: Any, y: Any, dilation: float = 0) -> np.array: :param x: Global x coordinates. Can be a scalar, list or a numpy array of x coordinates. :param y: Global y coordinates. Can be a scalar, list or a numpy array of x coordinates. :param dilation: Optional dilation of map mask. - :return: . Whether the points are on the mask. + :return: . Whether the points are on the mask. """ px, py = self.to_pixel_coords(x, y) - on_mask = np.ones(px.size, dtype=np.bool) + on_mask = np.ones(px.size, dtype=bool) this_mask = self.mask(dilation) on_mask[px < 0] = False diff --git a/python-sdk/tutorials/map_expansion_tutorial.ipynb b/python-sdk/tutorials/map_expansion_tutorial.ipynb index 95d6a1f56..fb8dd455e 100644 --- a/python-sdk/tutorials/map_expansion_tutorial.ipynb +++ b/python-sdk/tutorials/map_expansion_tutorial.ipynb @@ -197,7 +197,7 @@ "source": [ "# Init nuScenes. Requires the dataset to be stored on disk.\n", "from nuscenes.nuscenes import NuScenes\n", - "nusc = NuScenes(version='v1.0-mini', verbose=False)\n", + "nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)\n", "\n", "# Pick a sample and render the front camera image.\n", "sample_token = nusc.sample[9]['token']\n", @@ -222,7 +222,7 @@ "source": [ "# Init NuScenes. 
Requires the dataset to be stored on disk.\n", "from nuscenes.nuscenes import NuScenes\n", - "nusc = NuScenes(version='v1.0-mini', verbose=False)\n", + "nusc = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=False)\n", "\n", "# Render ego poses.\n", "nusc_map_bos = NuScenesMap(dataroot='/data/sets/nuscenes', map_name='boston-seaport')\n", From b486f1d0ee6e8ccfe58a015ec37dd6473cc4ba3c Mon Sep 17 00:00:00 2001 From: ruoning Date: Tue, 26 Aug 2025 09:55:05 +0800 Subject: [PATCH 12/14] Make pipeline extensible to support multiple Python versions --- .github/actions/run-python-tests/action.yml | 62 ++++++++ .github/workflows/pipeline.yml | 54 ++----- setup/requirements_3_9_lock.txt | 162 ++++++++++++++++++++ 3 files changed, 237 insertions(+), 41 deletions(-) create mode 100644 .github/actions/run-python-tests/action.yml create mode 100644 setup/requirements_3_9_lock.txt diff --git a/.github/actions/run-python-tests/action.yml b/.github/actions/run-python-tests/action.yml new file mode 100644 index 000000000..6024af9c6 --- /dev/null +++ b/.github/actions/run-python-tests/action.yml @@ -0,0 +1,62 @@ +name: 'Run Python Tests' +description: 'Run Python Tests' +inputs: + python-version: + description: 'Python version' + required: true +runs: + using: "composite" + steps: + - name: Display Python version + shell: bash + env: + PYTHON_VERSION: ${{ inputs.python-version }} + run: echo "Running Python tests with version ${PYTHON_VERSION}" + - name: Set up Python + uses: actions/setup-python@v3 + with: + python-version: ${{ inputs.python-version }} + - name: Install datasets + shell: bash + run: | + mkdir -p ${NUSCENES} && mkdir -p ${NUIMAGES} + + echo "Installing: v1.0-mini.tgz" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/v1.0-mini.tgz | tar -xzf - -C ${NUSCENES} --exclude sweeps + + echo "Installing: nuimages-v1.0-mini.tgz" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuimages-v1.0/nuimages-v1.0-mini.tgz | tar -xzf - -C ${NUIMAGES} + + echo "Installing: nuScenes-lidarseg-mini-v1.0.tar.bz2" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuscenes-lidarseg-v1.0/nuScenes-lidarseg-mini-v1.0.tar.bz2 | tar -xjf - -C ${NUSCENES} + + echo "Installing: nuScenes-panoptic-v1.0-mini.tar.gz" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuscenes-panoptic-v1.0/nuScenes-panoptic-v1.0-mini.tar.gz | tar -xzf - --strip-components=1 -C ${NUSCENES} + + echo "Installing: nuScenes-map-expansion-v1.3.zip" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/nuScenes-map-expansion-v1.3.zip -o nuScenes-map-expansion-v1.3.zip + unzip -q nuScenes-map-expansion-v1.3.zip -d ${NUSCENES}/maps/ + + echo "Installing: can_bus.zip" + curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/can_bus.zip -o can_bus.zip + unzip -q can_bus.zip -d ${NUSCENES} can_bus/scene-0001_* + + echo "Removing zip files . . ." 
+ rm nuScenes-map-expansion-v1.3.zip can_bus.zip + - name: Install dependencies + shell: bash + env: + PYTHON_VERSION: ${{ inputs.python-version }} + run: | + PYTHON_VERSION_UNDERSCORE=${PYTHON_VERSION//./_} + pip install -r setup/requirements_${PYTHON_VERSION_UNDERSCORE}_lock.txt + - name: Run Python unit tests + shell: bash + run: | + python -m unittest discover python-sdk + - name: Run Jupyter notebook tests + shell: bash + run: | + pip install jupyter -q + export PYTHONPATH="${PYTHONPATH}:$(pwd)/python-sdk" + ./setup/test_tutorial.sh --ci diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml index fcfc9328b..e256bdbb6 100644 --- a/.github/workflows/pipeline.yml +++ b/.github/workflows/pipeline.yml @@ -4,49 +4,21 @@ env: NUSCENES: data/sets/nuscenes NUIMAGES: data/sets/nuimages jobs: - Test: + test-in-3-9: runs-on: ubuntu-latest steps: - name: Check out repository code uses: actions/checkout@v4 - - name: Set up Python 3.12 - uses: actions/setup-python@v3 + - id: run-test-in-3-9 + uses: ./.github/actions/run-python-tests with: - python-version: "3.12" - - name: Install datasets - run: | - mkdir -p ${NUSCENES} && mkdir -p ${NUIMAGES} - - echo "Installing: v1.0-mini.tgz" - curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/v1.0-mini.tgz | tar -xzf - -C ${NUSCENES} --exclude sweeps - - echo "Installing: nuimages-v1.0-mini.tgz" - curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuimages-v1.0/nuimages-v1.0-mini.tgz | tar -xzf - -C ${NUIMAGES} - - echo "Installing: nuScenes-lidarseg-mini-v1.0.tar.bz2" - curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuscenes-lidarseg-v1.0/nuScenes-lidarseg-mini-v1.0.tar.bz2 | tar -xjf - -C ${NUSCENES} - - echo "Installing: nuScenes-panoptic-v1.0-mini.tar.gz" - curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/nuscenes-panoptic-v1.0/nuScenes-panoptic-v1.0-mini.tar.gz | tar -xzf - --strip-components=1 -C ${NUSCENES} - - echo "Installing: nuScenes-map-expansion-v1.3.zip" - curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/nuScenes-map-expansion-v1.3.zip -o nuScenes-map-expansion-v1.3.zip - unzip -q nuScenes-map-expansion-v1.3.zip -d ${NUSCENES}/maps/ - - echo "Installing: can_bus.zip" - curl -fsSL https://motional-nuscenes.s3-ap-northeast-1.amazonaws.com/public/v1.0/can_bus.zip -o can_bus.zip - unzip -q can_bus.zip -d ${NUSCENES} can_bus/scene-0001_* - - echo "Removing zip files . . ." 
- rm nuScenes-map-expansion-v1.3.zip can_bus.zip - - name: Install dependencies - run: | - pip install -r setup/requirements_3_12_lock.txt - - name: Run Python unit tests - run: | - python -m unittest discover python-sdk - - name: Run Jupyter notebook tests - run: | - pip install jupyter -q - export PYTHONPATH="${PYTHONPATH}:$(pwd)/python-sdk" - ./setup/test_tutorial.sh --ci + python-version: 3.9 + test-in-3-12: + runs-on: ubuntu-latest + steps: + - name: Check out repository code + uses: actions/checkout@v4 + - id: run-test-in-3-12 + uses: ./.github/actions/run-python-tests + with: + python-version: 3.12 diff --git a/setup/requirements_3_9_lock.txt b/setup/requirements_3_9_lock.txt new file mode 100644 index 000000000..ebbdc9bfb --- /dev/null +++ b/setup/requirements_3_9_lock.txt @@ -0,0 +1,162 @@ +# +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: +# +# pip-compile --output-file=setup/requirements_3_9_lock.txt setup/requirements.txt +# +cachetools==6.2.0 + # via -r setup/requirements/requirements_base.txt +contourpy==1.3.0 + # via matplotlib +cycler==0.12.1 + # via matplotlib +descartes==1.1.0 + # via -r setup/requirements/requirements_base.txt +filelock==3.19.1 + # via torch +fire==0.7.1 + # via -r setup/requirements/requirements_base.txt +fonttools==4.59.1 + # via matplotlib +fsspec==2025.7.0 + # via torch +importlib-metadata==8.7.0 + # via triton +importlib-resources==6.5.2 + # via matplotlib +jinja2==3.1.6 + # via torch +joblib==1.5.1 + # via scikit-learn +kiwisolver==1.4.7 + # via matplotlib +markupsafe==3.0.2 + # via jinja2 +matplotlib==3.9.4 + # via + # -r setup/requirements/requirements_base.txt + # descartes +motmetrics==1.4.0 + # via -r setup/requirements/requirements_tracking.txt +mpmath==1.3.0 + # via sympy +networkx==3.2.1 + # via torch +numpy==1.26.4 + # via + # -r setup/requirements/requirements_base.txt + # contourpy + # matplotlib + # motmetrics + # opencv-python-headless + # pandas + # pycocotools + # pyquaternion + # scikit-learn + # scipy + # shapely + # torchvision +nvidia-cublas-cu12==12.8.4.1 + # via + # nvidia-cudnn-cu12 + # nvidia-cusolver-cu12 + # torch +nvidia-cuda-cupti-cu12==12.8.90 + # via torch +nvidia-cuda-nvrtc-cu12==12.8.93 + # via torch +nvidia-cuda-runtime-cu12==12.8.90 + # via torch +nvidia-cudnn-cu12==9.10.2.21 + # via torch +nvidia-cufft-cu12==11.3.3.83 + # via torch +nvidia-cufile-cu12==1.13.1.3 + # via torch +nvidia-curand-cu12==10.3.9.90 + # via torch +nvidia-cusolver-cu12==11.7.3.90 + # via torch +nvidia-cusparse-cu12==12.5.8.93 + # via + # nvidia-cusolver-cu12 + # torch +nvidia-cusparselt-cu12==0.7.1 + # via torch +nvidia-nccl-cu12==2.27.3 + # via torch +nvidia-nvjitlink-cu12==12.8.93 + # via + # nvidia-cufft-cu12 + # nvidia-cusolver-cu12 + # nvidia-cusparse-cu12 + # torch +nvidia-nvtx-cu12==12.8.90 + # via torch +opencv-python-headless==4.11.0.86 + # via -r setup/requirements/requirements_base.txt +packaging==25.0 + # via matplotlib +pandas==2.3.2 + # via + # -r setup/requirements/requirements_tracking.txt + # motmetrics +parameterized==0.9.0 + # via -r setup/requirements/requirements_base.txt +pillow==11.3.0 + # via + # -r setup/requirements/requirements_base.txt + # matplotlib + # torchvision +pycocotools==2.0.10 + # via -r setup/requirements/requirements_nuimages.txt +pyparsing==3.2.3 + # via matplotlib +pyquaternion==0.9.9 + # via -r setup/requirements/requirements_base.txt +python-dateutil==2.9.0.post0 + # via + # matplotlib + # pandas +pytz==2025.2 + # via pandas +scikit-learn==1.6.1 + # via -r 
setup/requirements/requirements_base.txt +scipy==1.13.1 + # via + # -r setup/requirements/requirements_base.txt + # motmetrics + # scikit-learn +shapely==2.0.7 + # via -r setup/requirements/requirements_base.txt +six==1.17.0 + # via python-dateutil +sympy==1.14.0 + # via torch +termcolor==3.1.0 + # via fire +threadpoolctl==3.6.0 + # via scikit-learn +torch==2.8.0 + # via + # -r setup/requirements/requirements_prediction.txt + # torchvision +torchvision==0.23.0 + # via -r setup/requirements/requirements_prediction.txt +tqdm==4.67.1 + # via -r setup/requirements/requirements_base.txt +triton==3.4.0 + # via torch +typing-extensions==4.15.0 + # via torch +tzdata==2025.2 + # via pandas +xmltodict==0.14.2 + # via motmetrics +zipp==3.23.0 + # via + # importlib-metadata + # importlib-resources + +# The following packages are considered to be unsafe in a requirements file: +# setuptools From ffb801a7a2f07503f804af62d7556b53f84a5852 Mon Sep 17 00:00:00 2001 From: ruoning Date: Tue, 26 Aug 2025 11:22:44 +0800 Subject: [PATCH 13/14] Update Python versions supported --- setup/setup.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/setup/setup.py b/setup/setup.py index fcff90142..e7c8f1c44 100644 --- a/setup/setup.py +++ b/setup/setup.py @@ -48,14 +48,15 @@ def get_dirlist(_rootdir): long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/nutonomy/nuscenes-devkit', - python_requires='>=3.6', + python_requires='>=3.9', install_requires=requirements, packages=packages, package_dir={'': 'python-sdk'}, package_data={'': ['*.json']}, include_package_data=True, classifiers=[ - 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.12', 'Operating System :: OS Independent', 'License :: Free for non-commercial use' ], From 40762179892a62d10a4f0add0cf72da9fc6f8c8d Mon Sep 17 00:00:00 2001 From: ruoning Date: Tue, 26 Aug 2025 11:29:53 +0800 Subject: [PATCH 14/14] Remove Jenkinsfile --- setup/Jenkinsfile | 166 ---------------------------------------------- 1 file changed, 166 deletions(-) delete mode 100644 setup/Jenkinsfile diff --git a/setup/Jenkinsfile b/setup/Jenkinsfile deleted file mode 100644 index 43089cc3d..000000000 --- a/setup/Jenkinsfile +++ /dev/null @@ -1,166 +0,0 @@ -@Library('jenkins-shared-libraries') _ - -// Aborts previous builds of the same PR- -if( env.BRANCH_NAME != null && env.BRANCH_NAME != "master" ) { - def buildNumber = env.BUILD_NUMBER as int - if (buildNumber > 1) milestone(buildNumber - 1) - milestone(buildNumber) -} - -def update_deps() { - sh '''#!/usr/bin/env bash - set -e - source activate nuscenes - find . 
-name "*.txt" -exec sed -i -e '/pycocotools/d' {} \\; - pip install --no-cache -r /nuscenes-dev/requirements.txt - conda install --yes pycocotools - ''' -} - -def kubeagent(name, image) { - return jnlp.docker(name: name, - docker_image: image, - cpu: 7, maxcpu: 8, - memory: "8G", maxmemory: "30G", - yaml: """spec: - containers: - - name: docker - volumeMounts: - - mountPath: /data/ - name: nudeep-ci - subPath: data - volumes: - - name: nudeep-ci - persistentVolumeClaim: - claimName: nudeep-ci-gitlab""") -} - -pipeline { - - agent { - kubernetes (jnlp.docker(name: "nuscenes-builder", - cpu: 2, maxcpu: 2, - memory: "2G", maxmemory: "4G")) - } // agent - - environment { - PROD_IMAGE = "233885420847.dkr.ecr.us-east-1.amazonaws.com/nuscenes-test:production" - TEST_IMAGE = "233885420847.dkr.ecr.us-east-1.amazonaws.com/nuscenes-test:1.0" - TEST_IMAGE_3_6 = "${env.TEST_IMAGE}-3.6" - TEST_IMAGE_3_7 = "${env.TEST_IMAGE}-3.7" - NUSCENES = "/data/sets/nuscenes" - NUIMAGES = "/data/sets/nuimages" - PYTHONPATH = "${env.WORKSPACE}/python-sdk" - PYTHONUNBUFFERED = "1" - } - - parameters { - booleanParam(name: 'REBUILD_TEST_IMAGE', defaultValue: false, description: 'rebuild docker test image') - } - - stages { - stage('Build test docker image') { - when { - expression { return params.REBUILD_TEST_IMAGE } - } - failFast true - parallel { - stage('Build 3.6') { - steps { - withAWS(credentials: 'ecr-233') { - container('docker') { - // Build the Docker image, and then run python -m unittest inside - // an activated Conda environment inside of the container. - sh """#!/bin/bash - set -eux - docker build --build-arg PYTHON_VERSION=3.6 -t $TEST_IMAGE_3_6 -f setup/Dockerfile . - `aws ecr get-login --no-include-email --region us-east-1` - docker push $TEST_IMAGE_3_6 - """ - } // container - } - } // steps - } // stage - stage('Build 3.7') { - steps { - withAWS(credentials: 'ecr-233') { - container('docker') { - // Build the Docker image, and then run python -m unittest inside - // an activated Conda environment inside of the container. - sh """#!/bin/bash - set -eux - docker build --build-arg PYTHON_VERSION=3.7 -t $TEST_IMAGE_3_7 -f setup/Dockerfile . - `aws ecr get-login --no-include-email --region us-east-1` - docker push $TEST_IMAGE_3_7 - """ - } // container - } - } // steps - } // stage - } - } - - stage('Tests') { - failFast true - parallel { - stage('Test 3.6') { - agent { - kubernetes(kubeagent("nuscenes-test3.6", - env.TEST_IMAGE_3_6)) - } // agent - - steps { - container('docker') { - update_deps() - sh """#!/bin/bash - set -e - source activate nuscenes && python -m unittest discover python-sdk - bash setup/test_tutorial.sh - """ - } // container - } // steps - } // stage - - stage('Test 3.7') { - agent { - kubernetes(kubeagent("nuscenes-test3.7", - env.TEST_IMAGE_3_7)) - } // agent - - steps { - container('docker') { - update_deps() - sh """#!/bin/bash - set -e - source activate nuscenes && python -m unittest discover python-sdk - bash setup/test_tutorial.sh - """ - } // container - } // steps - } // stage - } // parallel - } // stage - - stage('Deploy') { - when { - branch 'master' - } - - steps { - // TODO: determine where to deploy Docker images. - container('docker'){ - withCredentials([[ - $class: 'AmazonWebServicesCredentialsBinding', - credentialsId: 'aws-ecr-staging', - ]]){ - sh """#!/bin/bash - echo 'Tagging docker image as ready for production. For now, this stage of the pipeline does nothing.' - # docker build -t $PROD_IMAGE . 
- # docker push $PROD_IMAGE - """ - } - } // container('docker') - } //steps - } // stage('Deploy') - } // stages -} // Pipeline
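Taken together, the series can be smoke-tested with a short script against the patched devkit. This is only a rough sketch under the following assumptions: motmetrics==1.4.0 and pandas are installed, python-sdk is on PYTHONPATH, and the 'gt_car_1'/'hyp_7' ids and distances are invented for illustration. It checks the two behaviours the mot.py changes are after: OId/HId stay object (string) columns instead of being coerced to float, and merging with an empty event frame no longer trips over the empty MultiIndex.

    import numpy as np
    from nuscenes.eval.tracking.mot import MOTAccumulatorCustom

    acc = MOTAccumulatorCustom()
    acc.update(['gt_car_1'], ['hyp_7'], np.array([[0.5]]), frameid=0)  # one match
    acc.update(['gt_car_1'], [], np.empty((1, 0)), frameid=1)          # one miss

    events = acc.events
    print(events[['Type', 'OId', 'HId']])
    assert events['OId'].dtype == object and events['HId'].dtype == object  # ids kept as strings

    # Merging with an empty accumulator exercises the empty-MultiIndex path fixed earlier in the series.
    empty = MOTAccumulatorCustom.new_event_dataframe()
    merged = MOTAccumulatorCustom.merge_event_dataframes([events, empty])
    print(len(merged))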