92 changes: 92 additions & 0 deletions autoware_ml/configs/detection2d/dataset/t4dataset/comlops.py
@@ -0,0 +1,92 @@
dataset_version_config_root = "autoware_ml/configs/detection2d/dataset/t4dataset/"
dataset_version_list = [
"comlops",
]

classes = (
"animal",
"bicycle",
"building",
"bus",
"car",
"cone",
"construction",
"crosswalk",
"dashed_lane_marking",
"deceleration_line",
"gate",
"guide_post",
"laneline_dash_white",
"laneline_dash_yellow",
"laneline_solid_green",
"laneline_solid_red",
"laneline_solid_white",
"laneline_solid_yellow",
"marking_arrow",
"marking_character",
"marking_other",
"motorcycle",
"other_obstacle",
"other_pedestrian",
"other_vehicle",
"parking_lot",
"pedestrian",
"pole",
"road",
"road_debris",
"sidewalk",
"sky",
"stopline",
"striped_road_marking",
"traffic_light",
"traffic_sign",
"train",
"truck",
# "unknown",
"vegetation/terrain",
"wall/fence",
)

class_mappings = {
"animal": "animal",
"bicycle": "bicycle",
"building": "building",
"bus": "bus",
"car": "car",
"cone": "cone",
"construction": "construction",
"crosswalk": "crosswalk",
"dashed_lane_marking": "dashed_lane_marking",
"deceleration_line": "deceleration_line",
"gate": "gate",
"guide_post": "guide_post",
"laneline_dash_white": "laneline_dash_white",
"laneline_dash_yellow": "laneline_dash_yellow",
"laneline_solid_green": "laneline_solid_green",
"laneline_solid_red": "laneline_solid_red",
"laneline_solid_white": "laneline_solid_white",
"laneline_solid_yellow": "laneline_solid_yellow",
"marking_arrow": "marking_arrow",
"marking_character": "marking_character",
"marking_other": "marking_other",
"motorcycle": "motorcycle",
"other_obstacle": "other_obstacle",
"other_pedestrian": "other_pedestrian",
"other_vehicle": "other_vehicle",
"parking_lot": "parking_lot",
"pedestrian": "pedestrian",
"pole": "pole",
"road": "road",
"road_debris": "road_debris",
"sidewalk": "sidewalk",
"sky": "sky",
"stopline": "stopline",
"striped_road_marking": "striped_road_marking",
"traffic_light": "traffic_light",
"traffic_sign": "traffic_sign",
"train": "train",
"truck": "truck",
"vegetation/terrain": "vegetation/terrain",
"wall/fence": "wall/fence",
"unknown": "unknown",
}
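Note (not part of the diff): a minimal sketch of how `classes` and `class_mappings` above are consumed, mirroring the lookup added in tools/detection2d/create_data_t4dataset.py further down; it assumes both variables are in scope, and the raw category names are illustrative only.

# A raw annotation category is first mapped, then filtered against `classes`;
# the tuple index of the mapped name becomes the integer training label.
for raw_name in ["car", "unknown", "truck"]:  # hypothetical raw categories
    mapped = class_mappings.get(raw_name)
    if mapped not in classes:
        continue  # e.g. "unknown" is mapped but commented out of `classes`, so it is dropped
    print(raw_name, "->", mapped, "label:", classes.index(mapped))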
7 changes: 6 additions & 1 deletion autoware_ml/detection2d/metrics/__init__.py
@@ -1,3 +1,8 @@
from mmengine.registry import METRICS
from mmseg.evaluation.metrics import IoUMetric

from .tlr_metrics import TLRFineDetectorEvaluator

__all__ = ["TLRFineDetectorEvaluator"]
METRICS.register_module()(IoUMetric)

__all__ = ["TLRFineDetectorEvaluator", "IoUMetric"]
162 changes: 154 additions & 8 deletions tools/detection2d/create_data_t4dataset.py
@@ -4,8 +4,9 @@
import re
import warnings
from dataclasses import dataclass, field
from typing import Dict, List
from typing import Dict, List, Optional

import cv2
import mmengine
import numpy as np
import yaml
@@ -29,6 +30,8 @@ class DataEntry:
width: int
height: int
instances: List[Instance] = field(default_factory=list)
surfaces: List[Instance] = field(default_factory=list)
gt_semantic_seg: Optional[str] = None


@dataclass
@@ -37,28 +40,145 @@ class DetectionData:
data_list: List[DataEntry] = field(default_factory=list)


def save_semantic_mask_png(semantic_mask, output_path):
    # Save a single-channel semantic mask as an 8-bit grayscale PNG.
    os.makedirs(os.path.dirname(output_path), exist_ok=True)
    cv2.imwrite(output_path, semantic_mask.astype(np.uint8))


def convert_entry_instances_to_semantic(entry, allowed_classes):
height, width = entry.height, entry.width
# Initialize with ignore index (usually 255)
semantic_mask = np.full((height, width), 255, dtype=np.uint8)

# Draw surfaces first (background/stuff) if any exist
for surf in entry.surfaces:
cls_id = surf.bbox_label
for poly_flat in surf.mask:
if len(poly_flat) < 6:
continue
poly = np.array(poly_flat, dtype=np.int32).reshape(-1, 2)
cv2.fillPoly(semantic_mask, [poly], color=cls_id)

# Draw instances (objects/things) on top
for inst in entry.instances:
cls_id = inst.bbox_label
for poly_flat in inst.mask:
if len(poly_flat) < 6:
continue
poly = np.array(poly_flat, dtype=np.int32).reshape(-1, 2)
cv2.fillPoly(semantic_mask, [poly], color=cls_id)

return semantic_mask


def generate_colormap(num_classes):
np.random.seed(42)
colors = np.random.randint(0, 256, size=(num_classes, 3), dtype=np.uint8)
return colors


def save_colored_mask(semantic_mask, output_path, colormap):
height, width = semantic_mask.shape
color_mask = np.zeros((height, width, 3), dtype=np.uint8)

for cls_id, color in enumerate(colormap):
color_mask[semantic_mask == cls_id] = color

os.makedirs(os.path.dirname(output_path), exist_ok=True)
cv2.imwrite(output_path, color_mask[:, :, ::-1])  # reverse channels: colormap is RGB, OpenCV writes BGR


def update_detection_data_annotations(
data_list: Dict[str, DataEntry],
object_ann: List[ObjectAnn],
surface_ann: List[ObjectAnn],
attributes: Dict[str, str],
categories: Dict[str, str],
class_mappings: Dict[str, str],
allowed_classes: List[str],
root_path: str,
save_colored_masks: bool = False,
save_semantic_segmentation: bool = False,
) -> None:
    """Attach object (and optionally surface) annotations to each DataEntry and,
    when save_semantic_segmentation is set, rasterize and save per-image semantic masks."""
# Instance (Objects)
for ann in object_ann:
class_name = class_mappings[categories[ann.category_token]]
if ann.sample_data_token not in data_list:
continue
class_name = class_mappings.get(categories[ann.category_token], None)
if class_name not in allowed_classes:
continue
bbox_label = allowed_classes.index(class_name)

# Decode the annotation mask into a binary uint8 image
binary = ann.mask.decode().astype(np.uint8)

contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

flat_polygons = []
for cnt in contours:
if len(cnt) >= 3:
poly = cnt.reshape(-1, 2).tolist() # [[x1,y1], [x2,y2], ...]
poly_flat = [coord for point in poly for coord in point]
flat_polygons.append(poly_flat)

instance = Instance(
bbox=ann.bbox,
bbox_label=bbox_label,
# TODO(someone): Please check this operation is correct!!!
mask=[[int(x), int(y)] for y, x in zip(*np.where(ann.mask.decode() == 1))],
mask=flat_polygons,
extra_anns=[attributes[x] for x in ann.attribute_tokens],
)
data_list[ann.sample_data_token].instances.append(instance)

# Surface (background) annotations: processed only when save_semantic_segmentation is enabled
if save_semantic_segmentation:
for ann in surface_ann:
class_name = class_mappings.get(categories[ann.category_token], None)

if class_name not in allowed_classes:
continue

bbox_label = allowed_classes.index(class_name)

binary = ann.mask.decode().astype(np.uint8)
contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

flat_polygons = []
for cnt in contours:
if len(cnt) >= 3:
poly = cnt.reshape(-1, 2).tolist()
poly_flat = [coord for point in poly for coord in point]
flat_polygons.append(poly_flat)

surface_instance = Instance(
bbox=ann.bbox if hasattr(ann, "bbox") else [0, 0, 0, 0],
bbox_label=bbox_label,
mask=flat_polygons,
extra_anns=[],
)

if ann.sample_data_token in data_list:
data_list[ann.sample_data_token].surfaces.append(surface_instance)

# Generate and save semantic mask images
colormap = generate_colormap(len(allowed_classes) + 1)
for key, entry in data_list.items():
gray_mask = convert_entry_instances_to_semantic(entry, allowed_classes)
img_parts = entry.img_path.split("/")
mask_name = f"{img_parts[-4]}_{img_parts[-2]}_{img_parts[-1]}".replace(".jpg", ".png")
mask_file = f"{root_path}/semseg/masks/{mask_name}"
save_semantic_mask_png(gray_mask, mask_file)
entry.gt_semantic_seg = mask_file

if save_colored_masks:
    color_name = f"{img_parts[-5]}_{img_parts[-2]}_{img_parts[-1]}".replace(".jpg", ".png")
    color_file = f"{root_path}/semseg/masks_color/{color_name}"
    save_colored_mask(gray_mask, color_file, colormap)


def get_scene_root_dir_path(
root_path: str,
@@ -93,6 +213,16 @@ def parse_args() -> argparse.Namespace:
help="Will resort to using the available dataset version if the one specified in the config file does not exist.",
)
parser.add_argument("-o", "--out_dir", type=str, required=True, help="output directory of info file")
parser.add_argument(
"--save_semantic_segmentation",
action="store_true",
help="Whether to process surface annotations and save semantic segmentation masks.",
)
parser.add_argument(
"--save_colored_masks",
action="store_true",
help="Whether to save colored semantic masks.",
)
return parser.parse_args()


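Note (not part of the diff): a self-contained sanity check of the findContours -> flat-polygon -> fillPoly round trip that update_detection_data_annotations and convert_entry_instances_to_semantic rely on, using a synthetic binary mask. It assumes only numpy and opencv-python, with the OpenCV 4.x two-value findContours return used in the code above.

import cv2
import numpy as np

binary = np.zeros((64, 64), dtype=np.uint8)
binary[10:30, 20:50] = 1  # synthetic "instance" mask

contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
flat_polygons = []
for cnt in contours:
    if len(cnt) >= 3:
        flat_polygons.append([int(c) for point in cnt.reshape(-1, 2).tolist() for c in point])

# Rasterize the polygons back the same way convert_entry_instances_to_semantic does.
restored = np.zeros_like(binary)
for poly_flat in flat_polygons:
    poly = np.array(poly_flat, dtype=np.int32).reshape(-1, 2)
    cv2.fillPoly(restored, [poly], color=1)

print("pixels differing after round trip:", int(np.count_nonzero(restored != binary)))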
@@ -127,6 +257,7 @@ def assign_ids_and_save_detection_data(
}
for instance in entry.instances
],
"seg_map_path": entry.gt_semantic_seg,
}
for i, entry in enumerate(detection_data.data_list)
],
@@ -158,20 +289,31 @@ def main() -> None:
for scene_id in dataset_list_dict.get(split, []):
print_log(f"Creating data info for scene: {scene_id}")

t4_dataset_id, t4_dataset_version_id = scene_id.split(" ")
if os.path.exists(osp.join(args.root_path, t4_dataset_id, t4_dataset_version_id)):
parts = scene_id.split(" ")
if len(parts) == 2:
t4_dataset_id, t4_dataset_version_id = parts
else:
t4_dataset_id = scene_id.strip()
t4_dataset_version_id = None

if t4_dataset_version_id and os.path.exists(
osp.join(args.root_path, t4_dataset_id, t4_dataset_version_id)
):
scene_root_dir_path = osp.join(args.root_path, t4_dataset_id, t4_dataset_version_id)
elif args.use_available_dataset_version:
elif os.path.exists(osp.join(args.root_path, dataset_version, t4_dataset_id)):
print(
"Warning: The version of the dataset specified in the config file does not exist. Will use whatever is available locally."
f"Warning: {t4_dataset_id} has no t4_dataset_version_id or the specified version is missing. "
"Using the available version on disk."
)

scene_root_dir_path = get_scene_root_dir_path(args.root_path, dataset_version, t4_dataset_id)
else:
raise ValueError(f"{t4_dataset_id} does not exist.")

t4 = Tier4(
data_root=scene_root_dir_path,
verbose=False,
version="annotation",
)

data_list: Dict[str, DataEntry] = {}
Expand All @@ -192,10 +334,14 @@ def main() -> None:
update_detection_data_annotations(
data_list,
t4.object_ann,
t4.surface_ann,
attributes,
categories,
cfg.class_mappings,
cfg.classes,
args.root_path,
save_colored_masks=args.save_colored_masks,
save_semantic_segmentation=args.save_semantic_segmentation,
)
data_infos[split].extend(data_list.values())

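Note (not part of the diff): a minimal sketch of reading back one of the generated masks; the path is hypothetical, and it assumes the grayscale PNGs written under {root_path}/semseg/masks/ store per-pixel indices into cfg.classes with 255 as the ignore value, as in convert_entry_instances_to_semantic above.

import cv2
import numpy as np

classes = ("animal", "bicycle", "building")  # in practice, cfg.classes from comlops.py
mask = cv2.imread("data/semseg/masks/scene_CAM_FRONT_0.png", cv2.IMREAD_GRAYSCALE)  # hypothetical path
if mask is not None:
    present = [int(v) for v in np.unique(mask) if v != 255]
    print("label indices present:", present)
    print("class names:", [classes[i] for i in present if i < len(classes)])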