diff --git a/.gitignore b/.gitignore index 5d34da6bb..b62a4a058 100644 --- a/.gitignore +++ b/.gitignore @@ -168,4 +168,8 @@ cython_debug/ .vscode/ setup/zed_sdk.run -cuda/ \ No newline at end of file + +cuda/ +#Ignore ROS bags +*.bag + diff --git a/GEMstack/offboard/calibration/capture_lidar_zed.py b/GEMstack/offboard/calibration/capture_lidar_zed.py index 21814b793..ee11ebb1a 100644 --- a/GEMstack/offboard/calibration/capture_lidar_zed.py +++ b/GEMstack/offboard/calibration/capture_lidar_zed.py @@ -1,99 +1,107 @@ -# ROS Headers +#!/usr/bin/env python3 +""" +Capture ROS messages and save each run in its own time-stamped folder. +""" + import rospy -from sensor_msgs.msg import Image,PointCloud2 +from sensor_msgs.msg import Image, PointCloud2 import sensor_msgs.point_cloud2 as pc2 import ctypes import struct - -# OpenCV and cv2 bridge import cv2 from cv_bridge import CvBridge import numpy as np import os +import datetime lidar_points = None camera_image = None depth_image = None bridge = CvBridge() -def lidar_callback(lidar : PointCloud2): +def lidar_callback(lidar: PointCloud2): global lidar_points lidar_points = lidar -def camera_callback(img : Image): +def camera_callback(img: Image): global camera_image camera_image = img -def depth_callback(img : Image): +def depth_callback(img: Image): global depth_image depth_image = img -def pc2_to_numpy(pc2_msg, want_rgb = False): +def pc2_to_numpy(pc2_msg, want_rgb=False): gen = pc2.read_points(pc2_msg, skip_nans=True) if want_rgb: - xyzpack = np.array(list(gen),dtype=np.float32) + xyzpack = np.array(list(gen), dtype=np.float32) if xyzpack.shape[1] != 4: raise ValueError("PointCloud2 does not have points") - xyzrgb = np.empty((xyzpack.shape[0],6)) - xyzrgb[:,:3] = xyzpack[:,:3] - for i,x in enumerate(xyzpack): - rgb = x[3] - # cast float32 to int so that bitwise operations are possible - s = struct.pack('>f' ,rgb) - i = struct.unpack('>l',s)[0] - # you can get back the float value by the inverse operations - pack = 
ctypes.c_uint32(i).value - r = (pack & 0x00FF0000)>> 16 - g = (pack & 0x0000FF00)>> 8 + xyzrgb = np.empty((xyzpack.shape[0], 6)) + xyzrgb[:, :3] = xyzpack[:, :3] + for i, x in enumerate(xyzpack): + rgb = x[3] + s = struct.pack('>f', rgb) + i_val = struct.unpack('>l', s)[0] + pack = ctypes.c_uint32(i_val).value + r = (pack & 0x00FF0000) >> 16 + g = (pack & 0x0000FF00) >> 8 b = (pack & 0x000000FF) - #r,g,b values in the 0-255 range - xyzrgb[i,3:] = (r,g,b) + xyzrgb[i, 3:] = (r, g, b) return xyzrgb else: - return np.array(list(gen),dtype=np.float32)[:,:3] + return np.array(list(gen), dtype=np.float32)[:, :3] -def save_scan(lidar_fn,color_fn,depth_fn): - print("Saving scan to",lidar_fn,color_fn,depth_fn) - pc = pc2_to_numpy(lidar_points,want_rgb=False) - np.savez(lidar_fn,pc) - cv2.imwrite(color_fn,bridge.imgmsg_to_cv2(camera_image)) +def save_scan(lidar_fn, color_fn, depth_fn): + print("Saving scan to", lidar_fn, color_fn, depth_fn) + pc = pc2_to_numpy(lidar_points, want_rgb=False) + np.savez(lidar_fn, pc) + cv2.imwrite(color_fn, bridge.imgmsg_to_cv2(camera_image)) dimage = bridge.imgmsg_to_cv2(depth_image) dimage_non_nan = dimage[np.isfinite(dimage)] - print("Depth range",np.min(dimage_non_nan),np.max(dimage_non_nan)) - dimage = np.nan_to_num(dimage,nan=0,posinf=0,neginf=0) - dimage = (dimage/4000*0xffff) - print("Depth pixel range",np.min(dimage),np.max(dimage)) + print("Depth range", np.min(dimage_non_nan), np.max(dimage_non_nan)) + dimage = np.nan_to_num(dimage, nan=0, posinf=0, neginf=0) + dimage = (dimage / 4000 * 0xffff) + print("Depth pixel range", np.min(dimage), np.max(dimage)) dimage = dimage.astype(np.uint16) - cv2.imwrite(depth_fn,dimage) + cv2.imwrite(depth_fn, dimage) + +def main(base_folder='data', start_index=1): + # unique folder for this capture run based on the current date/time. 
+ run_timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + run_folder = os.path.join(base_folder, run_timestamp) + os.makedirs(run_folder, exist_ok=True) + print("Capture run folder:", run_folder) + + rospy.init_node("capture_lidar_zed", disable_signals=True) + rospy.Subscriber("/lidar1/velodyne_points", PointCloud2, lidar_callback) + rospy.Subscriber("/zed2/zed_node/rgb/image_rect_color", Image, camera_callback) + rospy.Subscriber("/zed2/zed_node/depth/depth_registered", Image, depth_callback) -def main(folder='data',start_index=1): - rospy.init_node("capture_lidar_zed",disable_signals=True) - lidar_sub = rospy.Subscriber("/lidar1/velodyne_points", PointCloud2, lidar_callback) - camera_sub = rospy.Subscriber("/zed2/zed_node/rgb/image_rect_color", Image, camera_callback) - depth_sub = rospy.Subscriber("/zed2/zed_node/depth/depth_registered", Image, depth_callback) index = start_index print("Press any key to:") - print(" store lidar point clouds as npz") - print(" store color images as png") - print(" store depth images (m scaled by 0xffff/4000) as 16-bit tif") + print(" - store lidar point clouds as npz") + print(" - store color images as png") + print(" - store depth images (m scaled by 0xffff/4000) as 16-bit tif") print("Press Escape or Ctrl+C to quit") - while True: - if camera_image: - cv2.imshow("result",bridge.imgmsg_to_cv2(camera_image)) + + while not rospy.is_shutdown(): + if camera_image is not None: + cv2.imshow("result", bridge.imgmsg_to_cv2(camera_image)) key = cv2.waitKey(30) if key == -1: - #nothing - pass + pass # No key pressed. elif key == 27: - #escape - break + break # Escape key pressed. 
else: if lidar_points is None or camera_image is None or depth_image is None: print("Missing some messages?") else: - files = [os.path.join(folder,'lidar{}.npz'.format(index)), - os.path.join(folder,'color{}.png'.format(index)), - os.path.join(folder,'depth{}.tif'.format(index))] + files = [ + os.path.join(run_folder, 'lidar{}.npz'.format(index)), + os.path.join(run_folder, 'color{}.png'.format(index)), + os.path.join(run_folder, 'depth{}.tif'.format(index)) + ] save_scan(*files) index += 1 @@ -105,4 +113,4 @@ def main(folder='data',start_index=1): folder = sys.argv[1] if len(sys.argv) >= 3: start_index = int(sys.argv[2]) - main(folder,start_index) + main(folder, start_index) diff --git a/GEMstack/offboard/log_management/s3.py b/GEMstack/offboard/log_management/s3.py new file mode 100644 index 000000000..1a2550d00 --- /dev/null +++ b/GEMstack/offboard/log_management/s3.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python3 +""" +This client interacts with S3 to upload (push) or download (pull) a specific folder. + +Before running this client make sure you have defined .env in root with following values: +AWS_ACCESS_KEY_ID=example +AWS_SECRET_ACCESS_KEY=example +AWS_DEFAULT_REGION=us-east-1 + +Requires: + pip install boto3 python-dotenv + +Example Usage: + + # Push a folder named "2025-02-12_15-30-00" inside "data/" to S3: + python3 GEMStack/offboard/log_management/s3.py \ + --action push \ + --folder 2025-02-12_15-30-00 \ + --bucket cs588 \ + --s3-prefix captures + + # Pull (download) that same folder from S3 into local "download/" directory: + python3 GEMStack/offboard/log_management/s3.py \ + --action pull \ + --folder 2025-02-12_15-30-00 \ + --bucket cs588 \ + --s3-prefix captures \ + --dest-dir download +""" + +import argparse +import boto3 +import os +import sys +from dotenv import load_dotenv + +def get_s3_client(): + """ + Initializes the S3 client using AWS credentials from environment variables. 
+ Expects: + - AWS_ACCESS_KEY_ID + - AWS_SECRET_ACCESS_KEY + - AWS_DEFAULT_REGION + Exits if any of these are missing. + """ + # load environment variables from .env file + # override in case local config exists for AWS + load_dotenv(override=True) + + access_key = os.environ.get('AWS_ACCESS_KEY_ID') + secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY') + region = os.environ.get('AWS_DEFAULT_REGION') + + if not access_key or not secret_key or not region: + sys.exit( + "Error: AWS credentials not set. Please set AWS_ACCESS_KEY_ID, " + "AWS_SECRET_ACCESS_KEY, and AWS_DEFAULT_REGION environment variables (in .env)." + ) + + print(access_key, secret_key, region) + return boto3.client( + 's3', + aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + region_name=region + ) + +def check_s3_connection(s3_client, bucket): + """ + Verifies that we can connect to S3 and access the specified bucket. + """ + try: + s3_client.head_bucket(Bucket=bucket) + print(f"Connection check: Successfully accessed bucket '{bucket}'") + except Exception as e: + sys.exit(f"Error: Could not connect to S3 bucket '{bucket}': {e}") + +def push_folder_to_s3(folder_path, bucket, s3_prefix): + """ + Walks through the folder and uploads each file to S3 under the given prefix. + Files are stored under the key: s3_prefix/folder_name/. + """ + s3_client = get_s3_client() + check_s3_connection(s3_client, bucket) + + folder_name = os.path.basename(folder_path) + files_uploaded = 0 + + for root, _, files in os.walk(folder_path): + for file in files: + local_path = os.path.join(root, file) + # determine the file's path relative to the folder being pushed. 
+ relative_path = os.path.relpath(local_path, folder_path) + s3_key = os.path.join(s3_prefix, folder_name, relative_path) + try: + print(f"Uploading {local_path} to s3://{bucket}/{s3_key}") + s3_client.upload_file(local_path, bucket, s3_key) + files_uploaded += 1 + except Exception as e: + print(f"Error uploading {local_path}: {e}") + + if files_uploaded == 0: + sys.exit(f"Error: No files were uploaded from folder: {folder_path}") + +def pull_folder_from_s3(bucket, s3_prefix, folder_name, dest_dir): + """ + Downloads all objects from S3 whose key begins with s3_prefix/folder_name. + The folder will be recreated under `dest_dir/folder_name`. + """ + s3_client = get_s3_client() + check_s3_connection(s3_client, bucket) + + # this prefix is what we'll look for in the bucket. + prefix = os.path.join(s3_prefix, folder_name) + + paginator = s3_client.get_paginator('list_objects_v2') + pages = paginator.paginate(Bucket=bucket, Prefix=prefix) + + found_files = False + for page in pages: + if 'Contents' not in page: + continue + for obj in page['Contents']: + key = obj['Key'] + # if the object key is exactly the "folder" (a directory placeholder), skip it. + # e.g. sometimes you can have an empty key or just a trailing slash. + if key.endswith("/"): + continue + + found_files = True + # figure out the relative path to recreate the same structure locally. + relative_path = os.path.relpath(key, prefix) + local_path = os.path.join(dest_dir, folder_name, relative_path) + + # ensure the local directory exists. 
+ os.makedirs(os.path.dirname(local_path), exist_ok=True) + + print(f"Downloading s3://{bucket}/{key} to {local_path}") + try: + s3_client.download_file(bucket, key, local_path) + except Exception as e: + print(f"Error downloading s3://{bucket}/{key}: {e}") + + if not found_files: + sys.exit(f"Error: No files found in bucket '{bucket}' with prefix '{prefix}'") + +def main(): + parser = argparse.ArgumentParser( + description="Push or pull a specific folder to/from an S3 bucket." + ) + parser.add_argument("--action", choices=["push", "pull"], required=True, + help="Choose whether to push (upload) or pull (download) a folder.") + parser.add_argument("--folder", required=True, + help="Folder name (e.g. 2025-02-12_15-30-00)") + parser.add_argument("--base-dir", default="data", + help="Local base directory where capture runs are stored (for push).") + parser.add_argument("--dest-dir", default="download", + help="Local directory to place the downloaded folder (for pull).") + parser.add_argument("--bucket", required=True, + help="S3 bucket name.") + parser.add_argument("--s3-prefix", default="captures", + help="S3 prefix (folder) where data is stored or will be uploaded.") + args = parser.parse_args() + + if args.action == "push": + # pushing from local 'base_dir/folder' to S3 + folder_path = os.path.join(args.base_dir, args.folder) + if not os.path.exists(folder_path): + sys.exit(f"Error: Folder does not exist: {folder_path}") + + # check if the folder is empty. 
+ folder_empty = True + for _, _, files in os.walk(folder_path): + if files: + folder_empty = False + break + if folder_empty: + sys.exit(f"Error: Folder is empty: {folder_path}") + + push_folder_to_s3(folder_path, args.bucket, args.s3_prefix) + + elif args.action == "pull": + # pulling from S3 (prefix/folder) to local 'dest_dir/folder' + pull_folder_from_s3(args.bucket, args.s3_prefix, args.folder, args.dest_dir) + +if __name__ == '__main__': + main() diff --git a/GEMstack/scripts/convert_to_rosbag.py b/GEMstack/scripts/convert_to_rosbag.py new file mode 100644 index 000000000..791ef6e0a --- /dev/null +++ b/GEMstack/scripts/convert_to_rosbag.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 + +import os +import rosbag +import cv2 +import numpy as np +from cv_bridge import CvBridge +from sensor_msgs.msg import Image, PointCloud2 +import sensor_msgs.point_cloud2 as pc2 +import glob +from datetime import datetime +import time +import rospy +import std_msgs.msg +import argparse + +def get_matching_files(dir): + """Get files that have corresponding data in all three formats""" + # Get sorted lists of files + png_files = sorted(glob.glob(os.path.join(dir, 'color*.png'))) + tif_files = sorted(glob.glob(os.path.join(dir, 'depth*.tif'))) + npz_files = sorted(glob.glob(os.path.join(dir, 'lidar*.npz'))) + + print(f"Found {len(png_files)} PNG files") + print(f"Found {len(tif_files)} TIF files") + print(f"Found {len(npz_files)} NPZ files") + + # Extract numbers from filenames + def get_number(filename): + return int(''.join(filter(str.isdigit, os.path.basename(filename)))) + + # Create dictionaries with numbers as keys + png_dict = {get_number(f): f for f in png_files} + tif_dict = {get_number(f): f for f in tif_files} + npz_dict = {get_number(f): f for f in npz_files} + + # Find common numbers + common_numbers = set(png_dict.keys()) & set(tif_dict.keys()) & set(npz_dict.keys()) + print(f"\nFound {len(common_numbers)} matching frame numbers") + + # Get matching files in sorted 
order + common_numbers = sorted(common_numbers) + matching_pngs = [png_dict[n] for n in common_numbers] + matching_tifs = [tif_dict[n] for n in common_numbers] + matching_npzs = [npz_dict[n] for n in common_numbers] + + return matching_pngs, matching_tifs, matching_npzs + +def create_rosbag_from_data( + dir, + output_bag_path, + frame_interval +): + # Initialize CV bridge + bridge = CvBridge() + + # Initialize ROS node + rospy.init_node('bag_creator', anonymous=True) + + # Get matching files + png_files, tif_files, npz_files = get_matching_files(dir) + + print(f"Found {len(png_files)} matching files in all three formats") + + if len(png_files) == 0: + raise ValueError("No matching files found!") + + # Create a new bag file + with rosbag.Bag(output_bag_path, 'w') as bag: + # Start time for the messages + start_time = rospy.Time.from_sec(time.time()) + message_count = 0 + + for idx, (png_file, tif_file, npz_file) in enumerate(zip(png_files, tif_files, npz_files)): + # Calculate timestamp for this frame + timestamp = rospy.Time.from_sec(start_time.to_sec() + idx * (1/frame_interval)) + + try: + # Process PNG image + png_img = cv2.imread(png_file) + if png_img is not None: + png_msg = bridge.cv2_to_imgmsg(png_img, encoding="bgr8") + png_msg.header.stamp = timestamp + png_msg.header.frame_id = "camera_rgb" + bag.write('/camera/rgb/image_raw', png_msg, timestamp) + message_count += 1 + else: + print(f"Warning: Could not read PNG file: {png_file}") + + # Process TIF image + tif_img = cv2.imread(tif_file, -1) # -1 to preserve original depth + if tif_img is not None: + tif_msg = bridge.cv2_to_imgmsg(tif_img, encoding="passthrough") + tif_msg.header.stamp = timestamp + tif_msg.header.frame_id = "camera_depth" + bag.write('/camera/depth/image_raw', tif_msg, timestamp) + message_count += 1 + else: + print(f"Warning: Could not read TIF file: {tif_file}") + + # Process pointcloud NPZ + pc_data = np.load(npz_file) + points = pc_data['arr_0'] # Using 'arr_0' based on the provided 
files' + + # Create pointcloud message + header = std_msgs.msg.Header() + header.stamp = timestamp + header.frame_id = "velodyne" + + fields = [ + pc2.PointField('x', 0, pc2.PointField.FLOAT32, 1), + pc2.PointField('y', 4, pc2.PointField.FLOAT32, 1), + pc2.PointField('z', 8, pc2.PointField.FLOAT32, 1) + ] + + pc_msg = pc2.create_cloud(header, fields, points) + bag.write('/velodyne_points', pc_msg, timestamp) + message_count += 1 + + except Exception as e: + print(f"Error processing frame {idx}: {str(e)}") + continue + + if idx % 10 == 0: + print(f"Processed {idx} frames") + + print(f"Total messages written to bag: {message_count}") + +if __name__ == "__main__": + #This initializes the parser and sets the parameters that the user will be asked to provide in the terminal + parser = argparse.ArgumentParser( + description='A script to convert data gathered by cameras and lidar sensors to ros .bag messages' + ) + + parser.add_argument( + 'files_directory', type = str, + help = 'The path to the directory with all the rgb images, depth maps, and point clouds. 
The file formats must be PNG, TIF, and NPZ, respectively' + ) + + parser.add_argument( + 'output_bag', type = str, + help = 'The path to the directory where the bag file will be saved' + ) + + parser.add_argument( + 'rate', type = int, + help = 'The rate at which the data is collected in Hz' + ) + args = parser.parse_args() + directory = args.files_directory + output_bag = args.output_bag + rate = args.rate + # Example directories below: + #directory = "/home/username/host/CS588/GEMstack/data/data_sample/data/" + #output_bag = "/home/username/host/CS588/GEMstack/data/data_sample/data/output.bag" + + try: + create_rosbag_from_data( + directory, + output_bag, + rate + ) + print("Successfully created ROS bag file!") + + # Verify bag contents + print("\nBag file contents:") + info_cmd = f"rosbag info {output_bag}" + os.system(info_cmd) + + except Exception as e: + print(f"Error creating ROS bag: {str(e)}") diff --git a/GEMstack/scripts/visualization.py b/GEMstack/scripts/visualization.py new file mode 100644 index 000000000..3ff9dada7 --- /dev/null +++ b/GEMstack/scripts/visualization.py @@ -0,0 +1,191 @@ +import json +import os +import time +from klampt import vis +from klampt.model.trajectory import Trajectory +import matplotlib.pyplot as plt +from ..onboard.visualization.klampt_visualization import KlamptVisualization +from ..onboard.visualization.mpl_visualization import MPLVisualization + +LOG_DIR = "logs" +BEHAVIOR_FILE = "behavior.json" + +def select_log_folder(): + log_folders = [f for f in os.listdir(LOG_DIR) if os.path.isdir(os.path.join(LOG_DIR, f))] + if not log_folders: + print("\033[91mNo log folders found.\033[0m") + return None + + print("\n\033[94mAvailable log folders:\033[0m") + for idx, folder in enumerate(log_folders): + print(f"{idx + 1}. 
{folder}") + + while True: + try: + choice = int(input("\n\033[93mEnter the number of the log folder to use:\033[0m ")) - 1 + if 0 <= choice < len(log_folders): + return os.path.join(LOG_DIR, log_folders[choice]) + print("\033[91mInvalid selection. Please enter a valid number.\033[0m") + except ValueError: + print("\033[91mPlease enter a number.\033[0m") + +def select_visualizer(): + print("\n\033[94mChoose a visualization method:\033[0m") + print("1. Klampt (3D visualization)") + print("2. MPL (Matplotlib 2D visualization)") + + while True: + try: + choice = int(input("\n\033[93mEnter 1 or 2:\033[0m ")) + if choice in [1, 2]: + return choice + print("\033[91mInvalid selection. Please enter 1 or 2.\033[0m") + except ValueError: + print("\033[91mPlease enter a valid number.\033[0m") + +# Load and extract pedestrian and vehicle data +def extract_behavior_data(log_dir): + behavior_path = os.path.join(log_dir, BEHAVIOR_FILE) + if not os.path.exists(behavior_path): + print(f"\033[91mError: {behavior_path} not found.\033[0m") + return None, None, None, None, None, None, None, None + + with open(behavior_path, "r") as f: + data = [json.loads(line) for line in f] + + pedestrian_paths = {} + pedestrian_times = {} + vehicle_path = [] + vehicle_times = [] + speeds = [] + accelerators = [] + brakes = [] + steering_angles = [] + + for entry in data: + time_stamp = entry.get("time", 0) + + # Extract pedestrian data + if "agents" in entry: + for agent_id, agent in entry["agents"].items(): + agent_data = agent.get("data", {}) + if agent_data.get("type") == 3: # Type 3 = pedestrian + pose = agent_data.get("pose", {}) + x, y = pose.get("x", 0), pose.get("y", 0) + + if agent_id not in pedestrian_paths: + pedestrian_paths[agent_id] = [] + pedestrian_times[agent_id] = [] + + pedestrian_paths[agent_id].append((x, y)) + pedestrian_times[agent_id].append(time_stamp) + + # Extract vehicle data + if "vehicle" in entry: + vehicle_data = entry["vehicle"].get("data", {}) + pose = 
vehicle_data.get("pose", {}) + x, y = pose.get("x", 0), pose.get("y", 0) + + vehicle_path.append((x, y)) + vehicle_times.append(time_stamp) + speeds.append(vehicle_data.get("v", 0)) + accelerators.append(vehicle_data.get("accelerator_pedal_position", 0)) + brakes.append(vehicle_data.get("brake_pedal_position", 0)) + steering_angles.append(vehicle_data.get("steering_wheel_angle", 0)) + + return pedestrian_paths, pedestrian_times, vehicle_path, vehicle_times, speeds, accelerators, brakes, steering_angles + +# Klampt 3D Visualization +def visualize_with_klampt(pedestrian_paths, pedestrian_times, vehicle_path): + """Uses Klampt to visualize pedestrian and vehicle paths.""" + vis.init("PyQt6") + vis.setWindowTitle("Pedestrian and Vehicle Path Visualization") + + klampt_vis = KlamptVisualization(vehicle_interface=None, rate=20.0) + + for agent_id, path in pedestrian_paths.items(): + if len(path) < 2: + continue + + times = pedestrian_times[agent_id] + path_3d = [[float(x), float(y), 0.0] for x, y in path] + + trajectory = Trajectory(times, path_3d) + vis.add(agent_id, trajectory, color=(0, 1, 0, 1), width=2) + + # if vehicle_path: + # vehicle_x, vehicle_y = zip(*vehicle_path) + # vehicle_tuples = [[float(x), float(y), 0.0] for x, y in zip(vehicle_x, vehicle_y)] + # vis.add("Vehicle Path", vehicle_tuples, color=(1, 0, 0, 1), width=2) + + klampt_vis.initialize() + vis.show() + + while vis.shown(): + time.sleep(0.05) + + klampt_vis.cleanup() + vis.kill() + +# MPL 2D Visualization +def visualize_with_mpl(pedestrian_paths, pedestrian_times, vehicle_path, vehicle_data): + vis = MPLVisualization(rate=10.0) + vis.initialize() + + fig, axs = vis.fig, vis.axs + axs[0].clear() + axs[1].clear() + + # Left Plot: Pedestrian & Vehicle Trajectories + axs[0].set_xlabel("X Position") + axs[0].set_ylabel("Y Position") + axs[0].set_title("Pedestrian & Vehicle Trajectories") + + for agent_id, path in pedestrian_paths.items(): + path_x, path_y = zip(*path) + axs[0].plot(path_x, path_y, 
linestyle='-', marker='o', label=f"Pedestrian {agent_id}") + + # if vehicle_path: + # vehicle_x, vehicle_y = zip(*vehicle_path) + # axs[0].plot(vehicle_x, vehicle_y, linestyle='-', marker='s', color='red', label="Vehicle Path") + + axs[0].legend() + + # Right Plot: Vehicle Controls Over Time + times, speeds, accelerators, brakes, steering_angles = vehicle_data + axs[1].set_xlabel("Time (s)") + axs[1].set_title("Vehicle Controls Over Time") + + axs[1].plot(times, speeds, label="Speed (m/s)", color="blue") + axs[1].plot(times, accelerators, label="Accelerator (%)", color="green") + axs[1].plot(times, brakes, label="Brake (%)", color="red") + axs[1].plot(times, steering_angles, label="Steering Angle", color="purple") + axs[1].legend() + + fig.canvas.draw_idle() + fig.canvas.flush_events() + plt.show(block=True) + + vis.cleanup() + +# Main Execution +if __name__ == "__main__": + visualizer_choice = select_visualizer() + selected_log_folder = select_log_folder() + + if selected_log_folder: + print(f"\n\033[94mLoading behavior data from:\033[0m {selected_log_folder}") + data = extract_behavior_data(selected_log_folder) + + pedestrian_paths, pedestrian_times, vehicle_path, vehicle_times, speeds, accelerators, brakes, steering_angles = data + vehicle_data = (vehicle_times, speeds, accelerators, brakes, steering_angles) + + if pedestrian_paths or vehicle_path: + if visualizer_choice == 1: + print("\033[92mUsing Klampt for visualization...\033[0m") + visualize_with_klampt(pedestrian_paths, pedestrian_times, vehicle_path) + else: + print("\033[92mUsing MPL (Matplotlib) for visualization...\033[0m") + visualize_with_mpl(pedestrian_paths, pedestrian_times, vehicle_path, vehicle_data) + else: + print("\033[91mNo pedestrian or vehicle data found.\033[0m") diff --git a/GEMstack/utils/analysis.py b/GEMstack/utils/analysis.py new file mode 100644 index 000000000..0a354a6ca --- /dev/null +++ b/GEMstack/utils/analysis.py @@ -0,0 +1,153 @@ +""" +Log Analysis Tool + +This program allows 
users to analyze CSV files stored in log directories. Users can: +- Select a log directory from the `./logs` folder. +- Choose a CSV file within the selected directory. +- Select specific columns to analyze. +- Choose an analysis method such as MSE, RMS, Mean, or Standard Deviation. +- Define a custom lambda function for analysis. +- Save the results in the default log directory or specify a custom path. + +Usage: +1. Run the script: `python ./GEMstack/utils/analysis.py` +2. Follow the prompts to: + - Select a log directory. + - Choose a CSV file to analyze. + - Select the columns for analysis. + - Pick a predefined analysis method or define a custom lambda function. + - Save the results. + +Output: +- The analysis results are saved in CSV format, with column names included, in the chosen directory. + +""" + +import os +import pandas as pd +import numpy as np +from datetime import datetime + +def list_log_directories(): + logs_path = "./logs" + if not os.path.exists(logs_path): + print("Logs directory does not exist.") + return [] + + directories = [d for d in os.listdir(logs_path) if os.path.isdir(os.path.join(logs_path, d))] + return directories + +def choose_directory(directories): + if not directories: + print("No log directories found.") + return None + + print("Available log directories:") + for i, dir_name in enumerate(directories): + print(f"{i + 1}. {dir_name}") + + choice = int(input("Select a directory by number: ")) - 1 + if 0 <= choice < len(directories): + return os.path.join("./logs", directories[choice]) + else: + print("Invalid choice.") + return None + +def choose_csv_file(log_dir): + files = [f for f in os.listdir(log_dir) if f.endswith(".csv")] + if not files: + print("No CSV files found in the selected directory.") + return None + + print("Available CSV files:") + for i, file in enumerate(files): + print(f"{i + 1}. 
{file}") + + choice = int(input("Select a CSV file by number: ")) - 1 + if 0 <= choice < len(files): + return os.path.join(log_dir, files[choice]) + else: + print("Invalid choice.") + return None + +def load_csv(csv_path): + if not os.path.exists(csv_path): + print("CSV file not found.") + return None + + df = pd.read_csv(csv_path) + print("CSV file loaded successfully.") + print("Available columns:", list(df.columns)) + return df + +def choose_columns(df): + selected_columns = input("Enter column names to analyze (comma separated): ").split(',') + selected_columns = [col.strip() for col in selected_columns if col.strip() in df.columns] + if not selected_columns: + print("No valid columns selected.") + return None + return df[selected_columns] + +def analyze_data(df, log_dir): + methods = { + "1": ("Mean Squared Error (MSE)", lambda x: np.mean(np.square(x))), + "2": ("Root Mean Square (RMS)", lambda x: np.sqrt(np.mean(np.square(x)))), + "3": ("Mean", np.mean), + "4": ("Standard Deviation", np.std), + "5": ("Custom Lambda Function", None) + } + + print("Available analysis methods:") + for key, (name, _) in methods.items(): + print(f"{key}. 
{name}") + + choice = input("Select a method by number: ") + if choice in methods: + if choice == "5": + function_str = input("Enter a lambda function (e.g., lambda x: np.max(x) - np.min(x)): ").strip() + try: + func = eval(function_str, {"np": np}) + result = df.apply(func) + except Exception as e: + print(f"Invalid function: {e}") + return + else: + method_name, method_func = methods[choice] + print(f"Applying {method_name}...") + result = df.apply(method_func) + + print("Analysis result:") + print(result.to_string(header=True, index=True)) + + save_path = input("Enter a path to save the result (press Enter to save in the chosen log directory): ") + if not save_path: + timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") + save_path = os.path.join(log_dir, f"analysis_result_{timestamp}.csv") + + result.to_frame().T.to_csv(save_path, index=False, header=True) + print(f"Analysis result saved to {save_path}") + else: + print("Invalid choice.") + +def main(): + directories = list_log_directories() + log_dir = choose_directory(directories) + if not log_dir: + return + + csv_path = choose_csv_file(log_dir) + if not csv_path: + return + + df = load_csv(csv_path) + if df is None: + return + + df_selected = choose_columns(df) + if df_selected is None: + return + + analyze_data(df_selected, log_dir) + +if __name__ == "__main__": + main() diff --git a/README.md b/README.md index 463597875..b7d69edd7 100644 --- a/README.md +++ b/README.md @@ -24,6 +24,7 @@ You should also have the following Python dependencies installed, which you can - Perception Dependencies - ultralytics + In order to interface with the actual GEM e2 vehicle, you will need [PACMOD2](https://github.com/astuff/pacmod2) - Autonomoustuff's low level interface to vehicle. You will also need Autonomoustuff's [sensor message packages](https://github.com/astuff/astuff_sensor_msgs). 
The onboard computer uses Ubuntu 20.04 with Python 3.8, CUDA 11.6, and NVIDIA driver 515, so to minimize compatibility issues you should ensure that these are installed on your development system. ## Running the stack on Ubuntu 20.04 without Docker @@ -63,6 +64,29 @@ If you see the output, you are good to go. Otherwise, you will need to install t - For **Nvidia Container Toolkit**, run `setup/get_nvidia_container.sh` from this directory to install, or see [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for more details. +## Running the stack on Ubuntu 20.04 without Docker +### Checking CUDA Version + +Before proceeding, check your Nvidia Driver and supported CUDA version: +```bash +nvidia-smi +``` +This will show your NVIDIA driver version and the maximum supported CUDA version. Make sure you have CUDA 11.8 or 12+ installed. + +From Ubuntu 20.04 install [CUDA 11.6](https://gist.github.com/ksopyla/bf74e8ce2683460d8de6e0dc389fc7f5) or [CUDA 12+](https://gist.github.com/ksopyla/ee744bf013c83e4aa3fc525634d893c9) based on your current Nvidia Driver version. + +To check the currently installed CUDA version: +```bash +nvcc --version +``` +You can install the dependencies of GEMstack by running `setup/setup_this_machine.sh` from the top-level GEMstack folder. + +## Running the stack on Ubuntu 20.04 or 22.04 with Docker +> [!NOTE] +> Make sure to check the Nvidia Driver and supported CUDA version before proceeding by following the steps in the previous section. + +For GPU support you will need the NVidia Container Toolkit (run `setup/get_nvidia_container.sh` from this directory to install, or see [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for more details). + ## Building the Docker image To build a Docker image with all these prerequisites, you can use the provided Dockerfile by running.
diff --git a/setup/Dockerfile.cuda11.8 b/setup/Dockerfile.cuda11.8 index 687533803..8c299bf15 100644 --- a/setup/Dockerfile.cuda11.8 +++ b/setup/Dockerfile.cuda11.8 @@ -68,6 +68,7 @@ COPY requirements.txt /tmp/requirements.txt # Install Python dependencies RUN pip3 install -r /tmp/requirements.txt + # Install other Dependencies RUN apt-get install -y ros-noetic-septentrio-gnss-driver diff --git a/setup/Dockerfile.cuda12 b/setup/Dockerfile.cuda12 index ffafaaa3f..81e2c262e 100644 --- a/setup/Dockerfile.cuda12 +++ b/setup/Dockerfile.cuda12 @@ -68,6 +68,7 @@ COPY requirements.txt /tmp/requirements.txt # Install Python dependencies RUN pip3 install -r /tmp/requirements.txt + # Install other Dependencies RUN apt-get install -y ros-noetic-septentrio-gnss-driver