+ );
+};
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/agentConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/agentConfig.ts
new file mode 100644
index 000000000..7d64a400d
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/agentConfig.ts
@@ -0,0 +1,14 @@
+const agentConfig = {
+ pedestrian: {
+ name: "Pedestrian",
+ modelPath: "/models/pedestrian.glb",
+ scale: [0.2, 0.2, 0.2],
+ rotation: [0, Math.PI, 0],
+ offset: [0, 0, 0],
+ bodyColor: "#808080",
+ }
+};
+
+const currentAgent = agentConfig.pedestrian;
+
+export { agentConfig, currentAgent };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/cameraConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/cameraConfig.ts
new file mode 100644
index 000000000..ae017432c
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/cameraConfig.ts
@@ -0,0 +1,35 @@
+type CameraConfig = {
+ position: [number, number, number];
+ lookAt: [number, number, number];
+ damping?: number;
+};
+
+type CameraConfigMap = {
+ [key in 'first' | 'chase' | 'top' | 'side' | 'free']: Partial<CameraConfig>;
+};
+
+const cameraConfig: CameraConfigMap = {
+ first: {
+ position: [0, 1.5, 0],
+ lookAt: [10, 2, 0],
+ damping: 0.1,
+ },
+ chase: {
+ position: [-6, 3, 0],
+ lookAt: [0, 0, 0],
+ damping: 0.1,
+ },
+ top: {
+ position: [0, 10, 0],
+ lookAt: [0, 0, 0],
+ damping: 0.1,
+ },
+ side: {
+ position: [0, 2, 5],
+ lookAt: [0, 0, -10],
+ damping: 0.1,
+ },
+ free: {},
+};
+
+export { cameraConfig };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/groundConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/groundConfig.ts
new file mode 100644
index 000000000..751a789f7
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/groundConfig.ts
@@ -0,0 +1,32 @@
+export interface GroundConfig {
+ size: [number, number];
+ position: [number, number, number];
+ rotation: [number, number, number];
+ cellSize: number;
+ cellThickness: number;
+ cellColor: string;
+ sectionSize: number;
+ sectionThickness: number;
+ sectionColor: string;
+ fadeDistance: number;
+ fadeStrength: number;
+ infiniteGrid: boolean;
+}
+
+const groundConfig: GroundConfig = {
+ size: [1000, 1000],
+ position: [0, 0, 0],
+ rotation: [0, 0, 0],
+ cellSize: 1,
+ cellThickness: 0.5,
+ cellColor: "#000000",
+ sectionSize: 10,
+ sectionThickness: 1,
+ sectionColor: "#000000",
+ fadeDistance: 100,
+ fadeStrength: 1,
+ infiniteGrid: false,
+};
+
+
+export { groundConfig };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/otherVehicleConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/otherVehicleConfig.ts
new file mode 100644
index 000000000..2296e26d0
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/otherVehicleConfig.ts
@@ -0,0 +1,14 @@
+const otherVehicleConfig = {
+ car: {
+ name: "Car",
+ modelPath: "/models/model/gem_e2.urdf",
+ scale: [1, 1, 1],
+ rotation: [-Math.PI / 2, 0, 0],
+ offset: [0, 0, 0],
+ bodyColor: "#808080",
+ },
+};
+
+const currentOtherVehicle = otherVehicleConfig.car;
+
+export { otherVehicleConfig, currentOtherVehicle };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/trafficConeConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/trafficConeConfig.ts
new file mode 100644
index 000000000..76a1e6479
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/trafficConeConfig.ts
@@ -0,0 +1,14 @@
+const trafficConeConfig = {
+ trafficCone: {
+ name: "Traffic Cone",
+ modelPath: "/models/traffic_cone.glb",
+ scale: [0.03, 0.03, 0.03],
+ rotation: [0, 0, 0],
+ offset: [0, 0, 0],
+ bodyColor: "#FFFFFF",
+ },
+};
+
+const currentTrafficCone = trafficConeConfig.trafficCone;
+
+export { trafficConeConfig, currentTrafficCone };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/trafficLightConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/trafficLightConfig.ts
new file mode 100644
index 000000000..ef86a63ee
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/trafficLightConfig.ts
@@ -0,0 +1,14 @@
+const trafficLightConfig = {
+ trafficLight: {
+ name: "Traffic Light",
+ modelPath: "/models/traffic_light.glb",
+ scale: [0.03, 0.03, 0.03],
+ rotation: [0, 0, 0],
+ offset: [0, 0, 0],
+ bodyColor: "#FFD700",
+ },
+};
+
+const currentTrafficLight = trafficLightConfig.trafficLight;
+
+export { trafficLightConfig, currentTrafficLight };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/config/vehicleConfig.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/vehicleConfig.ts
new file mode 100644
index 000000000..a23435ad0
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/config/vehicleConfig.ts
@@ -0,0 +1,14 @@
+const vehicles = {
+ gemE4: {
+ name: "Gem E4",
+ modelPath: "/models/model/gem_e4.urdf",
+ scale: [1, 1, 1],
+ rotation: [-Math.PI / 2, 0, 0],
+ offset: [0, 0, 0],
+ bodyColor: "#808080",
+ }
+};
+
+const currentVehicle = vehicles.gemE4;
+
+export { vehicles, currentVehicle };
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useCameraController.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useCameraController.ts
new file mode 100644
index 000000000..01649c9d3
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useCameraController.ts
@@ -0,0 +1,118 @@
+import { useEffect, useState, useRef } from "react";
+import { useThree, useFrame } from "@react-three/fiber";
+import * as THREE from "three";
+import { FrameData } from "@/types/FrameData";
+import { cameraConfig } from "@/config/cameraConfig";
+
+type CameraModeKey = "chase" | "top" | "side" | "free" | "first";
+
+export function useCameraController(
+ carRef: React.RefObject<THREE.Group>,
+ timeline: FrameData[] = [],
+ time: number
+): CameraModeKey {
+ const { camera, gl } = useThree();
+ const [mode, setMode] = useState<CameraModeKey>("chase");
+ const lastMode = useRef<CameraModeKey>("chase");
+ const modeRef = useRef<CameraModeKey>("chase");
+
+ const smoothedPos = useRef(new THREE.Vector3());
+ const smoothedLookAt = useRef(new THREE.Vector3());
+
+ useEffect(() => {
+ modeRef.current = mode;
+ }, [mode]);
+
+ // Allow switching between all modes at any time
+ useEffect(() => {
+ const handleKey = (e: KeyboardEvent) => {
+ switch (e.key) {
+ case "1":
+ setMode("first");
+ break;
+ case "2":
+ setMode("chase");
+ break;
+ case "3":
+ setMode("top");
+ break;
+ case "4":
+ setMode("side");
+ break;
+ case "5":
+ lastMode.current = modeRef.current;
+ setMode("free");
+ break;
+ case "0":
+ if (lastMode.current !== "free") {
+ setMode(lastMode.current);
+ }
+ break;
+ }
+ };
+
+ window.addEventListener("keydown", handleKey);
+ return () => window.removeEventListener("keydown", handleKey);
+ }, []);
+
+ // Switch to free mode on scroll or click
+ useEffect(() => {
+ const dom = gl.domElement;
+
+ const activateFreeMode = () => {
+ if (modeRef.current !== "free") {
+ lastMode.current = modeRef.current;
+ setMode("free");
+ }
+ };
+
+ const onMouseDown = (e: MouseEvent) => {
+ if (e.button === 0) activateFreeMode(); // Left click
+ };
+
+ const onWheel = () => {
+ activateFreeMode(); // Scroll
+ };
+
+ dom.addEventListener("mousedown", onMouseDown);
+ dom.addEventListener("wheel", onWheel);
+
+ return () => {
+ dom.removeEventListener("mousedown", onMouseDown);
+ dom.removeEventListener("wheel", onWheel);
+ };
+ }, [gl.domElement]);
+
+ useFrame(() => {
+ if (modeRef.current === "free") return;
+ if (!carRef.current) return;
+
+ // Always fallback to a stable frame if timeline is empty
+ const frame =
+ timeline.find((f) => f.time >= time) ??
+ timeline.at(-1) ??
+ { x: 0, y: 0, yaw: 0 };
+
+ const config = cameraConfig[modeRef.current];
+ if (!config?.position || !config?.lookAt) return;
+
+ const carPos = new THREE.Vector3(frame.x, 0, frame.y);
+ const carQuat = new THREE.Quaternion().setFromEuler(new THREE.Euler(0, -frame.yaw, 0));
+
+ const damping = config.damping ?? 0.1;
+
+ // camera offset (relative to vehicle) and final position
+ const offset = new THREE.Vector3(...config.position).applyQuaternion(carQuat);
+ const camTargetPos = carPos.clone().add(offset);
+ smoothedPos.current.lerp(camTargetPos, damping);
+ camera.position.copy(smoothedPos.current);
+
+ // look at offset
+ const lookAtOffset = new THREE.Vector3(...config.lookAt).applyQuaternion(carQuat);
+ const camLookAt = carPos.clone().add(lookAtOffset);
+ smoothedLookAt.current.lerp(camLookAt, damping);
+ camera.lookAt(smoothedLookAt.current);
+ });
+
+ return mode;
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/usePlaybackTime.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/usePlaybackTime.ts
new file mode 100644
index 000000000..95843bb0f
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/usePlaybackTime.ts
@@ -0,0 +1,61 @@
+import { useEffect, useRef, useState } from "react";
+
+export function usePlaybackTime() {
+ const [time, setTime] = useState(0);
+ const [play, setPlay] = useState(true);
+ const [speed, setSpeed] = useState(1);
+ const [duration, setDuration] = useState(0);
+
+ const lastFrame = useRef(performance.now());
+
+ const reset = () => {
+ lastFrame.current = performance.now();
+ setTime(0);
+ setDuration(0);
+ setPlay(true);
+ };
+
+ const restart = () => {
+ lastFrame.current = performance.now();
+ setTime(0);
+ setPlay(true);
+ }
+
+ const togglePlay = () => {
+ setPlay((prev) => {
+ if (!prev) lastFrame.current = performance.now();
+ return !prev;
+ });
+ };
+
+ const setPlaybackSpeed = (newSpeed: number) => {
+ if (newSpeed <= 0) return;
+ setSpeed(newSpeed);
+ };
+
+ const moveToTime = (targetTime: number) => {
+ setTime(targetTime);
+ lastFrame.current = performance.now();
+ }
+
+ useEffect(() => {
+ let rafId: number;
+
+ const loop = () => {
+ rafId = requestAnimationFrame(loop);
+
+ if (!play) return;
+
+ const now = performance.now();
+ const delta = (now - lastFrame.current) / 1000;
+ lastFrame.current = now;
+
+ setTime((t) => t + delta * speed);
+ };
+
+ rafId = requestAnimationFrame(loop);
+ return () => cancelAnimationFrame(rafId);
+ }, [play, speed]);
+
+ return { time, reset, restart, play, setPlay, togglePlay, speed, setPlaybackSpeed, moveToTime, duration, setDuration };
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useTimelineStore.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useTimelineStore.ts
new file mode 100644
index 000000000..30df1e5df
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useTimelineStore.ts
@@ -0,0 +1,11 @@
+import { TimelineStore } from '@/types/TimelineStore';
+import { create } from 'zustand';
+
+export const useTimelineStore = create<TimelineStore>((set) => ({
+ vehicle: [],
+ agents: {},
+ trafficLights: {},
+ trafficCones: {},
+ otherVehicles: {},
+ setTimeline: (timeline) => set(timeline),
+}));
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useVehicleControls.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useVehicleControls.ts
new file mode 100644
index 000000000..27eccfc5f
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/hooks/useVehicleControls.ts
@@ -0,0 +1,67 @@
+import { useEffect, useState, useRef } from 'react';
+import { Group, Vector3, Euler } from 'three';
+import { useFrame } from '@react-three/fiber';
+
+export function useVehicleControls(
+ groupRef: React.RefObject<Group>,
+ getSpeed: () => number = () => 1
+) {
+ const [keys, setKeys] = useState<{ [key: string]: boolean }>({});
+ const targetPosition = useRef(new Vector3());
+ const targetRotation = useRef(new Euler());
+
+ useEffect(() => {
+ const handleKeyDown = (e: KeyboardEvent) =>
+ setKeys((k) => ({ ...k, [e.key.toLowerCase()]: true }));
+ const handleKeyUp = (e: KeyboardEvent) =>
+ setKeys((k) => ({ ...k, [e.key.toLowerCase()]: false }));
+
+ window.addEventListener('keydown', handleKeyDown);
+ window.addEventListener('keyup', handleKeyUp);
+ return () => {
+ window.removeEventListener('keydown', handleKeyDown);
+ window.removeEventListener('keyup', handleKeyUp);
+ };
+ }, []);
+
+ const state = { moving: false, direction: 0 };
+
+ useFrame((_, delta) => {
+ if (!groupRef.current) return;
+
+ const group = groupRef.current;
+ const speed = getSpeed();
+ const moveDir = new Vector3();
+ const currentRotation = group.rotation.clone();
+
+ state.moving = false;
+
+ const forward = new Vector3(1, 0, 0).applyEuler(currentRotation);
+
+ if (keys['w']) {
+ moveDir.add(forward);
+ state.moving = true;
+ state.direction = 1;
+ }
+ if (keys['s']) {
+ moveDir.sub(forward);
+ state.moving = true;
+ state.direction = -1;
+ }
+
+ moveDir.normalize().multiplyScalar(speed * delta);
+ targetPosition.current.add(moveDir);
+
+ const steerAmount = 1.5 * delta * (keys['s'] ? -1 : 1);
+ if (keys['a']) targetRotation.current.y += steerAmount;
+ if (keys['d']) targetRotation.current.y -= steerAmount;
+
+ // Smooth rotation
+ group.rotation.y += (targetRotation.current.y - group.rotation.y) * 0.2;
+
+ // Smooth position
+ group.position.lerp(targetPosition.current, 0.2);
+ });
+
+ return state;
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/types/FrameData.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/FrameData.ts
new file mode 100644
index 000000000..78ea540cd
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/FrameData.ts
@@ -0,0 +1,9 @@
+export interface FrameData {
+ time: number;
+ x: number;
+ y: number;
+ z: number;
+ yaw: number;
+ pitch: number;
+ roll: number;
+}
\ No newline at end of file
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/types/LogEntry.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/LogEntry.ts
new file mode 100644
index 000000000..15b88b363
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/LogEntry.ts
@@ -0,0 +1,7 @@
+export interface LogEntry<T = any> {
+ key: string; // Top-level key (e.g., "vehicle", "agents", etc.)
+ type: string; // Inner type (e.g., "VehicleState")
+ time: number; // Timestamp of the event
+ data: T; // Full data object (includes pose, velocity, etc.)
+}
+
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/types/TimelineData.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/TimelineData.ts
new file mode 100644
index 000000000..513ef27bc
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/TimelineData.ts
@@ -0,0 +1,9 @@
+import { FrameData } from './FrameData';
+
+export interface TimelineData {
+ vehicle: FrameData[];
+ agents: Record<string, FrameData[]>;
+ trafficLights: Record<string, FrameData[]>;
+ trafficCones: Record<string, FrameData[]>;
+ otherVehicles: Record<string, FrameData[]>;
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/types/TimelineStore.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/TimelineStore.ts
new file mode 100644
index 000000000..ed2341de9
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/types/TimelineStore.ts
@@ -0,0 +1,5 @@
+import { TimelineData } from './TimelineData';
+
+export interface TimelineStore extends TimelineData {
+ setTimeline: (timeline: TimelineData) => void;
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/buildTimeline.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/buildTimeline.ts
new file mode 100644
index 000000000..1c07caa7d
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/buildTimeline.ts
@@ -0,0 +1,48 @@
+import { LogEntry } from '@/types/LogEntry';
+import { FrameData } from '@/types/FrameData';
+import { TimelineData } from '@/types/TimelineData';
+
+export function buildTimeline(entries: LogEntry[]): TimelineData {
+ const vehicle: FrameData[] = [];
+ const agents: Record<string, FrameData[]> = {};
+ const trafficLights: Record<string, FrameData[]> = {};
+ const trafficCones: Record<string, FrameData[]> = {};
+ const otherVehicles: Record<string, FrameData[]> = {};
+
+ for (const entry of entries) {
+ const pose = entry.data?.pose;
+ if (!pose || typeof pose.x !== 'number' || typeof pose.y !== 'number') continue;
+
+ const frame: FrameData = {
+ time: entry.time,
+ x: pose.x,
+ y: pose.y,
+ z: pose.z ?? 0,
+ yaw: pose.yaw ?? 0,
+ pitch: pose.pitch ?? 0,
+ roll: pose.roll ?? 0
+ };
+
+ if (entry.key === 'vehicle') {
+ vehicle.push(frame);
+ } else if (entry.type === 'AgentState') {
+ const key = entry.key.trim();
+ if (!agents[key]) agents[key] = [];
+ agents[key].push(frame);
+ } else if (entry.type === 'TrafficLightState') {
+ const key = entry.key.trim();
+ if (!trafficLights[key]) trafficLights[key] = [];
+ trafficLights[key].push(frame);
+ } else if (entry.type === "TrafficConeState") {
+ const key = entry.key.trim();
+ if (!trafficCones[key]) trafficCones[key] = [];
+ trafficCones[key].push(frame);
+ }else if (entry.type === 'OtherVehicleState') {
+ const key = entry.key.trim();
+ if (!otherVehicles[key]) otherVehicles[key] = [];
+ otherVehicles[key].push(frame);
+ }
+ }
+
+ return { vehicle, agents, trafficLights, trafficCones, otherVehicles };
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/parseLogFile.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/parseLogFile.ts
new file mode 100644
index 000000000..a742e8983
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/parseLogFile.ts
@@ -0,0 +1,58 @@
+import { LogEntry } from '../types/LogEntry';
+import { streamLogLines } from './streamLogLines';
+
+export async function parseLogFile(file: File): Promise<LogEntry[]> {
+ const entries: LogEntry[] = [];
+
+ for await (const line of streamLogLines(file)) {
+ try {
+ const parsed = JSON.parse(line);
+ const time = parsed.time;
+
+ for (const [key, value] of Object.entries(parsed)) {
+ if (key === 'time' || typeof value !== 'object' || value === null) continue;
+
+ if (key === 'vehicle' && 'type' in value && 'data' in value && (value as any).data?.pose?.frame === 3) {
+ entries.push({
+ key,
+ type: (value as any).type,
+ time,
+ data: (value as any).data,
+ });
+ continue;
+ }
+
+ if (key === 'agents' || key === 'traffic_lights' || key === 'traffic_cones' || key === 'other_vehicles') {
+ for (const [itemId, itemValue] of Object.entries(value)) {
+ if (typeof itemValue === 'object' && itemValue !== null && 'data' in itemValue) {
+ const frame = (itemValue as any).data?.pose?.frame;
+
+ if (key === 'agents' && frame !== 1) {
+ continue;
+ }
+
+ entries.push({
+ key: itemId,
+ type: (itemValue as any).type ?? guessTypeFromKey(key),
+ time,
+ data: (itemValue as any).data,
+ });
+ }
+ }
+ }
+ }
+ } catch (err) {
+ console.warn('Failed to parse log line:', err);
+ }
+ }
+
+ return entries;
+}
+
+function guessTypeFromKey(key: string): string {
+ if (key === 'agents') return 'AgentState';
+ if (key === 'traffic_lights') return 'TrafficLightState';
+ if (key === 'traffic_cones') return 'TrafficConeState';
+ if (key === 'other_vehicles') return 'OtherVehicleState';
+ return 'Unknown';
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/streamLogLines.ts b/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/streamLogLines.ts
new file mode 100644
index 000000000..d4c0c5ba8
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/src/utils/streamLogLines.ts
@@ -0,0 +1,20 @@
+export async function* streamLogLines(file: File): AsyncGenerator<string> {
+ const reader = file.stream().getReader();
+ const decoder = new TextDecoder('utf-8');
+ let partial = '';
+
+ while (true) {
+ const { value, done } = await reader.read();
+ if (done) break;
+
+ partial += decoder.decode(value, { stream: true });
+ const lines = partial.split('\n');
+ partial = lines.pop() || '';
+
+ for (const line of lines) {
+ if (line.trim()) yield line;
+ }
+ }
+
+ if (partial.trim()) yield partial;
+}
diff --git a/GEMstack/onboard/visualization/sr_viz/threeD/tsconfig.json b/GEMstack/onboard/visualization/sr_viz/threeD/tsconfig.json
new file mode 100644
index 000000000..c1334095f
--- /dev/null
+++ b/GEMstack/onboard/visualization/sr_viz/threeD/tsconfig.json
@@ -0,0 +1,27 @@
+{
+ "compilerOptions": {
+ "target": "ES2017",
+ "lib": ["dom", "dom.iterable", "esnext"],
+ "allowJs": true,
+ "skipLibCheck": true,
+ "strict": true,
+ "noEmit": true,
+ "esModuleInterop": true,
+ "module": "esnext",
+ "moduleResolution": "bundler",
+ "resolveJsonModule": true,
+ "isolatedModules": true,
+ "jsx": "preserve",
+ "incremental": true,
+ "plugins": [
+ {
+ "name": "next"
+ }
+ ],
+ "paths": {
+ "@/*": ["./src/*"]
+ }
+ },
+ "include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
+ "exclude": ["node_modules"]
+}
diff --git a/GEMstack/scripts/__init__.py b/GEMstack/scripts/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/GEMstack/scripts/convert_to_rosbag.py b/GEMstack/scripts/convert_to_rosbag.py
new file mode 100644
index 000000000..791ef6e0a
--- /dev/null
+++ b/GEMstack/scripts/convert_to_rosbag.py
@@ -0,0 +1,171 @@
+#!/usr/bin/env python3
+
+import os
+import rosbag
+import cv2
+import numpy as np
+from cv_bridge import CvBridge
+from sensor_msgs.msg import Image, PointCloud2
+import sensor_msgs.point_cloud2 as pc2
+import glob
+from datetime import datetime
+import time
+import rospy
+import std_msgs.msg
+import argparse
+
+def get_matching_files(dir):
+ """Get files that have corresponding data in all three formats"""
+ # Get sorted lists of files
+ png_files = sorted(glob.glob(os.path.join(dir, 'color*.png')))
+ tif_files = sorted(glob.glob(os.path.join(dir, 'depth*.tif')))
+ npz_files = sorted(glob.glob(os.path.join(dir, 'lidar*.npz')))
+
+ print(f"Found {len(png_files)} PNG files")
+ print(f"Found {len(tif_files)} TIF files")
+ print(f"Found {len(npz_files)} NPZ files")
+
+ # Extract numbers from filenames
+ def get_number(filename):
+ return int(''.join(filter(str.isdigit, os.path.basename(filename))))
+
+ # Create dictionaries with numbers as keys
+ png_dict = {get_number(f): f for f in png_files}
+ tif_dict = {get_number(f): f for f in tif_files}
+ npz_dict = {get_number(f): f for f in npz_files}
+
+ # Find common numbers
+ common_numbers = set(png_dict.keys()) & set(tif_dict.keys()) & set(npz_dict.keys())
+ print(f"\nFound {len(common_numbers)} matching frame numbers")
+
+ # Get matching files in sorted order
+ common_numbers = sorted(common_numbers)
+ matching_pngs = [png_dict[n] for n in common_numbers]
+ matching_tifs = [tif_dict[n] for n in common_numbers]
+ matching_npzs = [npz_dict[n] for n in common_numbers]
+
+ return matching_pngs, matching_tifs, matching_npzs
+
+def create_rosbag_from_data(
+ dir,
+ output_bag_path,
+ frame_interval
+):
+ # Initialize CV bridge
+ bridge = CvBridge()
+
+ # Initialize ROS node
+ rospy.init_node('bag_creator', anonymous=True)
+
+ # Get matching files
+ png_files, tif_files, npz_files = get_matching_files(dir)
+
+ print(f"Found {len(png_files)} matching files in all three formats")
+
+ if len(png_files) == 0:
+ raise ValueError("No matching files found!")
+
+ # Create a new bag file
+ with rosbag.Bag(output_bag_path, 'w') as bag:
+ # Start time for the messages
+ start_time = rospy.Time.from_sec(time.time())
+ message_count = 0
+
+ for idx, (png_file, tif_file, npz_file) in enumerate(zip(png_files, tif_files, npz_files)):
+ # Calculate timestamp for this frame
+ timestamp = rospy.Time.from_sec(start_time.to_sec() + idx * (1/frame_interval))
+
+ try:
+ # Process PNG image
+ png_img = cv2.imread(png_file)
+ if png_img is not None:
+ png_msg = bridge.cv2_to_imgmsg(png_img, encoding="bgr8")
+ png_msg.header.stamp = timestamp
+ png_msg.header.frame_id = "camera_rgb"
+ bag.write('/camera/rgb/image_raw', png_msg, timestamp)
+ message_count += 1
+ else:
+ print(f"Warning: Could not read PNG file: {png_file}")
+
+ # Process TIF image
+ tif_img = cv2.imread(tif_file, -1) # -1 to preserve original depth
+ if tif_img is not None:
+ tif_msg = bridge.cv2_to_imgmsg(tif_img, encoding="passthrough")
+ tif_msg.header.stamp = timestamp
+ tif_msg.header.frame_id = "camera_depth"
+ bag.write('/camera/depth/image_raw', tif_msg, timestamp)
+ message_count += 1
+ else:
+ print(f"Warning: Could not read TIF file: {tif_file}")
+
+ # Process pointcloud NPZ
+ pc_data = np.load(npz_file)
+ points = pc_data['arr_0'] # Using 'arr_0' based on the provided files'
+
+ # Create pointcloud message
+ header = std_msgs.msg.Header()
+ header.stamp = timestamp
+ header.frame_id = "velodyne"
+
+ fields = [
+ pc2.PointField('x', 0, pc2.PointField.FLOAT32, 1),
+ pc2.PointField('y', 4, pc2.PointField.FLOAT32, 1),
+ pc2.PointField('z', 8, pc2.PointField.FLOAT32, 1)
+ ]
+
+ pc_msg = pc2.create_cloud(header, fields, points)
+ bag.write('/velodyne_points', pc_msg, timestamp)
+ message_count += 1
+
+ except Exception as e:
+ print(f"Error processing frame {idx}: {str(e)}")
+ continue
+
+ if idx % 10 == 0:
+ print(f"Processed {idx} frames")
+
+ print(f"Total messages written to bag: {message_count}")
+
+if __name__ == "__main__":
+ #This initializes the parser and sets the parameters that the user will be asked to provide in the terminal
+ parser = argparse.ArgumentParser(
+ description='A script to convert data gathered by cameras and lidar sensors to ros .bag messages'
+ )
+
+ parser.add_argument(
+ 'files_directory', type = str,
+ help = 'The path to the directory with all the rgb images, depth maps, and point clouds. The file formats must be PNG, TIF, and NPZ, respectively'
+ )
+
+ parser.add_argument(
+ 'output_bag', type = str,
+ help = 'The path to the directory where the bag file will be saved'
+ )
+
+ parser.add_argument(
+ 'rate', type = int,
+ help = 'The rate at which the data is collected in Hz'
+ )
+ args = parser.parse_args()
+ directory = args.files_directory
+ output_bag = args.output_bag
+ rate = args.rate
+ # Example directories below:
+ #directory = "/home/username/host/CS588/GEMstack/data/data_sample/data/"
+ #output_bag = "/home/username/host/CS588/GEMstack/data/data_sample/data/output.bag"
+
+ try:
+ create_rosbag_from_data(
+ directory,
+ output_bag,
+ rate
+ )
+ print("Successfully created ROS bag file!")
+
+ # Verify bag contents
+ print("\nBag file contents:")
+ info_cmd = f"rosbag info {output_bag}"
+ os.system(info_cmd)
+
+ except Exception as e:
+ print(f"Error creating ROS bag: {str(e)}")
diff --git a/GEMstack/scripts/visualization.py b/GEMstack/scripts/visualization.py
new file mode 100644
index 000000000..3ff9dada7
--- /dev/null
+++ b/GEMstack/scripts/visualization.py
@@ -0,0 +1,191 @@
+import json
+import os
+import time
+from klampt import vis
+from klampt.model.trajectory import Trajectory
+import matplotlib.pyplot as plt
+from ..onboard.visualization.klampt_visualization import KlamptVisualization
+from ..onboard.visualization.mpl_visualization import MPLVisualization
+
+LOG_DIR = "logs"
+BEHAVIOR_FILE = "behavior.json"
+
+def select_log_folder():
+ log_folders = [f for f in os.listdir(LOG_DIR) if os.path.isdir(os.path.join(LOG_DIR, f))]
+ if not log_folders:
+ print("\033[91mNo log folders found.\033[0m")
+ return None
+
+ print("\n\033[94mAvailable log folders:\033[0m")
+ for idx, folder in enumerate(log_folders):
+ print(f"{idx + 1}. {folder}")
+
+ while True:
+ try:
+ choice = int(input("\n\033[93mEnter the number of the log folder to use:\033[0m ")) - 1
+ if 0 <= choice < len(log_folders):
+ return os.path.join(LOG_DIR, log_folders[choice])
+ print("\033[91mInvalid selection. Please enter a valid number.\033[0m")
+ except ValueError:
+ print("\033[91mPlease enter a number.\033[0m")
+
+def select_visualizer():
+ print("\n\033[94mChoose a visualization method:\033[0m")
+ print("1. Klampt (3D visualization)")
+ print("2. MPL (Matplotlib 2D visualization)")
+
+ while True:
+ try:
+ choice = int(input("\n\033[93mEnter 1 or 2:\033[0m "))
+ if choice in [1, 2]:
+ return choice
+ print("\033[91mInvalid selection. Please enter 1 or 2.\033[0m")
+ except ValueError:
+ print("\033[91mPlease enter a valid number.\033[0m")
+
+# Load and extract pedestrian and vehicle data
+def extract_behavior_data(log_dir):
+ behavior_path = os.path.join(log_dir, BEHAVIOR_FILE)
+ if not os.path.exists(behavior_path):
+ print(f"\033[91mError: {behavior_path} not found.\033[0m")
+ return None, None, None, None, None, None, None, None
+
+ with open(behavior_path, "r") as f:
+ data = [json.loads(line) for line in f]
+
+ pedestrian_paths = {}
+ pedestrian_times = {}
+ vehicle_path = []
+ vehicle_times = []
+ speeds = []
+ accelerators = []
+ brakes = []
+ steering_angles = []
+
+ for entry in data:
+ time_stamp = entry.get("time", 0)
+
+ # Extract pedestrian data
+ if "agents" in entry:
+ for agent_id, agent in entry["agents"].items():
+ agent_data = agent.get("data", {})
+ if agent_data.get("type") == 3: # Type 3 = pedestrian
+ pose = agent_data.get("pose", {})
+ x, y = pose.get("x", 0), pose.get("y", 0)
+
+ if agent_id not in pedestrian_paths:
+ pedestrian_paths[agent_id] = []
+ pedestrian_times[agent_id] = []
+
+ pedestrian_paths[agent_id].append((x, y))
+ pedestrian_times[agent_id].append(time_stamp)
+
+ # Extract vehicle data
+ if "vehicle" in entry:
+ vehicle_data = entry["vehicle"].get("data", {})
+ pose = vehicle_data.get("pose", {})
+ x, y = pose.get("x", 0), pose.get("y", 0)
+
+ vehicle_path.append((x, y))
+ vehicle_times.append(time_stamp)
+ speeds.append(vehicle_data.get("v", 0))
+ accelerators.append(vehicle_data.get("accelerator_pedal_position", 0))
+ brakes.append(vehicle_data.get("brake_pedal_position", 0))
+ steering_angles.append(vehicle_data.get("steering_wheel_angle", 0))
+
+ return pedestrian_paths, pedestrian_times, vehicle_path, vehicle_times, speeds, accelerators, brakes, steering_angles
+
+# Klampt 3D Visualization
+def visualize_with_klampt(pedestrian_paths, pedestrian_times, vehicle_path):
+ """Uses Klampt to visualize pedestrian and vehicle paths."""
+ vis.init("PyQt6")
+ vis.setWindowTitle("Pedestrian and Vehicle Path Visualization")
+
+ klampt_vis = KlamptVisualization(vehicle_interface=None, rate=20.0)
+
+ for agent_id, path in pedestrian_paths.items():
+ if len(path) < 2:
+ continue
+
+ times = pedestrian_times[agent_id]
+ path_3d = [[float(x), float(y), 0.0] for x, y in path]
+
+ trajectory = Trajectory(times, path_3d)
+ vis.add(agent_id, trajectory, color=(0, 1, 0, 1), width=2)
+
+ # if vehicle_path:
+ # vehicle_x, vehicle_y = zip(*vehicle_path)
+ # vehicle_tuples = [[float(x), float(y), 0.0] for x, y in zip(vehicle_x, vehicle_y)]
+ # vis.add("Vehicle Path", vehicle_tuples, color=(1, 0, 0, 1), width=2)
+
+ klampt_vis.initialize()
+ vis.show()
+
+ while vis.shown():
+ time.sleep(0.05)
+
+ klampt_vis.cleanup()
+ vis.kill()
+
+# MPL 2D Visualization
+def visualize_with_mpl(pedestrian_paths, pedestrian_times, vehicle_path, vehicle_data):
+ vis = MPLVisualization(rate=10.0)
+ vis.initialize()
+
+ fig, axs = vis.fig, vis.axs
+ axs[0].clear()
+ axs[1].clear()
+
+ # Left Plot: Pedestrian & Vehicle Trajectories
+ axs[0].set_xlabel("X Position")
+ axs[0].set_ylabel("Y Position")
+ axs[0].set_title("Pedestrian & Vehicle Trajectories")
+
+ for agent_id, path in pedestrian_paths.items():
+ path_x, path_y = zip(*path)
+ axs[0].plot(path_x, path_y, linestyle='-', marker='o', label=f"Pedestrian {agent_id}")
+
+ # if vehicle_path:
+ # vehicle_x, vehicle_y = zip(*vehicle_path)
+ # axs[0].plot(vehicle_x, vehicle_y, linestyle='-', marker='s', color='red', label="Vehicle Path")
+
+ axs[0].legend()
+
+ # Right Plot: Vehicle Controls Over Time
+ times, speeds, accelerators, brakes, steering_angles = vehicle_data
+ axs[1].set_xlabel("Time (s)")
+ axs[1].set_title("Vehicle Controls Over Time")
+
+ axs[1].plot(times, speeds, label="Speed (m/s)", color="blue")
+ axs[1].plot(times, accelerators, label="Accelerator (%)", color="green")
+ axs[1].plot(times, brakes, label="Brake (%)", color="red")
+ axs[1].plot(times, steering_angles, label="Steering Angle", color="purple")
+ axs[1].legend()
+
+ fig.canvas.draw_idle()
+ fig.canvas.flush_events()
+ plt.show(block=True)
+
+ vis.cleanup()
+
+# Main Execution
+if __name__ == "__main__":
+ visualizer_choice = select_visualizer()
+ selected_log_folder = select_log_folder()
+
+ if selected_log_folder:
+ print(f"\n\033[94mLoading behavior data from:\033[0m {selected_log_folder}")
+ data = extract_behavior_data(selected_log_folder)
+
+ pedestrian_paths, pedestrian_times, vehicle_path, vehicle_times, speeds, accelerators, brakes, steering_angles = data
+ vehicle_data = (vehicle_times, speeds, accelerators, brakes, steering_angles)
+
+ if pedestrian_paths or vehicle_path:
+ if visualizer_choice == 1:
+ print("\033[92mUsing Klampt for visualization...\033[0m")
+ visualize_with_klampt(pedestrian_paths, pedestrian_times, vehicle_path)
+ else:
+ print("\033[92mUsing MPL (Matplotlib) for visualization...\033[0m")
+ visualize_with_mpl(pedestrian_paths, pedestrian_times, vehicle_path, vehicle_data)
+ else:
+ print("\033[91mNo pedestrian or vehicle data found.\033[0m")
diff --git a/GEMstack/state/relations.py b/GEMstack/state/relations.py
index 465d4e657..c24e1bc22 100644
--- a/GEMstack/state/relations.py
+++ b/GEMstack/state/relations.py
@@ -23,6 +23,9 @@ class EntityRelation:
type : EntityRelationEnum
obj1 : str # Named object in the scene. '' indicates ego-vehicle
obj2 : str # Named object in the scene. '' indicates ego-vehicle
+ yield_dist : float # Distance at which obj1 yields to obj2
+ yield_speed : float # Speed at which obj1 yields to obj2
+ yield_decel : float # Deceleration at which obj1 yields to obj2
class EntityRelationGraph:
diff --git a/GEMstack/state/trajectory.py b/GEMstack/state/trajectory.py
index d7a57db00..45e90b214 100644
--- a/GEMstack/state/trajectory.py
+++ b/GEMstack/state/trajectory.py
@@ -169,6 +169,7 @@ def trim(self, start : float, end : float) -> Path:
class Trajectory(Path):
"""A timed, piecewise linear path."""
times : List[float]
+ velocities : Optional[List[float]] = None
def domain(self) -> Tuple[float,float]:
"""Returns the time parameter domain"""
diff --git a/GEMstack/utils/analysis.py b/GEMstack/utils/analysis.py
new file mode 100644
index 000000000..0a354a6ca
--- /dev/null
+++ b/GEMstack/utils/analysis.py
@@ -0,0 +1,153 @@
+"""
+Log Analysis Tool
+
+This program allows users to analyze CSV files stored in log directories. Users can:
+- Select a log directory from the `./logs` folder.
+- Choose a CSV file within the selected directory.
+- Select specific columns to analyze.
+- Choose an analysis method such as MSE, RMS, Mean, or Standard Deviation.
+- Define a custom lambda function for analysis.
+- Save the results in the default log directory or specify a custom path.
+
+Usage:
+1. Run the script: `python ./GEMstack/utils/analysis.py`
+2. Follow the prompts to:
+ - Select a log directory.
+ - Choose a CSV file to analyze.
+ - Select the columns for analysis.
+ - Pick a predefined analysis method or define a custom lambda function.
+ - Save the results.
+
+Output:
+- The analysis results are saved in CSV format, with column names included, in the chosen directory.
+
+"""
+
+import os
+import pandas as pd
+import numpy as np
+from datetime import datetime
+
+def list_log_directories():
+ logs_path = "./logs"
+ if not os.path.exists(logs_path):
+ print("Logs directory does not exist.")
+ return []
+
+ directories = [d for d in os.listdir(logs_path) if os.path.isdir(os.path.join(logs_path, d))]
+ return directories
+
+def choose_directory(directories):
+ if not directories:
+ print("No log directories found.")
+ return None
+
+ print("Available log directories:")
+ for i, dir_name in enumerate(directories):
+ print(f"{i + 1}. {dir_name}")
+
+ choice = int(input("Select a directory by number: ")) - 1
+ if 0 <= choice < len(directories):
+ return os.path.join("./logs", directories[choice])
+ else:
+ print("Invalid choice.")
+ return None
+
+def choose_csv_file(log_dir):
+ files = [f for f in os.listdir(log_dir) if f.endswith(".csv")]
+ if not files:
+ print("No CSV files found in the selected directory.")
+ return None
+
+ print("Available CSV files:")
+ for i, file in enumerate(files):
+ print(f"{i + 1}. {file}")
+
+ choice = int(input("Select a CSV file by number: ")) - 1
+ if 0 <= choice < len(files):
+ return os.path.join(log_dir, files[choice])
+ else:
+ print("Invalid choice.")
+ return None
+
+def load_csv(csv_path):
+ if not os.path.exists(csv_path):
+ print("CSV file not found.")
+ return None
+
+ df = pd.read_csv(csv_path)
+ print("CSV file loaded successfully.")
+ print("Available columns:", list(df.columns))
+ return df
+
+def choose_columns(df):
+ selected_columns = input("Enter column names to analyze (comma separated): ").split(',')
+ selected_columns = [col.strip() for col in selected_columns if col.strip() in df.columns]
+ if not selected_columns:
+ print("No valid columns selected.")
+ return None
+ return df[selected_columns]
+
+def analyze_data(df, log_dir):
+ methods = {
+ "1": ("Mean Squared Error (MSE)", lambda x: np.mean(np.square(x))),
+ "2": ("Root Mean Square (RMS)", lambda x: np.sqrt(np.mean(np.square(x)))),
+ "3": ("Mean", np.mean),
+ "4": ("Standard Deviation", np.std),
+ "5": ("Custom Lambda Function", None)
+ }
+
+ print("Available analysis methods:")
+ for key, (name, _) in methods.items():
+ print(f"{key}. {name}")
+
+ choice = input("Select a method by number: ")
+ if choice in methods:
+ if choice == "5":
+ function_str = input("Enter a lambda function (e.g., lambda x: np.max(x) - np.min(x)): ").strip()
+ try:
+ func = eval(function_str, {"np": np})
+ result = df.apply(func)
+ except Exception as e:
+ print(f"Invalid function: {e}")
+ return
+ else:
+ method_name, method_func = methods[choice]
+ print(f"Applying {method_name}...")
+ result = df.apply(method_func)
+
+ print("Analysis result:")
+ print(result.to_string(header=True, index=True))
+
+ save_path = input("Enter a path to save the result (press Enter to save in the chosen log directory): ")
+ if not save_path:
+ timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+ save_path = os.path.join(log_dir, f"analysis_result_{timestamp}.csv")
+
+ result.to_frame().T.to_csv(save_path, index=False, header=True)
+ print(f"Analysis result saved to {save_path}")
+ else:
+ print("Invalid choice.")
+
+def main():
+ directories = list_log_directories()
+ log_dir = choose_directory(directories)
+ if not log_dir:
+ return
+
+ csv_path = choose_csv_file(log_dir)
+ if not csv_path:
+ return
+
+ df = load_csv(csv_path)
+ if df is None:
+ return
+
+ df_selected = choose_columns(df)
+ if df_selected is None:
+ return
+
+ analyze_data(df_selected, log_dir)
+
+if __name__ == "__main__":
+ main()
diff --git a/GEMstack/utils/log_plot.py b/GEMstack/utils/log_plot.py
new file mode 100644
index 000000000..e2b11753f
--- /dev/null
+++ b/GEMstack/utils/log_plot.py
@@ -0,0 +1,148 @@
+import matplotlib.pyplot as plt
+import pandas as pd
+import os
+import numpy as np
+
+# Most of the code in this file is copied from ./tuning_logs/log_plot.py by Mikayel Aramyan (mikayel2)
+
+def create_plot(t, actual, desired, xlabel, ylabel, title, legend, save_path=None):
+ plt.figure()
+ plt.plot(t, actual)
+ plt.plot(t, desired)
+ overshoot = np.max(np.array(actual) - np.array(desired))
+ rmse = np.sqrt(np.mean((np.array(actual) - np.array(desired))**2))
+ print(f'{title} - Maximum Overshoot: {overshoot}, RMSE: {rmse}')
+ plt.xlabel(xlabel)
+ plt.ylabel(ylabel)
+ plt.title(f'{title} (Max Overshoot: {overshoot:.2f}, RMSE: {rmse:.2f})')
+ plt.legend(legend)
+ plt.grid(True)
+ if save_path:
+ plt.savefig(save_path, dpi=600)
+ plt.close()
+
+def create_error_plot(t, error, xlabel, ylabel, title, save_path=None):
+ plt.figure()
+ plt.plot(t, error)
+ plt.xlabel(xlabel)
+ plt.ylabel(ylabel)
+ plt.title(title)
+ plt.grid(True)
+ if save_path:
+ plt.savefig(save_path, dpi=600)
+ plt.close()
+
+def main(log_folder):
+ if log_folder is None:
+ return
+ df = pd.read_csv(log_folder + '/PurePursuitTrajectoryTracker_debug.csv')
+ save_figures =True
+
+ plots_folder = os.path.join(log_folder, 'plots')
+ os.makedirs(plots_folder, exist_ok=True)
+
+ t = df['curr pt[0] vehicle time'].tolist()
+ x = df['curr pt[0]'].tolist()
+ y = df['curr pt[1]'].tolist()
+ v = df['current speed (m/s)'].tolist()
+ yaw = df['current yaw (rad)'].tolist()
+
+ xd = df['desired pt[0]'].tolist()
+ yd = df['desired pt[1]'].tolist()
+ vd = df['desired speed (m/s)'].tolist()
+ yawd = df['desired yaw (rad)'].tolist()
+
+ cte = df['crosstrack error'].tolist()
+ front_wheel_angle = df['front wheel angle (rad)'].tolist()
+ accel = df['output accel (m/s^2)'].tolist()
+
+
+
+ rmse_cte = np.sqrt(np.mean(np.array(cte)**2))
+ print(f'RMSE (cte): {rmse_cte}')
+
+ max_accel_error = np.max((np.array(accel) - 1.0)**2)
+ print(f'Maximum (acceleration - 1.0)^2: {max_accel_error}')
+
+ rms_forward_acceleration = np.sqrt(np.mean(np.array(accel)**2))
+ print(f'RMS of Acceleration: {rms_forward_acceleration}')
+
+ dt = np.mean(np.diff(t))
+ jerk = np.gradient(np.array(accel), dt)
+ rms_jerk = np.sqrt(np.mean(jerk**2))
+ print(f'RMS of Jerk: {rms_jerk}')
+
+ speed_error = np.array(v) - np.array(vd)
+ rms_speed_error = np.sqrt(np.mean(speed_error**2))
+ print(f'RMS of Speed Error: {rms_speed_error}')
+
+ angular_velocity = np.gradient(front_wheel_angle, dt)
+ angular_acceleration = np.gradient(angular_velocity, dt)
+ angular_jerk = np.gradient(angular_acceleration, dt)
+ rms_angular_acceleration = np.sqrt(np.mean(angular_acceleration**2))
+ rms_angular_jerk = np.sqrt(np.mean(angular_jerk**2))
+ print(f'RMS of Angular Acceleration: {rms_angular_acceleration}')
+ print(f'RMS of Angular Jerk: {rms_angular_jerk}')
+
+ yaw_error = np.array(yaw) - np.array(yawd)
+ rms_yaw_error = np.sqrt(np.mean(yaw_error**2))
+ print(f'RMS of Yaw Error: {rms_yaw_error}')
+
+ w1, w2, w3 = 0.5, 0.3, 0.2
+ comfort_index = w1 * rms_forward_acceleration + w2 * rms_jerk + w3 * rms_speed_error
+ print(f'===== Comfort Index: {comfort_index:.2f} =====')
+
+ w1, w2, w3, w4, w5, w6, w7 = 0.2, 0.1, 0.15, 0.2, 0.15, 0.2, 0.1
+ safety_index = (w1 * rmse_cte + w2 * rms_angular_jerk + w3 * rms_angular_acceleration +
+ w4 * rms_forward_acceleration + w5 * rms_jerk + w6 * rms_speed_error +
+ w7 * rms_yaw_error)
+ print(f'===== Safety Index: {safety_index:.2f} =====')
+
+ create_plot(t, y, yd, '$t$ (s)', '$y(t)$, $y_{d}(t)$ (m)', 'Actual and Desired y', ['Actual $y(t)$', 'Desired $y_{d}(t)$'], os.path.join(plots_folder, 'y_vs_yd.png') if save_figures else None)
+ create_error_plot(t, np.array(y) - np.array(yd), '$t$ (s)', 'Error in $y(t)$ (m)', 'Error between Actual and Desired $y(t)$', os.path.join(plots_folder, 'error_y.png') if save_figures else None)
+
+ create_plot(t, x, xd, '$t$ (s)', '$x(t)$, $x_{d}(t)$ (m)', 'Actual and Desired x', ['Actual $x(t)$', 'Desired $x_{d}(t)$'], os.path.join(plots_folder, 'x_vs_xd.png') if save_figures else None)
+ create_error_plot(t, np.array(x) - np.array(xd), '$t$ (s)', 'Error in $x(t)$ (m)', 'Error between Actual and Desired $x(t)$', os.path.join(plots_folder, 'error_x.png') if save_figures else None)
+
+ create_plot(t, v, vd, '$t$ (s)', '$v(t)$, $v_{d}(t)$ (m/s)', 'Actual and Desired v', ['Actual $v(t)$', 'Desired $v_{d}(t)$'], os.path.join(plots_folder, 'v_vs_vd.png') if save_figures else None)
+ create_error_plot(t, np.array(v) - np.array(vd), '$t$ (s)', 'Error in $v(t)$ (m/s)', 'Error between Actual and Desired $v(t)$', os.path.join(plots_folder, 'error_v.png') if save_figures else None)
+
+ plt.figure()
+ plt.plot(t, cte)
+ rmse_cte = np.sqrt(np.mean(np.array(cte)**2))
+ print(f'RMSE (cte): {rmse_cte}')
+ plt.xlabel('$t$ (s)')
+ plt.ylabel('Crosstrack Error (m)')
+ plt.title(f'Crosstrack Error over Time (RMSE: {rmse_cte:.2f})')
+ plt.grid(True)
+ if save_figures:
+ plt.savefig(os.path.join(plots_folder, 'cte.png'), dpi=600)
+ plt.close()
+
+ plt.figure()
+ plt.plot(t, front_wheel_angle)
+ plt.xlabel('$t$ (s)')
+ plt.ylabel('Front Wheel Angle (rad)')
+ plt.title('Front Wheel Angle over Time')
+ plt.grid(True)
+ if save_figures:
+ plt.savefig(os.path.join(plots_folder, 'front_wheel_angle.png'), dpi=600)
+ plt.close()
+
+ plt.figure()
+ plt.plot(t, accel)
+ plt.xlabel('$t$ (s)')
+ plt.ylabel('Acceleration (m/s^2)')
+ plt.title('Acceleration over Time')
+ plt.grid(True)
+ if save_figures:
+ plt.savefig(os.path.join(plots_folder, 'accel.png'), dpi=600)
+ plt.close()
+
+
+
+
+if __name__ == '__main__':
+ # Change this to the log folder you want to plot if running this script manually
+ log_folder = '2025-02-13_21-10-39'
+ main(log_folder)
diff --git a/HOMEWORK.md b/HOMEWORK.md
new file mode 100644
index 000000000..e5bbe7b9d
--- /dev/null
+++ b/HOMEWORK.md
@@ -0,0 +1,194 @@
+# CS 588 Phase 1 Homework
+
+Due date: 2/24
+
+**Overall objective**: work together in large teams to create a nontrivial integrated behavior on the vehicle. The target behavior will be to drive along a predefined track, and slowing down for pedestrians that would otherwise be hit if the vehicle continues at normal speed. The vehicle should stop when necessary.
+
+**Skills**: Familiarization with GEMstack repo, Git branches and pull requests, lab and equipment logistics, inter-team communication.
+
+**On-board development**: If you are working on the GEM vehicle, your work will go into the `cs588_groupX/` folder. We recommend that before you start, you
+a) Ensure that your GEMstack repository is on the correct branch. cd into `cs588_groupX/GEMstack` and run `git status`. It should show that you are on the `s2025_groupX` branch.
+b) Get the latest updates to the course stack. To do so, run `git pull` and `git merge s2025`.
+
+When you are done, commit your development of this component and launch files to your group's branch. **DO NOT** set the global git authentication on the vehicle to your account. The easiest way to do this is:
+a) Log onto Github under your account name. [Create a Github Personal Access Token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-personal-access-token-classic). To reduce the chance of mischief, choose the "Only select repositories" option and set the access token just to push to this repository. Open the "Repository permissions" and change the "Contents" access to "Read and write"
+b) When your token is generated, copy the token and keep it somewhere safe.
+c) Commit your changes to your branch.
+d) Run "git push". In "Username" put your Github username, and in "Password" use the token that you copied in step b). This should now be reflected in your branch.
+
+When you are creating a pull request to the `s2025_teamX` branch of GEMstack, you can do this through the Github webpage. You can also use the hub command line tool: https://hub.github.com/. Ensure that this shows up in the `s2025_teamX` branch's Pull Requests tab.
+
+**Quality-of-life tips**
+- By default, the log folder is shown after each run. If this is annoying, change the launch file to read `after: show_log_folder: False`.
+- Each component can generate CSV files that collect fast streaming data for later analysis. Use `self.debug(item,value)` or `self.debug_event(label)` in your component's initialize(), update(), or cleanup() functions. These will be stored in LOG_FOLDER/COMPONENT_debug.csv.
+- Find all magic constants in your code. Move them into the `GEMstack/knowledge/defaults/current.yaml` file under appropriate sub-keys.
+
+
+**To submit**: ensure your group's work is merged into the `s2025_teamA` or `s2025_teamB` branches (as appropriate) with appropriately logged PRs. We will review your team's branch history. Your group will submit some evidence of its component working, and if successful your team will submit an integrated video of the behavior working. These may be posted to Slack or Clickup.
+
+
+## Group Leads
+
+- Decide on a good day of the week and time for group meetings.
+- Discuss and decide on Github pull request procedures for your team's branches.
+- Discuss and decide on project management and communication features for your team (e.g., Slack / Clickup).
+- Lead your group and team with clear expectations to prepare work in time for integration. Don't leave this to the last minute! Suggest establishing an internal deadline of 2/17 for all technical pieces.
+- Keep track of team members' efforts, especially if their contributions are not apparent through branch commit history. At the end of this phase you will be reporting on your members' contributions, so if they are not listed in PRs and you have little to say about their work, they will receive low individual scores for this assignment. Note that they will be reporting on your leadership as well, so remember to treat your group fairly.
+- Gather sufficient evidence (e.g., videos of partial system tests or simulation tests, or a report with text and figures) that your group has succeeded in its objectives.
+- Decide which group on your team will test the integrated behavior. Have them report results, and capture a video of successful runs.
+
+
+## Control & Dynamics Group
+- Starter code is provided in the `homework/` folder in your group's branch.
+- All members must complete safety lookout training; at least two must complete safety driver training.
+
+Part 1:
+
+1. Flash a distress signal to the vehicle via ROS. Modify the `blink.py` program to print messages from some of the "/pacmod/parsed_tx/X" topics (at least 2). For each topic, you will need to create a ROS subscriber. Read the documentation on https://github.com/astuff/pacmod2 to figure out which message types to use and extract printable data from these messages.
+
+ Now, create a publisher for the "/pacmod/as_rx/turn_cmd" topic. You will then flash a "distress signal" corresponding to left turn signal for 2 seconds, right turn signal for 2s, and then turn signals off for 2s. This will then repeat forever until you press Ctrl+C.
+
+ To run your code on the vehicle, you will launch the sensors and drive-by-wire system, then enable Pacmod control via the joystick. (Please do not try to operate the vehicle with the joystick!) Then, run your program with `python3 blink.py`. After you are done, you should disable Pacmod control via the joystick.
+
+2. Adapt `blink.py` to run the behavior in the GEMstack executor. Copy blink_launch.yaml to `GEMstack/launch`. Copy `blink_component.py` to `GEMstack/onboard/planning/` and make your edits there.
+
+ Replicate your sensor printing and distress signal code in the BlinkDistress class in blink_component.py. You will need to adapt your code from using ROS subscribers / publishers to using the `GEMVehicleInterface` and `GEMVehicleReading` objects provided in the template code. Opening the GEMstack folder in the VSCode IDE will help you find the documentation for these classes.
+
+ Run your code in simulation first. (If you are running on the vehicle, first open one terminal window and run `roscore`. Keep this running.) Then, in the GEMstack folder, run `python3 main.py --variant=sim launch/blink_launch.yaml`. You should see your blinking sequence in a 3D visualization window. Use Ctrl+C to quit.
+
+ Now, run your code on the real vehicle. Enable Pacmod control as before, then run `python3 main.py launch/blink_launch.yaml`.
+
+3. Adapt your code so the distress signal runs on GEMstack as a `signaling` component, and blinks the distress signal only when the `AllState.intent.intent=HALTING` flag is set. You will need to modify the computation graph in `GEMstack/knowledge/defaults` to add the `signaling` component, and modify launch files so your class is no longer listed under `trajectory_tracking`.
+4. Use Git to create a pull request, and have your PR approved to the `s2025_teamX` branch.
+
+Part 2:
+1. Tune the low-level controller (currently `PurePursuit`) to track the trajectory `AllState.trajectory` produced by the Planning group as accurately as possible. You may tune steering, acceleration/braking, the tracking strategies, or all three.
+2. Decide on an appropriate tracking quality metric with the Infra team.
+3. Report on what you tuned and how the tracking accuracy metric was improved.
+4. Move all magic constants to an appropriate settings file, and have your pull request approved.
+5. Integrate as necessary.
+
+
+## State estimation & Calibration Group
+
+- Have at least one team member complete safety lookout & driver training.
+
+Part 1:
+
+1. You will be using both the Oak RGB-D camera and the Ouster top lidar to estimate the 3D positions of pedestrians relative to the vehicle frame. Because the stereo depth estimate is not as accurate as the lidar, we will be using the image to detect objects and the lidar to measure their 3D extent. The first step of this process is to associate points in the camera with points in the lidar, which requires an understanding of projection and relative pose (extrinsics) calibration.
+2. You will first need to capture a calibration dataset consisting of paired images and lidar scans. Write code to capture paired scans and take several snapshots of interesting scenes. You should save images as standard image files, and save lidar scans as numpy arrays or PCD files. Store these to a directory named GEMstack/data/SOMETHING where "SOMETHING" is a descriptive name of your dataset. In the main GEMstack folder we had a GEM e2 program `python3 GEMstack/offboard/calibration/capture_lidar_zed.py data/SOMETHING` that could capture these files. Create a similar program `capture_ouster_oak.py` that reads from the appropriate ROS topics.
+3. Use the `CameraInfo` message to obtain the intrinsics of the camera (http://docs.ros.org/en/noetic/api/sensor_msgs/html/msg/CameraInfo.html) from the `/oak/rgb/camera_info` ROS topic. You may use the image_geometry module (https://docs.ros.org/en/api/image_geometry/html/python/) to project points in the camera frame to the image. A camera frame is defined with the z axis pointing forward, x pointing right, and y pointing down. The important values of this message are the fx, fy, cx, and cy intrinsic parameter terms. You can store these values to disk or read them live in your code.
+4. We do not have the relative transform `T_toplidar^frontcamera` that converts points from the top lidar (Ouster) frame to the Oak frame. Create a way to calibrate this transform. Specifically, if you have a dataset of paired image, point cloud pairs `(image^t,point_cloud^t)`, you should observe that the points belonging to some object in the point cloud are mapped directly to those pixels in the image. Specifically, if `image_pt` is an image pixel belonging to point pt in the point cloud, then we should observe the relation "image_pt = project(T_toplidar^frontcamera * pt, oak_intrinsics)"
+
+ To obtain this matrix you may use manual calibration if you need to, or you could use a more sophisticated method.
+
+ For manual calibration, you can modify the GEM e2 code `python3 GEMstack/offboard/calibration/klampt_show_lidar_zed.py data/SOMETHING` to visualize the stored point clouds from the front camera and the lidar. You can see why the forward depth estimates are quite poor.
+
+ Automated methods could use a detection method to obtain a distinctive shape like a sphere or a square (e.g., a face of the foam cubes) in the Zed image, paired with the same shape in the lidar point cloud. An optimization would ensure that all lidar points belonging to the shape are projected to its projection in the image. You can also match the stereo and lidar point clouds using a dynamically-associated registration method like ICP.
+
+
+Part 2:
+
+1. Now we will perform absolute calibration of vehicle height, rear axle center, and sensors, i.e., the vehicle frame. The vehicle frame is centered at the rear axle center, with x pointing forward, y pointing to the left, and z pointing up. We want the transforms `T_frontcamera^vehicle` and `T_toplidar^vehicle`.
+
+ - A flat ground and walls are good references for the pitch/roll of the sensors. Using the lidar point cloud, calibrate the rotation component of this transform. Also, use the height of the ground to determine the z above ground.
+ - Using distinctive shapes (e.g., the foam cubes), create a dataset in which you set up objects on the centerline of the vehicle. Use these to calibrate the yaw and the horizontal offset of the lidar.
+ - Measure the height of the rear axle over the ground. Use this to figure out the z above the rear axle center.
+
+2. Combine all of these measurements to establish `T_toplidar^vehicle`. Use your result in Step 1 to compute `T_frontcamera^vehicle` as well. Store these calibrations into the files referenced by `GEMstack/knowledge/calibration/gem_e4.yaml`. Name your calibrations meaningfully -- specifying which vehicle and what type of calibration you are performing -- and include the time and date of the calibration in your files.
+3. Put your offline calibration code in the `GEMstack/offboard/calibration/` directory and write a README with a description of the method that you used, and instructions about how to use it. Describe the results of your calibration steps 1 and 2 in this file.
+
+ DO NOT commit data files used in calibration. Put your datasets in the `data` directory, named appropriately. Files in this directory will not be committed to the repo.
+
+4. Use Git to create a pull request to the `s2025_teamX` branch, and have your PR approved.
+5. Integrate as necessary.
+
+
+## Perception Group
+
+- Starter code is provided in the `homework/` folder in your group's branch.
+- Have at least one team member complete safety lookout & driver training.
+- Many of your team members should plan to work on the vehicle, at least in the early stages of the assignment. Plan to coordinate with the Infra team to enable a method for working offline.
+
+Part 1:
+1. Use an object detector to identify a pedestrian from the front camera. Implement the function `detect_people(img)` in `person_detector.py` to return a list of bounding boxes of people in an image. This is a function that inputs OpenCV images and returns a list of (x,y,w,h) image boxes that contain people. To run it, call `person_detector.py IMAGE` where IMAGE is an image file. You may download images from the internet or take your own. If you have a webcam, you can run `person_detector.py webcam` to run the live detector.
+
+ I suggest using the YOLOv8 package (https://github.com/ultralytics/ultralytics) and choosing one of its pretrained models, such as `yolov8n.pt`. Follow the basic tutorials for running the detector. The ID of people is 0, and you can examine the Result object to extract out the boxes that have that ID.
+2. Refactor your detector to work with GEMstack. Move your detector logic into a GEMstack Component defined in `pedestrian_detection.py`. The provided code will turn your boxes into a dictionary of `AgentState` objects. Read the documentation of the `AgentState` class to see what data should be stored there. For now, you will put temporary values for most of these fields.
+
+ Specifically you should:
+
+ 1. Implement the `PedestrianDetector2D` component to call your detection function in `image_callback(img)`. Your model should be loaded once in the component's `initialize()` method.
+ 2. Move your pretrained model to `GEMstack/knowledge/detection`.
+ 3. Move the Python code to `GEMstack/onboard/perception` where the launch file can find them.
+ 4. Make sure that your code can find the pretrained model in the `knowledge` directory.
+ 5. Using the detector-only variant of the launch file `python3 main.py --variant=detector_only pedestrian_detection.yaml`, test your code on the real vehicle. You will need to run "source ~/demo_ws/devel/setup.bash" and "roslaunch basic_launch sensor_init.launch" in another terminal, and run the same "source ..." command before running GEMstack code.
+ 6. Have a group member(s) walk in front of the Zed camera and observe the printouts.
+ 7. Finally, tune the rate at which you run your component to obtain the highest framerate possible.
+
+3. Use Git to create a pull request to `s2025_teamX`, and have your PR approved.
+
+Part 2:
+1. Fuse detections with depth (`front_depth` sensor), multiple cameras, and/or LIDAR (`top_lidar` sensor) to reduce errors and place agents in their 3D locations in the scene. Use the sensor calibration from the Calibration group. Look at the lidar readings near the center of the detection, and use those to estimate a region of interest in the point cloud. Then, fit the center and dimensions of the object to the points in the point cloud that you associate with the pedestrian. Finally, put the estimated center and dimensions of the pedestrian into the "pose" and "dimensions" attributes, making sure to respect the vehicle's coordinate convention.
+2. If the Infra team has worked quickly enough, you should be able to work offline, replaying from ROS bags. It is a good idea to coordinate with them.
+3. Provide your results in a `PedestrianDetector` component and provide this to the Planning group.
+
+Part 3:
+1. Keep track of `AgentState` objects as the vehicle moves and when they disappear out of the field of view.
+2. Track pedestrians over time to fill out the "velocity" attribute and to give them relatively consistent names over time.
+ - number pedestrian IDs as "ped1", "ped2", etc. so that if you detect that a single pedestrian has moved across your field of view, you consistently output "ped1" as the agent's ID.
+ - keep track of the detected pedestrians on the prior frame to estimate the velocity of each pedestrian. Make sure to store 3D previous detections in the START frame. For each current detection, examine whether the box of the new detection overlaps any prior detection. If so, this begins a "track" detection in which you a) keep the last ID, and b) estimate the pedestrian's velocity in the CURRENT frame. The reported velocity should NOT be relative to the vehicle's velocity. If this is a new pedestrian, increment the pedestrian ID and output zero velocity for this frame.
+
+ If you wish to use more sophisticated trackers, e.g., Hungarian algorithm, velocity prediction, etc, you may do so.
+
+3. Make sure this works while the vehicle is moving. For example, if the pedestrian is staying still but the vehicle is moving, their reported velocity remains relatively close to 0. Provide these results to the Planning group.
+4. Integrate as necessary.
+
+
+## Planning Group
+
+- Have at least one team member complete safety lookout & driver training.
+
+Part 1:
+1. We will introduce two new components that operate on the output of a `PedestrianDetector2D` / `PedestrianDetector` model produced by the perception team:
+
+ - Logic in the `PedestrianYielder` component in `pedestrian_yield_logic.py`.
+ - A planner in the `YieldTrajectoryPlanner` component in `longitudinal_planning.py`.
+
+ Move these files to `GEMstack/onboard/planning` where launch files can find them.
+
+2. For the planning logic, you will implement a braking strategy in `longitudinal_plan_brake()`, and implement a trapezoidal velocity profile strategy in `longitudinal_plan()`.
+
+ You will first do your development using a unit test script, then in simulation. Run `test_longitudinal_planning.py` from the main GEMstack folder to plot the results of your methods. Using the launch file `python3 main.py --variant=fake_sim pedestrian_detection.yaml`, you will receive perfect knowledge about the pedestrian. Test to make sure this produces reasonable behavior in the simulation. It should slow and stop at a safe distance when a pedestrian is detected, and then resume slowly when the pedestrian disappears.
+3. Run on the vehicle using `--variant fake_real`. This will use a "fake" detector that will just report pedestrians at some point. Work with the Control and Dynamics team to ensure that reasonable behavior results.
+4. Use Git to create a pull request, and have your PR approved.
+
+Part 2:
+1. Use collision detection to determine the distance to collision and use that to determine whether to begin to brake.
+
+ - Determine the lookahead distance that you would need to avoid collision at the current velocity and desired braking deceleration.
+ - For many steps along the route up to that distance, determine whether the vehicle would be likely to hit (more precisely, get sufficiently close) to the pedestrian. Use a 1m lateral distance and 3m longitudinal distance buffer in your vehicle geometry.
+ - For the distance-to-collision you determine above, use the longitudinal planner to determine your desired acceleration.
+
+ Determine how quickly you can run your planner. If it is too slow (<5Hz), try using the vehicle-pedestrian distance to dynamically determine your collision checking resolution.
+
+2. Modify the `PedestrianYielder` so that it selects a subset of pedestrians for which the above calculations should be performed.
+3. Adapt the motion planner to slow gently for distant crossing pedestrians to boost rate of progress after the pedestrian crosses the vehicle's route. Use the Perception group's estimated velocity for each pedestrian.
+4. Using the `--variant real_real` will run on the real vehicle using trajectory predictions from the Perception group.
+5. Integrate as necessary.
+
+
+## Infra Group
+- Have at least one team member complete safety lookout & driver training.
+
+Part 1:
+1. Develop a systematic method for saving camera data for the Calibration and Vision groups. Work closely with them to figure out a method to test their methods from logged data.
+2. Write documentation for this method, detailing usage and performance issues.
+3. Use Git to create a pull request to `s2025_teamX`, and have your PR approved.
+
+Part 2:
+1. Develop a visualizer for rapidly plotting detected pedestrian trajectories from log data (e.g., matplotlib).
+2. Work with the Control & Dynamics team to test the integrated system.
+3. Develop metrics for pedestrian safety and passenger comfort. Analyze logs from tests.
+4. Integrate as necessary.
+
diff --git a/README.md b/README.md
index 51f26e290..b7d69edd7 100644
--- a/README.md
+++ b/README.md
@@ -11,25 +11,124 @@
GEMstack uses Python 3.7+ and ROS Noetic. (It is possible to do some offline and simulation work without ROS, but it is highly recommended to install it if you are working on any onboard behavior or training for rosbag files.)
You should also have the following Python dependencies installed, which you can install from this folder using `pip install -r requirements.txt`:
-
-- numpy
-- scipy
-- matplotlib
-- opencv-python
-- torch
-- klampt
-- shapely
-- dacite
-- pyyaml
+- GEMstack Dependencies
+ - numpy
+ - scipy
+ - matplotlib
+ - opencv-python
+ - torch
+ - klampt==0.9.2
+ - shapely
+ - dacite
+ - pyyaml
+- Perception Dependencies
+ - ultralytics
In order to interface with the actual GEM e2 vehicle, you will need [PACMOD2](https://github.com/astuff/pacmod2) - Autonomoustuff's low level interface to vehicle. You will also need Autonomoustuff's [sensor message packages](https://github.com/astuff/astuff_sensor_msgs). The onboard computer uses Ubuntu 20.04 with Python 3.8, CUDA 11.6, and NVIDIA driver 515, so to minimize compatibility issues you should ensure that these are installed on your development system.
-From a fresh Ubuntu 20.04 with ROS Noetic and [CUDA 11.6 installed](https://gist.github.com/ksopyla/bf74e8ce2683460d8de6e0dc389fc7f5), you can install these dependencies by running `setup/setup_this_machine.sh` from the top-level GEMstack folder.
+## Running the stack on Ubuntu 20.04 without Docker
+### Checking CUDA Version
+
+Before proceeding, check your Nvidia Driver and supported CUDA version:
+```bash
+nvidia-smi
+```
+This will show your NVIDIA driver version and the maximum supported CUDA version. Make sure you have CUDA 11.8 or 12+ installed.
+
+From Ubuntu 20.04 install [CUDA 11.6](https://gist.github.com/ksopyla/bf74e8ce2683460d8de6e0dc389fc7f5) or [CUDA 12+](https://gist.github.com/ksopyla/ee744bf013c83e4aa3fc525634d893c9) based on your current Nvidia Driver version.
+
+To check the currently installed CUDA version:
+```bash
+nvcc --version
+```
+You can install the dependencies for GEMstack by running `setup/setup_this_machine.sh` from the top-level GEMstack folder.
+
+## Running the stack on Ubuntu 20.04 or 22.04 with Docker
+> [!NOTE]
+> Make sure to check the Nvidia Driver and supported CUDA version before proceeding by following the steps in the previous section.
+
+## Prerequisites
+- Docker (In Linux - Make sure to follow the post-installation steps from [here](https://docs.docker.com/engine/install/linux-postinstall/))
+- Nvidia Container Toolkit
+
+Try running the sample workload from the [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/sample-workload.html) to check if your system is compatible.
-To build a Docker container with all these prerequisites, you can use the provided Dockerfile by running `docker build -t gem_stack setup/`. For GPU support you will need the NVidia Container Runtime (run `setup/get_nvidia_container.sh` from this directory to install, or see [this tutorial](https://collabnix.com/introducing-new-docker-cli-api-support-for-nvidia-gpus-under-docker-engine-19-03-0-beta-release/) to install) and run `docker run -it --gpus all gem_stack /bin/bash`.
+```bash
+sudo docker run --rm --runtime=nvidia --gpus all ubuntu nvidia-smi
+```
+You should see the nvidia-smi output similar to [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/sample-workload.html#:~:text=all%20ubuntu%20nvidia%2Dsmi-,Your%20output%20should%20resemble%20the%20following%20output%3A,-%2B%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2D%2B%0A%7C%20NVIDIA%2DSMI%20535.86.10).
+
+If you see the output, you are good to go. Otherwise, you will need to install the Docker and NVidia Container Toolkit by following the instructions.
+- For **Docker**, follow the instructions [here](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository).
+
+- For **Nvidia Container Toolkit**, run `setup/get_nvidia_container.sh` from this directory to install, or see [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for more details.
+
+## Running the stack on Ubuntu 20.04 without Docker
+### Checking CUDA Version
+
+Before proceeding, check your Nvidia Driver and supported CUDA version:
+```bash
+nvidia-smi
+```
+This will show your NVIDIA driver version and the maximum supported CUDA version. Make sure you have CUDA 11.8 or 12+ installed.
+
+From Ubuntu 20.04 install [CUDA 11.6](https://gist.github.com/ksopyla/bf74e8ce2683460d8de6e0dc389fc7f5) or [CUDA 12+](https://gist.github.com/ksopyla/ee744bf013c83e4aa3fc525634d893c9) based on your current Nvidia Driver version.
+
+To check the currently installed CUDA version:
+```bash
+nvcc --version
+```
+You can install the dependencies for GEMstack by running `setup/setup_this_machine.sh` from the top-level GEMstack folder.
+## Running the stack on Ubuntu 20.04 or 22.04 with Docker
+> [!NOTE]
+> Make sure to check the Nvidia Driver and supported CUDA version before proceeding by following the steps in the previous section.
+For GPU support you will need the NVidia Container Toolkit (run `setup/get_nvidia_container.sh` from this directory to install, or see [this](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html) for more details).
+
+## Building the Docker image
+
+To build a Docker image with all these prerequisites, you can use the provided Dockerfile by running:
+
+```bash
+bash setup/build_docker_image.sh
+```
+
+## Running the Docker container
+
+To run the container, you can use the provided Docker Compose file by running:
+> [!NOTE]
+> If you want to open multiple terminals to run the container, you can use the same command. It will automatically start a new terminal inside the same container.
+```bash
+bash run_docker_container.sh
+```
+## Usage Tips and Instructions
+
+### Using Host Volume
+
+You can use the host volume under the container's home directory inside the `host` folder. This allows you to build and run files that are on the host machine. For example, if you have a file on the host machine at `/home/<username>/project`, you can access it inside the container at `/home/<username>/host/project`.
+
+### Using Dev Containers Extension in VSCode
+
+To have a good developer environment inside the Docker container, you can use the Dev Containers extension in VSCode. Follow these steps:
+
+1. Install the Dev Containers extension in VSCode.
+2. Open the cloned repository in VSCode.
+3. Press `ctrl+shift+p` (or select the remote explorer icon from the left bar) and select `Dev-Containers: Attach to Running Container...`.
+4. Select the container name `gem_stack-container`.
+5. Once attached, Select `File->Open Folder...`.
+6. Select the folder/workspace you want to open in the container.
+
+This will set up the development environment inside the Docker container, allowing you to use VSCode features seamlessly.
+
+## Stopping the Docker container
+
+To stop the container, you can use the provided stop script by running:
+
+```bash
+bash stop_docker_container.sh
+```
## In this folder
diff --git a/data/.gitignore b/data/.gitignore
old mode 100644
new mode 100755
diff --git a/data/README.md b/data/README.md
old mode 100644
new mode 100755
diff --git a/homework/blink.py b/homework/blink.py
new file mode 100644
index 000000000..b83d3c49c
--- /dev/null
+++ b/homework/blink.py
@@ -0,0 +1,130 @@
+import rospy
+from std_msgs.msg import String, Bool, Float32, Float64
+from pacmod_msgs.msg import PositionWithSpeed, PacmodCmd, SystemRptInt, SystemRptFloat, VehicleSpeedRpt
+
+TURN_RIGHT = 0
+TURN_NONE = 1
+TURN_LEFT = 2
+TURN_HAZARD = 3
+# For message format, see
+# https://github.com/astuff/astuff_sensor_msgs/blob/3.3.0/pacmod_msgs/msg/PacmodCmd.msg
+
+class BlinkDistress:
+ """Your control loop code should go here. You will use ROS and Pacmod messages
+ to communicate with drive-by-wire system to control the vehicle's turn signals.
+ """
+ def __init__(self):
+ # TODO: Initialize your publishers and subscribers here
+
+ # When you create a subscriber to a /pacmod/parsed_tx/X topic, you will need to provide a callback function.
+ # You will want this callback to be a BlinkDistress method, such as print_X(self, msg). msg will have a
+ # ROS message type, and you can find out what this is by either reading the documentation or running
+ # "rostopic info /pacmod/parsed_tx/X" on the command line.
+ # check update file good
+
+ self.turn_blink_pub = rospy.Publisher('/pacmod/as_rx/turn_cmd', PacmodCmd, queue_size=1)
+ # Define blink cmd
+ self.turn_cmd = PacmodCmd()
+
+ # Subscribers to /pacmod/parsed_tx/X (at least 2)
+ self.speed_sub = rospy.Subscriber("/pacmod/parsed_tx/vehicle_speed_rpt", VehicleSpeedRpt, self.speed_callback)
+ self.accel_sub = rospy.Subscriber("/pacmod/parsed_tx/accel_rpt", SystemRptFloat, self.accel_callback)
+
+ pass
+
+ def rate(self):
+ """Requested update frequency, in Hz"""
+ return 0.5
+
+ def initialize(self):
+ """Run first"""
+ pass
+
+ def cleanup(self):
+ """Run last"""
+ pass
+
+ def get_dir_distress(self):
+ pass
+
+ def update(self):
+ """Run in a loop"""
+ # TODO: Implement your control loop here
+ # You will need to publish a PacmodCmd() to /pacmod/as_rx/turn_cmd. Read the documentation to see
+ # what the data in the message indicates.
+ #self.turn_cmd = PacmodCmd()
+ # TODO change to actual direction in Part 2
+
+ if self.turn_cmd.ui16_cmd == TURN_NONE:
+ self.turn_cmd.ui16_cmd = TURN_LEFT
+ elif self.turn_cmd.ui16_cmd == TURN_LEFT:
+ self.turn_cmd.ui16_cmd = TURN_RIGHT
+ else:
+ self.turn_cmd.ui16_cmd = TURN_NONE
+ self.turn_blink_pub.publish(self.turn_cmd)
+
+ pass
+
+ def healthy(self):
+ """Returns True if the element is in a stable state."""
+ return True
+
+ def done(self):
+ """Return True if you want to exit."""
+ return False
+
+ def speed_callback(self, msg):
+ """Callback for /pacmod/parsed_tx/vehicle_speed_rpt"""
+ rospy.loginfo(f"Vehicle Speed: {msg.vehicle_speed} m/s | Valid: {msg.vehicle_speed_valid}")
+
+ def accel_callback(self, msg):
+ """Callback for /pacmod/parsed_tx/accel_rpt"""
+ rospy.loginfo("--- Acceleration Report ---")
+ rospy.loginfo(f"Header Timestamp: {msg.header.stamp.to_sec()}")
+
+ # Boolean status flags
+ rospy.loginfo(f"Enabled: {msg.enabled}")
+ rospy.loginfo(f"Override Active: {msg.override_active}")
+ rospy.loginfo(f"Command Output Fault: {msg.command_output_fault}")
+ rospy.loginfo(f"Input Output Fault: {msg.input_output_fault}")
+ rospy.loginfo(f"Output Reported Fault: {msg.output_reported_fault}")
+ rospy.loginfo(f"PACMod Fault: {msg.pacmod_fault}")
+ rospy.loginfo(f"Vehicle Fault: {msg.vehicle_fault}")
+
+ # Float values
+ rospy.loginfo(f"Manual Input: {msg.manual_input} (Driver Input)")
+ rospy.loginfo(f"Command: {msg.command} (Requested Acceleration)")
+ rospy.loginfo(f"Output: {msg.output} (Final Output to Vehicle)")
+ rospy.loginfo("---------------------------\n")
+
+def run_ros_loop(node, Allstate):
+ """Executes the event loop of a node using ROS. `node` should support
+ rate(), initialize(), cleanup(), update(), and done(). You should not modify
+ this code.
+ """
+ #intializes the node. We use disable_signals so that the end() function can be called when Ctrl+C is issued.
+ #See http://wiki.ros.org/rospy/Overview/Initialization%20and%20Shutdown
+ rospy.init_node(node.__class__.__name__, disable_signals=True)
+
+ node.initialize()
+ rate = rospy.Rate(node.rate())
+ termination_reason = "undetermined"
+ try:
+ while not rospy.is_shutdown() and not node.done() and node.healthy():
+ node.update(node, Allstate)
+ rate.sleep()
+ if node.done():
+ termination_reason = "Node done"
+ if not node.healthy():
+ termination_reason = "Node in unhealthy state"
+ except KeyboardInterrupt:
+ termination_reason = "Killed by user"
+ except Exception as e:
+ termination_reason = "Exception "+str(e)
+ raise
+ finally:
+ node.cleanup()
+ rospy.signal_shutdown(termination_reason)
+
+if __name__ == '__main__':
+ run_ros_loop(BlinkDistress())
\ No newline at end of file
diff --git a/homework/blink_component.py b/homework/blink_component.py
new file mode 100644
index 000000000..18fb41c6b
--- /dev/null
+++ b/homework/blink_component.py
@@ -0,0 +1,39 @@
+from ..component import Component
+from ..interface.gem import GEMInterface,GEMVehicleCommand,GEMVehicleReading
+
+
+class BlinkDistress(Component):
+ """Your control loop code should go here. You will use GEMVehicleCommand
+ to communicate with drive-by-wire system to control the vehicle's turn signals.
+ """
+ def __init__(self, vehicle_interface : GEMInterface):
+ self.vehicle_interface = vehicle_interface
+ self.command = None
+
+ def rate(self):
+ """Requested update frequency, in Hz"""
+ return 0.5
+
+ def initialize(self):
+ """Run first"""
+ pass
+
+ def cleanup(self):
+ """Run last"""
+ pass
+
+ def update(self):
+ """Run in a loop"""
+ # we need to set up a GEMVehicleCommand which encapsulates all commands that will be
+ # sent to the drive-by-wire system, simultaneously. To avoid doing arbitrary things
+ # to the vehicle, let's maintain the current values (e.g., accelerator, brake pedal,
+ # steering angle) from its current readings.
+ command = self.vehicle_interface.command_from_reading()
+ # TODO: alter command to execute turn signals, then uncomment line below to send
+ # the command to vehicle
+ # self.vehicle_interface.send_command(command)
+
+ def healthy(self):
+ """Returns True if the element is in a stable state."""
+ return True
+
diff --git a/homework/longitudinal_planning.py b/homework/longitudinal_planning.py
new file mode 100644
index 000000000..c3f3bf737
--- /dev/null
+++ b/homework/longitudinal_planning.py
@@ -0,0 +1,93 @@
+from typing import List
+from ..component import Component
+from ...state import AllState, VehicleState, EntityRelation, EntityRelationEnum, Path, Trajectory, Route, ObjectFrameEnum
+from ...utils import serialization
+from ...mathutils.transforms import vector_madd
+
+def longitudinal_plan(path : Path, acceleration : float, deceleration : float, max_speed : float, current_speed : float) -> Trajectory:
+ """Generates a longitudinal trajectory for a path with a
+ trapezoidal velocity profile.
+
+ 1. accelerates from current speed toward max speed
+ 2. travel along max speed
+ 3. if at any point you can't brake before hitting the end of the path,
+ decelerate with accel = -deceleration until velocity goes to 0.
+ """
+ path_normalized = path.arc_length_parameterize()
+ #TODO: actually do something to points and times
+ points = [p for p in path_normalized.points]
+ times = [t for t in path_normalized.times]
+ trajectory = Trajectory(path.frame,points,times)
+ return trajectory
+
+
+def longitudinal_brake(path : Path, deceleration : float, current_speed : float) -> Trajectory:
+ """Generates a longitudinal trajectory for braking along a path."""
+ path_normalized = path.arc_length_parameterize()
+ #TODO: actually do something to points and times
+ points = [p for p in path_normalized.points]
+ times = [t for t in path_normalized.times]
+ trajectory = Trajectory(path.frame,points,times)
+ return trajectory
+
+
+class YieldTrajectoryPlanner(Component):
+ """Follows the given route. Brakes if you have to yield or
+ you are at the end of the route, otherwise accelerates to
+ the desired speed.
+ """
+ def __init__(self):
+ self.route_progress = None
+ self.t_last = None
+ self.acceleration = 0.5
+ self.desired_speed = 1.0
+ self.deceleration = 2.0
+
+ def state_inputs(self):
+ return ['all']
+
+ def state_outputs(self) -> List[str]:
+ return ['trajectory']
+
+ def rate(self):
+ return 10.0
+
+ def update(self, state : AllState):
+ vehicle = state.vehicle # type: VehicleState
+ route = state.route # type: Route
+ t = state.t
+
+ if self.t_last is None:
+ self.t_last = t
+ dt = t - self.t_last
+
+ curr_x = vehicle.pose.x
+ curr_y = vehicle.pose.y
+ curr_v = vehicle.v
+
+ #figure out where we are on the route
+ if self.route_progress is None:
+ self.route_progress = 0.0
+ closest_dist,closest_parameter = state.route.closest_point_local((curr_x,curr_y),[self.route_progress-5.0,self.route_progress+5.0])
+ self.route_progress = closest_parameter
+
+ #extract out a 10m segment of the route
+ route_with_lookahead = route.trim(closest_parameter,closest_parameter+10.0)
+
+ #parse the relations indicated
+ should_brake = False
+ for r in state.relations:
+ if r.type == EntityRelationEnum.YIELD and r.obj1 == '':
+ #yielding to something, brake
+ should_brake = True
+ should_accelerate = (not should_brake and curr_v < self.desired_speed)
+
+ #choose whether to accelerate, brake, or keep at current velocity
+ if should_accelerate:
+ traj = longitudinal_plan(route_with_lookahead, self.acceleration, self.deceleration, self.desired_speed, curr_v)
+ elif should_brake:
+ traj = longitudinal_brake(route_with_lookahead, self.deceleration, curr_v)
+ else:
+ traj = longitudinal_plan(route_with_lookahead, 0.0, self.deceleration, self.desired_speed, curr_v)
+
+ return traj
diff --git a/homework/pedestrian_detection.py b/homework/pedestrian_detection.py
new file mode 100644
index 000000000..3134b1e35
--- /dev/null
+++ b/homework/pedestrian_detection.py
@@ -0,0 +1,84 @@
+from ...state import AllState,VehicleState,ObjectPose,ObjectFrameEnum,AgentState,AgentEnum,AgentActivityEnum
+from ..interface.gem import GEMInterface
+from ..component import Component
+#from ultralytics import YOLO
+#import cv2
+from typing import Dict
+
+def box_to_fake_agent(box):
+ """Creates a fake agent state from an (x,y,w,h) bounding box.
+
+ The location and size are pretty much meaningless since this is just giving a 2D location.
+ """
+ x,y,w,h = box
+ pose = ObjectPose(t=0,x=x+w/2,y=y+h/2,z=0,yaw=0,pitch=0,roll=0,frame=ObjectFrameEnum.CURRENT)
+ dims = (w,h,0)
+ return AgentState(pose=pose,dimensions=dims,outline=None,type=AgentEnum.PEDESTRIAN,activity=AgentActivityEnum.MOVING,velocity=(0,0,0),yaw_rate=0)
+
+
+class PedestrianDetector2D(Component):
+ """Detects pedestrians."""
+ def __init__(self,vehicle_interface : GEMInterface):
+ self.vehicle_interface = vehicle_interface
+ #self.detector = YOLO('../../knowledge/detection/yolov8n.pt')
+ self.last_person_boxes = []
+
+ def rate(self):
+ return 4.0
+
+ def state_inputs(self):
+ return ['vehicle']
+
+ def state_outputs(self):
+ return ['agents']
+
+ def initialize(self):
+ #tell the vehicle to use image_callback whenever 'front_camera' gets a reading, and it expects images of type cv2.Mat
+ #self.vehicle_interface.subscribe_sensor('front_camera',self.image_callback,cv2.Mat)
+ pass
+
+ #def image_callback(self, image : cv2.Mat):
+ # detection_result = self.detector(image)
+ # self.last_person_boxes = []
+ # #uncomment if you want to debug the detector...
+ # #for bb in self.last_person_boxes:
+ # # x,y,w,h = bb
+ # # cv2.rectangle(image, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 255), 3)
+ # #cv2.imwrite("pedestrian_detections.png",image)
+
+ def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
+ res = {}
+ for i,b in enumerate(self.last_person_boxes):
+ x,y,w,h = b
+ res['pedestrian'+str(i)] = box_to_fake_agent(b)
+ if len(res) > 0:
+ print("Detected",len(res),"pedestrians")
+ return res
+
+
+class FakePedestrianDetector2D(Component):
+ """Triggers a pedestrian detection at some random time ranges"""
+ def __init__(self,vehicle_interface : GEMInterface):
+ self.vehicle_interface = vehicle_interface
+ self.times = [(5.0,20.0),(30.0,35.0)]
+ self.t_start = None
+
+ def rate(self):
+ return 4.0
+
+ def state_inputs(self):
+ return ['vehicle']
+
+ def state_outputs(self):
+ return ['agents']
+
+ def update(self, vehicle : VehicleState) -> Dict[str,AgentState]:
+ if self.t_start is None:
+ self.t_start = self.vehicle_interface.time()
+ t = self.vehicle_interface.time() - self.t_start
+ res = {}
+ for times in self.times:
+ if t >= times[0] and t <= times[1]:
+ res['pedestrian0'] = box_to_fake_agent((0,0,0,0))
+ print("Detected a pedestrian")
+ return res
diff --git a/homework/pedestrian_detection.yaml b/homework/pedestrian_detection.yaml
new file mode 100644
index 000000000..0fffa89fc
--- /dev/null
+++ b/homework/pedestrian_detection.yaml
@@ -0,0 +1,97 @@
+description: "Run the yielding trajectory planner on the real vehicle with real perception"
+mode: hardware
+vehicle_interface: gem_hardware.GEMHardwareInterface
+mission_execution: StandardExecutor
+
+# Recovery behavior after a component failure
+recovery:
+ planning:
+ trajectory_tracking : recovery.StopTrajectoryTracker
+
+# Driving behavior for the GEM vehicle. Runs real pedestrian perception, yield planner, but does not send commands to real vehicle.
+drive:
+ perception:
+ state_estimation : GNSSStateEstimator
+ agent_detection : pedestrian_detection.PedestrianDetector2D
+ perception_normalization : StandardPerceptionNormalizer
+ planning:
+ relations_estimation: pedestrian_yield_logic.PedestrianYielder
+ route_planning:
+ type: StaticRoutePlanner
+ args: [!relative_path '../GEMstack/knowledge/routes/forward_15m.csv','start']
+ motion_planning: longitudinal_planning.YieldTrajectoryPlanner
+ trajectory_tracking:
+ type: pure_pursuit.PurePursuitTrajectoryTracker
+ print: False
+
+
+log:
+ # Specify the top-level folder to save the log files. Default is 'logs'
+ #folder : 'logs'
+ # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix
+ #prefix : 'fixed_route_'
+ # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix
+ #suffix : 'test3'
+ # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics.
+ ros_topics :
+ # Specify options to pass to rosbag record. Default is no options.
+ #rosbag_options : '--split --size=1024'
+ # If True, then record all readings / commands of the vehicle interface. Default False
+ vehicle_interface : True
+ # Specify which components to record to behavior.json. Default records nothing
+ components : ['state_estimation','agent_detection','motion_planning']
+ # Specify which components of state to record to state.json. Default records nothing
+ #state: ['all']
+ # Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate
+ #state_rate: 10
+replay: # Add items here to set certain topics / inputs to be replayed from logs
+ # Specify which log folder to replay from
+ log:
+ ros_topics : []
+ components : []
+
+#usually can keep this constant
+computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml"
+
+variants:
+ detector_only:
+ run:
+ description: "Run the pedestrian detection code"
+ drive:
+ planning:
+ trajectory_tracking:
+ real_sim:
+ run:
+ description: "Run the pedestrian detection code with real detection and fake simulation"
+ mode: hardware
+ vehicle_interface:
+ type: gem_mixed.GEMRealSensorsWithSimMotionInterface
+ args:
+ scene: !relative_path '../scenes/xyhead_demo.yaml'
+ mission_execution: StandardExecutor
+ require_engaged: False
+ visualization: !include "klampt_visualization.yaml"
+ drive:
+ perception:
+ state_estimation : OmniscientStateEstimator
+
+
+ fake_real:
+ run:
+ description: "Run the yielding trajectory planner on the real vehicle with faked perception"
+ drive:
+ perception:
+ agent_detection : pedestrian_detection.FakePedestrianDetector2D
+
+ fake_sim:
+ run:
+ description: "Run the yielding trajectory planner in simulation with faked perception"
+ mode: simulation
+ vehicle_interface:
+ type: gem_simulator.GEMDoubleIntegratorSimulationInterface
+ args:
+ scene: !relative_path '../scenes/xyhead_demo.yaml'
+ visualization: !include "klampt_visualization.yaml"
+ drive:
+ perception:
+ state_estimation : OmniscientStateEstimator
diff --git a/homework/pedestrian_yield_logic.py b/homework/pedestrian_yield_logic.py
new file mode 100644
index 000000000..2567c0093
--- /dev/null
+++ b/homework/pedestrian_yield_logic.py
@@ -0,0 +1,22 @@
+from ...state import AgentState,AgentEnum,EntityRelation,EntityRelationEnum
+from ..component import Component
+from typing import List,Dict
+
+class PedestrianYielder(Component):
+ """Yields for all pedestrians in the scene.
+
+ Result is stored in the relations graph.
+ """
+ def rate(self):
+ return None
+ def state_inputs(self):
+ return ['agents']
+ def state_outputs(self):
+ return ['relations']
+ def update(self,agents : Dict[str,AgentState]) -> List[EntityRelation]:
+ res = []
+ for n,a in agents.items():
+ if a.type == AgentEnum.PEDESTRIAN:
+ #relation: ego-vehicle yields to pedestrian
+ res.append(EntityRelation(type=EntityRelationEnum.YIELDING,obj1='',obj2=n))
+ return res
diff --git a/homework/person_detector.py b/homework/person_detector.py
new file mode 100644
index 000000000..8f4fa3588
--- /dev/null
+++ b/homework/person_detector.py
@@ -0,0 +1,48 @@
+#from ultralytics import YOLO
+import cv2
+import sys
+
+def person_detector(img : cv2.Mat):
+ #TODO: implement me to produce a list of (x,y,w,h) bounding boxes of people in the image
+ return []
+
+def main(fn):
+ image = cv2.imread(fn)
+ bboxes = person_detector(image)
+ print("Detected",len(bboxes),"people")
+ for bb in bboxes:
+ x,y,w,h = bb
+ if not isinstance(x,(int,float)) or not isinstance(y,(int,float)) or not isinstance(w,(int,float)) or not isinstance(h,(int,float)):
+ print("WARNING: make sure to return Python numbers rather than PyTorch Tensors")
+ print("Corner",(x,y),"size",(w,h))
+ cv2.rectangle(image, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 255), 3)
+ cv2.imshow('Results', image)
+ cv2.waitKey(0)
+
+def main_webcam():
+ cap = cv2.VideoCapture(0)
+ cap.set(3, 640)
+ cap.set(4, 480)
+
+ print("Press space to exit")
+ while True:
+ _, image = cap.read()
+
+ bboxes = person_detector(image)
+ for bb in bboxes:
+ x,y,w,h = bb
+ cv2.rectangle(image, (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), (255, 0, 255), 3)
+
+ cv2.imshow('Person detection', image)
+ if cv2.waitKey(1) & 0xFF == ord(' '):
+ break
+
+ cap.release()
+
+
+if __name__ == '__main__':
+ fn = sys.argv[1]
+ if fn != 'webcam':
+ main(fn)
+ else:
+ main_webcam()
\ No newline at end of file
diff --git a/homework/test_longitudinal_plan.py b/homework/test_longitudinal_plan.py
new file mode 100644
index 000000000..400f34fea
--- /dev/null
+++ b/homework/test_longitudinal_plan.py
@@ -0,0 +1,80 @@
+#needed to import GEMstack from top level directory
+import sys
+import os
+sys.path.append(os.getcwd())
+
+from GEMstack.state import Path, ObjectFrameEnum
+from GEMstack.onboard.planning.longitudinal_planning import longitudinal_plan,longitudinal_brake
+import matplotlib.pyplot as plt
+
+def test_longitudinal_planning():
+ test_path = Path(ObjectFrameEnum.START,[(0,0),(1,0),(2,0),(3,0),(4,0),(5,0),(6,0)])
+ test_path2 = Path(ObjectFrameEnum.START,[(5,0),(6,1),(7,2),(9,4)])
+
+ test_traj = longitudinal_brake(test_path, 2.0, 0.0)
+ assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) )
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Braking from 0 m/s (should just stay still)")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+ test_traj = longitudinal_brake(test_path, 2.0, 2.0)
+ assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) )
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Braking from 2 m/s")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+ test_traj = longitudinal_plan(test_path, 1.0, 2.0, 3.0, 0.0)
+ assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) )
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Accelerating from 0 m/s")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+
+ test_traj = longitudinal_plan(test_path, 1.0, 2.0, 3.0, 2.0)
+ assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) )
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Accelerating from 2 m/s")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+ test_traj = longitudinal_plan(test_path, 0.0, 2.0, 3.0, 3.1)
+ assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) )
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Keeping constant velocity at 3.1 m/s")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+ test_traj = longitudinal_plan(test_path, 2.0, 2.0, 20.0, 10.0)
+ assert (t1 < t2 for (t1,t2) in zip(test_traj.times[:-1],test_traj.times[1:]) )
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Too little time to stop, starting at 10 m/s")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+ test_traj = longitudinal_brake(test_path, 2.0, 10.0)
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Too little time to stop, braking at 10 m/s")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+ test_traj = longitudinal_plan(test_path2, 1.0, 2.0, 3.0, 0.0)
+ plt.plot(test_traj.times,[p[0] for p in test_traj.points])
+ plt.title("Nonuniform planning")
+ plt.xlabel('time')
+ plt.ylabel('position')
+ plt.show()
+
+
+
+if __name__ == '__main__':
+ test_longitudinal_planning()
diff --git a/launch/blink_launch.yaml b/launch/blink_launch.yaml
new file mode 100644
index 000000000..ab28ae31a
--- /dev/null
+++ b/launch/blink_launch.yaml
@@ -0,0 +1,68 @@
+description: "Launch just the blink distress code"
+mode: hardware
+vehicle_interface: gem_hardware.GEMHardwareInterface
+mission_execution: StandardExecutor
+require_engaged: False
+# Recovery behavior after a component failure
+recovery:
+ perception:
+ state_estimation : GNSSStateEstimator
+ perception_normalization : StandardPerceptionNormalizer
+ planning:
+ driving_logic: driving_logic_component.DrivingLogic
+ signaling: blink_component.BlinkDistress
+# Driving behavior for the GEM vehicle. Runs perception and planner but doesn't execute anything (no controller).
+drive:
+ perception:
+ state_estimation : GNSSStateEstimator
+ perception_normalization : StandardPerceptionNormalizer
+ planning:
+ driving_logic: driving_logic_component.DrivingLogic
+ signaling: blink_component.BlinkDistress
+log:
+ # Specify the top-level folder to save the log files. Default is 'logs'
+ folder : 'logs'
+ # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix
+ prefix : 'hw1_'
+ # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix
+ #suffix : 'test3'
+ # Specify which ros topics to record to vehicle.bag.
+ #ros_topics : ['/pacmod/as_rx/turn_cmd','/pacmod/as_tx/turn_rpt']
+ # Specify options to pass to rosbag record. Default is no options.
+ #rosbag_options : '--split --size=1024'
+ # If True, then record all readings / commands of the vehicle interface. Default False
+ vehicle_interface : True
+ # Specify which components to record to behavior.json. Default records nothing
+ components : []
+ # Specify which components of state to record to state.json. Default records nothing
+ #state: ['all']
+ # Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate
+ #state_rate: 10
+ # If True, then make plots of the recorded data specifically from PurePursuitTrajectoryTracker_debug.csv and report metrics. Default False
+ #auto_plot : True
+replay: # Add items here to set certain topics / inputs to be replayed from logs
+ # Specify which log folder to replay from
+ log:
+ ros_topics : []
+ components : []
+
+#usually can keep this constant
+computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml"
+
+variants:
+ sim:
+ run:
+ description: "Runs the distress signal, but in simulation"
+ mode: simulation
+ vehicle_interface:
+ type: gem_simulator.GEMDoubleIntegratorSimulationInterface
+ args:
+ scene: !relative_path '../scenes/xyhead_demo.yaml'
+ require_engaged: False
+ visualization: !include "klampt_visualization.yaml"
+ recovery:
+ perception:
+ state_estimation : OmniscientStateEstimator
+ drive:
+ perception:
+ state_estimation : OmniscientStateEstimator
diff --git a/launch/fixed_route.yaml b/launch/fixed_route.yaml
index c05de8ff7..924ae1a9b 100644
--- a/launch/fixed_route.yaml
+++ b/launch/fixed_route.yaml
@@ -43,6 +43,8 @@ log:
#state: ['all']
# Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate
#state_rate: 10
+ # If True, then make plots of the recorded data specifically from PurePursuitTrajectoryTracker_debug.csv and report metrics. Default False
+ #auto_plot : True
replay: # Add items here to set certain topics / inputs to be replayed from logs
# Specify which log folder to replay from
log:
diff --git a/launch/gather_data.yaml b/launch/gather_data.yaml
index 4f3d03d4c..e04e8337d 100644
--- a/launch/gather_data.yaml
+++ b/launch/gather_data.yaml
@@ -37,6 +37,8 @@ log:
#state: ['all']
# Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate
#state_rate: 10
+ # If True, then make plots of the recorded data specifically from PurePursuitTrajectoryTracker_debug.csv and report metrics. Default False
+ #auto_plot : True
replay: # Add items here to set certain topics / inputs to be replayed from logs
# Specify which log folder to replay from
log:
diff --git a/launch/launch_all.sh b/launch/launch_all.sh
new file mode 100644
index 000000000..c75fbecc9
--- /dev/null
+++ b/launch/launch_all.sh
@@ -0,0 +1,24 @@
#!/bin/bash
# Opens three GNOME Terminal tabs; each tab cds into the GEMstack directory,
# sources the ROS environment, and runs one component of the stack.
#
# To run the same commands manually, one per terminal:
#   cd ~/catkin_ws/src/GEMstack
#   source ~/catkin_ws/devel/setup.bash && roslaunch basic_launch sensor_init.launch
#   source ~/catkin_ws/devel/setup.bash && roslaunch basic_launch dbw_joystick.launch
#   source ~/catkin_ws/devel/setup.bash && python3 main.py --variant=fake_real launch/pedestrian_detection.yaml
#
# If you don't want to detect a virtual pedestrian, run this instead:
#   source ~/catkin_ws/devel/setup.bash && python3 main.py launch/pedestrian_detection.yaml
#
# If roscore cannot run, kill stale masters first:
#   killall -9 roscore rosmaster
#
# NOTE: the original used a bare '<< COMMENTOUT' heredoc as a block comment;
# plain '#' comments are used instead (a heredoc without a command and with an
# unquoted delimiter is fragile and still expanded by the shell).

# Abort if the checkout is not where we expect it, rather than launching
# the tabs from the wrong working directory.
cd ~/catkin_ws/src/GEMstack || exit 1

gnome-terminal --tab -- bash -c "source ~/catkin_ws/devel/setup.bash && roslaunch basic_launch sensor_init.launch; exec bash"
gnome-terminal --tab -- bash -c "source ~/catkin_ws/devel/setup.bash && roslaunch basic_launch dbw_joystick.launch; exec bash"
gnome-terminal --tab -- bash -c "source ~/catkin_ws/devel/setup.bash && python3 main.py --variant=fake_real launch/pedestrian_detection.yaml; exec bash"
\ No newline at end of file
diff --git a/launch/pedestrian_detection.yaml b/launch/pedestrian_detection.yaml
new file mode 100644
index 000000000..4b59cc3a3
--- /dev/null
+++ b/launch/pedestrian_detection.yaml
@@ -0,0 +1,119 @@
+description: "Run the yielding trajectory planner on the real vehicle with real perception"
+mode: hardware
+vehicle_interface: gem_hardware.GEMHardwareInterface
+mission_execution: StandardExecutor
+
+# Recovery behavior after a component failure
+recovery:
+ planning:
+ trajectory_tracking : recovery.StopTrajectoryTracker
+
+# Driving behavior for the GEM vehicle. Runs real pedestrian perception, yield planner, but does not send commands to real vehicle.
+drive:
+ perception:
+ state_estimation : GNSSStateEstimator
+ agent_detection : pedestrian_detection.PedestrianDetector2D
+ perception_normalization : StandardPerceptionNormalizer
+ planning:
+ relations_estimation:
+ type: pedestrian_yield_logic.PedestrianYielder
+ args:
+ mode: 'real'
+ params: {
+ 'yielder': 'expert', # 'expert', 'analytic', or 'simulation'
+ 'planner': 'milestone', # 'milestone', 'dt', or 'dx'
+ 'desired_speed': 1.0, # m/s, 1.5 m/s seems max speed? Feb24
+ 'acceleration': 0.75 # m/s2, 0.5 is not enough to start moving. Feb24
+ }
+ route_planning:
+ type: StaticRoutePlanner
+ args: [!relative_path '../GEMstack/knowledge/routes/forward_15m.csv','start']
+ motion_planning: longitudinal_planning.YieldTrajectoryPlanner
+ trajectory_tracking:
+ type: pure_pursuit.PurePursuitTrajectoryTracker
+ print: False
+
+
+log:
+ # Specify the top-level folder to save the log files. Default is 'logs'
+ #folder : 'logs'
+ # If prefix is specified, then the log folder will be named with the prefix followed by the date and time. Default no prefix
+ #prefix : 'fixed_route_'
+ # If suffix is specified, then logs will output to folder/prefix+suffix. Default uses date and time as the suffix
+ #suffix : 'test3'
+ # Specify which ros topics to record to vehicle.bag. Default records nothing. This records the "standard" ROS topics.
+ ros_topics :
+ # Specify options to pass to rosbag record. Default is no options.
+ #rosbag_options : '--split --size=1024'
+ # If True, then record all readings / commands of the vehicle interface. Default False
+ vehicle_interface : True
+ # Specify which components to record to behavior.json. Default records nothing
+ components : ['state_estimation','agent_detection','motion_planning']
+ # Specify which components of state to record to state.json. Default records nothing
+ #state: ['all']
+ # Specify the rate in Hz at which to record state to state.json. Default records at the pipeline's rate
+ #state_rate: 10
+ # If True, then make plots of the recorded data specifically from PurePursuitTrajectoryTracker_debug.csv and report metrics. Default False
+ #auto_plot : True
+replay: # Add items here to set certain topics / inputs to be replayed from logs
+ # Specify which log folder to replay from
+ log:
+ ros_topics : []
+ components : []
+
+#usually can keep this constant
+computation_graph: !include "../GEMstack/knowledge/defaults/computation_graph.yaml"
+
+variants:
+ detector_only:
+ run:
+ description: "Run the pedestrian detection code"
+ drive:
+ planning:
+ trajectory_tracking:
+ real_sim:
+ run:
+ description: "Run the pedestrian detection code with real detection and fake simulation"
+ mode: hardware
+ vehicle_interface:
+ type: gem_mixed.GEMRealSensorsWithSimMotionInterface
+ args:
+ scene: !relative_path '../scenes/xyhead_demo.yaml'
+ mission_execution: StandardExecutor
+ require_engaged: False
+ visualization: !include "klampt_visualization.yaml"
+ drive:
+ perception:
+ state_estimation : OmniscientStateEstimator
+ planning:
+ relations_estimation:
+ type: pedestrian_yield_logic.PedestrianYielder
+ args:
+ mode: 'sim'
+
+ fake_real:
+ run:
+ description: "Run the yielding trajectory planner on the real vehicle with faked perception"
+ drive:
+ perception:
+ agent_detection : pedestrian_detection.FakePedestrianDetector2D
+
+ fake_sim:
+ run:
+ description: "Run the yielding trajectory planner in simulation with faked perception"
+ mode: simulation
+ vehicle_interface:
+ type: gem_simulator.GEMDoubleIntegratorSimulationInterface
+ args:
+ scene: !relative_path '../scenes/xyhead_demo.yaml'
+ visualization: !include "klampt_visualization.yaml"
+ drive:
+ perception:
+ # agent_detection : pedestrian_detection.FakePedestrianDetector2D
+ agent_detection : OmniscientAgentDetector #this option reads agents from the simulator
+ state_estimation : OmniscientStateEstimator
+ planning:
+ relations_estimation:
+ type: pedestrian_yield_logic.PedestrianYielder
+ args:
+ mode: 'sim'
\ No newline at end of file
diff --git a/log_dashboard/README.md b/log_dashboard/README.md
new file mode 100644
index 000000000..86c65fdb1
--- /dev/null
+++ b/log_dashboard/README.md
@@ -0,0 +1,18 @@
+Log Dashboard
+
+This dashboard can be used to browse GEMstack logs in a web browser.
+
+- It should display summary information (date, run duration, termination reason, sim vs real, launch command) in a table on the splash screen.
+
+- Can filter logs based on date
+
+- Upon choosing a log, it displays more detailed information about the run, and allows file exploring directly in the browser (a button to open the folder is also present).
+
+- Upon visualizing behavior.json, you see the trajectory length, and a plot of the trajectory driven.
+
+To run the code:
+1. pip install -r requirements.txt
+2. Run python app.py
+3. Go to 127.0.0.1:5000
+
+
diff --git a/log_dashboard/app.py b/log_dashboard/app.py
new file mode 100644
index 000000000..ac1e7a384
--- /dev/null
+++ b/log_dashboard/app.py
@@ -0,0 +1,579 @@
from flask import Flask, render_template, request, jsonify, send_file
import os
import yaml
import datetime
import functools
import time
from cachelib import SimpleCache
import json
import numpy as np
import platform
import matplotlib
from flask import Response, stream_with_context
from flask_cors import CORS

# Use the 'Agg' backend which is thread-safe and doesn't require a GUI.
# Must be selected before pyplot is imported.
matplotlib.use("Agg")

import matplotlib.pyplot as plt

app = Flask(__name__)
# FIX: the original initialized CORS twice — once unrestricted (CORS(app)) and
# once restricted to localhost:3000, which is contradictory and can emit
# duplicate headers. A single origin-restricted call expresses the intent.
CORS(app, origins=["http://localhost:3000"])

# Configure cache: up to 500 entries, each expiring after 5 minutes
cache = SimpleCache(threshold=500, default_timeout=300)

# Root folder containing one subdirectory per recorded run
LOG_DIR = "../logs"
+
+
def generate_behavior_plots(log_folder, behavior_file, target_frame=3):
    """
    Generate a comprehensive visualization plot for vehicle, agents, and trajectory.

    Args:
        log_folder (str): Name of the log folder (used to build the output path).
        behavior_file (str): Path to the behavior.json file (one JSON object per line).
        target_frame (int, optional): Specific frame to filter data; None keeps
            all frames. Defaults to 3.

    Returns:
        dict: Paths to generated plot files, keyed by plot name.
    """
    # BUG FIX: the original unconditionally reset target_frame to 3 here,
    # silently ignoring the caller's argument (e.g. the '?frame=' query param).

    # Define the output plot directory
    plot_dir = os.path.join("./plots", log_folder, "viz")
    os.makedirs(plot_dir, exist_ok=True)

    # Cache file records whether this (folder, frame) plot was already built
    cache_file = os.path.join(plot_dir, f"plot_cache_frame_{target_frame}.json")
    if os.path.exists(cache_file):
        with open(cache_file, "r") as f:
            return json.load(f)

    # Data collections accumulated while scanning the file
    vehicle_data = []
    agent_data = {}
    trajectory_data = []

    # Parse the behavior file line by line; each line is an independent record
    with open(behavior_file, "r") as f:
        for line in f:
            try:
                entry = json.loads(line.strip())

                # Vehicle state sample
                if "vehicle" in entry and "data" in entry["vehicle"]:
                    vehicle_state = entry["vehicle"]["data"]["pose"]
                    if (
                        target_frame is None
                        or vehicle_state.get("frame") == target_frame
                    ):
                        vehicle_data.append(
                            {
                                "time": entry["time"],
                                "x": vehicle_state.get("x", 0),
                                "y": vehicle_state.get("y", 0),
                                "frame": vehicle_state.get("frame"),
                            }
                        )

                # Per-agent state samples
                if "agents" in entry:
                    for agent_name, agent_info in entry["agents"].items():
                        agent_state = agent_info["data"]["pose"]
                        if (
                            target_frame is None
                            or agent_state.get("frame") == target_frame
                        ):
                            agent_data.setdefault(agent_name, []).append(
                                {
                                    "time": entry["time"],
                                    "x": agent_state.get("x", 0),
                                    "y": agent_state.get("y", 0),
                                    "frame": agent_state.get("frame"),
                                }
                            )

                # Planned trajectory: only the last record containing one is kept
                if "trajectory" in entry and "data" in entry["trajectory"]:
                    traj_points = entry["trajectory"]["data"]["points"]
                    traj_times = entry["trajectory"]["data"]["times"]
                    traj_frames = entry["trajectory"]["data"].get(
                        "frames", [None] * len(traj_points)
                    )
                    trajectory_data = [
                        {"x": point[0], "y": point[1], "time": time, "frame": frame}
                        for point, time, frame in zip(
                            traj_points, traj_times, traj_frames
                        )
                        if target_frame is None or frame == target_frame
                    ]

            except json.JSONDecodeError:
                continue  # tolerate truncated / corrupt lines

    # ---- Plotting ----
    plt.figure(figsize=(12, 8))

    # Vehicle path
    if vehicle_data:
        plt.plot(
            [v["x"] for v in vehicle_data],
            [v["y"] for v in vehicle_data],
            label="Vehicle Path",
            color="red",
            linewidth=3,
            marker="o",
            markersize=5,
        )

    # Agent paths
    for agent_name, positions in agent_data.items():
        plt.plot(
            [a["x"] for a in positions],
            [a["y"] for a in positions],
            label=agent_name,
            marker="x",
        )

    # Planned trajectory
    if trajectory_data:
        plt.plot(
            [t["x"] for t in trajectory_data],
            [t["y"] for t in trajectory_data],
            label="Planned Trajectory",
            color="green",
            linestyle="--",
            linewidth=2,
        )

    plt.title(f"Comprehensive Movement Visualization (Frame {target_frame})")
    plt.xlabel("X Position")
    plt.ylabel("Y Position")
    plt.legend()
    plt.grid(True, linestyle="--", alpha=0.7)

    # Save the plot
    plot_path = os.path.join(
        plot_dir, f"comprehensive_trajectory_frame_{target_frame}.png"
    )
    plt.savefig(plot_path, dpi=300, bbox_inches="tight")
    plt.close()

    # Cache plot file paths so subsequent requests skip re-plotting
    plot_files = {"comprehensive": plot_path}
    with open(cache_file, "w") as f:
        json.dump(plot_files, f)

    return plot_files
+
+
+@app.route("/view_log//render")
+def render_vis_html(log_folder):
+ print(f"Rendering visualization HTML for log folder: {log_folder}")
+ return render_template("render.html", log_folder=log_folder)
+
+
+@app.route("/view_log//get_render")
+def render_behavior_visualization(log_folder):
+ """
+ Render behavior visualization for a specific log folder
+
+ Returns:
+ JSON response with plot file paths
+ """
+ log_folder_path = os.path.join(LOG_DIR, log_folder)
+
+ # Find behavior.json file
+ behavior_files = [f for f in os.listdir(log_folder_path) if f == "behavior.json"]
+
+ if not behavior_files:
+ return jsonify({"error": "No behavior.json file found"}), 404
+
+ behavior_file_path = os.path.join(log_folder_path, behavior_files[0])
+
+ # Get frame from query parameter, default to None if not specified
+ target_frame = request.args.get("frame", type=int)
+
+ try:
+ # Generate behavior plots
+ plot_files = generate_behavior_plots(
+ log_folder, behavior_file_path, target_frame
+ )
+ print(plot_files)
+ # return jsonify(plot_files)
+ # return render_template("render.html", image_path=plot_files)
+ return send_file(plot_files["comprehensive"], mimetype="image/png")
+
+ except Exception as e:
+ return jsonify({"error": str(e)}), 500
+
+
# Route to serve generated plot files.
# FIX: route was "/plots//viz/" — both <log_folder> and <filename> placeholders
# were missing, so the two view-function arguments could never be bound.
@app.route("/plots/<log_folder>/viz/<path:filename>")
def serve_plot(log_folder, filename):
    """
    Serve a plot file from ./plots/<log_folder>/viz/.

    Both path components come from the URL (untrusted), so the resolved path
    is checked to stay inside the plots directory before serving.
    """
    plot_dir = os.path.join("./plots", log_folder, "viz")
    full_path = os.path.abspath(os.path.join(plot_dir, filename))
    # Traversal guard: reject paths that escape ./plots (e.g. '../../etc')
    if not full_path.startswith(os.path.abspath("./plots")):
        return "Invalid path", 400
    return send_file(full_path)
+
+
def get_cache_key(prefix, *args):
    """Build a cache key by combining a prefix with a hash of the arguments."""
    digest = hash(str(args))
    return "{}_{}".format(prefix, digest)
+
+
def parse_behavior_data(file_path):
    """
    Parse a behavior.json log (one JSON object per line) into position series.

    Returns:
        {
            'vehicle': [{'time': float, 'x': float, 'y': float}, ...],
            'agents': {
                'ped1': [{'time': float, 'x': float, 'y': float}, ...],
                ...
            }
        }
        or None when the file cannot be processed at all.

    Lines that are not valid JSON are skipped silently (partially written
    records are common in live logs).
    """
    try:
        tracks = {"vehicle": [], "agents": {}}
        # Stream line by line so very large logs don't need to fit in memory
        with open(file_path, "r") as fh:
            for raw in fh:
                try:
                    record = json.loads(raw.strip())

                    # Vehicle pose sample
                    if "vehicle" in record:
                        pose = record["vehicle"]["data"]["pose"]
                        tracks["vehicle"].append(
                            {
                                "time": record["time"],
                                "x": pose.get("x", 0),
                                "y": pose.get("y", 0),
                            }
                        )

                    # Per-agent pose samples
                    if "agents" in record:
                        for name, info in record["agents"].items():
                            pose = info["data"]["pose"]
                            tracks["agents"].setdefault(name, []).append(
                                {
                                    "time": record["time"],
                                    "x": pose.get("x", 0),
                                    "y": pose.get("y", 0),
                                }
                            )
                except json.JSONDecodeError:
                    # Skip invalid JSON lines
                    continue

        return tracks
    except Exception as e:
        print(f"Error parsing behavior data: {e}")
        return None
+
+
@functools.lru_cache(maxsize=100)
def load_log_data():
    """Load summary info for every run folder under LOG_DIR, newest first.

    Returns a list of dicts with keys: date, run_duration, exit_reason, mode,
    launch_command, folder. Folders missing meta.yaml or settings.yaml are
    skipped (the open() below raises and the folder is dropped).

    Caching is two-layered: functools.lru_cache memoizes the call itself
    (load_log_data takes no arguments, so this caches a single result), and
    SimpleCache stores the list under "all_logs".  The lru_cache layer is
    invalidated from add_header() (see below) roughly every 5 minutes so new
    runs eventually appear on the splash page — be careful when changing one
    layer without the other.
    """
    cache_key = "all_logs"
    cached_logs = cache.get(cache_key)

    if cached_logs is not None:
        return cached_logs

    start_time = time.time()
    logs = []

    # Log folder names begin with a timestamp, so reverse sort == newest first
    for log_folder in sorted(os.listdir(LOG_DIR), reverse=True):
        log_path = os.path.join(LOG_DIR, log_folder)
        if not os.path.isdir(log_path):
            continue

        meta_path = os.path.join(log_path, "meta.yaml")
        settings_path = os.path.join(log_path, "settings.yaml")

        try:
            with open(meta_path, "r") as meta_file:
                meta_data = yaml.safe_load(meta_file)
            with open(settings_path, "r") as settings_file:
                settings_data = yaml.safe_load(settings_file).get("run", {})
        except Exception as e:
            # Any unreadable / missing YAML skips the whole folder
            print(f"Error loading log data: {e}")
            continue

        logs.append(
            {
                "date": log_folder,
                "run_duration": meta_data.get("run_duration", "Unknown"),
                "exit_reason": meta_data.get("exit_reason", "Unknown"),
                "mode": settings_data.get("mode", "Unknown"),
                "launch_command": settings_data.get("log", {}).get(
                    "launch_command", "Unknown"
                ),
                "folder": log_path,
            }
        )

    # Store results in cache
    cache.set(cache_key, logs)
    end_time = time.time()
    print(f"Log data loaded in {end_time - start_time:.2f} seconds")
    print(logs)
    return logs
+
+
def filter_logs_by_date(logs, start_date=None, end_date=None):
    """Return the subset of logs whose date falls within [start_date, end_date].

    Dates are 'YYYY-MM-DD' strings; either bound may be None (unbounded).
    Results are cached per (start_date, end_date) pair.
    """
    cache_key = get_cache_key("filtered_logs", start_date, end_date)
    cached = cache.get(cache_key)
    if cached is not None:
        return cached

    # Parse the boundary dates once, up front
    lower = datetime.datetime.strptime(start_date, "%Y-%m-%d") if start_date else None
    upper = datetime.datetime.strptime(end_date, "%Y-%m-%d") if end_date else None

    selected = []
    for log in logs:
        try:
            stamp = datetime.datetime.strptime(log["date"][:10], "%Y-%m-%d")
        except ValueError:
            # Folder name does not start with a YYYY-MM-DD date — skip it
            continue
        if (lower is None or stamp >= lower) and (upper is None or stamp <= upper):
            selected.append(log)

    # Cache the filtered results
    cache.set(cache_key, selected)
    return selected
+
+
@functools.lru_cache(maxsize=50)
def get_log_metadata(log_folder_path):
    """Collect meta.yaml and settings.yaml information for one log folder.

    Cached both via lru_cache (keyed by path) and SimpleCache. YAML read
    errors are logged and leave whatever metadata was gathered so far.
    """
    cache_key = f"metadata_{log_folder_path}"
    cached = cache.get(cache_key)
    if cached is not None:
        return cached

    metadata = {"folder": log_folder_path}

    meta_path = os.path.join(log_folder_path, "meta.yaml")
    settings_path = os.path.join(log_folder_path, "settings.yaml")

    try:
        if os.path.exists(meta_path):
            with open(meta_path, "r") as meta_file:
                metadata.update(yaml.safe_load(meta_file))
        if os.path.exists(settings_path):
            with open(settings_path, "r") as settings_file:
                run_settings = yaml.safe_load(settings_file).get("run", {})
            metadata["mode"] = run_settings.get("mode", "Unknown")
            metadata["launch_command"] = run_settings.get("log", {}).get(
                "launch_command", "Unknown"
            )
    except Exception as e:
        print(f"Error loading metadata: {e}")

    # Cache the results
    cache.set(cache_key, metadata)
    return metadata
+
+
+@app.route("/")
+def index():
+ logs = load_log_data()
+ return render_template("index.html", logs=logs)
+
+
+@app.route("/filter_logs", methods=["POST"])
+def filter_logs():
+ start_time = time.time()
+ logs = load_log_data()
+ data = request.json
+ start_date = data.get("start_date")
+ end_date = data.get("end_date")
+
+ filtered_logs = filter_logs_by_date(logs, start_date, end_date)
+
+ end_time = time.time()
+ print(f"Filtered logs in {end_time - start_time:.2f} seconds")
+ return jsonify(filtered_logs)
+
+
+@app.route("/view_log/")
+def view_log(log_folder):
+ log_folder_path = os.path.join(LOG_DIR, log_folder)
+ if not os.path.exists(log_folder_path):
+ return "Log folder not found!", 404
+
+ # Get metadata with caching
+ metadata = get_log_metadata(log_folder_path)
+
+ # Get directory structure
+ files = sorted(os.listdir(log_folder_path))
+
+ return render_template(
+ "view_log.html", log_folder=log_folder, metadata=metadata, files=files
+ )
+
+
+@app.route("/open_folder/")
+def open_folder(folder):
+ import os, platform
+
+ full_path = os.path.abspath(os.path.join(LOG_DIR, folder))
+ if not full_path.startswith(os.path.abspath(LOG_DIR)):
+ return "Invalid path", 400
+ if not os.path.isdir(full_path):
+ return f"Folder does not exist: {folder}", 404
+
+ try:
+ if platform.system() == "Windows":
+ os.system(f'explorer "{full_path}"')
+ elif platform.system() == "Linux":
+ os.system(f'xdg-open "{full_path}"')
+ elif platform.system() == "Darwin":
+ os.system(f'open "{full_path}"')
+ else:
+ return "Unsupported platform", 400
+ except Exception as e:
+ return f"Failed to open folder: {e}", 500
+
+ # if result != 0:
+ # return 'Failed to open folder', 500
+
+ return "Folder opened successfully.", 204
+
+
+@app.route("/view_file//")
+def view_file(log_folder, filename):
+ file_path = os.path.join(LOG_DIR, log_folder, filename)
+ if not os.path.exists(file_path):
+ return jsonify({"error": "File not found!"}), 404
+
+ # Check file size first
+ file_size = os.path.getsize(file_path)
+
+ # Define chunk size for pagination (50,000 lines or ~1MB)
+ CHUNK_SIZE = 1000
+
+ # Get page number from query parameter (default to 1)
+ page = request.args.get("page", 1, type=int)
+
+ try:
+ with open(file_path, "r") as f:
+ # Skip lines for previous pages
+ for _ in range((page - 1) * CHUNK_SIZE):
+ f.readline()
+
+ # Read next chunk of lines
+ lines = [f.readline() for _ in range(CHUNK_SIZE)]
+ # Check if there are more lines
+ has_more = bool(f.readline())
+
+ # Prepare response
+ return jsonify(
+ {
+ "filename": filename,
+ "content": "".join(lines),
+ "total_size": file_size,
+ "page": page,
+ "has_more": has_more,
+ }
+ )
+ except UnicodeDecodeError:
+ return jsonify(
+ {
+ "filename": filename,
+ "content": "This file contains binary data and cannot be displayed in the browser.",
+ "total_size": file_size,
+ "page": page,
+ "has_more": False,
+ }
+ )
+
+
+@app.route("/raw_logs//")
+def stream_raw_log(log_folder, filename):
+ file_path = os.path.join(LOG_DIR, log_folder, filename)
+ if not os.path.isfile(file_path):
+ return "File not found!", 404
+
+ def generate():
+ with open(file_path, "r", encoding="utf-8") as f:
+ for line in f:
+ yield line
+
+ return Response(stream_with_context(generate()), mimetype="text/plain")
+
+
+@app.route("/parse_behavior//")
+def parse_behavior(log_folder, filename):
+ """
+ Parse behavior.json and return structured position data
+ """
+ file_path = os.path.join(LOG_DIR, log_folder, filename)
+
+ if not os.path.exists(file_path):
+ return jsonify({"error": "File not found!"}), 404
+
+ # Parse behavior data
+ behavior_data = parse_behavior_data(file_path)
+
+ if behavior_data is None:
+ return jsonify({"error": "Could not parse behavior data"}), 500
+
+ return jsonify(behavior_data)
+
+
# Periodic cache invalidation hook
@app.after_request
def add_header(response):
    """After each request to '/', clear the cached log listing if it is stale.

    load_log_data() is memoized with lru_cache; this is the only place that
    invalidates it, at most once every 5 minutes.
    """
    if request.path == "/":
        now = int(time.time())
        last_cleared = cache.get("last_cache_clear") or 0
        if now - last_cleared > 300:  # 5 minutes
            load_log_data.cache_clear()
            cache.set("last_cache_clear", now)
    return response
+
+
if __name__ == "__main__":
    # Development server only; debug=True must not be used in production
    app.run(debug=True)
diff --git a/log_dashboard/plots/.gitignore b/log_dashboard/plots/.gitignore
new file mode 100644
index 000000000..9f992b4d7
--- /dev/null
+++ b/log_dashboard/plots/.gitignore
@@ -0,0 +1,5 @@
+*
+
+# track just these files
+!.gitignore
+!sample_plot.png
diff --git a/log_dashboard/plots/sample_plot.png b/log_dashboard/plots/sample_plot.png
new file mode 100644
index 000000000..9d1fac35d
Binary files /dev/null and b/log_dashboard/plots/sample_plot.png differ
diff --git a/log_dashboard/requirements.txt b/log_dashboard/requirements.txt
new file mode 100644
index 000000000..e6dac0d83
--- /dev/null
+++ b/log_dashboard/requirements.txt
@@ -0,0 +1,6 @@
+flask
+pyyaml
+cachelib
+numpy
+matplotlib
+flask-cors
\ No newline at end of file
diff --git a/log_dashboard/templates/index.html b/log_dashboard/templates/index.html
new file mode 100644
index 000000000..d0401710b
--- /dev/null
+++ b/log_dashboard/templates/index.html
@@ -0,0 +1,102 @@
+
+
+
+
+
+ GEMstack Log Dashboard
+
+
+
+
+
+