#!/usr/bin/env python3
# -*- coding: utf-8 -*-

"""
Render a PNG image for the MyoConverter Neck6D converted model (MuJoCo XML).

What this script does:
1) Load the converted MuJoCo model: models/mjc/Neck6D/neck6d_cvt3.xml
2) Reset and load keyframe 0 (required by MyoConverter README)
3) Step a few frames
4) Offscreen render a PNG image to output/neck6d_render.png

Optional:
- If you pass --viewer, it will open an interactive viewer (if supported).

Usage:
  python examples/render_neck6d_png.py
  python examples/render_neck6d_png.py --xml models/mjc/Neck6D/neck6d_cvt3.xml --out output/neck6d_render.png
  python examples/render_neck6d_png.py --viewer
"""

import argparse
import os
from pathlib import Path

import mujoco
from PIL import Image


def ensure_parent_dir(filepath: str) -> None:
    """Create the parent directory of *filepath* if it does not already exist."""
    Path(filepath).parent.mkdir(parents=True, exist_ok=True)


def set_camera_like_default(scene, lookat=None, distance=None, azimuth=None, elevation=None):
    """
    Configure a free camera.

    Despite the historical parameter name ``scene``, pass an object exposing
    the ``mujoco.MjvCamera`` fields (``lookat``, ``distance``, ``azimuth``,
    ``elevation``) — i.e. an ``MjvCamera``.

    BUGFIX: an earlier revision mutated ``renderer.scene.camera`` after
    ``update_scene()``.  ``MjvScene.camera`` is a pair of low-level
    ``mjvGLCamera`` structs that have none of these fields, and camera edits
    made after ``update_scene()`` do not affect the rendered frame anyway.
    The free camera must instead be handed to
    ``Renderer.update_scene(data, camera=...)``.
    """
    cam = scene  # expected: mujoco.MjvCamera (or anything with these fields)

    if lookat is not None:
        cam.lookat[:] = lookat            # 3-vector the camera orbits around
    if distance is not None:
        cam.distance = float(distance)    # orbit radius
    if azimuth is not None:
        cam.azimuth = float(azimuth)      # degrees
    if elevation is not None:
        cam.elevation = float(elevation)  # degrees


def offscreen_render_png(
    xml_path: str,
    out_path: str,
    width: int = 1400,
    height: int = 1000,
    steps: int = 10,
    camera_lookat=(0.0, 0.0, 1.1),
    camera_distance=2.2,
    camera_azimuth=135.0,
    camera_elevation=-15.0,
) -> None:
    """
    Offscreen render a single PNG frame.

    Raises:
        FileNotFoundError: if *xml_path* does not exist.
    """
    if not os.path.exists(xml_path):
        raise FileNotFoundError(f"MuJoCo XML not found: {xml_path}")

    # Load model and data
    model = mujoco.MjModel.from_xml_path(xml_path)
    data = mujoco.MjData(model)

    # IMPORTANT: load keyframe 0 (per MyoConverter README) so the converted
    # muscle paths / joint constraints start from a consistent pose.
    mujoco.mj_resetDataKeyframe(model, data, 0)

    # Step a few frames to settle (optional but often makes rendering stable)
    for _ in range(max(0, int(steps))):
        mujoco.mj_step(model, data)

    # Build and configure a free camera, then hand it to update_scene()
    # (see set_camera_like_default for why scene.camera must not be used).
    cam = mujoco.MjvCamera()
    mujoco.mjv_defaultCamera(cam)
    set_camera_like_default(
        cam,
        lookat=camera_lookat,
        distance=camera_distance,
        azimuth=camera_azimuth,
        elevation=camera_elevation,
    )

    renderer = mujoco.Renderer(model, width=width, height=height)
    renderer.update_scene(data, camera=cam)
    img = renderer.render()

    # Save
    ensure_parent_dir(out_path)
    Image.fromarray(img).save(out_path)
    print(f"[OK] Saved render to: {out_path}")


def optional_viewer(xml_path: str) -> None:
    """
    Launch an interactive viewer (requires a working OpenGL context).
    """
    import time
    import mujoco.viewer

    model = mujoco.MjModel.from_xml_path(xml_path)
    data = mujoco.MjData(model)
    mujoco.mj_resetDataKeyframe(model, data, 0)

    with mujoco.viewer.launch_passive(model, data) as viewer:
        while viewer.is_running():
            mujoco.mj_step(model, data)
            viewer.sync()
            time.sleep(0.01)


def main():
    """CLI entry point: parse arguments, render one PNG, optionally open viewer."""
    parser = argparse.ArgumentParser(description="Render Neck6D MuJoCo model to a PNG.")
    parser.add_argument(
        "--xml",
        type=str,
        default="models/mjc/Neck6D/neck6d_cvt3.xml",
        help="Path to MuJoCo XML model (default: models/mjc/Neck6D/neck6d_cvt3.xml)",
    )
    parser.add_argument(
        "--out",
        type=str,
        default="output/neck6d_render.png",
        help="Output PNG path (default: output/neck6d_render.png)",
    )
    parser.add_argument("--width", type=int, default=1400, help="Image width")
    parser.add_argument("--height", type=int, default=1000, help="Image height")
    parser.add_argument("--steps", type=int, default=10, help="Simulation steps before capture")

    # Camera parameters (easy to justify and easy to reproduce)
    parser.add_argument("--lookat-x", type=float, default=0.0)
    parser.add_argument("--lookat-y", type=float, default=0.0)
    parser.add_argument("--lookat-z", type=float, default=1.1)
    parser.add_argument("--distance", type=float, default=2.2)
    parser.add_argument("--azimuth", type=float, default=135.0)
    parser.add_argument("--elevation", type=float, default=-15.0)

    parser.add_argument(
        "--viewer",
        action="store_true",
        help="Open interactive viewer after rendering (optional).",
    )

    args = parser.parse_args()

    offscreen_render_png(
        xml_path=args.xml,
        out_path=args.out,
        width=args.width,
        height=args.height,
        steps=args.steps,
        camera_lookat=(args.lookat_x, args.lookat_y, args.lookat_z),
        camera_distance=args.distance,
        camera_azimuth=args.azimuth,
        camera_elevation=args.elevation,
    )

    if args.viewer:
        optional_viewer(args.xml)


if __name__ == "__main__":
    main()
8333c74300115745ee515f473cd5db1cb2a0e57e Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Sun, 21 Dec 2025 20:24:34 +0800 Subject: [PATCH 02/11] =?UTF-8?q?=E4=BA=BA=E4=BD=93=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E6=90=AD=E5=BB=BA=EF=BC=8C=E6=88=90=E5=8A=9F=E6=90=AD?= =?UTF-8?q?=E5=BB=BA=E4=BA=86=E5=85=B3=E4=BA=8Earm=E7=9A=84=E9=83=A8?= =?UTF-8?q?=E5=88=86=EF=BC=8C=E5=B9=B6=E7=94=9F=E6=88=90=E5=8A=A8=E5=9B=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models_arm.py | 184 ++++++++++++++++++ 1 file changed, 184 insertions(+) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/models_arm.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/models_arm.py b/src/biomechanical_hcl_smart_simulation_platform/models_arm.py new file mode 100644 index 0000000000..5252bae576 --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/models_arm.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Render an arm model (e.g., Arm26 converted by MyoConverter) to PNG/GIF using MuJoCo Python bindings. + +Examples: + # Render a GIF (recommended) + python examples/render_arm.py --xml ./models/mjc/Arm26/arm26_cvt3.xml --out arm.gif + + # Render a still image + python examples/render_arm.py --xml ./models/mjc/Arm26/arm26_cvt3.xml --out arm.png + +Notes: + - MyoConverter converted XML often contains a keyframe that must be loaded to satisfy joint/muscle path constraints. + This script will automatically load keyframe 0 if present. (See README.) 
+""" + +from __future__ import annotations + +import argparse +import math +from pathlib import Path + +import numpy as np + +import mujoco +import imageio.v2 as imageio + + +def _pick_first_hinge_joint(model: mujoco.MjModel) -> int | None: + """Pick the first hinge joint as a simple 'elbow-like' joint.""" + for jid in range(model.njnt): + if int(model.jnt_type[jid]) == int(mujoco.mjtJoint.mjJNT_HINGE): + return jid + return None + + +def _joint_id_by_name(model: mujoco.MjModel, name: str) -> int | None: + try: + jid = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_JOINT, name) + return int(jid) if jid >= 0 else None + except Exception: + return None + + +def _setup_camera(model: mujoco.MjModel, azimuth: float, elevation: float, distance_scale: float) -> mujoco.MjvCamera: + cam = mujoco.MjvCamera() + mujoco.mjv_defaultCamera(cam) + + # A reasonable default framing: focus model center, distance proportional to extent + extent = float(model.stat.extent) if float(model.stat.extent) > 0 else 1.0 + center = np.array(model.stat.center, dtype=float) + + cam.lookat[:] = center + cam.distance = max(0.05, distance_scale * extent) + cam.azimuth = float(azimuth) + cam.elevation = float(elevation) + return cam + + +def _reset_to_keyframe_if_any(model: mujoco.MjModel, data: mujoco.MjData) -> None: + # README: converted XML contains keyframe; MuJoCo doesn't load it by default. + # Use mj_resetDataKeyframe(model, data, 0) to load keyframe 0. 
+ if model.nkey > 0: + mujoco.mj_resetDataKeyframe(model, data, 0) + else: + mujoco.mj_resetData(model, data) + mujoco.mj_forward(model, data) + + +def render( + xml_path: Path, + out_path: Path, + width: int, + height: int, + fps: int, + seconds: float, + joint_name: str | None, + azimuth: float, + elevation: float, + distance_scale: float, +) -> None: + if not xml_path.exists(): + raise FileNotFoundError(f"XML not found: {xml_path}") + + model = mujoco.MjModel.from_xml_path(str(xml_path)) + data = mujoco.MjData(model) + + _reset_to_keyframe_if_any(model, data) + + # Choose joint + jid = _joint_id_by_name(model, joint_name) if joint_name else None + if jid is None: + jid = _pick_first_hinge_joint(model) + + if jid is None: + print("[WARN] No hinge joint found. Will just render the initial pose.") + else: + print(f"[INFO] Using joint id={jid}, name='{mujoco.mj_id2name(model, mujoco.mjtObj.mjOBJ_JOINT, jid)}'") + + # Joint qpos address (hinge = 1 DoF) + qadr = int(model.jnt_qposadr[jid]) if jid is not None else None + + # Joint range (if not defined, fall back) + if jid is not None: + jrange = np.array(model.jnt_range[jid], dtype=float) + if np.all(np.isfinite(jrange)) and (jrange[1] > jrange[0]): + lo, hi = float(jrange[0]), float(jrange[1]) + else: + lo, hi = -0.5, 0.5 + mid = 0.5 * (lo + hi) + amp = 0.45 * (hi - lo) + else: + lo, hi, mid, amp = 0.0, 0.0, 0.0, 0.0 + + cam = _setup_camera(model, azimuth=azimuth, elevation=elevation, distance_scale=distance_scale) + renderer = mujoco.Renderer(model, height=height, width=width) + + out_path.parent.mkdir(parents=True, exist_ok=True) + suffix = out_path.suffix.lower() + + if suffix == ".png": + # still image + renderer.update_scene(data, camera=cam) + rgb = renderer.render() + imageio.imwrite(out_path, rgb) + print(f"[OK] Saved: {out_path}") + return + + if suffix != ".gif": + raise ValueError(f"Unsupported output format '{suffix}'. 
Use .gif or .png") + + n_frames = max(1, int(round(seconds * fps))) + frames = [] + + for i in range(n_frames): + t = i / fps + + if jid is not None and qadr is not None: + # simple sinusoidal motion within range + angle = mid + amp * math.sin(2.0 * math.pi * (t / max(1e-6, seconds))) + data.qpos[qadr] = angle + data.qvel[:] = 0.0 + mujoco.mj_forward(model, data) + + renderer.update_scene(data, camera=cam) + rgb = renderer.render() + frames.append(rgb) + + imageio.mimsave(out_path, frames, fps=fps) + print(f"[OK] Saved: {out_path}") + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--xml", type=str, required=True, help="Path to MuJoCo XML (e.g. ./models/mjc/Arm26/arm26_cvt3.xml)") + ap.add_argument("--out", type=str, default="arm.gif", help="Output file: .gif or .png") + ap.add_argument("--width", type=int, default=1280) + ap.add_argument("--height", type=int, default=720) + ap.add_argument("--fps", type=int, default=30) + ap.add_argument("--seconds", type=float, default=2.0) + ap.add_argument("--joint", type=str, default=None, help="Optional joint name to animate (otherwise first hinge joint)") + ap.add_argument("--azimuth", type=float, default=110.0) + ap.add_argument("--elevation", type=float, default=-20.0) + ap.add_argument("--distance_scale", type=float, default=1.6) + + args = ap.parse_args() + render( + xml_path=Path(args.xml), + out_path=Path(args.out), + width=args.width, + height=args.height, + fps=args.fps, + seconds=args.seconds, + joint_name=args.joint, + azimuth=args.azimuth, + elevation=args.elevation, + distance_scale=args.distance_scale, + ) + + +if __name__ == "__main__": + main() From b7e99c196d289c9d7aaacf3c2fe7a52f2ebcf439 Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Sun, 21 Dec 2025 21:56:02 +0800 Subject: [PATCH 03/11] =?UTF-8?q?=E4=BA=BA=E4=BD=93=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E6=90=AD=E5=BB=BA=EF=BC=8C=E6=88=90=E5=8A=9F=E6=90=AD?= 
=?UTF-8?q?=E5=BB=BA=EF=BC=8C=E5=B9=B6=E7=94=9F=E6=88=90=E5=8A=A8=E5=9B=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../models_leg.py | 214 ++++++++++++++++++ 1 file changed, 214 insertions(+) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/models_leg.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/models_leg.py b/src/biomechanical_hcl_smart_simulation_platform/models_leg.py new file mode 100644 index 0000000000..c6b825049b --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/models_leg.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Render a leg model (e.g., Leg6Dof9Musc converted by MyoConverter) to PNG/GIF using MuJoCo Python bindings. + +Examples: + # Render a GIF (recommended) + python examples/render_leg.py --xml ./models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml --out leg.gif + + # Render a still image + python examples/render_leg.py --xml ./models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml --out leg.png + +Notes: + - Converted XML contains a keyframe that should be loaded to satisfy joint/muscle path constraints. + MuJoCo doesn't load it by default, so we call mj_resetDataKeyframe(model, data, 0). 
+""" + +from __future__ import annotations + +import argparse +import math +from pathlib import Path +from typing import List, Tuple + +import numpy as np +import mujoco +import imageio.v2 as imageio + + +# ----------------------------- helpers ----------------------------- + +def reset_to_keyframe_if_any(model: mujoco.MjModel, data: mujoco.MjData) -> None: + """Load keyframe 0 if present; otherwise do a normal reset.""" + if model.nkey > 0: + mujoco.mj_resetDataKeyframe(model, data, 0) + else: + mujoco.mj_resetData(model, data) + mujoco.mj_forward(model, data) + + +def setup_camera(model: mujoco.MjModel, azimuth: float, elevation: float, distance_scale: float) -> mujoco.MjvCamera: + cam = mujoco.MjvCamera() + mujoco.mjv_defaultCamera(cam) + + extent = float(model.stat.extent) if float(model.stat.extent) > 0 else 1.0 + center = np.array(model.stat.center, dtype=float) + + cam.lookat[:] = center + cam.distance = max(0.05, distance_scale * extent) + cam.azimuth = float(azimuth) + cam.elevation = float(elevation) + return cam + + +def joint_name(model: mujoco.MjModel, jid: int) -> str: + n = mujoco.mj_id2name(model, mujoco.mjtObj.mjOBJ_JOINT, jid) + return "" if n is None else str(n) + + +def pick_leg_joints(model: mujoco.MjModel) -> List[int]: + """ + Heuristic: + 1) Prefer hinge joints whose names contain knee/ankle/hip (case-insensitive). + 2) If none found, fall back to the first hinge joint. 
+ """ + candidates: List[Tuple[int, int]] = [] # (priority, jid) + keys = [ + ("knee", 0), + ("ankle", 1), + ("hip", 2), + ("talus", 3), + ("subtalar", 3), + ] + + for jid in range(model.njnt): + jtype = int(model.jnt_type[jid]) + if jtype != int(mujoco.mjtJoint.mjJNT_HINGE): + continue + name = joint_name(model, jid).lower() + for k, pri in keys: + if k in name: + candidates.append((pri, jid)) + break + + if candidates: + candidates.sort(key=lambda x: x[0]) + # Keep up to 3 joints to animate (knee + ankle + hip is usually enough) + return [jid for _, jid in candidates[:3]] + + # fallback: first hinge joint + for jid in range(model.njnt): + if int(model.jnt_type[jid]) == int(mujoco.mjtJoint.mjJNT_HINGE): + return [jid] + + return [] + + +def get_range_or_default(model: mujoco.MjModel, jid: int, default_lo=-0.5, default_hi=0.5) -> Tuple[float, float]: + r = np.array(model.jnt_range[jid], dtype=float) + if np.all(np.isfinite(r)) and (r[1] > r[0]): + return float(r[0]), float(r[1]) + return float(default_lo), float(default_hi) + + +# ----------------------------- main render ----------------------------- + +def render_leg( + xml_path: Path, + out_path: Path, + width: int, + height: int, + fps: int, + seconds: float, + azimuth: float, + elevation: float, + distance_scale: float, + joint_names: List[str], +) -> None: + if not xml_path.exists(): + raise FileNotFoundError(f"XML not found: {xml_path}") + + model = mujoco.MjModel.from_xml_path(str(xml_path)) + data = mujoco.MjData(model) + + reset_to_keyframe_if_any(model, data) + + # Choose joints + jids: List[int] = [] + + # user-specified joint names (optional) + if joint_names: + for nm in joint_names: + try: + jid = int(mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_JOINT, nm)) + except Exception: + jid = -1 + if jid >= 0: + jids.append(jid) + + # heuristic fallback + if not jids: + jids = pick_leg_joints(model) + + if not jids: + print("[WARN] No hinge joint found. 
Will just render initial pose.") + else: + print("[INFO] Animating joints:") + for jid in jids: + print(f" - id={jid:3d} name='{joint_name(model, jid)}' qposadr={int(model.jnt_qposadr[jid])}") + + cam = setup_camera(model, azimuth=azimuth, elevation=elevation, distance_scale=distance_scale) + renderer = mujoco.Renderer(model, height=height, width=width) + + out_path.parent.mkdir(parents=True, exist_ok=True) + suffix = out_path.suffix.lower() + + # still image + if suffix == ".png": + renderer.update_scene(data, camera=cam) + rgb = renderer.render() + imageio.imwrite(out_path, rgb) + print(f"[OK] Saved: {out_path}") + return + + # gif + if suffix != ".gif": + raise ValueError(f"Unsupported output format '{suffix}'. Use .gif or .png") + + n_frames = max(1, int(round(seconds * fps))) + frames = [] + + # Precompute per-joint motion params + motion = [] + for jid in jids: + qadr = int(model.jnt_qposadr[jid]) # hinge => 1 dof + lo, hi = get_range_or_default(model, jid) + mid = 0.5 * (lo + hi) + amp = 0.40 * (hi - lo) # keep margin inside limits + motion.append((qadr, mid, amp)) + + for i in range(n_frames): + t = i / fps + + # simple gait-like phase offsets between joints + for k, (qadr, mid, amp) in enumerate(motion): + phase = (k * math.pi / 3.0) # 0, 60deg, 120deg + val = mid + amp * math.sin(2.0 * math.pi * (t / max(1e-6, seconds)) + phase) + data.qpos[qadr] = val + + data.qvel[:] = 0.0 + mujoco.mj_forward(model, data) + + renderer.update_scene(data, camera=cam) + frames.append(renderer.render()) + + imageio.mimsave(out_path, frames, fps=fps) + print(f"[OK] Saved: {out_path}") + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--xml", type=str, required=True, + help="Path to MuJoCo XML (e.g. 
./models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml)") + ap.add_argument("--out", type=str, default="leg.gif", help="Output file: .gif or .png") + ap.add_argument("--width", type=int, default=1280) + ap.add_argument("--height", type=int, default=720) + ap.add_argument("--fps", type=int, default=30) + ap.add_argument("--seconds", type=float, default=2.5) + + # camera + ap.add_argument("--azimuth", type=float, default=95.0) + ap.add_argument("--elevation", type=float, default=-15.0) + ap.add_arg_ From e557bf72f4f7847d7f6be132b5e961465b6cd509 Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Mon, 22 Dec 2025 10:02:09 +0800 Subject: [PATCH 04/11] =?UTF-8?q?=E4=BA=BA=E4=BD=93=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E5=88=9D=E6=AD=A5=E6=B8=B2=E6=9F=93=E6=88=90=E5=8A=9F?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../human_model_initial_render.py | 595 ++++++++++++++++++ 1 file changed, 595 insertions(+) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py b/src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py new file mode 100644 index 0000000000..c80043dcb5 --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py @@ -0,0 +1,595 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +Render a connected (welded) leg + neck composite MJCF. + +What this script does: +1) Load leg.xml and neck.xml (original files). +2) Reset each model to keyframe(0) if present (README-style init). +3) Export namespaced copies (leg_, neck_) WITHOUT to avoid nq mismatch when composing. + Also converts all `file="..."` paths to absolute paths, so assets still load from anywhere. +4) Compute relpose for an equality/weld constraint from the keyframe poses. +5) Generate a scene MJCF that s the two namespaced parts and adds the weld. 
+6) Load the scene, copy qpos/qvel/act from each part’s keyframe into the composite model, then render. + +Usage: + python scripts/render_leg_neck_connected.py \ + --leg /path/to/leg.xml \ + --neck /path/to/neck.xml \ + --out /path/to/out.gif + +Notes: +- If auto-detected root body is not what you want, pass --leg_root_body / --neck_root_body. +- Output scene + temp MJCFs are written under --workdir (default: ./_build_leg_neck). +""" + +from __future__ import annotations + +import argparse +import os +import sys +import math +import shutil +import xml.etree.ElementTree as ET +from dataclasses import dataclass +from typing import Dict, List, Tuple, Optional + +import numpy as np + +# imageio is optional unless you save gif/mp4 +try: + import imageio.v2 as imageio +except Exception: + imageio = None + +try: + import mujoco +except Exception as e: + raise SystemExit( + "Cannot import `mujoco`. Please install official MuJoCo python bindings, e.g.\n" + " pip install mujoco\n" + f"Original error: {e}" + ) + + +# ------------------------- +# XML helpers +# ------------------------- + +FILE_ATTR_WHITELIST = {"file"} # attributes that should be treated as filesystem paths + + +def _indent(elem: ET.Element, level: int = 0) -> None: + """Pretty-print indentation (in-place).""" + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + for child in elem: + _indent(child, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +def _strip_keyframes(root: ET.Element) -> None: + """Remove blocks entirely.""" + for kf in list(root.findall("keyframe")): + root.remove(kf) + + +def _collect_rename_tokens(root: ET.Element) -> Dict[str, str]: + """ + Collect all tokens that should be namespaced: + - any 'name' + - any class-related tokens ('class', 'childclass') so defaults don't collide + """ + tokens: set[str] = set() + for el in 
root.iter(): + for k in ("name", "class", "childclass"): + v = el.get(k) + if v and v.strip(): + tokens.add(v.strip()) + return {t: t for t in tokens} + + +def _abspath_if_relative(path: str, base_dir: str) -> str: + # ignore already-absolute or URLs or mujoco-style placeholders + if not path: + return path + if path.startswith(("http://", "https://", "file://")): + return path + if path.startswith("${"): # environment var style + return path + if os.path.isabs(path): + return path + return os.path.abspath(os.path.join(base_dir, path)) + + +def namespace_mjcf( + in_xml: str, + out_xml: str, + prefix: str, + remove_keyframe: bool = True, + absolutize_files: bool = True, +) -> Tuple[str, Dict[str, str]]: + """ + Create a namespaced copy of MJCF: + - prefixes 'name' / 'class' / 'childclass' values + - replaces references by token matching (whitespace-separated) + - optionally removes + - optionally converts all file="..." to absolute paths based on in_xml directory + + Returns: + (root_body_name_original, rename_map_old_to_new) + """ + base_dir = os.path.dirname(os.path.abspath(in_xml)) + tree = ET.parse(in_xml) + root = tree.getroot() + + if remove_keyframe: + _strip_keyframes(root) + + # Build rename map + tokens = _collect_rename_tokens(root) + rename_map: Dict[str, str] = {old: f"{prefix}{old}" for old in tokens.keys()} + + # Apply rename on attributes + for el in root.iter(): + # absolutize file paths + if absolutize_files: + for attr, val in list(el.attrib.items()): + if attr in FILE_ATTR_WHITELIST and val: + el.set(attr, _abspath_if_relative(val, base_dir)) + + # rename primary naming attrs + for k in ("name", "class", "childclass"): + v = el.get(k) + if v and v in rename_map: + el.set(k, rename_map[v]) + + # rename references by token substitution + for attr, val in list(el.attrib.items()): + # skip file paths and root model name + if attr in FILE_ATTR_WHITELIST: + continue + if el is root and attr == "model": + continue + + if not val or not val.strip(): + 
continue + + parts = val.split() + changed = False + for i, tok in enumerate(parts): + if tok in rename_map: + parts[i] = rename_map[tok] + changed = True + if changed: + el.set(attr, " ".join(parts)) + + _indent(root) + os.makedirs(os.path.dirname(os.path.abspath(out_xml)), exist_ok=True) + tree.write(out_xml, encoding="utf-8", xml_declaration=True) + return out_xml, rename_map + + +def detect_root_body_name(xml_path: str) -> str: + """ + Heuristic: pick the first directly under . + """ + tree = ET.parse(xml_path) + root = tree.getroot() + worldbody = root.find("worldbody") + if worldbody is None: + raise ValueError(f"No found in: {xml_path}") + + bodies = worldbody.findall("body") + if not bodies: + raise ValueError(f"No directly under in: {xml_path}") + + # prefer named body + for b in bodies: + nm = b.get("name") + if nm and nm.strip(): + return nm.strip() + # fallback + return bodies[0].get("name", "").strip() or "unnamed_root_body" + + +# ------------------------- +# MuJoCo state helpers +# ------------------------- + +@dataclass +class ModelState: + joint_qpos: Dict[str, np.ndarray] + joint_qvel: Dict[str, np.ndarray] + actuator_act: Dict[str, np.ndarray] + body_pose_world: Dict[str, Tuple[np.ndarray, np.ndarray]] # name -> (pos(3), quat(4)) + + +def _joint_qpos_len(jnt_type: int) -> int: + # mjtJoint: FREE=0, BALL=1, SLIDE=2, HINGE=3 (MuJoCo convention) + if jnt_type == mujoco.mjtJoint.mjJNT_FREE: + return 7 + if jnt_type == mujoco.mjtJoint.mjJNT_BALL: + return 4 + return 1 + + +def _joint_dof_len(jnt_type: int) -> int: + if jnt_type == mujoco.mjtJoint.mjJNT_FREE: + return 6 + if jnt_type == mujoco.mjtJoint.mjJNT_BALL: + return 3 + return 1 + + +def load_and_reset(model_xml: str) -> Tuple["mujoco.MjModel", "mujoco.MjData"]: + m = mujoco.MjModel.from_xml_path(model_xml) + d = mujoco.MjData(m) + + if m.nkey > 0: + # README-style: reset to keyframe 0 + mujoco.mj_resetDataKeyframe(m, d, 0) + mujoco.mj_forward(m, d) + else: + mujoco.mj_resetData(m, d) + 
d.qpos[:] = m.qpos0 + mujoco.mj_forward(m, d) + return m, d + + +def extract_state(m: "mujoco.MjModel", d: "mujoco.MjData") -> ModelState: + joint_qpos: Dict[str, np.ndarray] = {} + joint_qvel: Dict[str, np.ndarray] = {} + + # joints + for jid in range(m.njnt): + jname = mujoco.mj_id2name(m, mujoco.mjtObj.mjOBJ_JOINT, jid) + if not jname: + continue + jtype = int(m.jnt_type[jid]) + qpos_adr = int(m.jnt_qposadr[jid]) + dof_adr = int(m.jnt_dofadr[jid]) + + qpos_len = _joint_qpos_len(jtype) + dof_len = _joint_dof_len(jtype) + + joint_qpos[jname] = np.array(d.qpos[qpos_adr:qpos_adr + qpos_len], dtype=float) + joint_qvel[jname] = np.array(d.qvel[dof_adr:dof_adr + dof_len], dtype=float) + + # actuator activations (if any) + actuator_act: Dict[str, np.ndarray] = {} + for aid in range(m.na): + aname = mujoco.mj_id2name(m, mujoco.mjtObj.mjOBJ_ACTUATOR, aid) + if not aname: + continue + actadr = int(m.actuator_actadr[aid]) + actnum = int(m.actuator_actnum[aid]) + if actnum > 0: + actuator_act[aname] = np.array(d.act[actadr:actadr + actnum], dtype=float) + + # body poses + body_pose_world: Dict[str, Tuple[np.ndarray, np.ndarray]] = {} + for bid in range(m.nbody): + bname = mujoco.mj_id2name(m, mujoco.mjtObj.mjOBJ_BODY, bid) + if not bname: + continue + pos = np.array(d.xpos[bid], dtype=float) + quat = np.array(d.xquat[bid], dtype=float) # w x y z + body_pose_world[bname] = (pos, quat) + + return ModelState( + joint_qpos=joint_qpos, + joint_qvel=joint_qvel, + actuator_act=actuator_act, + body_pose_world=body_pose_world, + ) + + +# ------------------------- +# Quaternion math +# ------------------------- + +def quat_mul(q1: np.ndarray, q2: np.ndarray) -> np.ndarray: + """Hamilton product. 
q = [w,x,y,z].""" + w1, x1, y1, z1 = q1 + w2, x2, y2, z2 = q2 + return np.array([ + w1*w2 - x1*x2 - y1*y2 - z1*z2, + w1*x2 + x1*w2 + y1*z2 - z1*y2, + w1*y2 - x1*z2 + y1*w2 + z1*x2, + w1*z2 + x1*y2 - y1*x2 + z1*w2 + ], dtype=float) + + +def quat_conj(q: np.ndarray) -> np.ndarray: + w, x, y, z = q + return np.array([w, -x, -y, -z], dtype=float) + + +def quat_norm(q: np.ndarray) -> float: + return float(np.linalg.norm(q)) + + +def quat_inv(q: np.ndarray) -> np.ndarray: + n2 = float(np.dot(q, q)) + if n2 <= 0: + return np.array([1, 0, 0, 0], dtype=float) + return quat_conj(q) / n2 + + +def quat_to_mat(q: np.ndarray) -> np.ndarray: + """Quaternion (w,x,y,z) to rotation matrix.""" + w, x, y, z = q + # normalize + n = quat_norm(q) + if n == 0: + return np.eye(3) + w, x, y, z = (q / n).tolist() + + return np.array([ + [1 - 2*(y*y + z*z), 2*(x*y - z*w), 2*(x*z + y*w)], + [ 2*(x*y + z*w), 1 - 2*(x*x + z*z), 2*(y*z - x*w)], + [ 2*(x*z - y*w), 2*(y*z + x*w), 1 - 2*(x*x + y*y)], + ], dtype=float) + + +def compute_relpose(body1_pos, body1_quat, body2_pos, body2_quat) -> Tuple[np.ndarray, np.ndarray]: + """ + relpose is pose of body2 in body1 frame: + p_rel = R1^T (p2 - p1) + q_rel = inv(q1) * q2 + """ + R1 = quat_to_mat(body1_quat) + p_rel = R1.T @ (body2_pos - body1_pos) + q_rel = quat_mul(quat_inv(body1_quat), body2_quat) + + # normalize quaternion + n = quat_norm(q_rel) + if n > 0: + q_rel = q_rel / n + else: + q_rel = np.array([1, 0, 0, 0], dtype=float) + + return p_rel, q_rel + + +# ------------------------- +# Composite scene generation +# ------------------------- + +def write_scene_xml( + scene_xml: str, + leg_part_xml: str, + neck_part_xml: str, + leg_root_body: str, + neck_root_body: str, + relpose_pos: np.ndarray, + relpose_quat: np.ndarray, +) -> None: + """ + Create a scene MJCF which includes two parts and adds an equality/weld between their root bodies. 
+ """ + os.makedirs(os.path.dirname(os.path.abspath(scene_xml)), exist_ok=True) + + rp = list(relpose_pos.flatten()) + list(relpose_quat.flatten()) + rp_str = " ".join(f"{x:.8g}" for x in rp) + + # Keep this scene minimal; you can add cameras/lights as needed. + mjcf = f""" + + + + + + + + + + + + + + +""" + with open(scene_xml, "w", encoding="utf-8") as f: + f.write(mjcf) + + +def apply_state_to_composite( + composite_model: "mujoco.MjModel", + composite_data: "mujoco.MjData", + src_state: ModelState, + prefix: str, +) -> None: + """ + Copy qpos/qvel/act by name from a source model state into the composite model, + assuming composite names are prefixed. + """ + # joints + for jname, qpos in src_state.joint_qpos.items(): + tgt_name = prefix + jname + jid = mujoco.mj_name2id(composite_model, mujoco.mjtObj.mjOBJ_JOINT, tgt_name) + if jid < 0: + continue + jtype = int(composite_model.jnt_type[jid]) + qpos_adr = int(composite_model.jnt_qposadr[jid]) + qpos_len = _joint_qpos_len(jtype) + if len(qpos) == qpos_len: + composite_data.qpos[qpos_adr:qpos_adr + qpos_len] = qpos + + for jname, qvel in src_state.joint_qvel.items(): + tgt_name = prefix + jname + jid = mujoco.mj_name2id(composite_model, mujoco.mjtObj.mjOBJ_JOINT, tgt_name) + if jid < 0: + continue + jtype = int(composite_model.jnt_type[jid]) + dof_adr = int(composite_model.jnt_dofadr[jid]) + dof_len = _joint_dof_len(jtype) + if len(qvel) == dof_len: + composite_data.qvel[dof_adr:dof_adr + dof_len] = qvel + + # actuator activation states + for aname, act in src_state.actuator_act.items(): + tgt_name = prefix + aname + aid = mujoco.mj_name2id(composite_model, mujoco.mjtObj.mjOBJ_ACTUATOR, tgt_name) + if aid < 0: + continue + actadr = int(composite_model.actuator_actadr[aid]) + actnum = int(composite_model.actuator_actnum[aid]) + if actnum > 0 and len(act) == actnum: + composite_data.act[actadr:actadr + actnum] = act + + +# ------------------------- +# Rendering +# ------------------------- + +def render_gif( + 
model: "mujoco.MjModel", + data: "mujoco.MjData", + out_path: str, + width: int = 960, + height: int = 540, + fps: int = 30, + frames: int = 180, + camera: str = "cam", + settle_steps: int = 20, +) -> None: + if imageio is None: + raise SystemExit("imageio is not installed. Please `pip install imageio` to save GIF/MP4.") + + # Let constraints settle a bit + for _ in range(settle_steps): + mujoco.mj_step(model, data) + + renderer = mujoco.Renderer(model, width=width, height=height) + imgs = [] + + for _ in range(frames): + mujoco.mj_step(model, data) + renderer.update_scene(data, camera=camera) + img = renderer.render() # HxWx3 uint8 + imgs.append(img) + + os.makedirs(os.path.dirname(os.path.abspath(out_path)), exist_ok=True) + ext = os.path.splitext(out_path)[1].lower() + + if ext == ".gif": + imageio.mimsave(out_path, imgs, fps=fps) + elif ext in (".mp4", ".mkv", ".mov"): + imageio.mimsave(out_path, imgs, fps=fps) # imageio chooses writer by extension + else: + raise ValueError(f"Unsupported output format: {ext} (use .gif or .mp4)") + + +# ------------------------- +# Main +# ------------------------- + +def main() -> int: + ap = argparse.ArgumentParser() + ap.add_argument("--leg", required=True, help="Path to leg MJCF (original).") + ap.add_argument("--neck", required=True, help="Path to neck MJCF (original).") + ap.add_argument("--out", required=True, help="Output .gif/.mp4 path.") + ap.add_argument("--workdir", default="./_build_leg_neck", help="Directory for generated temp MJCFs and scene.") + ap.add_argument("--leg_prefix", default="leg_", help="Namespace prefix for leg model.") + ap.add_argument("--neck_prefix", default="neck_", help="Namespace prefix for neck model.") + + ap.add_argument("--leg_root_body", default="", help="Override: body name in leg model to weld from.") + ap.add_argument("--neck_root_body", default="", help="Override: body name in neck model to weld to.") + + ap.add_argument("--width", type=int, default=960) + ap.add_argument("--height", 
type=int, default=540) + ap.add_argument("--fps", type=int, default=30) + ap.add_argument("--frames", type=int, default=180) + ap.add_argument("--settle_steps", type=int, default=20) + + args = ap.parse_args() + + leg_xml = os.path.abspath(args.leg) + neck_xml = os.path.abspath(args.neck) + workdir = os.path.abspath(args.workdir) + os.makedirs(workdir, exist_ok=True) + + # 1) Detect root bodies (in ORIGINAL names) + leg_root = args.leg_root_body.strip() or detect_root_body_name(leg_xml) + neck_root = args.neck_root_body.strip() or detect_root_body_name(neck_xml) + + # 2) Load originals and reset keyframe 0 (README-style) + leg_m, leg_d = load_and_reset(leg_xml) + neck_m, neck_d = load_and_reset(neck_xml) + leg_state = extract_state(leg_m, leg_d) + neck_state = extract_state(neck_m, neck_d) + + if leg_root not in leg_state.body_pose_world: + raise SystemExit(f"Cannot find body '{leg_root}' in leg model after reset. Try --leg_root_body.") + if neck_root not in neck_state.body_pose_world: + raise SystemExit(f"Cannot find body '{neck_root}' in neck model after reset. 
Try --neck_root_body.") + + leg_pos, leg_quat = leg_state.body_pose_world[leg_root] + neck_pos, neck_quat = neck_state.body_pose_world[neck_root] + + # 3) Compute relpose (neck body pose in leg body frame) from keyframe poses + rel_p, rel_q = compute_relpose(leg_pos, leg_quat, neck_pos, neck_quat) + + # 4) Write namespaced, no-keyframe copies (assets file paths become absolute) + leg_ns_xml = os.path.join(workdir, "leg_ns_nokey.xml") + neck_ns_xml = os.path.join(workdir, "neck_ns_nokey.xml") + + _, leg_map = namespace_mjcf(leg_xml, leg_ns_xml, prefix=args.leg_prefix, remove_keyframe=True, absolutize_files=True) + _, neck_map = namespace_mjcf(neck_xml, neck_ns_xml, prefix=args.neck_prefix, remove_keyframe=True, absolutize_files=True) + + leg_root_ns = args.leg_prefix + leg_root + neck_root_ns = args.neck_prefix + neck_root + + # 5) Generate composite scene XML + scene_xml = os.path.join(workdir, "leg_neck_connected_scene.xml") + write_scene_xml( + scene_xml=scene_xml, + leg_part_xml=leg_ns_xml, + neck_part_xml=neck_ns_xml, + leg_root_body=leg_root_ns, + neck_root_body=neck_root_ns, + relpose_pos=rel_p, + relpose_quat=rel_q, + ) + + # 6) Load composite and apply initial state by copying from original keyframes + comp_m = mujoco.MjModel.from_xml_path(scene_xml) + comp_d = mujoco.MjData(comp_m) + + mujoco.mj_resetData(comp_m, comp_d) + apply_state_to_composite(comp_m, comp_d, leg_state, prefix=args.leg_prefix) + apply_state_to_composite(comp_m, comp_d, neck_state, prefix=args.neck_prefix) + mujoco.mj_forward(comp_m, comp_d) + + # 7) Render + render_gif( + model=comp_m, + data=comp_d, + out_path=os.path.abspath(args.out), + width=args.width, + height=args.height, + fps=args.fps, + frames=args.frames, + camera="cam", + settle_steps=args.settle_steps, + ) + + print(f"[OK] Scene: {scene_xml}") + print(f"[OK] Output: {os.path.abspath(args.out)}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) From bf6f5c8e75e81c166b33c5d81196983e01e99bfc Mon Sep 17 
00:00:00 2001 From: qinyiquan57-debug Date: Mon, 22 Dec 2025 14:30:49 +0800 Subject: [PATCH 05/11] =?UTF-8?q?=E4=BC=98=E5=8C=96=E4=BA=86=E5=85=B3?= =?UTF-8?q?=E4=BA=8E=E4=BA=BA=E4=BD=93=E6=A8=A1=E5=9E=8B=E7=9A=84=E5=A4=A7?= =?UTF-8?q?=E8=85=BF=E8=82=8C=E8=82=89=E7=9A=84=E6=B8=B2=E6=9F=93=EF=BC=8C?= =?UTF-8?q?=E4=BD=BF=E6=A8=A1=E5=9E=8B=E6=9B=B4=E5=8A=A0=E7=9C=9F=E5=AE=9E?= =?UTF-8?q?=E5=8C=96?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../enhanced_human_model.py | 297 +++++++++ .../human_model_initial_render.py | 595 ------------------ 2 files changed, 297 insertions(+), 595 deletions(-) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/enhanced_human_model.py delete mode 100644 src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/enhanced_human_model.py b/src/biomechanical_hcl_smart_simulation_platform/enhanced_human_model.py new file mode 100644 index 0000000000..b9bb6e47e1 --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/enhanced_human_model.py @@ -0,0 +1,297 @@ +# preliminary_human_model_render_thigh_muscles.py +# Render MuJoCo MJCF with "thigh muscle" visualization as thick capsules, optionally exporting a GIF. 
+ +from __future__ import annotations + +import argparse +import math +from dataclasses import dataclass +from typing import Iterable, List, Optional, Sequence, Tuple + +import numpy as np + +import mujoco + +try: + import imageio.v3 as iio +except Exception: # pragma: no cover + iio = None + + +# ----------------------------- +# Config +# ----------------------------- +DEFAULT_THIGH_KEYWORDS = ( + # Quadriceps + "rectus", "rect_fem", "rf", "vasti", "vastus", + # Hamstrings + "ham", "biceps_fem", "bf", "semiten", "semimem", + # Adductors / sartorius / gracilis / TFL + "adductor", "add", "sart", "sartorius", "grac", "gracilis", "tfl", "tensor", + # Hip flexors / glutes (often affect thigh look) + "iliopsoas", "psoas", "iliacus", "glut", "glute", +) + +RGBA_REST = np.array([0.85, 0.15, 0.15, 0.45], dtype=np.float32) # relaxed muscle (semi-transparent red) +RGBA_ACTIVE = np.array([1.00, 0.25, 0.25, 0.85], dtype=np.float32) # active muscle (brighter, less transparent) + + +@dataclass +class MuscleVizSpec: + base_radius: float = 0.010 # meters + bulge_gain: float = 1.25 # radius multiplier gain: r = base*(1 + bulge_gain*act) + max_capsules: int = 3000 # budget for extra geoms + + +# ----------------------------- +# Utilities +# ----------------------------- +def _safe_name(model: mujoco.MjModel, objtype: mujoco.mjtObj, idx: int) -> str: + name = mujoco.mj_id2name(model, objtype, idx) + return name or f"{objtype.name}_{idx}" + + +def _pick_actuators_by_keywords(model: mujoco.MjModel, keywords: Sequence[str]) -> List[int]: + keys = tuple(k.lower() for k in keywords) + picked: List[int] = [] + for i in range(model.nu): # in MuJoCo, #controls == #actuators for typical models + nm = _safe_name(model, mujoco.mjtObj.mjOBJ_ACTUATOR, i).lower() + if any(k in nm for k in keys): + picked.append(i) + return picked + + +def _actuator_to_tendon_id(model: mujoco.MjModel, act_id: int) -> Optional[int]: + """ + For muscle actuators using tendon transmission, actuator_trntype should be 
mjTRN_TENDON + and actuator_trnid[act_id, 0] is the tendon id. + """ + if not hasattr(model, "actuator_trntype") or not hasattr(model, "actuator_trnid"): + return None + trntype = int(model.actuator_trntype[act_id]) + if trntype != int(mujoco.mjtTrn.mjTRN_TENDON): + return None + tid = int(model.actuator_trnid[act_id, 0]) + if tid < 0 or tid >= model.ntendon: + return None + return tid + + +def _tendon_path_sites_world(model: mujoco.MjModel, data: mujoco.MjData, tendon_id: int) -> Optional[np.ndarray]: + """ + Best-effort extraction of tendon path point(s) using wrap sites. + If wrap-site info isn't available, returns None. + """ + required = ("tendon_adr", "tendon_num", "wrap_type", "wrap_objid") + if not all(hasattr(model, f) for f in required): + return None + + adr = int(model.tendon_adr[tendon_id]) + num = int(model.tendon_num[tendon_id]) + if num <= 0: + return None + + pts: List[np.ndarray] = [] + for w in range(adr, adr + num): + wtype = int(model.wrap_type[w]) + if wtype == int(mujoco.mjtWrap.mjWRAP_SITE): + sid = int(model.wrap_objid[w]) + pts.append(data.site_xpos[sid].copy()) + # NOTE: other wrap types (geom/pulley) are ignored here for simplicity. + + if len(pts) >= 2: + return np.stack(pts, axis=0) + return None + + +def _normalize01(x: float) -> float: + return float(max(0.0, min(1.0, x))) + + +def _activation_from_ctrl(data: mujoco.MjData, act_id: int) -> float: + if data.ctrl is None or act_id >= data.ctrl.shape[0]: + return 0.0 + return _normalize01(float(data.ctrl[act_id])) + + +def _lerp_rgba(a: np.ndarray, b: np.ndarray, t: float) -> np.ndarray: + t = _normalize01(t) + return (1.0 - t) * a + t * b + + +def _set_default_camera(model: mujoco.MjModel, cam: mujoco.MjvCamera) -> None: + # Use model statistics if available to frame the whole model nicely. 
+ center = np.array(model.stat.center, dtype=np.float64) + extent = float(model.stat.extent) + + cam.type = mujoco.mjtCamera.mjCAMERA_FREE + cam.lookat[:] = center + cam.distance = 2.2 * extent if extent > 1e-6 else 2.0 + cam.azimuth = 90.0 + cam.elevation = -20.0 + + +def _add_capsule(scene: mujoco.MjvScene, geom_id: int, p0: np.ndarray, p1: np.ndarray, radius: float, rgba: np.ndarray) -> None: + """ + Uses mjv_initGeom + mjv_connector to create a capsule between p0 and p1. + mjv_connector assumes mjv_initGeom has set other properties like rgba. :contentReference[oaicite:3]{index=3} + """ + mujoco.mjv_initGeom( + scene.geoms[geom_id], + type=mujoco.mjtGeom.mjGEOM_CAPSULE, + size=np.zeros(3, dtype=np.float64), + pos=np.zeros(3, dtype=np.float64), + mat=np.eye(3, dtype=np.float64).flatten(), + rgba=rgba.astype(np.float32), + ) + mujoco.mjv_connector( + scene.geoms[geom_id], + type=mujoco.mjtGeom.mjGEOM_CAPSULE, + width=float(radius), + from_=p0.astype(np.float64), + to=p1.astype(np.float64), + ) + + +# ----------------------------- +# Offscreen render loop +# ----------------------------- +def render_offscreen_gif( + mjcf_path: str, + out_path: str, + seconds: float, + fps: int, + width: int, + height: int, + keywords: Sequence[str], + viz: MuscleVizSpec, +) -> None: + if iio is None: + raise RuntimeError("imageio 未安装:请先 `pip install imageio`(或 imageio[ffmpeg] 生成 mp4)。") + + model = mujoco.MjModel.from_xml_path(mjcf_path) + data = mujoco.MjData(model) + + # Load keyframe 0 as recommended for MyoConverter outputs. :contentReference[oaicite:4]{index=4} + try: + mujoco.mj_resetDataKeyframe(model, data, 0) + mujoco.mj_forward(model, data) + except Exception: + # If no keyframe exists, continue. 
+ mujoco.mj_forward(model, data) + + # Pick thigh-related actuators and map to tendons + act_ids = _pick_actuators_by_keywords(model, keywords) + act_to_tendon = {a: _actuator_to_tendon_id(model, a) for a in act_ids} + act_to_tendon = {a: t for a, t in act_to_tendon.items() if t is not None} + + if not act_to_tendon: + print("[WARN] 没找到匹配关键词且使用 tendon transmission 的 actuator;将只渲染骨架/身体,不画肌肉胶囊。") + + # Create OpenGL context for offscreen rendering. :contentReference[oaicite:5]{index=5} + gl_ctx = mujoco.GLContext(width, height) + gl_ctx.make_current() + + cam = mujoco.MjvCamera() + opt = mujoco.MjvOption() + scn = mujoco.MjvScene(model, maxgeom=viz.max_capsules) # allocate lots of geom slots + con = mujoco.MjrContext(model, mujoco.mjtFontScale.mjFONTSCALE_100) + viewport = mujoco.MjrRect(0, 0, width, height) + + _set_default_camera(model, cam) + + # Make sure we have an offscreen buffer sized correctly + mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_OFFSCREEN, con) + mujoco.mjr_resizeOffscreen(width, height, con) + + nframes = int(math.ceil(seconds * fps)) + dt = 1.0 / fps + + frames: List[np.ndarray] = [] + + # simple “demo” control signal so you can see bulging: sine-wave activations + phases = np.linspace(0.0, 2.0 * math.pi, num=max(1, len(act_to_tendon)), endpoint=False) + + for k in range(nframes): + t = k * dt + + # drive the selected thigh muscles a bit (0..1) + if model.nu > 0: + data.ctrl[:] = 0.0 + for j, (act_id, _) in enumerate(act_to_tendon.items()): + data.ctrl[act_id] = 0.5 + 0.5 * math.sin(2.0 * math.pi * 0.8 * t + float(phases[j])) + + mujoco.mj_step(model, data) + + # Update base scene (standard pipeline). 
:contentReference[oaicite:6]{index=6} + mujoco.mjv_updateScene(model, data, opt, None, cam, mujoco.mjtCatBit.mjCAT_ALL, scn) + + # Append custom "muscle" capsules after the model geoms + geom_id = int(scn.ngeom) + for act_id, tendon_id in act_to_tendon.items(): + pts = _tendon_path_sites_world(model, data, tendon_id) + if pts is None: + continue + + act = _activation_from_ctrl(data, act_id) + radius = float(viz.base_radius * (1.0 + viz.bulge_gain * act)) + rgba = _lerp_rgba(RGBA_REST, RGBA_ACTIVE, act) + + for p0, p1 in zip(pts[:-1], pts[1:]): + if geom_id >= scn.maxgeom: + break + # skip tiny segments + if float(np.linalg.norm(p1 - p0)) < 1e-4: + continue + _add_capsule(scn, geom_id, p0, p1, radius, rgba) + geom_id += 1 + + scn.ngeom = geom_id + + mujoco.mjr_render(viewport, scn, con) + + rgb = np.zeros((height, width, 3), dtype=np.uint8) + depth = np.zeros((height, width), dtype=np.float32) + mujoco.mjr_readPixels(rgb, depth, viewport, con) # :contentReference[oaicite:7]{index=7} + rgb = np.flipud(rgb) # OpenGL origin is bottom-left + frames.append(rgb) + + iio.imwrite(out_path, np.stack(frames, axis=0), fps=fps) + gl_ctx.free() + + print(f"[OK] saved: {out_path}") + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--mjcf", required=True, help="Path to MJCF xml (e.g. models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml)") + parser.add_argument("--out", default="thigh_muscles.gif", help="Output GIF path") + parser.add_argument("--seconds", type=float, default=3.0) + parser.add_argument("--fps", type=int, default=30) + parser.add_argument("--width", type=int, default=960) + parser.add_argument("--height", type=int, default=720) + parser.add_argument("--base-radius", type=float, default=0.010) + parser.add_argument("--bulge-gain", type=float, default=1.25) + parser.add_argument("--keyword", action="append", default=None, + help="Add a muscle name keyword to match actuators (can repeat). 
If not set, uses a thigh keyword list.") + parser.add_argument("--maxgeom", type=int, default=4000, help="MjvScene maxgeom budget (increase if many muscles)") + + args = parser.parse_args() + + keywords = args.keyword if args.keyword else list(DEFAULT_THIGH_KEYWORDS) + viz = MuscleVizSpec(base_radius=args.base_radius, bulge_gain=args.bulge_gain, max_capsules=args.maxgeom) + + render_offscreen_gif( + mjcf_path=args.mjcf, + out_path=args.out, + seconds=args.seconds, + fps=args.fps, + width=args.width, + height=args.height, + keywords=keywords, + viz=viz, + ) + + +if __name__ == "__main__": + main() diff --git a/src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py b/src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py deleted file mode 100644 index c80043dcb5..0000000000 --- a/src/biomechanical_hcl_smart_simulation_platform/human_model_initial_render.py +++ /dev/null @@ -1,595 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -""" -Render a connected (welded) leg + neck composite MJCF. - -What this script does: -1) Load leg.xml and neck.xml (original files). -2) Reset each model to keyframe(0) if present (README-style init). -3) Export namespaced copies (leg_, neck_) WITHOUT to avoid nq mismatch when composing. - Also converts all `file="..."` paths to absolute paths, so assets still load from anywhere. -4) Compute relpose for an equality/weld constraint from the keyframe poses. -5) Generate a scene MJCF that s the two namespaced parts and adds the weld. -6) Load the scene, copy qpos/qvel/act from each part’s keyframe into the composite model, then render. - -Usage: - python scripts/render_leg_neck_connected.py \ - --leg /path/to/leg.xml \ - --neck /path/to/neck.xml \ - --out /path/to/out.gif - -Notes: -- If auto-detected root body is not what you want, pass --leg_root_body / --neck_root_body. -- Output scene + temp MJCFs are written under --workdir (default: ./_build_leg_neck). 
-""" - -from __future__ import annotations - -import argparse -import os -import sys -import math -import shutil -import xml.etree.ElementTree as ET -from dataclasses import dataclass -from typing import Dict, List, Tuple, Optional - -import numpy as np - -# imageio is optional unless you save gif/mp4 -try: - import imageio.v2 as imageio -except Exception: - imageio = None - -try: - import mujoco -except Exception as e: - raise SystemExit( - "Cannot import `mujoco`. Please install official MuJoCo python bindings, e.g.\n" - " pip install mujoco\n" - f"Original error: {e}" - ) - - -# ------------------------- -# XML helpers -# ------------------------- - -FILE_ATTR_WHITELIST = {"file"} # attributes that should be treated as filesystem paths - - -def _indent(elem: ET.Element, level: int = 0) -> None: - """Pretty-print indentation (in-place).""" - i = "\n" + level * " " - if len(elem): - if not elem.text or not elem.text.strip(): - elem.text = i + " " - for child in elem: - _indent(child, level + 1) - if not elem.tail or not elem.tail.strip(): - elem.tail = i - else: - if level and (not elem.tail or not elem.tail.strip()): - elem.tail = i - - -def _strip_keyframes(root: ET.Element) -> None: - """Remove blocks entirely.""" - for kf in list(root.findall("keyframe")): - root.remove(kf) - - -def _collect_rename_tokens(root: ET.Element) -> Dict[str, str]: - """ - Collect all tokens that should be namespaced: - - any 'name' - - any class-related tokens ('class', 'childclass') so defaults don't collide - """ - tokens: set[str] = set() - for el in root.iter(): - for k in ("name", "class", "childclass"): - v = el.get(k) - if v and v.strip(): - tokens.add(v.strip()) - return {t: t for t in tokens} - - -def _abspath_if_relative(path: str, base_dir: str) -> str: - # ignore already-absolute or URLs or mujoco-style placeholders - if not path: - return path - if path.startswith(("http://", "https://", "file://")): - return path - if path.startswith("${"): # environment var style - 
return path - if os.path.isabs(path): - return path - return os.path.abspath(os.path.join(base_dir, path)) - - -def namespace_mjcf( - in_xml: str, - out_xml: str, - prefix: str, - remove_keyframe: bool = True, - absolutize_files: bool = True, -) -> Tuple[str, Dict[str, str]]: - """ - Create a namespaced copy of MJCF: - - prefixes 'name' / 'class' / 'childclass' values - - replaces references by token matching (whitespace-separated) - - optionally removes - - optionally converts all file="..." to absolute paths based on in_xml directory - - Returns: - (root_body_name_original, rename_map_old_to_new) - """ - base_dir = os.path.dirname(os.path.abspath(in_xml)) - tree = ET.parse(in_xml) - root = tree.getroot() - - if remove_keyframe: - _strip_keyframes(root) - - # Build rename map - tokens = _collect_rename_tokens(root) - rename_map: Dict[str, str] = {old: f"{prefix}{old}" for old in tokens.keys()} - - # Apply rename on attributes - for el in root.iter(): - # absolutize file paths - if absolutize_files: - for attr, val in list(el.attrib.items()): - if attr in FILE_ATTR_WHITELIST and val: - el.set(attr, _abspath_if_relative(val, base_dir)) - - # rename primary naming attrs - for k in ("name", "class", "childclass"): - v = el.get(k) - if v and v in rename_map: - el.set(k, rename_map[v]) - - # rename references by token substitution - for attr, val in list(el.attrib.items()): - # skip file paths and root model name - if attr in FILE_ATTR_WHITELIST: - continue - if el is root and attr == "model": - continue - - if not val or not val.strip(): - continue - - parts = val.split() - changed = False - for i, tok in enumerate(parts): - if tok in rename_map: - parts[i] = rename_map[tok] - changed = True - if changed: - el.set(attr, " ".join(parts)) - - _indent(root) - os.makedirs(os.path.dirname(os.path.abspath(out_xml)), exist_ok=True) - tree.write(out_xml, encoding="utf-8", xml_declaration=True) - return out_xml, rename_map - - -def detect_root_body_name(xml_path: str) -> str: - 
""" - Heuristic: pick the first directly under . - """ - tree = ET.parse(xml_path) - root = tree.getroot() - worldbody = root.find("worldbody") - if worldbody is None: - raise ValueError(f"No found in: {xml_path}") - - bodies = worldbody.findall("body") - if not bodies: - raise ValueError(f"No directly under in: {xml_path}") - - # prefer named body - for b in bodies: - nm = b.get("name") - if nm and nm.strip(): - return nm.strip() - # fallback - return bodies[0].get("name", "").strip() or "unnamed_root_body" - - -# ------------------------- -# MuJoCo state helpers -# ------------------------- - -@dataclass -class ModelState: - joint_qpos: Dict[str, np.ndarray] - joint_qvel: Dict[str, np.ndarray] - actuator_act: Dict[str, np.ndarray] - body_pose_world: Dict[str, Tuple[np.ndarray, np.ndarray]] # name -> (pos(3), quat(4)) - - -def _joint_qpos_len(jnt_type: int) -> int: - # mjtJoint: FREE=0, BALL=1, SLIDE=2, HINGE=3 (MuJoCo convention) - if jnt_type == mujoco.mjtJoint.mjJNT_FREE: - return 7 - if jnt_type == mujoco.mjtJoint.mjJNT_BALL: - return 4 - return 1 - - -def _joint_dof_len(jnt_type: int) -> int: - if jnt_type == mujoco.mjtJoint.mjJNT_FREE: - return 6 - if jnt_type == mujoco.mjtJoint.mjJNT_BALL: - return 3 - return 1 - - -def load_and_reset(model_xml: str) -> Tuple["mujoco.MjModel", "mujoco.MjData"]: - m = mujoco.MjModel.from_xml_path(model_xml) - d = mujoco.MjData(m) - - if m.nkey > 0: - # README-style: reset to keyframe 0 - mujoco.mj_resetDataKeyframe(m, d, 0) - mujoco.mj_forward(m, d) - else: - mujoco.mj_resetData(m, d) - d.qpos[:] = m.qpos0 - mujoco.mj_forward(m, d) - return m, d - - -def extract_state(m: "mujoco.MjModel", d: "mujoco.MjData") -> ModelState: - joint_qpos: Dict[str, np.ndarray] = {} - joint_qvel: Dict[str, np.ndarray] = {} - - # joints - for jid in range(m.njnt): - jname = mujoco.mj_id2name(m, mujoco.mjtObj.mjOBJ_JOINT, jid) - if not jname: - continue - jtype = int(m.jnt_type[jid]) - qpos_adr = int(m.jnt_qposadr[jid]) - dof_adr = 
int(m.jnt_dofadr[jid]) - - qpos_len = _joint_qpos_len(jtype) - dof_len = _joint_dof_len(jtype) - - joint_qpos[jname] = np.array(d.qpos[qpos_adr:qpos_adr + qpos_len], dtype=float) - joint_qvel[jname] = np.array(d.qvel[dof_adr:dof_adr + dof_len], dtype=float) - - # actuator activations (if any) - actuator_act: Dict[str, np.ndarray] = {} - for aid in range(m.na): - aname = mujoco.mj_id2name(m, mujoco.mjtObj.mjOBJ_ACTUATOR, aid) - if not aname: - continue - actadr = int(m.actuator_actadr[aid]) - actnum = int(m.actuator_actnum[aid]) - if actnum > 0: - actuator_act[aname] = np.array(d.act[actadr:actadr + actnum], dtype=float) - - # body poses - body_pose_world: Dict[str, Tuple[np.ndarray, np.ndarray]] = {} - for bid in range(m.nbody): - bname = mujoco.mj_id2name(m, mujoco.mjtObj.mjOBJ_BODY, bid) - if not bname: - continue - pos = np.array(d.xpos[bid], dtype=float) - quat = np.array(d.xquat[bid], dtype=float) # w x y z - body_pose_world[bname] = (pos, quat) - - return ModelState( - joint_qpos=joint_qpos, - joint_qvel=joint_qvel, - actuator_act=actuator_act, - body_pose_world=body_pose_world, - ) - - -# ------------------------- -# Quaternion math -# ------------------------- - -def quat_mul(q1: np.ndarray, q2: np.ndarray) -> np.ndarray: - """Hamilton product. 
q = [w,x,y,z].""" - w1, x1, y1, z1 = q1 - w2, x2, y2, z2 = q2 - return np.array([ - w1*w2 - x1*x2 - y1*y2 - z1*z2, - w1*x2 + x1*w2 + y1*z2 - z1*y2, - w1*y2 - x1*z2 + y1*w2 + z1*x2, - w1*z2 + x1*y2 - y1*x2 + z1*w2 - ], dtype=float) - - -def quat_conj(q: np.ndarray) -> np.ndarray: - w, x, y, z = q - return np.array([w, -x, -y, -z], dtype=float) - - -def quat_norm(q: np.ndarray) -> float: - return float(np.linalg.norm(q)) - - -def quat_inv(q: np.ndarray) -> np.ndarray: - n2 = float(np.dot(q, q)) - if n2 <= 0: - return np.array([1, 0, 0, 0], dtype=float) - return quat_conj(q) / n2 - - -def quat_to_mat(q: np.ndarray) -> np.ndarray: - """Quaternion (w,x,y,z) to rotation matrix.""" - w, x, y, z = q - # normalize - n = quat_norm(q) - if n == 0: - return np.eye(3) - w, x, y, z = (q / n).tolist() - - return np.array([ - [1 - 2*(y*y + z*z), 2*(x*y - z*w), 2*(x*z + y*w)], - [ 2*(x*y + z*w), 1 - 2*(x*x + z*z), 2*(y*z - x*w)], - [ 2*(x*z - y*w), 2*(y*z + x*w), 1 - 2*(x*x + y*y)], - ], dtype=float) - - -def compute_relpose(body1_pos, body1_quat, body2_pos, body2_quat) -> Tuple[np.ndarray, np.ndarray]: - """ - relpose is pose of body2 in body1 frame: - p_rel = R1^T (p2 - p1) - q_rel = inv(q1) * q2 - """ - R1 = quat_to_mat(body1_quat) - p_rel = R1.T @ (body2_pos - body1_pos) - q_rel = quat_mul(quat_inv(body1_quat), body2_quat) - - # normalize quaternion - n = quat_norm(q_rel) - if n > 0: - q_rel = q_rel / n - else: - q_rel = np.array([1, 0, 0, 0], dtype=float) - - return p_rel, q_rel - - -# ------------------------- -# Composite scene generation -# ------------------------- - -def write_scene_xml( - scene_xml: str, - leg_part_xml: str, - neck_part_xml: str, - leg_root_body: str, - neck_root_body: str, - relpose_pos: np.ndarray, - relpose_quat: np.ndarray, -) -> None: - """ - Create a scene MJCF which includes two parts and adds an equality/weld between their root bodies. 
- """ - os.makedirs(os.path.dirname(os.path.abspath(scene_xml)), exist_ok=True) - - rp = list(relpose_pos.flatten()) + list(relpose_quat.flatten()) - rp_str = " ".join(f"{x:.8g}" for x in rp) - - # Keep this scene minimal; you can add cameras/lights as needed. - mjcf = f""" - - - - - - - - - - - - - - -""" - with open(scene_xml, "w", encoding="utf-8") as f: - f.write(mjcf) - - -def apply_state_to_composite( - composite_model: "mujoco.MjModel", - composite_data: "mujoco.MjData", - src_state: ModelState, - prefix: str, -) -> None: - """ - Copy qpos/qvel/act by name from a source model state into the composite model, - assuming composite names are prefixed. - """ - # joints - for jname, qpos in src_state.joint_qpos.items(): - tgt_name = prefix + jname - jid = mujoco.mj_name2id(composite_model, mujoco.mjtObj.mjOBJ_JOINT, tgt_name) - if jid < 0: - continue - jtype = int(composite_model.jnt_type[jid]) - qpos_adr = int(composite_model.jnt_qposadr[jid]) - qpos_len = _joint_qpos_len(jtype) - if len(qpos) == qpos_len: - composite_data.qpos[qpos_adr:qpos_adr + qpos_len] = qpos - - for jname, qvel in src_state.joint_qvel.items(): - tgt_name = prefix + jname - jid = mujoco.mj_name2id(composite_model, mujoco.mjtObj.mjOBJ_JOINT, tgt_name) - if jid < 0: - continue - jtype = int(composite_model.jnt_type[jid]) - dof_adr = int(composite_model.jnt_dofadr[jid]) - dof_len = _joint_dof_len(jtype) - if len(qvel) == dof_len: - composite_data.qvel[dof_adr:dof_adr + dof_len] = qvel - - # actuator activation states - for aname, act in src_state.actuator_act.items(): - tgt_name = prefix + aname - aid = mujoco.mj_name2id(composite_model, mujoco.mjtObj.mjOBJ_ACTUATOR, tgt_name) - if aid < 0: - continue - actadr = int(composite_model.actuator_actadr[aid]) - actnum = int(composite_model.actuator_actnum[aid]) - if actnum > 0 and len(act) == actnum: - composite_data.act[actadr:actadr + actnum] = act - - -# ------------------------- -# Rendering -# ------------------------- - -def render_gif( - 
model: "mujoco.MjModel", - data: "mujoco.MjData", - out_path: str, - width: int = 960, - height: int = 540, - fps: int = 30, - frames: int = 180, - camera: str = "cam", - settle_steps: int = 20, -) -> None: - if imageio is None: - raise SystemExit("imageio is not installed. Please `pip install imageio` to save GIF/MP4.") - - # Let constraints settle a bit - for _ in range(settle_steps): - mujoco.mj_step(model, data) - - renderer = mujoco.Renderer(model, width=width, height=height) - imgs = [] - - for _ in range(frames): - mujoco.mj_step(model, data) - renderer.update_scene(data, camera=camera) - img = renderer.render() # HxWx3 uint8 - imgs.append(img) - - os.makedirs(os.path.dirname(os.path.abspath(out_path)), exist_ok=True) - ext = os.path.splitext(out_path)[1].lower() - - if ext == ".gif": - imageio.mimsave(out_path, imgs, fps=fps) - elif ext in (".mp4", ".mkv", ".mov"): - imageio.mimsave(out_path, imgs, fps=fps) # imageio chooses writer by extension - else: - raise ValueError(f"Unsupported output format: {ext} (use .gif or .mp4)") - - -# ------------------------- -# Main -# ------------------------- - -def main() -> int: - ap = argparse.ArgumentParser() - ap.add_argument("--leg", required=True, help="Path to leg MJCF (original).") - ap.add_argument("--neck", required=True, help="Path to neck MJCF (original).") - ap.add_argument("--out", required=True, help="Output .gif/.mp4 path.") - ap.add_argument("--workdir", default="./_build_leg_neck", help="Directory for generated temp MJCFs and scene.") - ap.add_argument("--leg_prefix", default="leg_", help="Namespace prefix for leg model.") - ap.add_argument("--neck_prefix", default="neck_", help="Namespace prefix for neck model.") - - ap.add_argument("--leg_root_body", default="", help="Override: body name in leg model to weld from.") - ap.add_argument("--neck_root_body", default="", help="Override: body name in neck model to weld to.") - - ap.add_argument("--width", type=int, default=960) - ap.add_argument("--height", 
type=int, default=540) - ap.add_argument("--fps", type=int, default=30) - ap.add_argument("--frames", type=int, default=180) - ap.add_argument("--settle_steps", type=int, default=20) - - args = ap.parse_args() - - leg_xml = os.path.abspath(args.leg) - neck_xml = os.path.abspath(args.neck) - workdir = os.path.abspath(args.workdir) - os.makedirs(workdir, exist_ok=True) - - # 1) Detect root bodies (in ORIGINAL names) - leg_root = args.leg_root_body.strip() or detect_root_body_name(leg_xml) - neck_root = args.neck_root_body.strip() or detect_root_body_name(neck_xml) - - # 2) Load originals and reset keyframe 0 (README-style) - leg_m, leg_d = load_and_reset(leg_xml) - neck_m, neck_d = load_and_reset(neck_xml) - leg_state = extract_state(leg_m, leg_d) - neck_state = extract_state(neck_m, neck_d) - - if leg_root not in leg_state.body_pose_world: - raise SystemExit(f"Cannot find body '{leg_root}' in leg model after reset. Try --leg_root_body.") - if neck_root not in neck_state.body_pose_world: - raise SystemExit(f"Cannot find body '{neck_root}' in neck model after reset. 
Try --neck_root_body.") - - leg_pos, leg_quat = leg_state.body_pose_world[leg_root] - neck_pos, neck_quat = neck_state.body_pose_world[neck_root] - - # 3) Compute relpose (neck body pose in leg body frame) from keyframe poses - rel_p, rel_q = compute_relpose(leg_pos, leg_quat, neck_pos, neck_quat) - - # 4) Write namespaced, no-keyframe copies (assets file paths become absolute) - leg_ns_xml = os.path.join(workdir, "leg_ns_nokey.xml") - neck_ns_xml = os.path.join(workdir, "neck_ns_nokey.xml") - - _, leg_map = namespace_mjcf(leg_xml, leg_ns_xml, prefix=args.leg_prefix, remove_keyframe=True, absolutize_files=True) - _, neck_map = namespace_mjcf(neck_xml, neck_ns_xml, prefix=args.neck_prefix, remove_keyframe=True, absolutize_files=True) - - leg_root_ns = args.leg_prefix + leg_root - neck_root_ns = args.neck_prefix + neck_root - - # 5) Generate composite scene XML - scene_xml = os.path.join(workdir, "leg_neck_connected_scene.xml") - write_scene_xml( - scene_xml=scene_xml, - leg_part_xml=leg_ns_xml, - neck_part_xml=neck_ns_xml, - leg_root_body=leg_root_ns, - neck_root_body=neck_root_ns, - relpose_pos=rel_p, - relpose_quat=rel_q, - ) - - # 6) Load composite and apply initial state by copying from original keyframes - comp_m = mujoco.MjModel.from_xml_path(scene_xml) - comp_d = mujoco.MjData(comp_m) - - mujoco.mj_resetData(comp_m, comp_d) - apply_state_to_composite(comp_m, comp_d, leg_state, prefix=args.leg_prefix) - apply_state_to_composite(comp_m, comp_d, neck_state, prefix=args.neck_prefix) - mujoco.mj_forward(comp_m, comp_d) - - # 7) Render - render_gif( - model=comp_m, - data=comp_d, - out_path=os.path.abspath(args.out), - width=args.width, - height=args.height, - fps=args.fps, - frames=args.frames, - camera="cam", - settle_steps=args.settle_steps, - ) - - print(f"[OK] Scene: {scene_xml}") - print(f"[OK] Output: {os.path.abspath(args.out)}") - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) From 87fd74f4d6e64d7d8508bc1e8c931f86accc096e Mon Sep 17 
00:00:00 2001 From: qinyiquan57-debug Date: Tue, 23 Dec 2025 10:23:06 +0800 Subject: [PATCH 06/11] =?UTF-8?q?=E6=88=90=E5=8A=9F=E6=B8=B2=E6=9F=93?= =?UTF-8?q?=E4=BA=863D=E5=AE=8C=E6=95=B4=E7=9A=84=E4=BA=BA=E4=BD=93?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../initial_human_mode_main.py | 518 ++++++++++++++++++ .../models_arm.py | 184 ------- .../models_leg.py | 214 -------- .../models_neck.py | 177 ------ 4 files changed, 518 insertions(+), 575 deletions(-) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/initial_human_mode_main.py delete mode 100644 src/biomechanical_hcl_smart_simulation_platform/models_arm.py delete mode 100644 src/biomechanical_hcl_smart_simulation_platform/models_leg.py delete mode 100644 src/biomechanical_hcl_smart_simulation_platform/models_neck.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/initial_human_mode_main.py b/src/biomechanical_hcl_smart_simulation_platform/initial_human_mode_main.py new file mode 100644 index 0000000000..fc18ba9333 --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/initial_human_mode_main.py @@ -0,0 +1,518 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +""" +initial_human_model_render.py +Render a MyoConverter MuJoCo human model (MJCF) with enhanced visuals. + +Key points (from README): +- Converted XML contains a keyframe that should be loaded at init. + Use mujoco.mj_resetDataKeyframe(model, data, 0). :contentReference[oaicite:1]{index=1} + +Features: +- Patches MJCF into a "render-friendly" version (skybox/ground/lights/quality). +- Offscreen render to PNG/GIF/MP4. +- Optional: render leg/thigh muscle "volume" as semi-transparent capsules along tendon site paths. 
+ +Examples: + # Still image + python initial_human_model_render.py --xml path/to/model.xml --out render.png + + # Orbit GIF + python initial_human_model_render.py --xml path/to/model.xml --out render.gif --seconds 4 --orbit + + # Add muscle volume (legs by default) + python initial_human_model_render.py --xml path/to/model.xml --out render.gif --orbit --muscle-volume + +Headless: + export MUJOCO_GL=egl # (or osmesa) +""" + +from __future__ import annotations + +import argparse +import math +import os +import xml.etree.ElementTree as ET +from pathlib import Path +from typing import Dict, List, Optional, Sequence, Tuple + +import numpy as np +import mujoco + +try: + import imageio.v2 as imageio +except Exception: + imageio = None + + +# ------------------------- +# MJCF patching (for realism) +# ------------------------- + +def _abspath_if_relative(p: str, base_dir: str) -> str: + if not p: + return p + if p.startswith(("http://", "https://", "file://", "${")): + return p + if os.path.isabs(p): + return p + return os.path.abspath(os.path.join(base_dir, p)) + + +def _indent(elem: ET.Element, level: int = 0) -> None: + i = "\n" + level * " " + if len(elem): + if not elem.text or not elem.text.strip(): + elem.text = i + " " + for ch in elem: + _indent(ch, level + 1) + if not elem.tail or not elem.tail.strip(): + elem.tail = i + else: + if level and (not elem.tail or not elem.tail.strip()): + elem.tail = i + + +def patch_mjcf_for_render(in_xml: Path, out_xml: Path) -> None: + """ + - Keeps the model intact (including keyframe). + - Adds skybox, ground texture/material, floor geom, lights, and visual quality knobs. + - Makes file=... paths absolute so writing to a new folder won't break assets. 
+ """ + base_dir = str(in_xml.parent.resolve()) + tree = ET.parse(str(in_xml)) + root = tree.getroot() + if root.tag != "mujoco": + raise ValueError(f"Not an MJCF root: {in_xml}") + + # Absolutize all file="" references + for el in root.iter(): + if "file" in el.attrib: + el.set("file", _abspath_if_relative(el.get("file", ""), base_dir)) + + asset = root.find("asset") + if asset is None: + asset = ET.SubElement(root, "asset") + + # Skybox texture (no external file) + if asset.find("./texture[@type='skybox']") is None: + ET.SubElement(asset, "texture", { + "type": "skybox", + "builtin": "gradient", + "rgb1": "0.72 0.86 1.0", + "rgb2": "0.12 0.20 0.35", + "width": "512", + "height": "512", + }) + + # Ground checker texture + material + if asset.find("./texture[@name='tex_ground']") is None: + ET.SubElement(asset, "texture", { + "name": "tex_ground", + "type": "2d", + "builtin": "checker", + "rgb1": "0.80 0.80 0.80", + "rgb2": "0.63 0.63 0.63", + "width": "512", + "height": "512", + "mark": "cross", + "markrgb": "0.35 0.35 0.35", + }) + + if asset.find("./material[@name='mat_ground']") is None: + ET.SubElement(asset, "material", { + "name": "mat_ground", + "texture": "tex_ground", + "texrepeat": "6 6", + "reflectance": "0.15", + "shininess": "0.25", + "specular": "0.35", + }) + + # Visual quality (shadows, AA, fog) + visual = root.find("visual") + if visual is None: + visual = ET.SubElement(root, "visual") + + if visual.find("quality") is None: + ET.SubElement(visual, "quality", { + "shadowsize": "4096", + "offsamples": "4", + "numslices": "28", + "numstacks": "28", + }) + + if visual.find("map") is None: + ET.SubElement(visual, "map", { + "znear": "0.01", + "zfar": "60", + "fogstart": "8", + "fogend": "25", + }) + + # Add a floor + lights into existing worldbody + worldbody = root.find("worldbody") + if worldbody is None: + worldbody = ET.SubElement(root, "worldbody") + + # Floor (avoid duplicates) + has_floor = any((g.tag == "geom" and g.get("name", "") == "floor") 
for g in worldbody.findall("geom")) + if not has_floor: + ET.SubElement(worldbody, "geom", { + "name": "floor", + "type": "plane", + "pos": "0 0 0", + "size": "10 10 0.1", + "material": "mat_ground", + "rgba": "1 1 1 1", + }) + + # Simple 3-point lights (avoid duplicates by name) + existing_light_names = {l.get("name", "") for l in worldbody.findall("light")} + if "key_light" not in existing_light_names: + ET.SubElement(worldbody, "light", { + "name": "key_light", + "pos": "2.5 -2.5 3.0", + "dir": "-0.6 0.6 -0.7", + "diffuse": "0.9 0.9 0.9", + "specular": "0.4 0.4 0.4", + }) + if "fill_light" not in existing_light_names: + ET.SubElement(worldbody, "light", { + "name": "fill_light", + "pos": "-2.0 2.0 2.5", + "dir": "0.5 -0.5 -0.7", + "diffuse": "0.45 0.45 0.50", + "specular": "0.15 0.15 0.15", + }) + if "rim_light" not in existing_light_names: + ET.SubElement(worldbody, "light", { + "name": "rim_light", + "pos": "0 3.5 2.8", + "dir": "0 -1 -0.6", + "diffuse": "0.35 0.35 0.40", + "specular": "0.20 0.20 0.20", + }) + + _indent(root) + out_xml.parent.mkdir(parents=True, exist_ok=True) + tree.write(str(out_xml), encoding="utf-8", xml_declaration=True) + + +# ------------------------- +# Muscle volume (capsules along tendon site path) +# ------------------------- + +DEFAULT_LEG_KEYWORDS = ( + "rect", "vasti", "vastus", "ham", "biceps", "semiten", "semimem", + "adductor", "sart", "grac", "tfl", "iliopsoas", "psoas", "iliacus", + "glut", "soleus", "gastro", "tibialis", "perone", "fibular", +) + +RGBA_REST = np.array([0.85, 0.15, 0.15, 0.40], dtype=np.float32) +RGBA_ACTIVE = np.array([1.00, 0.28, 0.28, 0.85], dtype=np.float32) + + +def _safe_name(model: mujoco.MjModel, objtype: mujoco.mjtObj, idx: int) -> str: + nm = mujoco.mj_id2name(model, objtype, idx) + return nm if nm else f"{objtype.name}_{idx}" + + +def _pick_actuators_by_keywords(model: mujoco.MjModel, keywords: Sequence[str]) -> List[int]: + keys = tuple(k.lower() for k in keywords) + picked = [] + for aid in 
range(model.nu): + nm = _safe_name(model, mujoco.mjtObj.mjOBJ_ACTUATOR, aid).lower() + if any(k in nm for k in keys): + picked.append(aid) + return picked + + +def _actuator_to_tendon_id(model: mujoco.MjModel, aid: int) -> Optional[int]: + if not (hasattr(model, "actuator_trntype") and hasattr(model, "actuator_trnid")): + return None + if int(model.actuator_trntype[aid]) != int(mujoco.mjtTrn.mjTRN_TENDON): + return None + tid = int(model.actuator_trnid[aid, 0]) + if 0 <= tid < model.ntendon: + return tid + return None + + +def _tendon_sites_world(model: mujoco.MjModel, data: mujoco.MjData, tid: int) -> Optional[np.ndarray]: + # Best-effort: take SITE wrap points only + need = ("tendon_adr", "tendon_num", "wrap_type", "wrap_objid") + if not all(hasattr(model, n) for n in need): + return None + adr = int(model.tendon_adr[tid]) + num = int(model.tendon_num[tid]) + if num <= 0: + return None + + pts: List[np.ndarray] = [] + for w in range(adr, adr + num): + if int(model.wrap_type[w]) == int(mujoco.mjtWrap.mjWRAP_SITE): + sid = int(model.wrap_objid[w]) + pts.append(np.array(data.site_xpos[sid], dtype=np.float64)) + if len(pts) >= 2: + return np.stack(pts, axis=0) + return None + + +def _lerp(a: np.ndarray, b: np.ndarray, t: float) -> np.ndarray: + t = float(max(0.0, min(1.0, t))) + return (1.0 - t) * a + t * b + + +def _activation_from_ctrl(data: mujoco.MjData, aid: int) -> float: + if data.ctrl is None or aid >= data.ctrl.shape[0]: + return 0.0 + return float(max(0.0, min(1.0, float(data.ctrl[aid])))) + + +def _add_capsule(scene: mujoco.MjvScene, geom_id: int, p0: np.ndarray, p1: np.ndarray, radius: float, rgba: np.ndarray) -> None: + mujoco.mjv_initGeom( + scene.geoms[geom_id], + type=mujoco.mjtGeom.mjGEOM_CAPSULE, + size=np.zeros(3, dtype=np.float64), + pos=np.zeros(3, dtype=np.float64), + mat=np.eye(3, dtype=np.float64).flatten(), + rgba=rgba.astype(np.float32), + ) + mujoco.mjv_connector( + scene.geoms[geom_id], + type=mujoco.mjtGeom.mjGEOM_CAPSULE, + 
width=float(radius), + from_=p0.astype(np.float64), + to=p1.astype(np.float64), + ) + + +# ------------------------- +# Rendering +# ------------------------- + +def _init_camera(model: mujoco.MjModel, azimuth: float, elevation: float, dist_scale: float) -> mujoco.MjvCamera: + cam = mujoco.MjvCamera() + mujoco.mjv_defaultCamera(cam) + center = np.array(model.stat.center, dtype=np.float64) + extent = float(model.stat.extent) if float(model.stat.extent) > 1e-9 else 1.0 + cam.lookat[:] = center + cam.distance = max(0.05, dist_scale * extent) + cam.azimuth = float(azimuth) + cam.elevation = float(elevation) + cam.type = mujoco.mjtCamera.mjCAMERA_FREE + return cam + + +def _init_vis_options(show_tendons: bool) -> mujoco.MjvOption: + opt = mujoco.MjvOption() + mujoco.mjv_defaultOption(opt) + + # Realism-ish flags + opt.flags[mujoco.mjtVisFlag.mjVIS_SHADOW] = 1 + opt.flags[mujoco.mjtVisFlag.mjVIS_REFLECTION] = 1 + opt.flags[mujoco.mjtVisFlag.mjVIS_SKYBOX] = 1 + opt.flags[mujoco.mjtVisFlag.mjVIS_TEXTURE] = 1 + opt.flags[mujoco.mjtVisFlag.mjVIS_FOG] = 1 + opt.flags[mujoco.mjtVisFlag.mjVIS_TRANSPARENT] = 1 + + # Tendons (red lines) — can turn off when drawing muscle volume + opt.flags[mujoco.mjtVisFlag.mjVIS_TENDON] = 1 if show_tendons else 0 + return opt + + +def render( + xml_path: Path, + out_path: Path, + seconds: float, + fps: int, + width: int, + height: int, + orbit: bool, + azimuth: float, + elevation: float, + dist_scale: float, + ctrl_mode: str, + muscle_volume: bool, + muscle_keywords: Sequence[str], + base_radius: float, + bulge_gain: float, + maxgeom: int, +) -> None: + if imageio is None and out_path.suffix.lower() in (".gif", ".mp4", ".mov", ".mkv"): + raise RuntimeError("需要 imageio 导出 gif/mp4:请 `pip install imageio`。") + + model = mujoco.MjModel.from_xml_path(str(xml_path)) + data = mujoco.MjData(model) + + # Load keyframe 0 (README recommendation). 
:contentReference[oaicite:2]{index=2} + if model.nkey > 0: + mujoco.mj_resetDataKeyframe(model, data, 0) + else: + mujoco.mj_resetData(model, data) + mujoco.mj_forward(model, data) + + # Offscreen GL context + gl = mujoco.GLContext(width, height) + gl.make_current() + + cam = _init_camera(model, azimuth=azimuth, elevation=elevation, dist_scale=dist_scale) + opt = _init_vis_options(show_tendons=not muscle_volume) + + scn = mujoco.MjvScene(model, maxgeom=maxgeom) + con = mujoco.MjrContext(model, mujoco.mjtFontScale.mjFONTSCALE_100) + viewport = mujoco.MjrRect(0, 0, width, height) + + mujoco.mjr_setBuffer(mujoco.mjtFramebuffer.mjFB_OFFSCREEN, con) + mujoco.mjr_resizeOffscreen(width, height, con) + + # Precompute muscle->tendon mapping + act_to_tendon: Dict[int, int] = {} + if muscle_volume and model.nu > 0 and model.ntendon > 0: + act_ids = _pick_actuators_by_keywords(model, muscle_keywords) + for aid in act_ids: + tid = _actuator_to_tendon_id(model, aid) + if tid is not None: + act_to_tendon[aid] = tid + if not act_to_tendon: + print("[WARN] muscle-volume=ON 但未找到 tendon transmission 的 actuator;将仅使用原始渲染。") + + # output + ext = out_path.suffix.lower() + nframes = 1 if ext == ".png" else max(1, int(round(seconds * fps))) + + frames: List[np.ndarray] = [] + + phases = np.linspace(0.0, 2.0 * math.pi, num=max(1, len(act_to_tendon)), endpoint=False) + + for k in range(nframes): + t = k / max(1, fps) + + # orbit camera + if orbit: + cam.azimuth = float(azimuth + 360.0 * (k / max(1, nframes))) + + # drive controls (optional, for visible "muscle bulge") + if ctrl_mode != "none" and model.nu > 0: + data.ctrl[:] = 0.0 + if ctrl_mode == "sine": + # only drive the selected actuators (if muscle volume); otherwise drive nothing + for j, aid in enumerate(act_to_tendon.keys()): + data.ctrl[aid] = 0.5 + 0.5 * math.sin(2.0 * math.pi * 0.8 * t + float(phases[j])) + + mujoco.mj_step(model, data) + + # update scene + mujoco.mjv_updateScene(model, data, opt, None, cam, 
mujoco.mjtCatBit.mjCAT_ALL, scn) + + # add muscle capsules after base geoms + if muscle_volume and act_to_tendon: + base_ngeom = int(scn.ngeom) + geom_id = base_ngeom + + for aid, tid in act_to_tendon.items(): + pts = _tendon_sites_world(model, data, tid) + if pts is None: + continue + + act = _activation_from_ctrl(data, aid) + radius = float(base_radius * (1.0 + bulge_gain * act)) + rgba = _lerp(RGBA_REST, RGBA_ACTIVE, act) + + for p0, p1 in zip(pts[:-1], pts[1:]): + if geom_id >= scn.maxgeom: + break + if float(np.linalg.norm(p1 - p0)) < 1e-4: + continue + _add_capsule(scn, geom_id, p0, p1, radius, rgba) + geom_id += 1 + + scn.ngeom = geom_id + + mujoco.mjr_render(viewport, scn, con) + + rgb = np.zeros((height, width, 3), dtype=np.uint8) + depth = np.zeros((height, width), dtype=np.float32) + mujoco.mjr_readPixels(rgb, depth, viewport, con) + rgb = np.flipud(rgb) # OpenGL origin bottom-left + frames.append(rgb) + + out_path.parent.mkdir(parents=True, exist_ok=True) + + if ext == ".png": + imageio.imwrite(str(out_path), frames[0]) + else: + imageio.mimsave(str(out_path), frames, fps=fps) + + gl.free() + print(f"[OK] saved: {out_path}") + + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument("--xml", required=True, help="Path to MyoConverter MJCF xml (e.g. 
models/mjc/Gait2354Simbody/gait2354_cvt3.xml)") + ap.add_argument("--out", default="human_render.gif", help="Output: .png/.gif/.mp4 (gif recommended here)") + + ap.add_argument("--workdir", default="./_render_build", help="Where to write patched XML") + ap.add_argument("--no-patch", action="store_true", help="Do not patch MJCF (use original as-is)") + + ap.add_argument("--seconds", type=float, default=4.0) + ap.add_argument("--fps", type=int, default=30) + ap.add_argument("--width", type=int, default=1280) + ap.add_argument("--height", type=int, default=720) + + ap.add_argument("--orbit", action="store_true", help="Orbit camera around the model") + ap.add_argument("--azimuth", type=float, default=95.0) + ap.add_argument("--elevation", type=float, default=-18.0) + ap.add_argument("--dist-scale", type=float, default=2.0) + + ap.add_argument("--ctrl-mode", choices=["none", "sine"], default="sine", + help="Control signal for visualizing muscle bulge (sine is just a demo)") + + ap.add_argument("--muscle-volume", action="store_true", + help="Draw semi-transparent muscle volume capsules along tendon site path (leg keywords by default).") + ap.add_argument("--muscle-keyword", action="append", default=None, + help="Add keyword for selecting actuators (repeatable). 
If not set, uses a leg/thigh keyword set.") + ap.add_argument("--base-radius", type=float, default=0.010, help="Base capsule radius (m)") + ap.add_argument("--bulge-gain", type=float, default=1.25, help="Radius gain by activation") + ap.add_argument("--maxgeom", type=int, default=20000, help="Scene maxgeom budget") + + args = ap.parse_args() + + src_xml = Path(args.xml).resolve() + out_path = Path(args.out).resolve() + workdir = Path(args.workdir).resolve() + workdir.mkdir(parents=True, exist_ok=True) + + if args.no_patch: + patched_xml = src_xml + else: + patched_xml = workdir / (src_xml.stem + "_render.xml") + patch_mjcf_for_render(src_xml, patched_xml) + + keywords = args.muscle_keyword if args.muscle_keyword else list(DEFAULT_LEG_KEYWORDS) + + render( + xml_path=patched_xml, + out_path=out_path, + seconds=args.seconds, + fps=args.fps, + width=args.width, + height=args.height, + orbit=args.orbit, + azimuth=args.azimuth, + elevation=args.elevation, + dist_scale=args.dist_scale, + ctrl_mode=args.ctrl_mode, + muscle_volume=args.muscle_volume, + muscle_keywords=keywords, + base_radius=args.base_radius, + bulge_gain=args.bulge_gain, + maxgeom=args.maxgeom, + ) + + +if __name__ == "__main__": + main() diff --git a/src/biomechanical_hcl_smart_simulation_platform/models_arm.py b/src/biomechanical_hcl_smart_simulation_platform/models_arm.py deleted file mode 100644 index 5252bae576..0000000000 --- a/src/biomechanical_hcl_smart_simulation_platform/models_arm.py +++ /dev/null @@ -1,184 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -""" -Render an arm model (e.g., Arm26 converted by MyoConverter) to PNG/GIF using MuJoCo Python bindings. 
- -Examples: - # Render a GIF (recommended) - python examples/render_arm.py --xml ./models/mjc/Arm26/arm26_cvt3.xml --out arm.gif - - # Render a still image - python examples/render_arm.py --xml ./models/mjc/Arm26/arm26_cvt3.xml --out arm.png - -Notes: - - MyoConverter converted XML often contains a keyframe that must be loaded to satisfy joint/muscle path constraints. - This script will automatically load keyframe 0 if present. (See README.) -""" - -from __future__ import annotations - -import argparse -import math -from pathlib import Path - -import numpy as np - -import mujoco -import imageio.v2 as imageio - - -def _pick_first_hinge_joint(model: mujoco.MjModel) -> int | None: - """Pick the first hinge joint as a simple 'elbow-like' joint.""" - for jid in range(model.njnt): - if int(model.jnt_type[jid]) == int(mujoco.mjtJoint.mjJNT_HINGE): - return jid - return None - - -def _joint_id_by_name(model: mujoco.MjModel, name: str) -> int | None: - try: - jid = mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_JOINT, name) - return int(jid) if jid >= 0 else None - except Exception: - return None - - -def _setup_camera(model: mujoco.MjModel, azimuth: float, elevation: float, distance_scale: float) -> mujoco.MjvCamera: - cam = mujoco.MjvCamera() - mujoco.mjv_defaultCamera(cam) - - # A reasonable default framing: focus model center, distance proportional to extent - extent = float(model.stat.extent) if float(model.stat.extent) > 0 else 1.0 - center = np.array(model.stat.center, dtype=float) - - cam.lookat[:] = center - cam.distance = max(0.05, distance_scale * extent) - cam.azimuth = float(azimuth) - cam.elevation = float(elevation) - return cam - - -def _reset_to_keyframe_if_any(model: mujoco.MjModel, data: mujoco.MjData) -> None: - # README: converted XML contains keyframe; MuJoCo doesn't load it by default. - # Use mj_resetDataKeyframe(model, data, 0) to load keyframe 0. 
- if model.nkey > 0: - mujoco.mj_resetDataKeyframe(model, data, 0) - else: - mujoco.mj_resetData(model, data) - mujoco.mj_forward(model, data) - - -def render( - xml_path: Path, - out_path: Path, - width: int, - height: int, - fps: int, - seconds: float, - joint_name: str | None, - azimuth: float, - elevation: float, - distance_scale: float, -) -> None: - if not xml_path.exists(): - raise FileNotFoundError(f"XML not found: {xml_path}") - - model = mujoco.MjModel.from_xml_path(str(xml_path)) - data = mujoco.MjData(model) - - _reset_to_keyframe_if_any(model, data) - - # Choose joint - jid = _joint_id_by_name(model, joint_name) if joint_name else None - if jid is None: - jid = _pick_first_hinge_joint(model) - - if jid is None: - print("[WARN] No hinge joint found. Will just render the initial pose.") - else: - print(f"[INFO] Using joint id={jid}, name='{mujoco.mj_id2name(model, mujoco.mjtObj.mjOBJ_JOINT, jid)}'") - - # Joint qpos address (hinge = 1 DoF) - qadr = int(model.jnt_qposadr[jid]) if jid is not None else None - - # Joint range (if not defined, fall back) - if jid is not None: - jrange = np.array(model.jnt_range[jid], dtype=float) - if np.all(np.isfinite(jrange)) and (jrange[1] > jrange[0]): - lo, hi = float(jrange[0]), float(jrange[1]) - else: - lo, hi = -0.5, 0.5 - mid = 0.5 * (lo + hi) - amp = 0.45 * (hi - lo) - else: - lo, hi, mid, amp = 0.0, 0.0, 0.0, 0.0 - - cam = _setup_camera(model, azimuth=azimuth, elevation=elevation, distance_scale=distance_scale) - renderer = mujoco.Renderer(model, height=height, width=width) - - out_path.parent.mkdir(parents=True, exist_ok=True) - suffix = out_path.suffix.lower() - - if suffix == ".png": - # still image - renderer.update_scene(data, camera=cam) - rgb = renderer.render() - imageio.imwrite(out_path, rgb) - print(f"[OK] Saved: {out_path}") - return - - if suffix != ".gif": - raise ValueError(f"Unsupported output format '{suffix}'. 
Use .gif or .png") - - n_frames = max(1, int(round(seconds * fps))) - frames = [] - - for i in range(n_frames): - t = i / fps - - if jid is not None and qadr is not None: - # simple sinusoidal motion within range - angle = mid + amp * math.sin(2.0 * math.pi * (t / max(1e-6, seconds))) - data.qpos[qadr] = angle - data.qvel[:] = 0.0 - mujoco.mj_forward(model, data) - - renderer.update_scene(data, camera=cam) - rgb = renderer.render() - frames.append(rgb) - - imageio.mimsave(out_path, frames, fps=fps) - print(f"[OK] Saved: {out_path}") - - -def main(): - ap = argparse.ArgumentParser() - ap.add_argument("--xml", type=str, required=True, help="Path to MuJoCo XML (e.g. ./models/mjc/Arm26/arm26_cvt3.xml)") - ap.add_argument("--out", type=str, default="arm.gif", help="Output file: .gif or .png") - ap.add_argument("--width", type=int, default=1280) - ap.add_argument("--height", type=int, default=720) - ap.add_argument("--fps", type=int, default=30) - ap.add_argument("--seconds", type=float, default=2.0) - ap.add_argument("--joint", type=str, default=None, help="Optional joint name to animate (otherwise first hinge joint)") - ap.add_argument("--azimuth", type=float, default=110.0) - ap.add_argument("--elevation", type=float, default=-20.0) - ap.add_argument("--distance_scale", type=float, default=1.6) - - args = ap.parse_args() - render( - xml_path=Path(args.xml), - out_path=Path(args.out), - width=args.width, - height=args.height, - fps=args.fps, - seconds=args.seconds, - joint_name=args.joint, - azimuth=args.azimuth, - elevation=args.elevation, - distance_scale=args.distance_scale, - ) - - -if __name__ == "__main__": - main() diff --git a/src/biomechanical_hcl_smart_simulation_platform/models_leg.py b/src/biomechanical_hcl_smart_simulation_platform/models_leg.py deleted file mode 100644 index c6b825049b..0000000000 --- a/src/biomechanical_hcl_smart_simulation_platform/models_leg.py +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -""" 
-Render a leg model (e.g., Leg6Dof9Musc converted by MyoConverter) to PNG/GIF using MuJoCo Python bindings. - -Examples: - # Render a GIF (recommended) - python examples/render_leg.py --xml ./models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml --out leg.gif - - # Render a still image - python examples/render_leg.py --xml ./models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml --out leg.png - -Notes: - - Converted XML contains a keyframe that should be loaded to satisfy joint/muscle path constraints. - MuJoCo doesn't load it by default, so we call mj_resetDataKeyframe(model, data, 0). -""" - -from __future__ import annotations - -import argparse -import math -from pathlib import Path -from typing import List, Tuple - -import numpy as np -import mujoco -import imageio.v2 as imageio - - -# ----------------------------- helpers ----------------------------- - -def reset_to_keyframe_if_any(model: mujoco.MjModel, data: mujoco.MjData) -> None: - """Load keyframe 0 if present; otherwise do a normal reset.""" - if model.nkey > 0: - mujoco.mj_resetDataKeyframe(model, data, 0) - else: - mujoco.mj_resetData(model, data) - mujoco.mj_forward(model, data) - - -def setup_camera(model: mujoco.MjModel, azimuth: float, elevation: float, distance_scale: float) -> mujoco.MjvCamera: - cam = mujoco.MjvCamera() - mujoco.mjv_defaultCamera(cam) - - extent = float(model.stat.extent) if float(model.stat.extent) > 0 else 1.0 - center = np.array(model.stat.center, dtype=float) - - cam.lookat[:] = center - cam.distance = max(0.05, distance_scale * extent) - cam.azimuth = float(azimuth) - cam.elevation = float(elevation) - return cam - - -def joint_name(model: mujoco.MjModel, jid: int) -> str: - n = mujoco.mj_id2name(model, mujoco.mjtObj.mjOBJ_JOINT, jid) - return "" if n is None else str(n) - - -def pick_leg_joints(model: mujoco.MjModel) -> List[int]: - """ - Heuristic: - 1) Prefer hinge joints whose names contain knee/ankle/hip (case-insensitive). - 2) If none found, fall back to the first hinge joint. 
- """ - candidates: List[Tuple[int, int]] = [] # (priority, jid) - keys = [ - ("knee", 0), - ("ankle", 1), - ("hip", 2), - ("talus", 3), - ("subtalar", 3), - ] - - for jid in range(model.njnt): - jtype = int(model.jnt_type[jid]) - if jtype != int(mujoco.mjtJoint.mjJNT_HINGE): - continue - name = joint_name(model, jid).lower() - for k, pri in keys: - if k in name: - candidates.append((pri, jid)) - break - - if candidates: - candidates.sort(key=lambda x: x[0]) - # Keep up to 3 joints to animate (knee + ankle + hip is usually enough) - return [jid for _, jid in candidates[:3]] - - # fallback: first hinge joint - for jid in range(model.njnt): - if int(model.jnt_type[jid]) == int(mujoco.mjtJoint.mjJNT_HINGE): - return [jid] - - return [] - - -def get_range_or_default(model: mujoco.MjModel, jid: int, default_lo=-0.5, default_hi=0.5) -> Tuple[float, float]: - r = np.array(model.jnt_range[jid], dtype=float) - if np.all(np.isfinite(r)) and (r[1] > r[0]): - return float(r[0]), float(r[1]) - return float(default_lo), float(default_hi) - - -# ----------------------------- main render ----------------------------- - -def render_leg( - xml_path: Path, - out_path: Path, - width: int, - height: int, - fps: int, - seconds: float, - azimuth: float, - elevation: float, - distance_scale: float, - joint_names: List[str], -) -> None: - if not xml_path.exists(): - raise FileNotFoundError(f"XML not found: {xml_path}") - - model = mujoco.MjModel.from_xml_path(str(xml_path)) - data = mujoco.MjData(model) - - reset_to_keyframe_if_any(model, data) - - # Choose joints - jids: List[int] = [] - - # user-specified joint names (optional) - if joint_names: - for nm in joint_names: - try: - jid = int(mujoco.mj_name2id(model, mujoco.mjtObj.mjOBJ_JOINT, nm)) - except Exception: - jid = -1 - if jid >= 0: - jids.append(jid) - - # heuristic fallback - if not jids: - jids = pick_leg_joints(model) - - if not jids: - print("[WARN] No hinge joint found. 
Will just render initial pose.") - else: - print("[INFO] Animating joints:") - for jid in jids: - print(f" - id={jid:3d} name='{joint_name(model, jid)}' qposadr={int(model.jnt_qposadr[jid])}") - - cam = setup_camera(model, azimuth=azimuth, elevation=elevation, distance_scale=distance_scale) - renderer = mujoco.Renderer(model, height=height, width=width) - - out_path.parent.mkdir(parents=True, exist_ok=True) - suffix = out_path.suffix.lower() - - # still image - if suffix == ".png": - renderer.update_scene(data, camera=cam) - rgb = renderer.render() - imageio.imwrite(out_path, rgb) - print(f"[OK] Saved: {out_path}") - return - - # gif - if suffix != ".gif": - raise ValueError(f"Unsupported output format '{suffix}'. Use .gif or .png") - - n_frames = max(1, int(round(seconds * fps))) - frames = [] - - # Precompute per-joint motion params - motion = [] - for jid in jids: - qadr = int(model.jnt_qposadr[jid]) # hinge => 1 dof - lo, hi = get_range_or_default(model, jid) - mid = 0.5 * (lo + hi) - amp = 0.40 * (hi - lo) # keep margin inside limits - motion.append((qadr, mid, amp)) - - for i in range(n_frames): - t = i / fps - - # simple gait-like phase offsets between joints - for k, (qadr, mid, amp) in enumerate(motion): - phase = (k * math.pi / 3.0) # 0, 60deg, 120deg - val = mid + amp * math.sin(2.0 * math.pi * (t / max(1e-6, seconds)) + phase) - data.qpos[qadr] = val - - data.qvel[:] = 0.0 - mujoco.mj_forward(model, data) - - renderer.update_scene(data, camera=cam) - frames.append(renderer.render()) - - imageio.mimsave(out_path, frames, fps=fps) - print(f"[OK] Saved: {out_path}") - - -def main(): - ap = argparse.ArgumentParser() - ap.add_argument("--xml", type=str, required=True, - help="Path to MuJoCo XML (e.g. 
./models/mjc/Leg6Dof9Musc/leg6dof9musc_cvt3.xml)") - ap.add_argument("--out", type=str, default="leg.gif", help="Output file: .gif or .png") - ap.add_argument("--width", type=int, default=1280) - ap.add_argument("--height", type=int, default=720) - ap.add_argument("--fps", type=int, default=30) - ap.add_argument("--seconds", type=float, default=2.5) - - # camera - ap.add_argument("--azimuth", type=float, default=95.0) - ap.add_argument("--elevation", type=float, default=-15.0) - ap.add_arg_ diff --git a/src/biomechanical_hcl_smart_simulation_platform/models_neck.py b/src/biomechanical_hcl_smart_simulation_platform/models_neck.py deleted file mode 100644 index 2a53f0c950..0000000000 --- a/src/biomechanical_hcl_smart_simulation_platform/models_neck.py +++ /dev/null @@ -1,177 +0,0 @@ -""" -Render a PNG image for the MyoConverter Neck6D converted model (MuJoCo XML). - -What this script does: -1) Load the converted MuJoCo model: models/mjc/Neck6D/neck6d_cvt3.xml -2) Reset and load keyframe 0 (required by MyoConverter README) -3) Step a few frames -4) Offscreen render a PNG image to output/neck6d_render.png - -Optional: -- If you pass --viewer, it will open an interactive viewer (if supported). - -Usage: - python examples/render_neck6d_png.py - python examples/render_neck6d_png.py --xml models/mjc/Neck6D/neck6d_cvt3.xml --out output/neck6d_render.png - python examples/render_neck6d_png.py --viewer -""" - -import argparse -import os -from pathlib import Path - -import mujoco -from PIL import Image - - -def ensure_parent_dir(filepath: str) -> None: - Path(filepath).parent.mkdir(parents=True, exist_ok=True) - - -def set_camera_like_default(scene, lookat=None, distance=None, azimuth=None, elevation=None): - """ - Simple camera setup helper. - If your teacher wants exactly the same viewpoint as a reference image, - you can tune these parameters. 
- """ - # scene is mujoco.MjvScene; camera stored in scene.camera - cam = scene.camera - - # cam.lookat is a 3-vector - if lookat is not None: - cam.lookat[:] = lookat - - # cam.distance is a float - if distance is not None: - cam.distance = float(distance) - - # cam.azimuth/elevation in degrees - if azimuth is not None: - cam.azimuth = float(azimuth) - - if elevation is not None: - cam.elevation = float(elevation) - - -def offscreen_render_png( - xml_path: str, - out_path: str, - width: int = 1400, - height: int = 1000, - steps: int = 10, - camera_lookat=(0.0, 0.0, 1.1), - camera_distance=2.2, - camera_azimuth=135.0, - camera_elevation=-15.0, -) -> None: - """ - Offscreen render a single PNG frame. - """ - if not os.path.exists(xml_path): - raise FileNotFoundError(f"MuJoCo XML not found: {xml_path}") - - # Load model and data - model = mujoco.MjModel.from_xml_path(xml_path) - data = mujoco.MjData(model) - - # IMPORTANT: Load keyframe 0 (per README) - mujoco.mj_resetDataKeyframe(model, data, 0) - - # Step a few frames to settle (optional but often makes rendering stable) - for _ in range(max(0, int(steps))): - mujoco.mj_step(model, data) - - # Create renderer - renderer = mujoco.Renderer(model, width=width, height=height) - - # Update scene (so camera edits apply) - renderer.update_scene(data) - - # Adjust camera to a reasonable view (tune to match your reference) - set_camera_like_default( - renderer.scene, - lookat=camera_lookat, - distance=camera_distance, - azimuth=camera_azimuth, - elevation=camera_elevation, - ) - - # Render - img = renderer.render() - - # Save - ensure_parent_dir(out_path) - Image.fromarray(img).save(out_path) - print(f"[OK] Saved render to: {out_path}") - - -def optional_viewer(xml_path: str) -> None: - """ - Launch an interactive viewer (requires a working OpenGL context). 
- """ - import time - import mujoco.viewer - - model = mujoco.MjModel.from_xml_path(xml_path) - data = mujoco.MjData(model) - mujoco.mj_resetDataKeyframe(model, data, 0) - - with mujoco.viewer.launch_passive(model, data) as viewer: - while viewer.is_running(): - mujoco.mj_step(model, data) - viewer.sync() - time.sleep(0.01) - - -def main(): - parser = argparse.ArgumentParser(description="Render Neck6D MuJoCo model to a PNG.") - parser.add_argument( - "--xml", - type=str, - default="models/mjc/Neck6D/neck6d_cvt3.xml", - help="Path to MuJoCo XML model (default: models/mjc/Neck6D/neck6d_cvt3.xml)", - ) - parser.add_argument( - "--out", - type=str, - default="output/neck6d_render.png", - help="Output PNG path (default: output/neck6d_render.png)", - ) - parser.add_argument("--width", type=int, default=1400, help="Image width") - parser.add_argument("--height", type=int, default=1000, help="Image height") - parser.add_argument("--steps", type=int, default=10, help="Simulation steps before capture") - - # Camera parameters (easy to justify + easy for老师复现) - parser.add_argument("--lookat-x", type=float, default=0.0) - parser.add_argument("--lookat-y", type=float, default=0.0) - parser.add_argument("--lookat-z", type=float, default=1.1) - parser.add_argument("--distance", type=float, default=2.2) - parser.add_argument("--azimuth", type=float, default=135.0) - parser.add_argument("--elevation", type=float, default=-15.0) - - parser.add_argument( - "--viewer", - action="store_true", - help="Open interactive viewer after rendering (optional).", - ) - - args = parser.parse_args() - - offscreen_render_png( - xml_path=args.xml, - out_path=args.out, - width=args.width, - height=args.height, - steps=args.steps, - camera_lookat=(args.lookat_x, args.lookat_y, args.lookat_z), - camera_distance=args.distance, - camera_azimuth=args.azimuth, - camera_elevation=args.elevation, - ) - - if args.viewer: - optional_viewer(args.xml) - - -if __name__ == "__main__": - main() From 
f1c842b0358c5319d87f7d74184cbaf3c7c74899 Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Wed, 24 Dec 2025 14:50:10 +0800 Subject: [PATCH 07/11] =?UTF-8?q?=E4=BA=BA=E4=BD=93=E6=A8=A1=E5=9E=8B?= =?UTF-8?q?=E7=9A=84=E6=B8=B2=E6=9F=93=EF=BC=9A=E6=96=B0=E5=A2=9E=20Isaac?= =?UTF-8?q?=20Sim/Isaac=20Lab=20=E5=90=AF=E5=8A=A8=E4=B8=8E=20Humanoid=20?= =?UTF-8?q?=E8=BF=90=E8=A1=8C=E7=A4=BA=E4=BE=8B=E8=84=9A=E6=9C=AC?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../eval_in_isaaclab.py | 399 ++++++++++++++++++ 1 file changed, 399 insertions(+) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/eval_in_isaaclab.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/eval_in_isaaclab.py b/src/biomechanical_hcl_smart_simulation_platform/eval_in_isaaclab.py new file mode 100644 index 0000000000..f00dda4e12 --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/eval_in_isaaclab.py @@ -0,0 +1,399 @@ +# Copyright (c) 2022-2024, The Isaac Lab Project Developers. +# All rights reserved. +# +# SPDX-License-Identifier: BSD-3-Clause + +"""This script demonstrates how to use the interactive scene interface to setup a scene with multiple prims. + +.. 
code-block:: bash + + # Usage + ./isaaclab.sh -p source/standalone/tutorials/02_scene/create_scene.py --num_envs 32 + +""" + +"""Launch Isaac Sim Simulator first.""" + +import glob +import os +import sys +import pdb +import os.path as osp +sys.path.append(os.getcwd()) +import argparse +from isaaclab.app import AppLauncher +import sys + +# add argparse arguments +parser = argparse.ArgumentParser(description="Tutorial on using the interactive scene interface.") +parser.add_argument("--num_envs", type=int, default=1, help="Number of environments to spawn.") +parser.add_argument("--motion_file", type=str, default="sample_data/amass_isaac_standing_upright_slim.pkl", help="Path to motion file to load.") +parser.add_argument("--policy_path", type=str, default="output/HumanoidIm/phc_3/Humanoid.pth", help="Path to policy checkpoint file.") +parser.add_argument("--action_offset_file", type=str, default="phc/data/action_offset_smpl.pkl", help="Path to action offset file.") +parser.add_argument("--humanoid_type", type=str, default="smpl", choices=["smpl", "smplx"], help="Type of humanoid model to use.") +parser.add_argument("--num_motions", type=int, default=10, help="Number of motions to load from motion library.") +# append AppLauncher cli args +AppLauncher.add_app_launcher_args(parser) +# parse the arguments +args_cli, hydra_args = parser.parse_known_args() + +sys.argv = [sys.argv[0]] + hydra_args + + +# launch omniverse app +app_launcher = AppLauncher(args_cli) +simulation_app = app_launcher.app + +"""Rest everything follows.""" + +import torch + +import isaaclab.sim as sim_utils +from isaaclab.assets import Articulation, ArticulationCfg, AssetBaseCfg +from isaaclab.envs import DirectRLEnvCfg +from isaaclab.sim import SimulationCfg +from isaaclab.scene import InteractiveScene, InteractiveSceneCfg +from isaaclab.sim import SimulationContext, PhysxCfg +from isaaclab.utils import configclass +from isaaclab.envs import DirectRLEnv, DirectRLEnvCfg, ViewerCfg +from 
isaaclab.devices.keyboard.se2_keyboard import Se2Keyboard +import isaaclab.utils.math as lab_math_utils +import carb +import imageio +from carb.input import KeyboardEventType + + + +## +# Pre-defined configs +## +# from isaaclab_assets import CARTPOLE_CFG # isort:skip +from phc.assets.smpl_config import SMPL_Upright_CFG, SMPL_CFG, SMPLX_Upright_CFG, SMPLX_CFG + +from smpl_sim.utils.rotation_conversions import xyzw_to_wxyz, wxyz_to_xyzw +from collections.abc import Sequence +from phc.utils.flags import flags +from phc.utils.motion_lib_smpl import MotionLibSMPL as MotionLibSMPL +from phc.utils.motion_lib_base import FixHeightMode +from smpl_sim.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState +from smpl_sim.smpllib.smpl_local_robot import SMPL_Robot +from smpl_sim.smpllib.smpl_joint_names import SMPL_BONE_ORDER_NAMES, SMPLX_BONE_ORDER_NAMES, SMPLH_BONE_ORDER_NAMES, SMPL_MUJOCO_NAMES, SMPLH_MUJOCO_NAMES +from phc.learning.network_loader import load_z_encoder, load_z_decoder, load_pnn, load_mcp_mlp +from phc.utils.isaacgym_humanoid_funcs import compute_humanoid_observations_smpl_max, compute_imitation_observations_v6 +from rl_games.algos_torch import torch_ext +from isaaclab.sensors import TiledCamera, TiledCameraCfg, save_images_to_file +from rl_games.algos_torch.players import rescale_actions +import torch +import joblib +from easydict import EasyDict +import numpy as np +import copy +from scipy.spatial.transform import Rotation as sRot +import time + + + +flags.test=False + +@configclass +class SMPLSceneCfg(InteractiveSceneCfg): + """Configuration for a cart-pole scene.""" + + # ground plane + ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane", spawn=sim_utils.GroundPlaneCfg()) + +@configclass +class SMPLEnvCfg(DirectRLEnvCfg): + num_actions = 69 + num_observations = 1 + num_states = 1 + + decimation = 2 + episode_length_s = 15.0 + + sim = sim_utils.SimulationCfg( + device=args_cli.device, + dt=1 / 60, + # decimation will be 
set in the task config + # up axis will always be Z in isaac sim + # use_gpu_pipeline is deduced from the device + gravity=(0.0, 0.0, -9.81), + physx = PhysxCfg( + # num_threads is no longer needed + solver_type=1, + # use_gpu is deduced from the device + max_position_iteration_count=4, + max_velocity_iteration_count=0, + # moved to actor config + # moved to actor config + bounce_threshold_velocity=0.2, + # moved to actor config + # default_buffer_size_multiplier is no longer needed + gpu_max_rigid_contact_count=2**23, + # num_subscenes is no longer needed + # contact_collection is no longer needed + ) + ) + + + viewer = ViewerCfg(eye=(20.0, 20.0, 20.0)) + + smpl_robot: ArticulationCfg = SMPL_Upright_CFG.replace(prim_path="/World/envs/env_.*/Robot") + # smpl_robot: ArticulationCfg = SMPLX_CFG.replace(prim_path="/World/envs/env_.*/Robot") + + # scene + scene: InteractiveSceneCfg = SMPLSceneCfg(num_envs=args_cli.num_envs, env_spacing=20.0, replicate_physics=True) + + observation_space = None + action_space = None + motion_file = args_cli.motion_file + + +class SMPLEnv(DirectRLEnv): + cfg: SMPLEnvCfg + + def __init__(self, cfg: SMPLEnvCfg, render_mode: str | None = None, **kwargs): + self.cfg = cfg + # self.humanoid_type = "smplx" + self.humanoid_type = "smpl" + super().__init__(cfg, render_mode, **kwargs) + + SMPL_NAMES = SMPLH_MUJOCO_NAMES if self.humanoid_type == "smplx" else SMPL_MUJOCO_NAMES + self.gym_joint_names = gym_joint_names = [f"{j}_{axis}" for j in SMPL_NAMES[1:] for axis in ["x", "y", "z"]] + self.sim_joint_names = sim_joint_names = self.robot.data.joint_names + self.sim_body_names = sim_body_names = self.robot.data.body_names + + self.sim_to_gym_body = [sim_body_names.index(n) for n in SMPL_NAMES] + self.sim_to_gym_dof = [sim_joint_names.index(n) for n in gym_joint_names] + self.gym_to_sim_dof = [gym_joint_names.index(n) for n in sim_joint_names] + self.gym_to_sim_body = [SMPL_NAMES.index(n) for n in sim_body_names] + + 
self._load_motion(cfg.motion_file) + + keyboard_interface = Se2Keyboard() + keyboard_interface.add_callback("R", self.reset) + + self.rigid_body_pos = self.robot.data.body_pos_w + + + def close(self): + super().close() + + def _configure_gym_env_spaces(self): + self.num_actions = self.cfg.num_actions + self.num_observations = self.cfg.num_observations + self.num_states = self.cfg.num_states + + + def _setup_scene(self): + self.robot = robot = Articulation(self.cfg.smpl_robot) + + # clone, filter, and replicate + self.scene.clone_environments(copy_from_source=False) + # self.scene.filter_collisions(global_prim_paths=[]) + + # add articultion and sensors to scene + self.scene.articulations["robot"] = self.robot + # add lights + light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75)) + light_cfg.func("/World/Light", light_cfg) + + + def _load_motion(self, motion_file): + + time_steps = 1 + if self.humanoid_type == "smplx": + self._has_upright_start = False + else: + self._has_upright_start = True + + motion_lib_cfg = EasyDict({ + "has_upright_start": self._has_upright_start, + "motion_file": motion_file, + "fix_height": FixHeightMode.full_fix, + "min_length": -1, + "max_length": 3000, + "im_eval": flags.im_eval, + "multi_thread": False , + "smpl_type": self.humanoid_type, + "randomrize_heading": True, + "device": self.device, + "real_traj": False, + "simulator": "isaac_sim", + "gym_to_sim_dict": { + "gym_to_sim_dof": self.gym_to_sim_dof, + "gym_to_sim_body": self.gym_to_sim_body, + "sim_to_gym_dof": self.sim_to_gym_dof, + "sim_to_gym_body": self.sim_to_gym_body, + }, + "test": True, + "im_eval": False, + }) + robot_cfg = { + "mesh": False, + "rel_joint_lm": False, + "upright_start": self._has_upright_start, + "remove_toe": False, + "real_weight_porpotion_capsules": True, + "real_weight_porpotion_boxes": True, + "model": self.humanoid_type, + "big_ankle": True, + "box_body": True, + "body_params": {}, + "joint_params": {}, + "geom_params": {}, + 
"actuator_params": {}, + } + smpl_robot = SMPL_Robot( + robot_cfg, + data_dir="data/smpl", + ) + + if self.humanoid_type == "smplx": + gender_beta = np.zeros((17)) + else: + gender_beta = np.zeros((11)) + + smpl_robot.load_from_skeleton(betas=torch.from_numpy(gender_beta[None, 1:]), gender=gender_beta[0:1], objs_info=None) + test_good = f"/tmp/smpl/test_good.xml" + smpl_robot.write_xml(test_good) + sk_tree = SkeletonTree.from_mjcf(test_good) + num_motions = 10 + skeleton_trees = [sk_tree] * num_motions + start_idx = 0 + + motion_lib = MotionLibSMPL(motion_lib_cfg) + motion_lib.load_motions(skeleton_trees=skeleton_trees, + gender_betas=[torch.from_numpy(gender_beta)] * num_motions, + limb_weights=[torch.from_numpy(gender_beta)] * num_motions, # not used. + random_sample=False, + start_idx = start_idx) + self._motion_lib = motion_lib + self._motion_id, self._motion_time = torch.arange(self.num_envs).to(self.device).long(), torch.zeros(self.num_envs).to(self.device).float() + + + def _pre_physics_step(self, actions: torch.Tensor) -> None: + self.actions = actions + + pass + + def _post_physics_step(self) -> None: + + + pass + + def _apply_action(self) -> None: + self.robot.set_joint_position_target(self.actions, joint_ids=None) + + def _get_observations(self) -> dict: + + motion_time = (self.episode_length_buf + 1) * self.step_dt + self._motion_time + + motion_res = self._motion_lib.get_motion_state(self._motion_id, motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_pose_aa, ref_rb_pos, ref_rb_rot, ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + + body_pos = self.robot.data.body_pos_w[:, self.sim_to_gym_body ] + 
body_rot = wxyz_to_xyzw(self.robot.data.body_quat_w[:, self.sim_to_gym_body]) + body_vel = self.robot.data.body_lin_vel_w[:, self.sim_to_gym_body] + body_ang_vel = self.robot.data.body_ang_vel_w[:, self.sim_to_gym_body] + + root_pos = body_pos[:, 0, :] + root_rot = body_rot[:, 0, :] + body_pos_subset = body_pos + body_rot_subset = body_rot + body_vel_subset = body_vel + body_ang_vel_subset = body_ang_vel + + ref_rb_pos_subset = ref_rb_pos + ref_rb_rot_subset = ref_rb_rot + ref_body_vel_subset = ref_body_vel + ref_body_ang_vel_subset = ref_body_ang_vel + + # Data replay + # ref_joint_pos = ref_dof_pos[:, self.gym_to_sim_dof] + # ref_joint_vel = ref_dof_vel[:, self.gym_to_sim_dof] + + # ref_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + # self.robot.write_root_state_to_sim(ref_root_state, None) + # self.robot.write_joint_state_to_sim(ref_joint_pos, ref_joint_vel, None, None) + + self_obs = compute_humanoid_observations_smpl_max(body_pos, body_rot, body_vel, body_ang_vel, ref_smpl_params, torch.tensor(0), True, True, self._has_upright_start, False, False) + task_obs = compute_imitation_observations_v6(root_pos, root_rot, body_pos_subset, body_rot_subset, body_vel_subset, body_ang_vel_subset, ref_rb_pos_subset, ref_rb_rot_subset, ref_body_vel_subset, ref_body_ang_vel_subset, 1, self._has_upright_start) + + return { + "self_obs": self_obs, + "task_obs": task_obs, + } + + def _get_rewards(self) -> torch.Tensor: + return torch.zeros(self.num_envs) + + def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]: + return torch.zeros(self.num_envs), torch.zeros(self.num_envs) + + def _reset_idx(self, env_ids: Sequence[int]): + super()._reset_idx(env_ids) + self._motion_time[env_ids] = 0 + + motion_res = self._motion_lib.get_motion_state(self._motion_id, self._motion_time) + ref_root_pos, ref_root_rot, ref_dof_pos, ref_root_vel, ref_root_ang_vel, ref_dof_vel, ref_smpl_params, ref_pose_aa, ref_rb_pos, ref_rb_rot, 
ref_body_vel, ref_body_ang_vel = \ + motion_res["root_pos"], motion_res["root_rot"], motion_res["dof_pos"], motion_res["root_vel"], motion_res["root_ang_vel"], motion_res["dof_vel"], \ + motion_res["motion_bodies"], motion_res["motion_aa"], motion_res["rg_pos"], motion_res["rb_rot"], motion_res["body_vel"], motion_res["body_ang_vel"] + init_joint_pos = ref_dof_pos[:, self.gym_to_sim_dof] + init_joint_vel = ref_dof_vel[:, self.gym_to_sim_dof] + + + init_root_state = torch.cat([ref_root_pos, xyzw_to_wxyz(ref_root_rot), ref_root_vel, ref_root_ang_vel], dim = -1) + self.robot.write_root_state_to_sim(init_root_state, env_ids) + self.robot.write_joint_state_to_sim(init_joint_pos, init_joint_vel, None, env_ids) + + +def main(): + """Main function.""" + env_cfg = SMPLEnvCfg() + env = SMPLEnv(env_cfg) + + + device = env.device + + # Use configurable policy path or fall back to defaults + policy_path = args_cli.policy_path + + + check_points = [torch_ext.load_checkpoint(policy_path)] + pnn = load_pnn(check_points[0], num_prim = 3, has_lateral = False, activation = "silu", device = device) + running_mean, running_var = check_points[-1]['running_mean_std']['running_mean'], check_points[-1]['running_mean_std']['running_var'] + + action_offset = joblib.load(args_cli.action_offset_file) + + pd_action_offset = action_offset[0] + pd_action_scale = action_offset[1] + + time = 0 + obs_dict, extras = env.reset() + while True: + self_obs, task_obs = obs_dict["self_obs"], obs_dict["task_obs"] + full_obs = torch.cat([self_obs, task_obs], dim = -1) + full_obs = ((full_obs - running_mean.float()) / torch.sqrt(running_var.float() + 1e-05)) + full_obs = torch.clamp(full_obs, min=-5.0, max=5.0) + + + with torch.no_grad(): + actions, _ = pnn(full_obs, idx=0) + actions = rescale_actions(-1, 1, torch.clamp(actions, -1, 1)) + actions = actions * pd_action_scale + pd_action_offset + actions = actions[:, env.gym_to_sim_dof] + + obs_dict, _, _, _, _ = env.step(actions) + + + +if __name__ == 
"__main__": + # run the main function + main() + # close sim app + simulation_app.close() + + \ No newline at end of file From 5cc8b5f88f2cbb1c6cf3290026d4ec54a2cdb1ca Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Thu, 25 Dec 2025 10:23:46 +0800 Subject: [PATCH 08/11] =?UTF-8?q?=E5=8F=AF=E8=A7=86=E5=8C=96PHC=E5=9C=A8?= =?UTF-8?q?=E8=AE=AD=E7=BB=83=E4=B8=AD=E6=A8=A1=E6=8B=9F=E9=AB=98=E8=B4=A8?= =?UTF-8?q?=E9=87=8F=E5=8A=A8=E4=BD=9C=E6=8D=95=E6=8D=89=EF=BC=88MoCap?= =?UTF-8?q?=EF=BC=89=E6=95=B0=E6=8D=AE=E7=9A=84=E8=83=BD=E5=8A=9B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../AMASS-Train-Belly dancing.py | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 src/biomechanical_hcl_smart_simulation_platform/AMASS-Train-Belly dancing.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/AMASS-Train-Belly dancing.py b/src/biomechanical_hcl_smart_simulation_platform/AMASS-Train-Belly dancing.py new file mode 100644 index 0000000000..7c6ea43db1 --- /dev/null +++ b/src/biomechanical_hcl_smart_simulation_platform/AMASS-Train-Belly dancing.py @@ -0,0 +1,101 @@ +import torch +from rl_games.common import datasets + +class AMPDataset(datasets.PPODataset): + def __init__(self, batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len): + super().__init__(batch_size, minibatch_size, is_discrete, is_rnn, device, seq_len) + self._idx_buf = torch.randperm(self.batch_size) + + + + return + + def update_mu_sigma(self, mu, sigma): + raise NotImplementedError() + return + + # def _get_item_rnn(self, idx): + # gstart = idx * self.num_games_batch + # gend = (idx + 1) * self.num_games_batch + # start = gstart * self.seq_len + # end = gend * self.seq_len + # self.last_range = (start, end) + # input_dict = {} + # for k,v in self.values_dict.items(): + # if k not in self.special_names: + # if v is dict: + # v_dict = { kd:vd[start:end] for kd, vd in v.items() } + # input_dict[k] = v_dict + # 
else: + # input_dict[k] = v[start:end] + + # rnn_states = self.values_dict['rnn_states'] + # input_dict['rnn_states'] = [s[:,gstart:gend,:] for s in rnn_states] + # return input_dict + + def update_values_dict(self, values_dict, rnn_format = False, horizon_length = 1, num_envs = 1): + self.values_dict = values_dict + self.horizon_length = horizon_length + self.num_envs = num_envs + + if rnn_format and self.is_rnn: + for k,v in self.values_dict.items(): + if k not in self.special_names and v is not None: + self.values_dict[k] = self.values_dict[k].view(self.num_envs, self.horizon_length, -1).squeeze() # Actions are already swapped to the correct format. + if not self.values_dict['rnn_states'] is None: + self.values_dict['rnn_states'] = [s.reshape(self.num_envs, self.horizon_length, -1) for s in self.values_dict['rnn_states']] # rnn_states are not swapped in AMP, so do not swap it here. + self._idx_buf = torch.randperm(self.num_envs) # Update to only shuffle the envs. + + # def _get_item_rnn(self, idx): + # data = super()._get_item_rnn(idx) + # import ipdb; ipdb.set_trace() + # return data + + def _get_item_rnn(self, idx): + # ZL: I am doubling the get_item_rnn function to in a way also get the sequential data. Pretty hacky. + # BPTT, input dict is [batch, seqlen, features]. This function return the sequences that are from the same episide and enviornment in sequentila mannar. Not used at the moment since seq_len is set to 1 for RNN right now. 
+ step_size = int(self.minibatch_size/self.horizon_length) + + start = idx * step_size + end = (idx + 1) * step_size + sample_idx = self._idx_buf[start:end] + + input_dict = {} + + for k,v in self.values_dict.items(): + if k not in self.special_names and v is not None: + input_dict[k] = v[sample_idx, :].view(step_size * self.horizon_length, -1).squeeze() # flatten to batch size + + input_dict['old_values'] = input_dict['old_values'][:, None] # ZL Hack: following compute assumes that the old_values is [batch, 1], so has to change this back. Otherwise, the loss will be wrong. + input_dict['returns'] = input_dict['returns'][:, None] # ZL Hack: following compute assumes that the old_values is [batch, 1], so has to change this back. Otherwise, the loss will be wrong. + + if not self.values_dict['rnn_states'] is None: + input_dict['rnn_states'] = [s[sample_idx, :].view(step_size * self.horizon_length, -1) for s in self.values_dict["rnn_states"]] + + if (end >= self.batch_size): + self._shuffle_idx_buf() + + + return input_dict + + def _get_item(self, idx): + start = idx * self.minibatch_size + end = (idx + 1) * self.minibatch_size + sample_idx = self._idx_buf[start:end] + + input_dict = {} + for k,v in self.values_dict.items(): + if k not in self.special_names and v is not None: + input_dict[k] = v[sample_idx] + + if (end >= self.batch_size): + self._shuffle_idx_buf() + + return input_dict + + def _shuffle_idx_buf(self): + if self.is_rnn: + self._idx_buf = torch.randperm(self.num_envs) + else: + self._idx_buf[:] = torch.randperm(self.batch_size) + return \ No newline at end of file From 4c8678a705b363015e41a8bd708b519a4f814673 Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Thu, 25 Dec 2025 22:28:21 +0800 Subject: [PATCH 09/11] =?UTF-8?q?=E4=BF=AE=E6=AD=A3=E4=BA=86=E4=B9=8B?= =?UTF-8?q?=E5=89=8D=E7=9A=84=E5=91=BD=E5=90=8D=E9=94=99=E8=AF=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../{AMASS-Train-Belly 
dancing.py => train_belly_ dancing.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/biomechanical_hcl_smart_simulation_platform/{AMASS-Train-Belly dancing.py => train_belly_ dancing.py} (100%) diff --git a/src/biomechanical_hcl_smart_simulation_platform/AMASS-Train-Belly dancing.py b/src/biomechanical_hcl_smart_simulation_platform/train_belly_ dancing.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/AMASS-Train-Belly dancing.py rename to src/biomechanical_hcl_smart_simulation_platform/train_belly_ dancing.py From 06a7247a12ee064ccd76c4f58563b3ab88bf6e23 Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Sat, 27 Dec 2025 09:23:18 +0800 Subject: [PATCH 10/11] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=B8=8A=E6=AC=A1?= =?UTF-8?q?=E9=97=AE=E9=A2=98?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../{train_belly_ dancing.py => train_belly_dancing.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/biomechanical_hcl_smart_simulation_platform/{train_belly_ dancing.py => train_belly_dancing.py} (100%) diff --git a/src/biomechanical_hcl_smart_simulation_platform/train_belly_ dancing.py b/src/biomechanical_hcl_smart_simulation_platform/train_belly_dancing.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/train_belly_ dancing.py rename to src/biomechanical_hcl_smart_simulation_platform/train_belly_dancing.py From 0b9d1979b5f4e4eb9f4943188fa7d91689364d32 Mon Sep 17 00:00:00 2001 From: qinyiquan57-debug Date: Thu, 1 Jan 2026 19:25:35 +0800 Subject: [PATCH 11/11] =?UTF-8?q?=E4=BF=AE=E6=94=B9=E4=BA=86src=E7=9B=AE?= =?UTF-8?q?=E5=BD=95=E4=B8=8B=E7=9A=84=E6=A8=A1=E5=9D=97=E5=90=8D=EF=BC=8C?= =?UTF-8?q?=E6=94=B9=E4=B8=BA1-2=E4=B8=AA=E5=8D=95=E8=AF=8D=EF=BC=8C?= =?UTF-8?q?=E7=94=A8=E4=B8=8B=E5=88=92=E7=BA=BF=E8=BF=9E=E6=8E=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../README.md | 0 
.../enhanced_human_model.py | 0 .../eval_in_isaaclab.py | 0 .../initial_human_mode_main.py | 0 .../main.py | 0 .../main_beatsvr_bimanual_envcamera_eval.py | 0 .../main_beatsvr_bimanual_eval.py | 0 .../train_belly_dancing.py | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/README.md (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/enhanced_human_model.py (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/eval_in_isaaclab.py (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/initial_human_mode_main.py (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/main.py (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/main_beatsvr_bimanual_envcamera_eval.py (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/main_beatsvr_bimanual_eval.py (100%) rename src/{biomechanical_hcl_smart_simulation_platform => biomechanical_platform}/train_belly_dancing.py (100%) diff --git a/src/biomechanical_hcl_smart_simulation_platform/README.md b/src/biomechanical_platform/README.md similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/README.md rename to src/biomechanical_platform/README.md diff --git a/src/biomechanical_hcl_smart_simulation_platform/enhanced_human_model.py b/src/biomechanical_platform/enhanced_human_model.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/enhanced_human_model.py rename to src/biomechanical_platform/enhanced_human_model.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/eval_in_isaaclab.py b/src/biomechanical_platform/eval_in_isaaclab.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/eval_in_isaaclab.py rename to 
src/biomechanical_platform/eval_in_isaaclab.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/initial_human_mode_main.py b/src/biomechanical_platform/initial_human_mode_main.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/initial_human_mode_main.py rename to src/biomechanical_platform/initial_human_mode_main.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/main.py b/src/biomechanical_platform/main.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/main.py rename to src/biomechanical_platform/main.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/main_beatsvr_bimanual_envcamera_eval.py b/src/biomechanical_platform/main_beatsvr_bimanual_envcamera_eval.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/main_beatsvr_bimanual_envcamera_eval.py rename to src/biomechanical_platform/main_beatsvr_bimanual_envcamera_eval.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/main_beatsvr_bimanual_eval.py b/src/biomechanical_platform/main_beatsvr_bimanual_eval.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/main_beatsvr_bimanual_eval.py rename to src/biomechanical_platform/main_beatsvr_bimanual_eval.py diff --git a/src/biomechanical_hcl_smart_simulation_platform/train_belly_dancing.py b/src/biomechanical_platform/train_belly_dancing.py similarity index 100% rename from src/biomechanical_hcl_smart_simulation_platform/train_belly_dancing.py rename to src/biomechanical_platform/train_belly_dancing.py