diff --git a/_cite/.cache/cache.db b/_cite/.cache/cache.db
index 83e1a82b..201e828e 100644
Binary files a/_cite/.cache/cache.db and b/_cite/.cache/cache.db differ
diff --git a/_config.yaml b/_config.yaml
index f9ed785a..9269f6e6 100644
--- a/_config.yaml
+++ b/_config.yaml
@@ -41,7 +41,7 @@ defaults:
values:
layout: post
- scope:
- type: "blog"
+ type: "opensource"
values:
layout: post
@@ -57,6 +57,8 @@ collections:
output: true
blog:
output: true
+ opensource:
+ output: true
# jekyll plugins
plugins:
diff --git a/_data/citations.yaml b/_data/citations.yaml
index 98c74d97..df24bc4a 100644
--- a/_data/citations.yaml
+++ b/_data/citations.yaml
@@ -1022,7 +1022,7 @@
plugin: sources.py
file: sources.yaml
- id: doi:10.1109/TIV.2024.3467115
- title: Safety-Quantifiable Planar-Feature-based LiDAR Localization with a Prior
+ title: Safety-Quantifiable Planar-Feature-Based LiDAR Localization With a Prior
Map for Intelligent Vehicles in Urban Scenarios
authors:
- Jiachen Zhang
@@ -1030,7 +1030,7 @@
- Weisong Wen
- Li-Ta Hsu
publisher: IEEE Transactions on Intelligent Vehicles
- date: '2024-01-01'
+ date: '2025-07-01'
link: https://doi.org/g8t5zc
type: paper
image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/2024/Zhang2024SafetyQuantifiable.png
@@ -2670,10 +2670,10 @@
title: Online Dynamic Model Calibration for Reliable Control of Quadrotor Based
on Factor Graph Optimization
authors:
- - PEIWEN YANG
- - WEISONG WEN
- - SHIYU BAI
- - JIAHAO HU
+ - Peiwen Yang
+ - Weisong Wen
+ - Shiyu Bai
+ - Jiahao Hu
publisher: IEEE Transactions on Aerospace and Electronic Systems
date: '2025-05-01'
link: https://doi.org/g9hrnt
@@ -2824,3 +2824,191 @@
- urban canyons
plugin: sources.py
file: sources.yaml
+- id: doi:10.1109/TAES.2025.3607718
+ title: Unified Sufficient Conditions for Exact Convex Relaxation of Nonconvex Optimal
+ Control Problems
+ authors:
+ - Runqiu Yang
+ - Weisong Wen
+ - Peiwen Yang
+ - Zichen Zhao
+ - Fengtianyi Huang
+ publisher: IEEE Transactions on Aerospace and Electronic Systems
+ date: '2025-09-09'
+ link: https://doi.org/g93v27
+ type: paper
+ tags:
+ - Optimal control
+ - Convex relaxation
+ - Trajectory planning
+ - Convex optimization
+ - Mars landing
+ plugin: sources.py
+ file: sources.yaml
+- id: arXiv:2509.17198
+ title: Certifiably Optimal Doppler Positioning using Opportunistic LEO Satellites
+ authors:
+ - Baoshan Song
+ - Weisong Wen
+ - Qi Zhang
+ - Bing Xu
+ - Li-Ta Hsu
+ publisher: arXiv
+ date: '2025-09-21'
+ link: https://arxiv.org/abs/2509.17198
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2509.17198v1.pdf
+ tags:
+ - LEO satellite
+ - Doppler positioning
+ - signal of opportunity
+ - convex optimization
+ - semidefinite programming
+ plugin: sources.py
+ file: sources.yaml
+- id: arxiv:2509.21496
+ title: 'Wall Inspector: Quadrotor Control in Wall-proximity Through Model Compensation'
+ authors:
+ - Peiwen Yang
+ - Weisong Wen
+ - Runqiu Yang
+ - Yingming Chen
+ - Cheuk Chi Tsang
+ publisher: arXiv
+ date: '2025-09-25'
+ link: https://arxiv.org/abs/2509.21496
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2509.21496v1.pdf
+ plugin: sources.py
+ file: sources.yaml
+- id: arxiv:2510.00524
+ title: Two stage GNSS outlier detection for factor graph optimization based GNSS-RTK/INS/odometer
+ fusion
+ authors:
+ - Baoshan Song
+ - Penggao Yan
+ - Xiao Xia
+ - Yihan Zhong
+ - Weisong Wen
+ - Li-Ta Hsu
+ publisher: arXiv
+ date: '2025-10-01'
+ link: https://arxiv.org/abs/2510.00524
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2510.00524v1.pdf
+ plugin: sources.py
+ file: sources.yaml
+- id: arxiv:2510.04278
+ title: 'Integrated Planning and Control on Manifolds: Factor Graph Representation
+ and Toolkit'
+ authors:
+ - Peiwen Yang
+ - Weisong Wen
+ - Runqiu Yang
+ - Yuanyuan Zhang
+ - Jiahao Hu
+ - Yingming Chen
+ - Naigui Xiao
+ - Jiaqi Zhao
+ publisher: arXiv
+ date: '2025-10-05'
+ link: https://arxiv.org/abs/2510.04278
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2510.04278v1.pdf
+ plugin: sources.py
+ file: sources.yaml
+- id: doi:10.1109/TITS.2025.3616580
+ title: Learning Safe, Optimal, and Real-Time Flight Interaction With Deep Confidence-Enhanced
+ Reachability Guarantee
+ authors:
+ - Yuanyuan Zhang
+ - Yingying Wang
+ - Penggao Yan
+ - Weisong Wen
+ publisher: IEEE Transactions on Intelligent Transportation Systems
+ date: '2025-10-09'
+ link: https://doi.org/hbbrm6
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/Learning_Safe_Optimal_and_Real-Time_Flight_Interaction_With_Deep_Confidence-Enhanced_Reachability_Guarantee.pdf
+ tags:
+ - Deep reinforcement learning
+ - deep confidence-enhanced reachability guarantees
+ - joint planning and control
+ - unmanned aerial vehicles
+ plugin: sources.py
+ file: sources.yaml
+- id: arxiv:2510.08880
+ title: Online IMU-odometer Calibration using GNSS Measurements for Autonomous Ground
+ Vehicle Localization
+ authors:
+ - Baoshan Song
+ - Xiao Xia
+ - Penggao Yan
+ - Yihan Zhong
+ - Weisong Wen
+ - Li-Ta Hsu
+ publisher: arXiv
+ date: '2025-10-10'
+ link: https://arxiv.org/abs/2510.08880
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2510.08880v1.pdf
+ plugin: sources.py
+ file: sources.yaml
+- id: arXiv:2512.20224
+ title: 'UrbanV2X: A Multisensory Vehicle-Infrastructure Dataset for Cooperative
+ Navigation in Urban Areas'
+ authors:
+ - Qijun Qin
+ - Ziqi Zhang
+ - Yihan Zhong
+ - Feng Huang
+ - Xikun Liu
+ - Runzhi Hu
+ - Hang Chen
+ - Wei Hu
+ - Dongzhe Su
+ - Jun Zhang
+ - Hoi-Fung Ng
+ - Weisong Wen
+ publisher: arXiv
+ date: '2025-12-23'
+ link: https://arxiv.org/abs/2512.20224
+ type: paper
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2512.20224v1.pdf
+ - type: source
+ text: code
+ link: https://polyu-taslab.github.io/UrbanV2X/
+ tags:
+ - Multisensor Fusion
+ - Roadside Infrastructure
+ - SLAM
+ - Autonomous Driving
+ plugin: sources.py
+ file: sources.yaml
diff --git a/_data/sources.yaml b/_data/sources.yaml
index 83c1f39f..a90c86e4 100644
--- a/_data/sources.yaml
+++ b/_data/sources.yaml
@@ -2119,3 +2119,108 @@
- Doppler measurement model
- geometry distribution
- urban canyons
+
+- id: doi:10.1109/TAES.2025.3607718
+ type: paper
+ date: 2025-09-09
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ # buttons:
+ # - type: manubot
+ # text: paper
+ # link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2404.14724v2.pdf
+ tags:
+ - Optimal control
+ - Convex relaxation
+ - Trajectory planning
+ - Convex optimization
+ - Mars landing
+
+- id: arXiv:2509.17198
+ type: paper
+ date: 2025-09-21
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2509.17198v1.pdf
+ tags:
+ - LEO satellite
+ - Doppler positioning
+ - signal of opportunity
+ - convex optimization
+ - semidefinite programming
+
+- id: arxiv:2509.21496
+ type: paper
+ date: 2025-09-25
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2509.21496v1.pdf
+
+- id: arxiv:2510.00524
+ type: paper
+ date: 2025-10-01
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2510.00524v1.pdf
+
+
+- id: arxiv:2510.04278
+ type: paper
+ date: 2025-10-05
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2510.04278v1.pdf
+
+- id: doi:10.1109/TITS.2025.3616580
+ type: paper
+ date: 2025-10-09
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/Learning_Safe_Optimal_and_Real-Time_Flight_Interaction_With_Deep_Confidence-Enhanced_Reachability_Guarantee.pdf
+ tags:
+ - Deep reinforcement learning
+ - deep confidence-enhanced reachability guarantees
+ - joint planning and control
+ - unmanned aerial vehicles
+
+
+- id: arxiv:2510.08880
+ type: paper
+ date: 2025-10-10
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2510.08880v1.pdf
+
+- id: arXiv:2512.20224
+ type: paper
+ date: 2025-12-23
+ # image: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/images/papers/Snipaste_2025-09-07_15-21-25.png
+ buttons:
+ - type: manubot
+ text: paper
+ link: https://github.com/PolyU-TASLAB/polyu-taslab.github.io/raw/main/research/papers/2512.20224v1.pdf
+ - type: source
+ text: code
+ link: https://polyu-taslab.github.io/UrbanV2X/
+ tags:
+ - Multisensor Fusion
+ - Roadside Infrastructure
+ - SLAM
+ - Autonomous Driving
\ No newline at end of file
diff --git a/_events/2025-09-14-Meituan_hosting_MarsTalk_at_PolyU.md b/_events/2025-09-14-Meituan_hosting_MarsTalk_at_PolyU.md
new file mode 100644
index 00000000..8c1b4c40
--- /dev/null
+++ b/_events/2025-09-14-Meituan_hosting_MarsTalk_at_PolyU.md
@@ -0,0 +1,64 @@
+---
+title: "Meituan MarsTalk Hosted at PolyU: Industry Leaders Discuss the Future of Robotics and Intelligent Systems"
+subtitle: news
+# author: Yingming Chen
+image: images/news/0914MarsTalk/marstalk.jpg
+tags: news
+order:
+---
+*Hong Kong, September 14th, 2025* – The Hong Kong Polytechnic University (PolyU) successfully hosted the Meituan MarsTalk today, bringing together leading experts in robotics, automation, and artificial intelligence to explore technological breakthroughs in intelligent systems for dynamic environments.
+The ceremony featured a keynote address by representatives from HKISA, who emphasized the critical role of the newly launched DTORI index in promoting trust and scalability in commercial and civic drone applications. The index will provide a measurable framework to evaluate drone performance, maintenance standards, and operational safety, all key factors for integration into urban airspace.
+
+
+
+
+
+ HKISA presenting the DTORI index.
+
+
+### Distinguished Speakers Share Insights on Cutting-Edge Technologies
+
+The event featured an impressive lineup of speakers from both academia and industry, highlighting the growing collaboration between universities and technology companies in advancing robotics research.
+
+Dr. Yinian Mao, Vice President of Meituan and Director of Meituan Academy of Robotics Shenzhen, delivered a keynote presentation on the company's latest innovations in autonomous systems. "The integration of robotics and AI in real-world applications is accelerating at an unprecedented pace, particularly in service delivery and urban logistics," Dr. Mao emphasized during his address.
+
+Prof. Ning Xi, Chair Professor of Robotics and Automation and Head of the Department of Data and Systems Engineering at HKU, as well as Director of the Advanced Technologies Institute, provided insights into the academic research underpinning these technological advances. His presentation focused on the convergence of data science and robotic systems in creating more intelligent and adaptive machines.
+
+
+
+
+
+ Prof. Wen-Hua Chen presenting.
+
+
+### Focus on Low Altitude Economy and Drone Technology
+Prof. Wen-Hua Chen, Interim Head of Aerospace Engineering at PolyU, Chair Professor of Robotics and Autonomous Systems, and Director of the Research Centre for Low Altitude Economy, discussed the emerging opportunities in the low altitude economy sector. "Hong Kong and the Greater Bay Area are uniquely positioned to lead in the development of low altitude economy applications, from drone delivery to urban air mobility," Prof. Chen noted.
+
+Dr. Wu Haotian, Senior Director of Meituan and Head of Hardware Platform of Keeta Drone, presented practical applications of drone technology in Meituan's delivery ecosystem. The presentation showcased how autonomous drones are being deployed to navigate complex urban environments and deliver services more efficiently.
+
+Dr. Wenbo Ding, Associate Professor and Director of the Office of Research at Tsinghua SIGS, rounded out the speaker panel with insights on the research collaboration opportunities between institutions in the Greater Bay Area.
+
+### Bridging Academia and Industry Through Talent Development
+The event went beyond traditional academic presentations by incorporating practical career development opportunities for students and young professionals. Two special sessions were organized to connect talent with industry opportunities: Express Resume Submission and Fast-track Interview Pass.
+
+
+
+
+ Conversation among leading industry participants and academic innovators.
+
+
+### TASLAB at MarsTalk
+TASLAB members also helped host the event.
+
+
+
+
+
+ TASLAB group photo.
+
+
+
diff --git a/_events/2025-09-16-OHKF_RELEASES_TALKING FLIGHT.md b/_events/2025-09-16-OHKF_RELEASES_TALKING FLIGHT.md
new file mode 100644
index 00000000..542f09bc
--- /dev/null
+++ b/_events/2025-09-16-OHKF_RELEASES_TALKING FLIGHT.md
@@ -0,0 +1,43 @@
+---
+title: OHKF Releases "Taking Flight — Forging a Future for Hong Kong’s Low-Altitude Economy" Report
+subtitle: news
+# author: Li Heng
+image: images/news/0916TALK/image1.png
+tags: news
+order:
+---
+
+## OHKF Releases "Taking Flight — Forging a Future for Hong Kong’s Low-Altitude Economy" Report
+
+### In-depth Discussion on the Future Development of Hong Kong's Low-Altitude Economy
+
+This month, Our Hong Kong Foundation (OHKF) released its latest research report entitled "Taking Flight — Forging a Future for Hong Kong’s Low-Altitude Economy."
+
+### Team Support and Key Discussion Points
+
+As a supporting team for this report, we conducted in-depth discussions focusing on the following key issues:
+
+1. The government support needed by university research teams, such as the Civil Aviation Department's review and support for research drone test flights;
+2. The critical breakthroughs required for transforming low-altitude technology research achievements into industrial applications in Hong Kong;
+3. How to strengthen collaboration between academia and industry to bring economic benefits to Hong Kong;
+4. How Hong Kong can synergize with Mainland China's low-altitude economy industry during the development process.
+
+### Academic and Industry Exchange
+
+At the same time, we invited researchers from Our Hong Kong Foundation (OHKF) to visit The Hong Kong Polytechnic University and participate in several academic and industry sharing and discussion sessions.
+
+
+
+
+
+
+---
+
+### Full Report
+
+For the full report, please visit: [https://www.ourhkfoundation.org.hk/en/media/reports/taking-flight-forging-a-future-for-hong-kongs-low-altitude-economy](https://www.ourhkfoundation.org.hk/en/media/reports/taking-flight-forging-a-future-for-hong-kongs-low-altitude-economy)
+
+We look forward to contributing further to the development of the low-altitude economy industry.
diff --git a/_events/2025-09-29-PolyU_AAE_Conducts_Drone_Test_Flight_in_Sandbox_Regulatory_Project.md b/_events/2025-09-29-PolyU_AAE_Conducts_Drone_Test_Flight_in_Sandbox_Regulatory_Project.md
new file mode 100644
index 00000000..cc6c49f2
--- /dev/null
+++ b/_events/2025-09-29-PolyU_AAE_Conducts_Drone_Test_Flight_in_Sandbox_Regulatory_Project.md
@@ -0,0 +1,53 @@
+---
+title: "PolyU AAE Conducts Drone Test Flight in Sandbox Regulatory Project"
+subtitle: "news"
+image: images/news/0929CampusFlight/Drone_Flight_2.jpg
+tags: news
+order:
+---
+
+*Hong Kong, September 29th, 2025* – The Department of Aeronautical and Aviation Engineering (AAE) at The Hong Kong Polytechnic University (PolyU) today conducted a drone performance test flight at the Shaw Sports Complex on campus, as part of the sandbox regulatory project for the low altitude economy. This successful event marks a new phase in PolyU AAE's initiative to advance regulatory frameworks and operational standards for unmanned aerial vehicles (UAVs) in urban environments.
+
+The test flight, carried out in collaboration with the Civil Aviation Department (CAD), evaluated key performance metrics of drones under controlled conditions, focusing on safety, efficiency, and compliance with emerging low-altitude airspace regulations. The sandbox project aims to create a scalable model for integrating drone technology into Hong Kong's transportation and logistics ecosystems.
+
+
+
+
+
+ Drone performance evaluation at Shaw Sports Complex, PolyU.
+
+
+### Sandbox Project Advances Low Altitude Economy Framework
+
+The sandbox regulatory project, led by PolyU AAE, provides a controlled environment for testing and validating drone operations, contributing to the development of standardized safety protocols and performance benchmarks. Today's test flight demonstrated the practical application of these standards, assessing factors such as flight stability, navigation accuracy, and payload capacity.
+
+"Today's test flight is a critical step forward in our low altitude economy initiatives," said a representative from the AAE department. "By working closely with regulatory bodies like CAD, we are paving the way for scalable and safe drone integration in Hong Kong and the Greater Bay Area."
+
+
+
+
+
+ AAE and CAD teams during the sandbox regulatory assessment.
+
+
+### Collaborative Efforts for Future Urban Air Mobility
+
+The event highlighted the growing collaboration between academic institutions and government agencies in shaping the future of urban air mobility. The sandbox project not only focuses on technical performance but also addresses regulatory challenges, such as airspace management and public safety.
+
+"PolyU's sandbox project serves as a model for how academia and regulators can work together to foster innovation while ensuring safety and compliance," added a CAD official. "We are excited to see these efforts translate into real-world applications."
+
+### Next Steps for Low Altitude Economy Development
+
+With the successful completion of this test flight, PolyU AAE plans to expand the sandbox project to include more complex scenarios, such as multi-drone operations and extended-visual-line-of-sight (EVLOS) flights. The department will also continue to engage industry partners and policymakers to drive the adoption of low-altitude economy solutions across the region.
+
+
+
+
+
+ Discussing future phases of the sandbox regulatory project.
+
+
+The sandbox regulatory project aligns with Hong Kong's broader goals to become a hub for technological innovation, particularly in areas such as smart city development and sustainable transportation. PolyU AAE remains at the forefront of these efforts, leveraging its expertise in aeronautics and aviation engineering to contribute to the region's economic and technological advancement.
\ No newline at end of file
diff --git a/_events/2025-09-30-SourthernPower.md b/_events/2025-09-30-SourthernPower.md
new file mode 100644
index 00000000..0348ef35
--- /dev/null
+++ b/_events/2025-09-30-SourthernPower.md
@@ -0,0 +1,22 @@
+---
+title: TAS LAB Advances Collaboration on Offshore Wind Turbine Inspection with China Southern Power Grid
+subtitle: news
+# author: XIAO Naigui
+image: images/news/0930SouthernPower/image.png
+tags: news
+order:
+---
+
+## TAS LAB Advances Collaboration on Offshore Wind Turbine Inspection with China Southern Power Grid
+
+**Zhuhai, September 30, 2025** – A team led by Dr. Wen Weisong of the Department of Aeronautical and Aviation Engineering at The Hong Kong Polytechnic University held a highly successful meeting today with China Southern Power Grid Southern Offshore Wind Power Joint Development Co., Ltd. The meeting in Zhuhai marked a significant step forward in discussions for a joint laboratory research project.
+
+
+
+
+
+The collaboration is centered on the "UAV-based Blade and Tower Inspection" project, operating under the Guangdong-Hong Kong Joint Laboratory for Marine Infrastructure. The project aims to develop efficient, drone-based technologies for inspecting offshore wind turbines. The Hong Kong research efforts are led by Principal Investigator Dr. Wen Weisong, in partnership with his Guangdong counterpart, Lin Jinghua of the China Energy Engineering Group Guangdong Electric Power Design Institute Co., Ltd. The collaboration also aims to jointly publish at least two academic papers, apply for one or more invention patents, and cultivate postgraduate talent.
+
+
+
diff --git a/_events/2025-10-10-ZhangyuanyuanTITS.md b/_events/2025-10-10-ZhangyuanyuanTITS.md
new file mode 100644
index 00000000..6f409e54
--- /dev/null
+++ b/_events/2025-10-10-ZhangyuanyuanTITS.md
@@ -0,0 +1,35 @@
+---
+title: Our paper is accepted by IEEE Transactions on Intelligent Transportation Systems
+subtitle: news
+# author: XIAO Naigui
+image: images/news/1010ZYYTITIS/1.png
+tags: news
+order:
+---
+
+It is great to share that our paper (“Learning Safe, Optimal, and Real-Time Flight Interaction With Deep Confidence-Enhanced Reachability Guarantee”, by Yuanyuan Zhang, Yingying Wang, Penggao Yan, and Weisong Wen) has been accepted by the IEEE Transactions on Intelligent Transportation Systems. Congratulations to Yuanyuan and our colleagues.
+
+
+
+
+
+**Abstract**
+
+In the low-altitude economy, ensuring the safe and agile flight of unmanned aerial vehicles (UAVs) in dynamic obstacle environments is essential for expanding interactive applications like parcel delivery. While deep reinforcement learning (DRL) shows promise for UAV motion planning and control, its trial-and-error exploration often struggles to ensure both agility and safety, especially under uncertain observational noise. Therefore, this paper proposes a deep confidence-enhanced reachability policy optimization (DCRPO) framework. By integrating safe DRL with nonlinear model predictive control (NMPC), DCRPO achieves high-level safety decisions, complex real-time joint planning and control for UAVs. Furthermore, we develop a deep confidence-enhanced reachability guarantee that constructs a set of stochastically forward-reachable planned trajectories under uncertainty, enabling robust safety collision probability certifications. This safe reachability mechanism adaptively selects belief space actions from planned actions to interact with the environment, further enhancing safety and reducing training time. In extensive experiments of UAVs traversing a fast-moving rectangular gate, the proposed method outperforms other state-of-the-art baseline methods under varying environments in terms of operational robustness. Furthermore, the proposed method significantly reduces overall collision violations and training time, greatly improving both training safety and efficiency. The demonstration video (https://youtu.be/7xkp9U7FSJg) and the source code (https://github.com/ZyyFLY/DCRPO) are also provided.
+
+
+
+
+
+ System Framework
+
+
+
+
+
+
+ Test Evaluation
+
\ No newline at end of file
diff --git a/_events/2025-10-17-Inner_Mongolia_Research_and_Industry_Exchange_Unmanned_Systems_and_Photovoltaic_Fieldwork.md b/_events/2025-10-17-Inner_Mongolia_Research_and_Industry_Exchange_Unmanned_Systems_and_Photovoltaic_Fieldwork.md
new file mode 100644
index 00000000..602ef672
--- /dev/null
+++ b/_events/2025-10-17-Inner_Mongolia_Research_and_Industry_Exchange_Unmanned_Systems_and_Photovoltaic_Fieldwork.md
@@ -0,0 +1,49 @@
+---
+title: Inner Mongolia Research and Industry Exchange — Unmanned Systems and Photovoltaic Fieldwork
+subtitle: news
+image: images/news/1017NeiMengGuVisit/image6.jpg
+tags: news
+order:
+---
+
+## Inner Mongolia Research and Industry Exchange — Unmanned Systems and Photovoltaic Fieldwork
+
+### Team Visit to Key Institutions and Enterprises
+
+In late October, our team members visited several key organizations in Inner Mongolia, including:
+
+- Ordos Institute of Applied Technology
+- Ordos Modern Industry Institute
+- Inner Mongolia Huiju High-Tech Co., Ltd.
+- Inner Mongolia Kubuqi Desert Photovoltaic Energy Co., Ltd.
+
+
+
+
+
+
+
+
+
+
+### Academic and Industry Collaboration
+
+During these visits, we held in-depth discussions with university and enterprise partners focusing on the application of unmanned systems in academic research and local industries.
+
+### On-site Photovoltaic Cleaning and Data Collection
+
+Additionally, at one of the power stations of Inner Mongolia Kubuqi Desert Photovoltaic Energy Co., Ltd., we conducted practical drone-based photovoltaic cleaning and data collection work. This effort lays a solid foundation for future research on unmanned systems.
+
+
+
+
+
+
+---
+
+We look forward to further advancing unmanned system technologies and strengthening cooperation between academia and industry in Inner Mongolia.
diff --git a/_events/2025-10-21-Attend_IROS.md b/_events/2025-10-21-Attend_IROS.md
new file mode 100644
index 00000000..15717ae3
--- /dev/null
+++ b/_events/2025-10-21-Attend_IROS.md
@@ -0,0 +1,35 @@
+---
+title: Dr. HUANG Feng and PhD student ZHONG Yihan present their work at IEEE IROS 2025.
+subtitle: news
+# author: xxx
+image: images/news/IROS2025/poster_present.jpg
+tags: news
+order:
+---
+
+Our lab members Dr. HUANG Feng and PhD student ZHONG Yihan are presenting their work at IEEE IROS 2025. After 19 years, IROS returns to China, coinciding with a pivotal moment in the rapid advancement of AI and robotics and making IROS 2025 an outstanding venue for discussion and networking. The data from our work is available on [Github](https://github.com/DarrenWong/RSG-GLIO).
+
+
+### Photos
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/_events/2025-10-30-shougang.md b/_events/2025-10-30-shougang.md
new file mode 100644
index 00000000..33e2a7fe
--- /dev/null
+++ b/_events/2025-10-30-shougang.md
@@ -0,0 +1,34 @@
+---
+title: Shougang Jinggang Innovation Center Officials Visit Trustworthy AI and Autonomous Systems Lab at Hong Kong Polytechnic University.
+subtitle: news
+# author: xxx
+image: images/news/Shougang/shougang1.png
+tags: news
+order:
+---
+
+Representatives from the Shougang Jinggang Innovation Center visited The Hong Kong Polytechnic University (PolyU), where they were introduced to the research activities of the Trustworthy AI and Autonomous Systems Laboratory (TAS Lab).
+
+The delegation from the Shougang Jinggang Innovation Center expressed profound admiration for the pioneering work and cutting-edge innovations underway at the Trustworthy AI and Autonomous Systems Laboratory. They highly commended our dedication to developing safe, reliable, and ethically sound autonomous systems, noting that this mission perfectly aligns with the future direction of technology development.
+
+The Center's representatives were particularly impressed by the exhibited robots, specifically the humanoid robot, the cleaning drone, and the V2X cooperative autonomous driving platform, which they cited as exceptional demonstrations of the team's technical excellence. To further inspire innovation and foster collaboration, the Center extended an invitation for the TAS Lab to display these robotic systems at a dedicated exhibition space within the Shougang Jinggang Innovation Center, anticipating that the partnership will offer a tremendous opportunity to present the lab's research achievements to a broader audience of industry leaders and potential investors.
+
+### Photos
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/_events/2025-10-31-nanjing_jiangning.md b/_events/2025-10-31-nanjing_jiangning.md
new file mode 100644
index 00000000..7ae3d9af
--- /dev/null
+++ b/_events/2025-10-31-nanjing_jiangning.md
@@ -0,0 +1,27 @@
+---
+title: Nanjing Jiangning Economic Development Zone Officials Visit Trustworthy AI and Autonomous Systems Lab at Hong Kong Polytechnic University.
+subtitle: news
+# author: xxx
+image: images/news/nanjingjiangning/2.jpg
+tags: news
+order:
+---
+
+Officials from the Nanjing Jiangning Economic Development Zone visited The Hong Kong Polytechnic University, where they were introduced to the research activities of the Trustworthy AI and Autonomous Systems Laboratory. The delegation was given an overview of several innovative projects, including cleaning drones, tunnel inspection drones, humanoid robots, and end-to-end autonomous driving systems.
+
+During the visit, the officials expressed strong appreciation for the laboratory’s work, highlighting the potential applications and technological advancements demonstrated by the projects. The exchange underscored the importance of collaboration in cutting-edge research and its role in driving industrial and technological development.
+
+
+### Photos
+
+
+
+
+
+
+
+
+
+
diff --git a/_events/2025-11-03-ZhengXi_Phd_defense.md b/_events/2025-11-03-ZhengXi_Phd_defense.md
new file mode 100644
index 00000000..ba278928
--- /dev/null
+++ b/_events/2025-11-03-ZhengXi_Phd_defense.md
@@ -0,0 +1,18 @@
+---
+title: Congratulations to Dr. ZHENG Xi on a successful PhD oral defense!
+# author: Yixin Gao
+image: images/news/20251103_Zhengxi/zhengxi_oral_defense.jpg
+tags: news
+order:
+---
+
+Congratulations to Dr. ZHENG Xi on a successful PhD oral defense!
+
+
+
+
+
+
+
+
diff --git a/_events/2025-11-03-hk_fangwuzhanlan.md b/_events/2025-11-03-hk_fangwuzhanlan.md
new file mode 100644
index 00000000..5247f5a5
--- /dev/null
+++ b/_events/2025-11-03-hk_fangwuzhanlan.md
@@ -0,0 +1,30 @@
+---
+title: PolyU's TAS Lab Showcases Advanced Drone Technology at Chartered Institute of Housing Asian Pacific Branch Dinner
+subtitle:
+# author: xxx
+image: images/news/1103fangwu/fangwu1.jpg
+tags: news
+order:
+---
+
+HONG KONG – November 3, 2025 – The TAS Lab from The Hong Kong Polytechnic University (PolyU) presented its cutting-edge unmanned aerial vehicle (UAV) technology at a special exhibition during the annual dinner of The Chartered Institute of Housing (CIH) Asian Pacific Branch.
+
+The event, attended by key figures and professionals from the housing and property management industry, provided a prime opportunity for the PolyU research team to demonstrate the practical applications of their advanced drone systems.
+
+The TAS Lab's exhibition featured video presentations and our WALL-E drone model, highlighting capabilities specifically relevant to the housing sector. These included high-precision autonomous navigation for façade cleaning, AI-powered defect detection, and 3D mapping for building maintenance.
+
+The demonstration sparked significant interest among the attendees, fostering discussions on how this technology could be integrated into existing workflows for building surveys, maintenance planning, and safety compliance.
+
+### Photos
+
+
+
+
+
+
+
+
+
+
diff --git a/_events/2025-11-06-sz_chuanghuan.md b/_events/2025-11-06-sz_chuanghuan.md
new file mode 100644
index 00000000..607ed5cc
--- /dev/null
+++ b/_events/2025-11-06-sz_chuanghuan.md
@@ -0,0 +1,20 @@
+---
+title: Professor Wen Weisong of PolyU Leads Delegation to Shenzhen Chuanghuan to Discuss Drone Pipeline Inspection Technology
+subtitle:
+# author: xxx
+# image: images/news/1103fangwu/fangwu1.jpg
+tags: news
+order:
+---
+
+SHENZHEN, China – November 6, 2025 – A research delegation from The Hong Kong Polytechnic University (PolyU), led by Professor Wen Weisong, visited the offices of Shenzhen Chuanghuan today to engage in high-level technical discussions and explore future collaboration.
+
+The primary focus of the meeting was the application of advanced unmanned aerial vehicle (UAV) technology for internal pipeline exploration and inspection.
+
+The PolyU team presented its latest research findings and technological breakthroughs in autonomous systems. Key discussion points included navigating drones in GPS-denied, confined spaces, 3D mapping of internal structures, and AI-powered defect detection for pipe maintenance.
+
+Representatives from Shenzhen Chuanghuan shared their industry expertise and the significant market demand for safer, more efficient inspection solutions for complex urban and industrial pipe networks.
+
+The two parties held a productive dialogue on bridging the gap between cutting-edge academic research and real-world industrial applications. Both sides identified significant synergies and expressed a strong mutual interest in a future partnership.
+
+The visit concluded with an agreement to draft a formal plan for future cooperation, potentially including joint research projects, technology trials, and the development of specialized drone platforms tailored for pipeline environments.
diff --git a/_events/2025-11-10-hk_heu_visit.md b/_events/2025-11-10-hk_heu_visit.md
new file mode 100644
index 00000000..5ca7d88c
--- /dev/null
+++ b/_events/2025-11-10-hk_heu_visit.md
@@ -0,0 +1,55 @@
+---
+title: Harbin Engineering University Vice President YU Zhiwen Visits PolyU’s TAS Lab to Strengthen Research Ties
+subtitle:
+# author: xxx
+image: images/news/1110_heu/heu4.jpg
+tags: news
+order:
+---
+
+HONG KONG – November 10, 2025 – The TAS Lab at The Hong Kong Polytechnic University (PolyU) today welcomed a distinguished delegation from Harbin Engineering University (HEU), led by Vice President YU Zhiwen. The visit was organized to demonstrate the lab's latest advancements in autonomous systems and to deepen the research partnership between the two institutions.
+
+The visit began with a guided tour of the FJ005 Indoor Flight Arena, the lab's state-of-the-art research and testing facility.
+
+Following the tour, the HEU delegation received a comprehensive briefing on the lab's key projects, presented by the TAS Lab's core research team:
+
+Mr. ZHU Fengchi delivered a presentation on the achievements of the Joint Laboratory and the fruitful collaboration between PolyU and HEU.
+
+
+
+
+
+Mr. LIU Xikun introduced the T2 Drone Project, providing a detailed overview and demonstration of the UAV's advanced capabilities.
+
+
+
+
+
+Mr. XIAO Naigui and Mr. HU Jiahao presented the lab's autonomous cleaning drone and gave a technical demonstration of several other specialized UAV projects.
+
+
+
+
+
+Prof. JIANG Yiping and Prof. GAO Zhen showcased the capabilities of the indoor flight arena and presented the lab's wider research outcomes in autonomous navigation and control.
+
+
+
+
+
+The demonstrations also included an introduction to the lab's Unmanned Ground Vehicle (UGV) platforms, illustrating the breadth of the TAS Lab's expertise in autonomous robotics.
+
+The session fostered a productive and in-depth discussion, with Vice President Yu and the HEU delegation engaging with the researchers on the technical innovations presented. The visit marks another significant step in the ongoing collaboration between the two universities, paving the way for continued innovation in autonomous systems.
+
+
+
+
+
+
+
+
diff --git a/_events/2025-11-11-UBeat_interviewed_Prof_Wen_Weisong.md b/_events/2025-11-11-UBeat_interviewed_Prof_Wen_Weisong.md
new file mode 100644
index 00000000..08e4cc7e
--- /dev/null
+++ b/_events/2025-11-11-UBeat_interviewed_Prof_Wen_Weisong.md
@@ -0,0 +1,41 @@
+---
+title: Prof. Weisong Wen Interviewed by UBeat on "Drone-Based Curtain Wall Cleaning" Technology and Prospects
+subtitle:
+# author: xxx
+image: images/news/Ubeat/image1.png
+tags: news
+order:
+---
+
+## Professor Weisong Wen interviewed by UBeat on "Drone-Based Curtain Wall Cleaning" technology and prospects
+
+### Drones for glass curtain wall cleaning: all parties gearing up.
+
+UBeat recently ran the feature "Drones for glass curtain wall cleaning: all parties gearing up." In the interview, TAS Lab lead and Assistant Professor Weisong Wen outlined the technology roadmap, regulatory compliance, and application outlook for drone-based curtain wall cleaning, showcasing the team’s progress in autonomous localization, control, and operational safety.
+
+
+
+
+
+ Prof. Wen during the interview.
+
+### Industry pain points and opportunities
+
+* Technical challenges: Perception and localization on highly reflective glass; disturbance-rejection control for close-proximity flight; compensation for water-jet reaction forces; enclosure waterproofing and payload reliability.
+* Safety and compliance: Advanced operations permissions, exclusion zones and contingency planning, geo-fencing and wind-field assessment—prioritizing “safety first, standards-led.”
+* Application value: Reduced high-altitude work risk, improved efficiency, and strong potential for energy savings and carbon reduction.
+* Progress and plans: A staged demonstration path—building mapping → localization and pathing → on-site cleaning—while co-developing standard procedures with property managers and regulators.
+
+
+
+
+
+ PolyU JCIT Tower.
+
+
+### Link
+
+UBeat feature: [https://ubeat.com.cuhk.edu.hk/180\_%E7%84%A1%E4%BA%BA%E6%A9%9F%E6%B4%97%E7%8E%BB%E7%92%83%E5%B9%95%E7%89%86-%E5%90%84%E6%96%B9%E8%93%84%E5%8B%A2%E5%BE%85%E7%99%BC/](https://ubeat.com.cuhk.edu.hk/180_%E7%84%A1%E4%BA%BA%E6%A9%9F%E6%B4%97%E7%8E%BB%E7%92%83%E5%B9%95%E7%89%86-%E5%90%84%E6%96%B9%E8%93%84%E5%8B%A2%E5%BE%85%E7%99%BC/)
+
diff --git a/_events/2025-11-12-hk_bj_symposium.md b/_events/2025-11-12-hk_bj_symposium.md
new file mode 100644
index 00000000..15d137dd
--- /dev/null
+++ b/_events/2025-11-12-hk_bj_symposium.md
@@ -0,0 +1,29 @@
+---
+title: Quadruped Robot Steals Spotlight at 28th Beijing-Hong Kong Economic Cooperation Symposium
+subtitle:
+# author: xxx
+image: images/news/1112HK_BJ_sym/4.jpg
+tags: news
+order:
+---
+
+HONG KONG – November 12, 2025 – The 28th Beijing-Hong Kong Economic Cooperation Symposium witnessed a showcase of technological innovation as Dr. Runqiu Yang and Mr. Zhongqi Wang from our research laboratory presented our work on a quadruped robot. The robot is designed for inspection, logistics, and search-and-rescue operations, and is capable of carrying heavy loads for extended periods. The demonstration attracted substantial attention from government officials, industry leaders, and academic professionals.
+
+The symposium, themed "Beijing-Hong Kong Joining Hands, Connecting the World," brought together over 800 participants from government agencies, international business associations, leading enterprises, and industrial professionals. The symposium's significance was underscored by the presence of key political leaders, with Hong Kong Special Administrative Region Chief Executive John Lee and Beijing Municipal Mayor Yin Yong both delivering addresses at the opening ceremony. The symposium served as a crucial bridge for strengthening ties between the two regions' technological ecosystems. As Hong Kong Special Administrative Region Chief Executive John Lee noted in his opening address, "Beijing possesses profound historical culture and strong technological innovation capabilities, while Hong Kong enjoys the advantages of connecting the mainland with the rest of the world under the 'one country, two systems' framework."
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/_events/2025-11-18-ITSC2025-IVmeetsurban.md b/_events/2025-11-18-ITSC2025-IVmeetsurban.md
new file mode 100644
index 00000000..3b0a7c8f
--- /dev/null
+++ b/_events/2025-11-18-ITSC2025-IVmeetsurban.md
@@ -0,0 +1,66 @@
+---
+title: 4th IV Meets Urban Workshop a Success at ITSC 2025
+subtitle: Safe And Certifiable Navigation And Control for Intelligent Vehicles In Complex Urban Scenarios
+# author: Yixin Gao
+image: images/news/ITSC2025/group_photo.jpg
+tags: news
+order:
+---
+
+**GOLD COAST, AUSTRALIA – November 18, 2025 –** The **4th Workshop on Intelligent Vehicle Meets Urban: Safe And Certifiable Navigation And Control** was successfully held at the Star Grand, Broadbeach, Gold Coast, Australia, in conjunction with the ITSC 2025 conference. The event convened leading experts and researchers to address the critical challenges of ensuring **safe, robust, and certifiable autonomous navigation** in complex urban environments.
+
+
+
+
+
+The workshop featured a series of high-impact presentations by renowned experts, whose contributions steered discussions on cutting-edge solutions for urban autonomy:
+
+* **Prof. Li-Ta Hsu** (The Hong Kong Polytechnic University)
+* **Prof. Timothy D. Barfoot** (University of Toronto)
+* **Prof. Fu Zhang** (The University of Hong Kong)
+* **Prof. Yi Zhou** (Hunan University, China)
+* **Dr. Mao Shan** (The University of Sydney)
+* **Prof. Shreyas Kousik** (Georgia Institute of Technology)
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Their talks covered essential topics from high-precision multi-sensor fusion and radar-based navigation to formal safety methods and LiDAR-centric systems for drones.
+
+
+A key highlight of the event was the dynamic poster session. Colleagues from our research group and Zhenxing Ming from the University of Sydney presented their latest findings, contributing significantly to the dialogue on next-generation intelligent vehicles. Their exhibited works covered areas such as robust localization, V2X data fusion, and integrity monitoring for autonomous navigation.
+
+
+The 4th Workshop on Intelligent Vehicle Meets Urban was a resounding success, fostering collaboration and setting new directions for research in safe and certifiable autonomous systems. The organizing committee extends its sincere gratitude to all invited speakers, poster presenters, and attendees for their active participation.
+
+
+For the detailed schedule, invited speaker abstracts, and information on accepted posters and videos, please visit the official workshop page: **[4th Workshop on IV meets Urban](https://sites.google.com/view/ivurban2025itsc)**
+
+
+
+
diff --git a/_events/2025-12-13-jj.md b/_events/2025-12-13-jj.md
new file mode 100644
index 00000000..4dffe8da
--- /dev/null
+++ b/_events/2025-12-13-jj.md
@@ -0,0 +1,39 @@
+---
+title: TAS Team Showcases Innovative Robotics at PolyU Technology Transfer Conference
+subtitle:
+# author: xxx
+image: images/news/1213Jinjiang/1.jpg
+tags: news
+order:
+---
+
+**JINJIANG, China** – December 14, 2025 – The TAS Research Team from The Hong Kong Polytechnic University (PolyU) made a strong impression at the inaugural **"Hong Kong Polytechnic University Technology Transfer Conference & Inaugural Annual Exchange Meeting of the Institute for Technological Innovation (2025)."** Held in Jinjiang City, Fujian Province, the event drew over 3,500 participants from government, academia, and industry, focusing on advancing industry-academia-research integration and fostering an innovation ecosystem.
+
+
+
+
+
+
+
+
+
+Guided by PolyU's commitment to transforming research into real-world applications, the TAS team showcased its cutting-edge research outputs in robotics and sensing technologies. Highlights included unmanned aerial vehicles (UAVs) for building window cleaning, quadruped robots designed for inspection and logistics, ultra-wideband (UWB) modules for precise positioning, and LiDAR scanners enabling high-resolution 3D environmental modeling. These demonstrations attracted keen interest from investors and professionals, underscoring PolyU's role in driving national innovation strategies.
+
+
+
+
+
+
+
+
+
+As PolyU President Professor Jin-Guang Teng emphasized, the conference marks a milestone in converting laboratory breakthroughs into market-ready products, with TAS contributions exemplifying this vision.
+
+
+
+
diff --git a/_events/2025-12-17-Ruijie_IoTJ.md b/_events/2025-12-17-Ruijie_IoTJ.md
new file mode 100644
index 00000000..b809b293
--- /dev/null
+++ b/_events/2025-12-17-Ruijie_IoTJ.md
@@ -0,0 +1,22 @@
+---
+title: Our paper is accepted by IEEE Internet of Things Journal
+subtitle: news
+# author: xxx
+image: images/news/ruijie_rttlio.png
+tags: news
+order:
+---
+
+It is great to share that our paper (“RTT-LIO: A Wi-Fi RTT-aided LiDAR-Inertial Odometry via Tightly-Coupled Factor Graph Optimization in Complex Scenes”, by Ruijie Xu, Xikun Liu, Xin Wang, Weisong Wen, and Yulong Huang) has been accepted by the IEEE Internet of Things Journal. Congratulations to Ruijie and the team!
+
+## Abstract
+
+The pursuit of reliable and high-precision indoor positioning has become increasingly critical with the widespread deployment of Unmanned Autonomous Systems (UAS) across smart cities. While Wi-Fi Round-Trip-Time (RTT) technology offers promising absolute positioning capabilities, it faces challenges from signal interference and processing delays. Similarly, LiDAR-inertial odometry (LIO) systems provide accurate relative positioning, but suffer from cumulative drift over time. Although existing methods have explored loosely coupled technologies, they process sensor data separately, failing to fully exploit the complementary strengths of different sensors. This research pioneered a tightly-coupled RTT/LIO framework, encompassing novel factor graph formulations that ensure consistency between RTT and LiDAR observations, alongside LiDAR-aided RTT outlier detection and exclusion. Furthermore, we developed an innovative approach to estimate the positions of unknown access points (AP) by using prior trajectory and RTT observations. AP position estimation is based on kernel density estimation (KDE) and geometric diversity constraints (GDC) with the help of an adaptive RANSAC-based fault detection algorithm. Compared to RTT-only implementations, state-of-the-art LIO systems, and conventional loosely coupled approaches, our method demonstrated error reductions of 20-80\% in extensive experiments. The [code, Wi-Fi RTT/LiDAR/IMU dataset](https://github.com/RuijieXu0408/RTT-LIO), and [demo video](https://www.bilibili.com/video/BV1Y94MzYE7F) of our proposed methodology has been made publicly available to display our research.
+
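+As a rough illustration of what a tightly coupled Wi-Fi RTT range factor could look like in a Ceres-based factor graph, consider the sketch below. The struct name, local ENU convention, and noise handling here are our own assumptions for exposition, not the paper's actual implementation:
+
+```cpp
+#include <ceres/ceres.h>
+#include <Eigen/Core>
+
+// Hypothetical RTT range factor: the residual is the gap between the
+// distance from the estimated position to a known AP position and the
+// RTT-derived range, normalized by the measurement standard deviation.
+struct RttRangeFactor {
+  RttRangeFactor(const Eigen::Vector3d& ap, double range, double sigma)
+      : ap_(ap), range_(range), sigma_(sigma) {}
+
+  template <typename T>
+  bool operator()(const T* const p, T* residual) const {
+    T dx = p[0] - T(ap_[0]);
+    T dy = p[1] - T(ap_[1]);
+    T dz = p[2] - T(ap_[2]);
+    residual[0] = (ceres::sqrt(dx * dx + dy * dy + dz * dz) - T(range_)) / T(sigma_);
+    return true;
+  }
+
+  static ceres::CostFunction* Create(const Eigen::Vector3d& ap, double range, double sigma) {
+    return new ceres::AutoDiffCostFunction<RttRangeFactor, 1, 3>(
+        new RttRangeFactor(ap, range, sigma));
+  }
+
+  Eigen::Vector3d ap_;
+  double range_, sigma_;
+};
+```
+
+In a tightly coupled design, residual blocks like this share position states with the LiDAR-inertial factors in a single optimization problem, rather than fusing separately computed position estimates as loosely coupled methods do.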
+
+## System Framework
+
+
+
+
diff --git a/_events/2025-12-18-AirBlower_UAV_Demo_at_PolyU.md b/_events/2025-12-18-AirBlower_UAV_Demo_at_PolyU.md
new file mode 100644
index 00000000..1e3987d9
--- /dev/null
+++ b/_events/2025-12-18-AirBlower_UAV_Demo_at_PolyU.md
@@ -0,0 +1,39 @@
+---
+title: T2 Airport Airblower UAV Demonstration at PolyU
+subtitle: news
+image: images/news/1218/silent2.jpg
+tags: news
+---
+
+## Team Demonstrates Airblower UAV Technology with Civil Aviation Department at PolyU
+
+
+
+On December 18th, our team conducted a demonstration of airblower UAV technology in collaboration with the Civil Aviation Department at The Hong Kong Polytechnic University (PolyU).
+
+
+
+
+
+
+The demonstration showcased our airblower UAV system's capabilities to representatives from Hong Kong's Civil Aviation Department (CAD), highlighting the technology's potential applications in urban environments and its compliance with aviation safety standards in enclosed areas.
+
+
+
+
+
+
+### Advancing UAV Applications in Hong Kong
+
+The UAV, a product of our collaboration with Gammon Construction Ltd., represents an important step toward field deployment of our airblower UAV system at the newly built T2 airport. The demonstration provided a practical example of how this technology can be used in real-world scenarios, helped build regulatory understanding and acceptance of specialized UAV applications in Hong Kong, and offered valuable insights into the operational parameters and safety considerations of airblower UAV technology.
+
+Our team remains committed to working closely with aviation authorities to ensure that innovative UAV solutions can be safely integrated into Hong Kong's airspace, contributing to the development of the city's low-altitude economy.
+
+
+
+
+
+
diff --git a/_events/2025-12-19-cleaning_uav_sahnghai.md b/_events/2025-12-19-cleaning_uav_sahnghai.md
new file mode 100644
index 00000000..d769c767
--- /dev/null
+++ b/_events/2025-12-19-cleaning_uav_sahnghai.md
@@ -0,0 +1,46 @@
+---
+title: Team Successfully Demonstrates Automated Drone Building Cleaning at Shanghai Pudong Software Park
+subtitle: news
+image: images/news/1219/image1.jpg
+tags: news
+---
+
+## Team Successfully Demonstrates Automated Drone Building Cleaning at Shanghai Pudong Software Park
+
+
+
+Today, our team conducted a field demonstration of automated drone building cleaning at the Shanghai Pudong Software Park in China.
+
+
+
+
+
+
+
+
+
+
+As a key part of the launch ceremony for the Shanghai Pudong Software Park Low-Altitude Economy Service Platform, our demonstration received significant attention and support from the Shanghai Government, the Pudong New Area Government, and the Aircraft Owners and Pilots Association of China (AOPA-China).
+
+
+
+
+
+
+### Industry Breakthrough and Market Potential
+
+Conducting a demonstration in a city like Shanghai—characterized by its dense skyline and immense demand for building cleaning services—marks a significant breakthrough for our team in the field of drone-based cleaning.
+
+This milestone not only validates the maturity of our technology but also demonstrates the solution's potential for application in the complex environments of mega-cities.
+
+---
+
+Reference Press Release: https://mp.weixin.qq.com/s/HvCxbZXmLE4ve5UVuRo08g
+
+
+
+
+
diff --git a/_events/2026-01-13-Rinoai_MoU.md b/_events/2026-01-13-Rinoai_MoU.md
new file mode 100644
index 00000000..691a412e
--- /dev/null
+++ b/_events/2026-01-13-Rinoai_MoU.md
@@ -0,0 +1,26 @@
+---
+title: PolyU AAE and Rino.ai sign MOU to advance autonomous delivery vehicle applications
+
+subtitle: news
+image: images/news/20260113_RinoaiMoU/MoU.png
+tags: news
+---
+
+## PolyU AAE and Rino.ai sign MOU to advance autonomous delivery vehicle applications
+
+We have signed an MOU with Rino.ai, a leading L4 autonomous driving company, to co-develop and pilot autonomous vehicle applications on campus. Initial focus areas include last-mile delivery and security patrols, with solutions tailored to dynamic pedestrian and traffic flows.
+
+Rino.ai has deployed 2,000+ vehicles in 170+ cities, leads the industry in new-order volume, and has begun large-scale deliveries of its Robovan autonomous logistics vehicle. PolyU AAE contributes internationally recognized expertise in multi-sensor fusion, vehicle-dynamics optimization, and intelligent transportation, supported by PolyU’s broader strengths in EVs and smart mobility.
+
+The partnership will enhance perception, decision-making, and planning for dense campus environments and accelerate real-world pilots at PolyU. More details can be found on the Rino.ai [website](https://www.rino.ai/news/rino-ai-and-the-hong-kong-polytechnic-university-sign-memorandum.html).
+
+
+
+
+
+
+
+
+
+
diff --git a/_events/2026-01-16-linxai.md b/_events/2026-01-16-linxai.md
new file mode 100644
index 00000000..d5385d0d
--- /dev/null
+++ b/_events/2026-01-16-linxai.md
@@ -0,0 +1,27 @@
+---
+title: TAS Team Visits LINXAI Company to Discuss Quadruped Robot Collaboration Projects
+subtitle:
+# author: xxx
+image: images/news/0116_linxai/1.jpg
+tags: news
+order:
+---
+
+**SHENZHEN, China** – January 16, 2026 – The TAS Team embarked on a productive visit to LINXAI Company, a leading innovator in robotics technology. The purpose of the visit was to engage in detailed discussions on ongoing collaboration projects centered around the quadruped robot, while also touring the company's advanced laboratory facilities. This exchange highlights PolyU's dedication to fostering industry-academia partnerships and advancing practical applications in robotics.
+
+
+
+
+
+The visit brought together TAS team members with LINXAI's engineering experts. Discussions focused on four innovative projects aimed at enhancing the capabilities of the quadruped robot:
+1. Robot Dog Following: Utilizing Ultra-Wideband (UWB) technology for precise human-following, enabling applications in logistics, security, and personal assistance.
+2. Robot Dog Vision-Based Motion Control: Integrating LiDAR sensors and Deep Reinforcement Learning (DRL) to improve terrain adaptability and gait optimization on unstructured surfaces.
+3. Guide Dog Application: Developing an intelligent system for visually impaired users, combining localization, vision-language navigation, and locomotion modules for safe mobility assistance.
+4. UAV-Robot Dog Landing: Creating an air-ground collaborative logistics system for seamless package transfer between UAVs and the quadruped robot, addressing last-mile delivery challenges.
+
+These projects demonstrate the platform's potential in diverse fields, ranging from disaster response and industrial inspection to assistive technologies and smart logistics.
+
+
+
+
diff --git a/_events/2026-01-18-TasFusion.md b/_events/2026-01-18-TasFusion.md
new file mode 100644
index 00000000..a4b1a002
--- /dev/null
+++ b/_events/2026-01-18-TasFusion.md
@@ -0,0 +1,47 @@
+---
+title: PolyU TAS LAB Releases TasFusion - A GNSS/IMU Sliding-Window Optimization Framework
+subtitle: news
+image: images/opensource/TasFusion/longdata.png
+tags: news
+---
+
+## PolyU TAS LAB Releases TasFusion: A GNSS/IMU Sliding-Window Optimization Framework
+
+The PolyU Trustworthy AI and Autonomous Systems Laboratory (TAS LAB) has officially released TasFusion, an open-source ROS1 framework for multi-sensor navigation and state estimation.
+
+TasFusion provides a Ceres-based GNSS/IMU loosely coupled sliding-window optimization framework, designed for research and experimental validation in outdoor navigation scenarios. The system supports IMU pre-integration, online bias estimation, marginalization to preserve historical information, and GNSS position and velocity constraints. All major functions are configurable through ROS launch parameters, enabling flexible deployment and ablation studies.
+
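+For a flavor of how such a loosely coupled design is typically expressed in Ceres, a minimal GNSS position factor is sketched below. This is a hypothetical illustration under our own naming and a local ENU convention, not TasFusion's actual source:
+
+```cpp
+#include <ceres/ceres.h>
+#include <Eigen/Core>
+
+// Hypothetical GNSS position factor: penalizes the offset between a
+// sliding-window state's position block and a GNSS fix expressed in a
+// local ENU frame, weighted by the reported measurement standard deviation.
+struct GnssPositionFactor {
+  GnssPositionFactor(const Eigen::Vector3d& enu_fix, double sigma)
+      : enu_fix_(enu_fix), sigma_(sigma) {}
+
+  template <typename T>
+  bool operator()(const T* const position, T* residual) const {
+    for (int i = 0; i < 3; ++i) {
+      residual[i] = (position[i] - T(enu_fix_[i])) / T(sigma_);
+    }
+    return true;
+  }
+
+  static ceres::CostFunction* Create(const Eigen::Vector3d& enu_fix, double sigma) {
+    return new ceres::AutoDiffCostFunction<GnssPositionFactor, 3, 3>(
+        new GnssPositionFactor(enu_fix, sigma));
+  }
+
+  Eigen::Vector3d enu_fix_;
+  double sigma_;
+};
+```
+
+In a sliding window, one such residual block would be added per GNSS epoch alongside the IMU pre-integration factors, and marginalization converts states leaving the window into a prior factor so their information is preserved rather than discarded.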
+The framework is accompanied by a complete toolchain, including GNSS message definitions, NLOS exclusion utilities, NovAtel receiver drivers, and NMEA parsing scripts. TasFusion has been validated on a GNSS-IMU-4G integrated navigation module (dual-IMU, u-blox F9P-04B, and 4G link), demonstrating reliable performance with high-frequency measurements and stable telemetry in real-world environments.
+
+TasFusion was developed in the context of the AAE4203 course at The Hong Kong Polytechnic University and is further supported by the Research Center for Autonomous System in Smart Transportation, PolyU-Wuxi Technology and Innovation Research Institute, reflecting close integration between education, research, and applied engineering.
+
+The project is now publicly available on GitHub and is intended to support research in navigation, sensor fusion, autonomous systems, and intelligent transportation applications.
+
+🔗 GitHub Repository:
+https://github.com/PolyU-TASLAB/TasFusion
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+> Reference Hardware Platform ([Introduction Video](https://www.bilibili.com/video/BV1fiaqzNEEm)):
+>
+> TasFusion has been validated on a GNSS-IMU-4G integrated navigation module (dual-IMU + u-blox F9P-04B + 4G uplink), providing high-frequency measurements and reliable telemetry for outdoor deployments.
+>
+> For inquiries regarding this hardware platform, please contact **hbwu@hkpolyu-wxresearch.cn**.
+
+
+
+
+
diff --git a/_events/2026-01-20-simpleai.md b/_events/2026-01-20-simpleai.md
new file mode 100644
index 00000000..679de051
--- /dev/null
+++ b/_events/2026-01-20-simpleai.md
@@ -0,0 +1,36 @@
+---
+title: PolyU and Simple AI Launch Strategic Collaboration
+subtitle:
+# author: xxx
+image: images/news/0119_simpleai/1.png
+tags: news
+order:
+---
+
+**HONG KONG, China** – January 19, 2026 – The Hong Kong Polytechnic University and Beijing Simple AI Technology Co., Ltd. (Simple AI) officially launched their strategic partnership through a Memorandum of Understanding (MOU) signing ceremony. Held at PolyU's Chiang Chen Studio Theatre (AG204), the event brought together key representatives from both institutions to exchange the agreement and discuss future collaborations in embodied intelligent robotics. This partnership underscores PolyU's commitment to advancing industry-academia synergies and driving innovation in AI-driven technologies for real-world applications.
+
+
+
+
+
+The ceremony was hosted by Prof. Weisong WEN from TAS Lab. Attendees from PolyU included Ir Prof. H.C. Man, Dean of the Faculty of Engineering, Prof. Crystal Shi from the School of Hotel and Tourism Management, and Dr. Runqiu Yang from TAS Lab. Representing Simple AI were Founder and CEO Dr. Xiaofei Li, Vice President Yutang Tang, and Marketing Director Hui Wang.
+
+The MOU outlines a framework for joint efforts in several key areas of embodied intelligent robotics:
+
+1. **Joint Research and Development**: Focusing on the design and optimization of robot "brains" using AI-driven control systems, including End-to-End Learning, Vision-Language-Navigation (VLN), and Vision-Language-Action (VLA) models for robust perception, real-time reasoning, and precise execution in dynamic environments.
+2. **Closed-Loop Systems Development**: Building systems that integrate behavioral AI models with real-world data from simulated and actual scenarios, addressing challenges like data scarcity in unstructured settings such as hotels, nursing homes, and homes.
+3. **Integration of TAS Lab Expertise**: Leveraging PolyU's Trustworthy AI and Autonomous Systems Laboratory (TAS Lab) for high-precision positioning algorithms and multi-sensor fusion to ensure safety in autonomous operations, providing fail-safe checks against AI-driven perception in complex indoor environments.
+4. **Phased Proof-of-Concept Pilots and Industrialization**: Executing pilots in sectors like hotel services (utilizing Simple AI's partnerships with major hotel groups and PolyU's leadership in Hospitality & Tourism Management), elderly care, and family environments, enhancing robots' generalization, long-horizon task planning, emotional intelligence, adaptive learning, and self-evolutionary capabilities.
+
+These initiatives aim to propel advancements in assistive robotics, fostering solutions for societal needs in hospitality, elderly care, and beyond.
+
+
\ No newline at end of file
diff --git a/_members/Ai_Kedai.md b/_members/Ai_Kedai.md
new file mode 100644
index 00000000..1e404d9a
--- /dev/null
+++ b/_members/Ai_Kedai.md
@@ -0,0 +1,24 @@
+---
+name: Akida Tursun
+image: images/team/Akida.jpg
+role: ra # pi / postdoc / phd / ms / under / ra / visiting
+affiliation: PolyU-Wuxi Technology and Innovation Research Institute
+order: 11
+
+links:
+ orcid:
+ email: Akida@hkpolyu-wxresearch.cn
+ profile:
+
+display_1:
+ - B.Eng.(Jiangnan University)
+display_2:
+ - June 2024 - Present
+---
+
+
+
+
+Akida received a Bachelor's degree in Management from Jiangnan University, with coursework including Principles of Management, Marketing, Consumer Behavior, and Human Resource Management. She currently works at the Hong Kong Polytechnic University-Wuxi Research Institute, handling administrative and sales-related responsibilities.
+
+
diff --git a/_members/Chen_Hongchang.md b/_members/Chen_Hongchang.md
new file mode 100644
index 00000000..c0dd6130
--- /dev/null
+++ b/_members/Chen_Hongchang.md
@@ -0,0 +1,28 @@
+---
+name: Chen Hongchang
+image: images/team/hongchang.jpg
+role: phd
+affiliation: Hong Kong Polytechnic University
+order: 1
+
+links:
+ home-page:
+ orcid: 0009-0007-8094-5926
+ google-scholar:
+ github:
+ email: hongchang.chen@connect.polyu.hk
+ profile:
+
+display_1: Ph.D.(PolyU), M.Eng.(BIT), B.Eng. (HNU)
+display_2:
+---
+
+
+
+
+Hongchang Chen received his M.S. degree from the School of Mechanical Engineering at Beijing Institute of Technology, Beijing, China, in 2025. He is currently pursuing his Ph.D. degree at The Hong Kong Polytechnic University (PolyU). His current research interests include robotics and computer vision.
+
+
+
+**Research Areas**
+Autonomous Driving; Robotics; Computer Vision
diff --git a/_members/FenchiZHU_HEU_visiting.md b/_members/FenchiZHU_HEU_visiting.md
new file mode 100644
index 00000000..f6158eaf
--- /dev/null
+++ b/_members/FenchiZHU_HEU_visiting.md
@@ -0,0 +1,27 @@
+---
+name: Fengchi ZHU
+image: images/team/fengchiZHU.jpg
+role: visiting # pi / postdoc / phd / ms / under / ra / visiting
+affiliation: Harbin Engineering University
+order: 1
+
+links:
+ home-page: N/A
+ orcid: 0000-0002-1572-7769
+ google-scholar: BWgKaxcAAAAJ&hl=zh-CN
+ github: N/A
+ email: zfchiggins@163.com
+ profile:
+
+display_1: M.S and B.Eng. (HEU)
+display_2: Oct 2025
+
+---
+
+
+
+
+Fengchi Zhu received the B.S. degree in Automation from the College of Intelligent Systems Science and Engineering, Harbin Engineering University, in 2021, where he is currently working toward the Ph.D. degree in control science and engineering. From Oct. 2025 to Mar. 2026, he is a visiting graduate researcher at the Department of Aeronautical and Aviation Engineering, Faculty of Engineering, The Hong Kong Polytechnic University. He won the Best Student Paper Award at the 2023 IEEE International Conference on Mechatronics and Automation. His current research interests include state estimation, integrated navigation, and cooperative navigation.
+
+**Research Areas**
+State Estimation, Multi-Agent Systems, Adaptive Kalman Filtering
\ No newline at end of file
diff --git a/_members/GuangyanGuo_HEU_visiting.md b/_members/GuangyanGuo_HEU_visiting.md
new file mode 100644
index 00000000..da555152
--- /dev/null
+++ b/_members/GuangyanGuo_HEU_visiting.md
@@ -0,0 +1,27 @@
+---
+name: Guangyan Guo
+image: images/team/guangyanGuo.jpg
+role: visiting # pi / postdoc / phd / ms / under / ra / visiting
+affiliation: Harbin Engineering University
+order: 2
+
+links:
+ home-page: N/A
+ orcid: N/A
+ google-scholar: N/A
+ github: N/A
+ email: guoguangyan@hrbeu.edu.cn
+ profile:
+
+display_1: B.Eng. (HEU)
+display_2: Dec 2025
+
+---
+
+
+
+
+Guangyan Guo received the B.S. degree in Automation from the College of Intelligent Systems Science and Engineering, Harbin Engineering University, in 2021. He is currently pursuing the Ph.D. degree in control science and engineering at the same university. From December 2025 to May 2026, he is a visiting graduate researcher at the Department of Aeronautical and Aviation Engineering, Faculty of Engineering, The Hong Kong Polytechnic University, Hong Kong. He won the Best Student Paper Award at the 2025 Chinese Automation Congress. His current research interests include visual SLAM, visual scene reconstruction, and the computer simulation of physical fields.
+
+**Research Areas**
+Visual SLAM, Visual Scene Reconstruction, Computer Simulation
\ No newline at end of file
diff --git a/_members/WANG_Zhongqi.md b/_members/WANG_Zhongqi.md
new file mode 100644
index 00000000..b469adc0
--- /dev/null
+++ b/_members/WANG_Zhongqi.md
@@ -0,0 +1,24 @@
+---
+name: WANG Zhongqi
+image: images/team/zhongqi_wang.jpg
+role: under
+affiliation: Hong Kong Polytechnic University
+order: 2
+
+links:
+ home-page:
+ github: https://github.com/zqwang1105
+ email: 20099224d@connect.polyu.hk
+ profile:
+
+display_1: B.Eng.(PolyU)
+display_2: July 2025 - Present
+---
+
+
+
+
+Mr. WANG Zhongqi is currently a final-year Undergraduate Student Assistant at the Department of Mechanical Engineering, The Hong Kong Polytechnic University (PolyU).
+
+**Research Areas**
+Robotics, Reinforcement Learning
diff --git a/_members/XiangruWang.md b/_members/XiangruWang.md
index 0d283eeb..1280c473 100644
--- a/_members/XiangruWang.md
+++ b/_members/XiangruWang.md
@@ -1,7 +1,7 @@
---
name: Wang Xiangru
image: images/team/wang_xiangru.jpg
-role: ra # pi / postdoc / phd / ms / under / ra / visiting
+role: phd # pi / postdoc / phd / ms / under / ra / visiting
affiliation: Hong Kong Polytechnic University
order: 8
@@ -11,7 +11,7 @@ links:
profile:
display_1:
- - RA, M.Sc. (TUM), B.Eng. (WHU)
+ - PhD student, M.Sc. (TUM), B.Eng. (WHU)
display_2:
- From Feb. 2025
---
diff --git a/_members/Yang_Mokui.md b/_members/Yang_Mokui.md
new file mode 100644
index 00000000..717b0450
--- /dev/null
+++ b/_members/Yang_Mokui.md
@@ -0,0 +1,24 @@
+---
+name: Yang Mokui
+image: images/team/yang_mokui.jpg
+role: ms
+affiliation: Hong Kong Polytechnic University
+order: 1
+
+links:
+ home-page: /
+ github: https://github.com/Guohao-Fu
+ email: mokui.yang@connect.polyu.hk
+ profile:
+
+display_1: M.Phil. Student, B.Eng.(HDU)
+display_2: May 2025 - Present
+---
+
+
+
+
+Mr. Yang Mokui is currently an MPhil student at the Department of Aeronautical and Aviation Engineering, The Hong Kong Polytechnic University (PolyU), supervised by Dr. Wen Weisong.
+
+**Research Areas**
+FPGA Hardware Acceleration
diff --git a/_members/ZHAO_Jiaqi.md b/_members/ZHAO_Jiaqi.md
index 2bc582e3..5de186f9 100644
--- a/_members/ZHAO_Jiaqi.md
+++ b/_members/ZHAO_Jiaqi.md
@@ -1,24 +1,24 @@
---
name: ZHAO Jiaqi
image: images/team/zhao_jiaqi.jpg
-role: under
+role: ms
affiliation: Hong Kong Polytechnic University
order: 1
links:
- home-page: www.linkedin.com/in/jiaqi-zhao-7ab009228
+ home-page: https://www.linkedin.com/in/jiaqi-zhao-7ab009228
github: https://github.com/Qiamp
email: jiaqi.zhao@connect.polyu.hk
profile:
-display_1: B.Eng.(PolyU)
+display_1: M.Phil. Student, B.Eng.(PolyU), Drone Captain with HKCAD & CAAC License
display_2: March 2024 - Present
---
-Mr. Zhao Jiaqi is currently a Year-4 Undergraduate Student Assistant at Department of Aeronautical and Aviation Engineering, The Hong Kong Polytechnic University(PolyU), supervised by Dr. Wen Weisong.
+Mr. Zhao Jiaqi is currently an MPhil student at the Department of Aeronautical and Aviation Engineering, The Hong Kong Polytechnic University (PolyU), supervised by Dr. Wen Weisong.
**Research Areas**
-UAV Vision-Based Positioning and Navigation
+UAV End-to-End Positioning and Navigation
diff --git a/_members/Zhang_Ziqi.md b/_members/Zhang_Ziqi.md
index ec012c84..8c48d676 100644
--- a/_members/Zhang_Ziqi.md
+++ b/_members/Zhang_Ziqi.md
@@ -3,7 +3,7 @@ name: Zhang Ziqi
image: images/team/Zhang_Ziqi.JPG
role: phd # pi / postdoc / phd / ms / under / ra / visiting
affiliation: Hong Kong Polytechnic University
-order: 1
+order: 3
links:
orcid: 0009-0001-1289-4932
diff --git a/_members/ZihaoWang_WHU_visiting.md b/_members/ZihaoWang_WHU_visiting.md
index c374514f..0b955f17 100644
--- a/_members/ZihaoWang_WHU_visiting.md
+++ b/_members/ZihaoWang_WHU_visiting.md
@@ -14,7 +14,7 @@ links:
profile:
display_1: M.S and B.Eng. (WHU)
-display_2: To be join in fall 2025
+display_2: Sep 2025 - Dec 2025
---
diff --git a/_members/gao_yixin.md b/_members/gao_yixin.md
index c982bcc7..4130fd5b 100644
--- a/_members/gao_yixin.md
+++ b/_members/gao_yixin.md
@@ -13,7 +13,7 @@ links:
email: yixin.gao@connect.polyu.hk
display_1: PhD Student(PolyU), M.Eng.(ZJU), B.Eng. (UPC)
-display_2: Spring 2024 -- Present
+display_2: Fall 2024 -- Present
---
diff --git a/_members/hujiahao.md b/_members/hujiahao.md
index 108c3bd6..30043028 100644
--- a/_members/hujiahao.md
+++ b/_members/hujiahao.md
@@ -1,7 +1,7 @@
---
name: Hu Jiahao
image: images/team/hu_jiahao.jpg
-role: ra # pi / postdoc / phd / ms / under / visiting
+role: phd # pi / postdoc / phd / ms / under / visiting
affiliation: Hong Kong Polytechnic University
order: 1
diff --git a/_members/jiao_jianhao.md b/_members/jiao_jianhao.md
new file mode 100644
index 00000000..3d4a303d
--- /dev/null
+++ b/_members/jiao_jianhao.md
@@ -0,0 +1,25 @@
+---
+name: Jianhao Jiao
+image: images/team/jianhaojiao_pict_2023.jpg
+role: postdoc # pi / postdoc / phd / ms / under / ra / visiting
+affiliation: Hong Kong Polytechnic University
+order: 1
+
+links:
+ home-page: https://gogojjh.github.io/
+ orcid:
+ google-scholar: https://scholar.google.com/citations?user=psqleSQAAAAJ&hl=zh-TW
+ github: https://github.com/gogojjh
+ email: jiaojh1994@gmail.com
+ profile:
+
+display_1: Ph.D.(HKUST), B.Eng. (ZJU)
+---
+
+
+
+
+Jianhao Jiao (Member, IEEE) received a Ph.D. in Electronic and Computer Engineering from the Hong Kong University of Science and Technology in 2021. His research specializes in SLAM, sensor fusion, and robust robotic navigation, exemplified by pioneering works such as M-LOAM, the FusionPortable dataset, and the scalable, structure-free visual navigation system OpenNavMap. He has authored over ten papers in premier robotics venues (e.g., IROS, ICRA, NeurIPS, IJRR, IEEE TRO) and serves as an Associate Editor for RA-L, IROS 2024-2025, and ICRA 2025. Dr. Jiao's ultimate research objective is to endow autonomous systems with lifelong cognitive spatial memory mechanisms capable of dynamic updating, directed toward applications in challenging, unstructured environments such as subterranean mines and forests.
+
+**Research Areas**
+Mobile Robot, Navigation, Embodied Intelligence
\ No newline at end of file
diff --git a/_members/liheng.md b/_members/liheng.md
new file mode 100644
index 00000000..231735da
--- /dev/null
+++ b/_members/liheng.md
@@ -0,0 +1,30 @@
+---
+name: Li Heng
+image: images/team/LiHeng.jpg
+role: ra
+affiliation: Hong Kong Polytechnic University
+order: 9
+
+links:
+ home-page:
+ orcid:
+ google-scholar:
+ github: https://github.com/shannonlee2024
+ email: shannon-h.li@polyu.edu.hk
+ profile:
+
+display_1: Research Assistant, B.Eng. (DLPU), R&D Engineer (Unmanned Systems Field, Shenzhen, China)
+display_2: Spring 2025 -- Present
+
+---
+
+
+
+
+Heng Li received a B.Eng. degree in Automation (Innovation Experimental Class) from Dalian Polytechnic University (DLPU), Dalian, China, in 2017, where he served as a research assistant to the Director of DLPU's R&D Institute of Integrated Measurement & Control.
+
+From 2017 to 2024, he worked as an R&D engineer at a company in the unmanned systems field. His main research areas are perception and security of unmanned systems.
+
+
+**Research Areas**
+Unmanned Systems Perception; Unmanned Systems Security
diff --git a/_members/liu_xikun.md b/_members/liu_xikun.md
index a178bd75..168fa032 100644
--- a/_members/liu_xikun.md
+++ b/_members/liu_xikun.md
@@ -1,7 +1,7 @@
---
name: Liu Xikun
image: images/team/liu_xikun.png
-role: phd # pi / postdoc / phd / ms / under / ra / visiting
+role: postdoc # pi / postdoc / phd / ms / under / ra / visiting
affiliation: Hong Kong Polytechnic University
order: 3
diff --git a/_members/runqiuyang.md b/_members/runqiuyang.md
index 86faa579..ebb22138 100644
--- a/_members/runqiuyang.md
+++ b/_members/runqiuyang.md
@@ -8,7 +8,7 @@ order: 3
links:
home-page: n/a
orcid: 0000-0001-6286-8217
- google-scholar: https://scholar.google.com/citations?user=cDycNtAAAAAJ&hl=en
+ google-scholar: cDycNtAAAAAJ&hl=en
github: n/a
email: runqiu.yang@polyu.edu.hk
profile:
diff --git a/_members/wang_junzhe.md b/_members/wang_junzhe.md
new file mode 100644
index 00000000..4cde88c0
--- /dev/null
+++ b/_members/wang_junzhe.md
@@ -0,0 +1,17 @@
+---
+name: WANG Junzhe
+image: images/team/wang_junzhe.png
+role: ms
+affiliation: Hong Kong Polytechnic University
+order: 114
+links:
+ email: cooper.wang@connect.polyu.hk
+ profile:
+display_1: M.Phil. Student, B.Eng.(HKUST)
+display_2: Fall 2025
+---
+
+
+WANG Junzhe received a BEng degree from the Hong Kong University of Science and Technology (HKUST). He is currently pursuing his M.Phil. degree at The Hong Kong Polytechnic University (PolyU).
+
+**Research Areas**
+UAV, Mapping, and Localization; Sensor Fusion; GNSS
diff --git a/_members/yihan_zhong.md b/_members/yihan_zhong.md
index 2a905108..ec60de79 100644
--- a/_members/yihan_zhong.md
+++ b/_members/yihan_zhong.md
@@ -8,7 +8,7 @@ order: 2
links:
home-page:
orcid: 0000-0002-1462-3642
- google-scholar: https://scholar.google.com/citations?user=c1xJ5pIAAAAJ&hl=en&oi=ao
+ google-scholar: c1xJ5pIAAAAJ&hl=en&oi=ao
github: https://github.com/Pirkaklo
email: yi-han.zhong@connect.polyu.hk
profile:
diff --git a/_members/yingmign_chen.md b/_members/yingmign_chen.md
index 1378099a..10917511 100644
--- a/_members/yingmign_chen.md
+++ b/_members/yingmign_chen.md
@@ -14,7 +14,7 @@ links:
profile:
display_1: M.Phil. Student, B.Eng. (Western University of Ontario)
-display_2: Spring 2024
+display_2: Spring 2024 -- Present
---
diff --git a/_members/yywang.md b/_members/yywang.md
index b80ca504..1489db7a 100644
--- a/_members/yywang.md
+++ b/_members/yywang.md
@@ -6,9 +6,9 @@ affiliation: Hong Kong Polytechnic University
order: 2
links:
- home-page: http://www.ee.cuhk.edu.hk/~yywang/
+ home-page: https://yywang.pages.dev/
orcid: 0000-0003-3293-0790
- google-scholar: https://scholar.google.com/citations?user=bRwHOgwAAAAJ&hl=zh-CN
+ google-scholar: bRwHOgwAAAAJ&hl=zh-CN
email: ying5wang@polyu.edu.hk
profile:
@@ -22,3 +22,4 @@ Yingying Wang received the B.E. degree in Electronic Engineering from Northeaste
**Research Areas**
Smart sensing; Robotics; Sensor Fusion; Inertial Measurement Unit; Wireless Sensing
+
diff --git a/_members/zhengxi.md b/_members/zhengxi.md
index af732b40..9d634c2f 100644
--- a/_members/zhengxi.md
+++ b/_members/zhengxi.md
@@ -1,14 +1,14 @@
---
name: Zheng Xi
image: images/team/zheng_xi.png
-role: phd # pi / postdoc / phd / ms / under / ra / visiting
+role: alumni # pi / postdoc / phd / ms / under / ra / visiting / alumni
affiliation: Hong Kong Polytechnic University
order: 2
links:
home-page:
orcid: 0000-0001-8399-5127
- google-scholar: https://scholar.google.com/citations?user=cfhVuzMAAAAJ&hl=zh-CN
+ google-scholar: cfhVuzMAAAAJ&hl=zh-CN
github: https://github.com/ZHENGXi-git
email: zheng-xi.zheng@connect.polyu.hk
profile:
diff --git a/_opensource/2025-11-11-HDMap.md b/_opensource/2025-11-11-HDMap.md
new file mode 100644
index 00000000..ca409266
--- /dev/null
+++ b/_opensource/2025-11-11-HDMap.md
@@ -0,0 +1,52 @@
+---
+title: Semantic-Vector HD Map
+subtitle:
+author: Runzhi Hu
+image: images/opensource/HDMap/garage_half.gif
+tags:
+order:
+---
+
+
+This is a pipeline for constructing HD semantic maps and HD vector maps.
+
+
+
+This repository hosts an open-source HDVM (high-definition vector map) generation pipeline designed for autonomous vehicles, especially in intricate urban environments. Traditional HDVM creation methods often rely on a planar-ground assumption, which causes inaccuracies in real-world scenarios. Our solution instead integrates data from GNSS (global navigation satellite system), INS (inertial navigation system), LiDAR, and cameras.
+
+The process starts with the extraction of semantic data from raw images using advanced architectures like Vision Transformer (ViT) and Swin Transformer. We then acquire the absolute 3D positions of these semantic objects from 3D LiDAR depth and derive high-precision pose estimates from GNSS real-time kinematic (GNSS-RTK) positioning and the INS. This semantic data aids in the extraction of vector information, such as lane markings, which forms the HDVM.
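+
+For intuition, the geometric core of this step is a pinhole projection that carries each LiDAR point into the image to look up its semantic label. A minimal numpy sketch, with placeholder intrinsics, extrinsics, and label map (replace with calibrated values):
+
+```python
+import numpy as np
+
+K = np.array([[800.0, 0.0, 640.0],   # placeholder camera intrinsics
+              [0.0, 800.0, 360.0],
+              [0.0, 0.0, 1.0]])
+T_cam_lidar = np.eye(4)              # placeholder LiDAR-to-camera extrinsics
+semantic = np.zeros((720, 1280), dtype=np.uint8)  # per-pixel class ids
+
+def label_of_point(p_lidar):
+    """Project one LiDAR point into the image; return its semantic label."""
+    p = T_cam_lidar @ np.append(p_lidar, 1.0)  # to camera frame
+    if p[2] <= 0.0:                            # behind the camera
+        return None
+    u, v = (K @ p[:3])[:2] / p[2]              # pinhole projection
+    u, v = int(round(u)), int(round(v))
+    if 0 <= v < semantic.shape[0] and 0 <= u < semantic.shape[1]:
+        return int(semantic[v, u])
+    return None
+
+print(label_of_point(np.array([0.0, 0.0, 5.0])))  # a point 5 m ahead of the camera
+```
+
+The same projection is where the two error sources studied below enter: a segmentation error corrupts the label map, while an extrinsic error perturbs T_cam_lidar and shifts every projected point.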
+
+A significant feature of this repo is its focus on HDVM accuracy. We've examined the impact of two primary error sources: segmentation discrepancies and LiDAR-camera extrinsic parameter deviations. An error propagation scheme is provided to showcase how these sources can affect the HDVM's precision.
+
+For ease of setup and consistency, a Docker version of the pipeline is available and is the recommended method for deployment.
+
+
+For details, please refer to our official repository at [HDMap](https://github.com/ebhrz/HDMap).
+
+
+
+If you find this code useful, we would appreciate it if you cite our paper.
+
+```latex
+@article{hu2024hdvm,
+author = {Hu, Runzhi and Bai, Shiyu and Wen, Weisong and Xia, Xin and Hsu, Li-Ta},
+title = {Towards high-definition vector map construction based on multi-sensor integration for intelligent vehicles: Systems and error quantification},
+journal = {IET Intelligent Transport Systems},
+keywords = {automated driving and intelligent vehicles, autonomous driving, navigation, sensor fusion},
+doi = {10.1049/itr2.12524},
+url = {https://ietresearch.onlinelibrary.wiley.com/doi/abs/10.1049/itr2.12524},
+eprint = {https://ietresearch.onlinelibrary.wiley.com/doi/pdf/10.1049/itr2.12524}
+}
+```
+
+
\ No newline at end of file
diff --git a/_opensource/2025-11-11-kltdataset.md b/_opensource/2025-11-11-kltdataset.md
new file mode 100644
index 00000000..23f6ab9a
--- /dev/null
+++ b/_opensource/2025-11-11-kltdataset.md
@@ -0,0 +1,32 @@
+---
+title: KLT Dataset
+subtitle:
+author: Runzhi Hu
+image: images/opensource/kltdataset/NLOS_crop.gif
+tags:
+order:
+---
+
+
+The KLT dataset is a light urban scenario dataset with LOS/NLOS labels.
+
+
+
+The KLT dataset is a light urban scenario dataset containing GNSS raw measurements collected using a U-blox F9P receiver, ground truth provided by the SPAN-CPT system, and LOS/NLOS labels for GPS and Beidou satellites. Additional data, such as IMU, LiDAR, and camera recordings, are also included in the ROS bag file.
+
+We also provide a start script and configuration files to enable researchers to get started with the dataset quickly and efficiently.
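+
+As a quick-start illustration, iterating over the bag from Python looks like this (the bag filename and topic name here are assumptions; check the provided configuration files for the actual ones):
+
+```python
+# Hypothetical sketch: inspect messages in the KLT ROS bag (requires ROS1).
+import rosbag
+
+with rosbag.Bag("klt.bag") as bag:
+    # Topic name is illustrative; list the real topics with `rosbag info klt.bag`.
+    for topic, msg, t in bag.read_messages(topics=["/ublox_f9p/raw"]):
+        print(t.to_sec(), type(msg).__name__)
+```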
+
+
+For details, please refer to our official repository at [KLT Dataset](https://github.com/ebhrz/KLTDataset).
+
+
\ No newline at end of file
diff --git a/_opensource/2025-11-11-plvins.md b/_opensource/2025-11-11-plvins.md
new file mode 100644
index 00000000..80271331
--- /dev/null
+++ b/_opensource/2025-11-11-plvins.md
@@ -0,0 +1,34 @@
+---
+title: SafetyQuantifiable-PLVINS
+subtitle:
+author: Xi Zheng
+image: images/opensource/zhengxi/framework2.png
+tags:
+order:
+---
+
+
+Safety-quantifiable Line Feature-based Monocular Visual Localization with 3D Prior Map
+
+
+To address the drift and safety quantification challenges in visual localization, we propose a novel map-aided method that delivers both accurate pose estimates and a measurable error bound. By tightly integrating visual-inertial odometry with a prior line map, our system establishes geometric constraints between 2D image features and 3D map lines. Crucially, we introduce a GNSS-inspired integrity monitoring framework to compute a Protection Level (PL), which quantifies the potential error in both position and orientation, thereby certifying the solution's safety.
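+
+For intuition, a protection level in its generic, GNSS-style Gaussian form (our paper derives the exact line-feature-specific version, including the orientation bound) reads
+
+```latex
+\mathrm{PL} = K(P_{\mathrm{md}}) \, \sigma_{\hat{x}} + b_{\max}
+```
+
+where $\sigma_{\hat{x}}$ is the estimated solution uncertainty, $b_{\max}$ the worst-case bias that evades the fault detector, and $K$ a quantile chosen from the allowed missed-detection probability $P_{\mathrm{md}}$.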
+
+For details, please refer to our official repository at [SafetyQuantifiable-PLVINS](https://github.com/ZHENGXi-git/SafetyQuantifiable-PLVINS).
+
+
+If you use this code, please cite our paper:
+```latex
+@article{zheng2025safety,
+ title={Safety-quantifiable line feature-based monocular visual localization with 3d prior map},
+ author={Zheng, Xi and Wen, Weisong and Hsu, Li-Ta},
+ journal={IEEE Transactions on Intelligent Transportation Systems},
+ year={2025},
+ publisher={IEEE}
+}
+```
+
+
diff --git a/_opensource/2025-11-11-pyrtklib.md b/_opensource/2025-11-11-pyrtklib.md
new file mode 100644
index 00000000..2e38807b
--- /dev/null
+++ b/_opensource/2025-11-11-pyrtklib.md
@@ -0,0 +1,49 @@
+---
+title: pyrtklib
+subtitle:
+author: Runzhi Hu
+#image: images/project/huawei_mapping.gif
+tags:
+order:
+---
+
+
+Unleash the full performance of the most popular GNSS library, RTKLIB, in Python: a Python binding for RTKLIB that exposes its complete functionality.
+
+
+
+This is a Python binding for RTKLIB, the most popular GNSS-RTK positioning C library. Many researchers now work in Python, especially in the deep learning field, so we implemented this Python interface to RTKLIB to bridge Python and positioning. With it, you can easily read data from RINEX files and compute positioning solutions using the methods RTKLIB provides, such as SPP, RTK, and PPP.
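+
+As a flavor of the workflow, a hypothetical sketch follows: the type and function names mirror RTKLIB's C API, which the binding exposes, but treat the exact Python signatures as assumptions and consult the repository's examples for authoritative usage.
+
+```python
+# Hypothetical sketch of RTKLIB-style processing from Python via pyrtklib.
+# Names follow RTKLIB's C API; verify against the pyrtklib examples.
+from pyrtklib import *
+
+obs, nav, sta = obs_t(), nav_t(), sta_t()
+readrnx("rover.obs", 1, "", obs, nav, sta)  # observation RINEX
+readrnx("rover.nav", 1, "", obs, nav, sta)  # navigation RINEX
+sortobs(obs)
+# ...then feed obs/nav into RTKLIB's solvers (e.g. pntpos for SPP).
+```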
+
+
+For details, please refer to our official repository at [pyrtklib](https://github.com/IPNL-POLYU/pyrtklib) ([download stats](https://pepy.tech/projects/pyrtklib)).
+
+If you want to use the RTKLIB version based on rtklibexplorer/rtklib_demo5, please refer to [pyrtklib_demo5](https://github.com/IPNL-POLYU/pyrtklib_demo5) ([download stats](https://pepy.tech/projects/pyrtklib5)).
+
+For a quick install, you can use pip:
+```bash
+pip install pyrtklib
+pip install pyrtklib5  # for the rtklib_demo5 variant
+```
+
+If you use this code, please cite our paper:
+```latex
+@ARTICLE{10965937,
+ author={Hu, Runzhi and Xu, Penghui and Zhong, Yihan and Wen, Weisong},
+ journal={IEEE Transactions on Intelligent Transportation Systems},
+ title={pyrtklib: An Open-Source Package for Tightly Coupled Deep Learning and GNSS Integration for Positioning in Urban Canyons},
+ year={2025},
+ volume={26},
+ number={7},
+ pages={10652-10662},
+ keywords={Global navigation satellite system;Deep learning;Python;Artificial intelligence;Weight measurement;Satellites;Receivers;Mathematical models;Training;Position measurement;Artificial intelligence;deep learning;GNSS;RTKLIB},
+ doi={10.1109/TITS.2025.3552691}}
+```
+
+
+
+
+
+
\ No newline at end of file
diff --git a/_opensource/2025-11-11-tasgnss.md b/_opensource/2025-11-11-tasgnss.md
new file mode 100644
index 00000000..80664e98
--- /dev/null
+++ b/_opensource/2025-11-11-tasgnss.md
@@ -0,0 +1,28 @@
+---
+title: TASGNSS
+subtitle:
+author: Runzhi Hu
+#image: images/project/huawei_mapping.gif
+tags:
+order:
+---
+
+
+A simple Python GNSS interface for positioning.
+
+
+
+This library is based on [pyrtklib](https://github.com/IPNL-POLYU/pyrtklib) and provides a simple, modern interface. For more details, please refer to our repository [tasgnss](https://github.com/PolyU-TASLAB/TASGNSS) and [documentation](https://polyu-taslab.github.io/TASGNSS/).
+
+For a quick install, you can use pip:
+```bash
+pip install tasgnss
+```
+
+
+
+
+
\ No newline at end of file
diff --git a/_opensource/2025-11-11-tc-viml.md b/_opensource/2025-11-11-tc-viml.md
new file mode 100644
index 00000000..f389eb03
--- /dev/null
+++ b/_opensource/2025-11-11-tc-viml.md
@@ -0,0 +1,43 @@
+---
+title: TC-VIML
+subtitle:
+author: Xi Zheng
+image: images/opensource/zhengxi/framework.png
+tags:
+order:
+---
+
+
+Tightly-coupled Visual/Inertial/Map Integration with Observability Analysis for Reliable Localization of Intelligent Vehicles
+
+
+To enable reliable and drift-free localization for intelligent vehicles, we propose a tightly-coupled visual-inertial odometry (VIO) system that leverages a 3D prior line map. Unlike loosely-coupled methods, our approach deeply integrates line features into a factor graph optimization, supported by a robust cross-modality matching and outlier rejection strategy. For the first time, we rigorously prove that our system achieves full observability in global translation, leaving only the yaw angle unobservable. Evaluations in both simulated and real-world environments confirm the system's effectiveness.
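+
+Conceptually, the back end solves a nonlinear least-squares problem in which map-line residuals enter alongside the standard VIO terms (a generic sketch, not the paper's exact notation):
+
+```latex
+\min_{\mathcal{X}} \; \|r_{\mathrm{prior}}\|^{2}
++ \sum_{k} \|r_{\mathrm{IMU},k}\|^{2}_{\Sigma_{\mathrm{IMU}}}
++ \sum_{i} \|r_{\mathrm{feat},i}\|^{2}_{\Sigma_{\mathrm{feat}}}
++ \sum_{j} \|r_{\mathrm{line},j}\|^{2}_{\Sigma_{\mathrm{line}}}
+```
+
+The last term couples detected 2D line features to matched 3D map lines; it is this coupling that anchors the otherwise drifting VIO states to the prior map and yields the observability result above.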
+
+For details, please refer to our official repository at [TC-VIML](https://github.com/ZHENGXi-git/TC-VIML).
+
+
+If you use this code, please cite our papers:
+```latex
+@article{zheng2024tightly,
+ title={Tightly-coupled visual/inertial/map integration with observability analysis for reliable localization of intelligent vehicles},
+ author={Zheng, Xi and Wen, Weisong and Hsu, Li-Ta},
+ journal={IEEE Transactions on Intelligent Vehicles},
+ year={2024},
+ publisher={IEEE}
+}
+
+@inproceedings{zheng2023tightly,
+ title={Tightly-coupled line feature-aided visual inertial localization within lightweight 3d prior map for intelligent vehicles},
+ author={Zheng, Xi and Wen, Weisong and Hsu, Li-Ta},
+ booktitle={2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC)},
+ pages={6019--6026},
+ year={2023},
+ organization={IEEE}
+}
+```
+
+
+
+
+
+
diff --git a/_opensource/2025-11-11-tdl-gnss.md b/_opensource/2025-11-11-tdl-gnss.md
new file mode 100644
index 00000000..2cbf6e57
--- /dev/null
+++ b/_opensource/2025-11-11-tdl-gnss.md
@@ -0,0 +1,38 @@
+---
+title: TDL-GNSS
+subtitle:
+author: Runzhi Hu
+image: images/papers/2024/runzhi2024pyrtklib.png
+tags:
+order:
+---
+
+
+A tightly coupled deep learning framework for GNSS.
+
+
+
+This framework is built on pyrtklib and TASGNSS, designed to tightly integrate deep learning into the GNSS (Global Navigation Satellite System) processing workflow. For details, please refer to our official repository at [TDL-GNSS](https://github.com/ebhrz/TDL-GNSS).
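+
+To make the "tightly coupled" idea concrete, here is a self-contained toy sketch (not the repository's code): a small network maps per-satellite features to weights, and a differentiable weighted least squares solves for position, so the training loss can backpropagate through the fix into the network.
+
+```python
+# Toy sketch of tightly coupled deep learning + GNSS: learned per-satellite
+# weights feed a differentiable weighted least squares (WLS) position solver.
+import torch
+import torch.nn as nn
+
+class WeightNet(nn.Module):
+    def __init__(self, n_feat=4):
+        super().__init__()
+        self.mlp = nn.Sequential(nn.Linear(n_feat, 32), nn.ReLU(),
+                                 nn.Linear(32, 1), nn.Softplus())
+    def forward(self, feats):                 # feats: (n_sat, n_feat), e.g. C/N0, elevation
+        return self.mlp(feats).squeeze(-1)    # (n_sat,) positive weights
+
+def wls_fix(G, rho, w):
+    """Differentiable WLS: solve (G^T W G) x = G^T W rho."""
+    W = torch.diag(w)
+    return torch.linalg.solve(G.T @ W @ G, G.T @ W @ rho)
+
+net = WeightNet()
+G = torch.randn(8, 4)       # geometry matrix (unit LOS vectors + clock column)
+rho = torch.randn(8)        # pseudorange residuals
+feats = torch.randn(8, 4)   # per-satellite features
+x = wls_fix(G, rho, net(feats))
+loss = x[:3].norm()         # e.g. error against a ground-truth position
+loss.backward()             # gradients reach the weight network
+```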
+
+
+You can access the preprint version of our paper on arXiv and the published version on IEEE Xplore. We would greatly appreciate it if you cite our work:
+
+```latex
+@ARTICLE{10965937,
+  author={Hu, Runzhi and Xu, Penghui and Zhong, Yihan and Wen, Weisong},
+  journal={IEEE Transactions on Intelligent Transportation Systems},
+  title={pyrtklib: An Open-Source Package for Tightly Coupled Deep Learning and GNSS Integration for Positioning in Urban Canyons},
+  year={2025},
+  volume={26},
+  number={7},
+  pages={10652-10662},
+  keywords={Global navigation satellite system;Deep learning;Python;Artificial intelligence;Weight measurement;Satellites;Receivers;Mathematical models;Training;Position measurement;Artificial intelligence;deep learning;GNSS;RTKLIB},
+  doi={10.1109/TITS.2025.3552691}}
+```
+
+
+
\ No newline at end of file
diff --git a/_opensource/2026-01-17-TasFusion.md b/_opensource/2026-01-17-TasFusion.md
new file mode 100644
index 00000000..d02d5f42
--- /dev/null
+++ b/_opensource/2026-01-17-TasFusion.md
@@ -0,0 +1,50 @@
+---
+title: TasFusion
+subtitle:
+author: ZHAO Jiaqi
+image: images/opensource/TasFusion/demo.gif
+tags:
+order:
+---
+
+
+
+
+
+
+[TasFusion](https://github.com/PolyU-TASLAB/TasFusion.git) is a ROS1 package designed for multi-sensor navigation. Its core functionality provides a Ceres-based GNSS/IMU loosely coupled sliding-window optimization framework, along with supporting tools including GNSS message definitions, NLOS exclusion utilities, a NovAtel driver, and NMEA ROS parsing scripts.
+
+The central sensor-fusion node supports IMU pre-integration, online bias estimation, marginalization to preserve historical information, and GPS position/velocity constraints. All major functions can be flexibly enabled or disabled through parameters configured in launch files.
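+
+In generic sliding-window form (a sketch of the idea, not a line-by-line description of the code), each update solves
+
+```latex
+\min_{\mathcal{X}_{k-N:k}} \; \|r_{\mathrm{marg}}\|^{2}
++ \sum_{i} \|r_{\mathrm{IMU},i}\|^{2}_{\Sigma_{\mathrm{IMU}}}
++ \sum_{i} \|r_{\mathrm{GNSS},i}\|^{2}_{\Sigma_{\mathrm{GNSS}}}
+```
+
+over the most recent N states, where the marginalization prior r_marg preserves the information of states dropped from the window, the IMU terms come from pre-integration between consecutive states, and the GNSS terms constrain absolute position and velocity.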
+
+
+GitHub Repository: https://github.com/PolyU-TASLAB/TasFusion.git
+
+> Reference Hardware Platform ([Introduction Video](https://www.bilibili.com/video/BV1fiaqzNEEm)):
+>
+> TasFusion has been validated on a GNSS-IMU-4G integrated navigation module (dual-IMU + u-blox F9P-04B + 4G uplink), providing high-frequency measurements and reliable telemetry for outdoor deployments.
+>
+> For inquiries regarding this hardware platform, please contact **hbwu@hkpolyu-wxresearch.cn**.
+
+
diff --git a/_posts/2025-01-01-Our_Autonomous_Platforms.md b/_posts/2025-01-01-Our_Autonomous_Platforms.md
index 9a893b8b..b3c47aa6 100644
--- a/_posts/2025-01-01-Our_Autonomous_Platforms.md
+++ b/_posts/2025-01-01-Our_Autonomous_Platforms.md
@@ -1,63 +1,145 @@
---
title: Our Autonomous Platforms
-# subtitle: Knowledge Transfer to Unmanned Autonomous Systems
+# subtitle: End-to-End AI-Powered Self-Driving Systems
# author: Zhang Ziqi
image: images/project/Vehicle/ADV.png
tags: Autonomous-Driving
order:
---
-Demonstration of our Autonomous Driving Vehicles and their onboard sensor platforms.
+
+Our cutting-edge research platforms for end-to-end AI self-driving, where neural networks learn to drive directly from sensor data to control outputs.
+
+## What is End-to-End AI Self-Driving?
+
+End-to-end AI self-driving represents a paradigm shift in autonomous vehicle technology. Unlike traditional modular pipelines that break down driving into separate perception, prediction, planning, and control modules, end-to-end approaches use deep neural networks to learn the entire driving task holistically—directly mapping raw sensor inputs to vehicle control commands.
+
+This revolutionary approach offers several key advantages:
+
+**Direct Sensor-to-Control Learning**: Neural networks process multi-modal sensor data (cameras, LiDAR, GNSS) and output steering angles, throttle, and brake commands in a single forward pass, eliminating the error propagation inherent in modular systems.
+
+**Learned Representations**: Rather than hand-crafting features and rules, the network automatically discovers optimal internal representations of the driving environment, capturing subtle patterns that human engineers might miss.
+
+**Data-Driven Adaptation**: End-to-end models continuously improve through exposure to diverse driving scenarios, learning complex behaviors like defensive driving, traffic flow prediction, and context-aware decision-making from demonstration data.
+
+**Unified Optimization**: The entire driving pipeline is optimized jointly using gradient-based learning, ensuring that perception and control work synergistically rather than as isolated components.
+
+Our research explores multiple end-to-end architectures—from imitation learning systems that mimic expert drivers to reinforcement learning agents that discover optimal policies through trial and error in simulation, then transfer to real-world deployment.
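+
+As a toy illustration of the direct sensor-to-control mapping (the architecture, input size, and outputs below are placeholders, not our deployed network):
+
+```python
+# Minimal end-to-end driving policy sketch: image in, control out.
+import torch
+import torch.nn as nn
+
+class TinyPolicy(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.encoder = nn.Sequential(
+            nn.Conv2d(3, 16, 5, stride=2), nn.ReLU(),
+            nn.Conv2d(16, 32, 5, stride=2), nn.ReLU(),
+            nn.AdaptiveAvgPool2d(1), nn.Flatten())
+        self.head = nn.Linear(32, 3)           # steering, throttle, brake
+
+    def forward(self, img):                    # img: (B, 3, H, W)
+        u = self.head(self.encoder(img))
+        steer = torch.tanh(u[:, 0])            # [-1, 1]
+        throttle = torch.sigmoid(u[:, 1])      # [0, 1]
+        brake = torch.sigmoid(u[:, 2])         # [0, 1]
+        return steer, throttle, brake
+
+policy = TinyPolicy()
+steer, throttle, brake = policy(torch.randn(1, 3, 224, 224))
+```
+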
## Introduction
-An autonomous car, also known as a self-driving vehicle, is a sophisticated mode of transportation that can perceive its environment and navigate without human intervention. These vehicles employ a variety of advanced technologies to achieve safe and efficient driving, making them a significant innovation in modern transportation.
+Autonomous vehicles represent the future of intelligent transportation, leveraging end-to-end AI architectures to transform raw sensor data into safe, human-like driving decisions. Our laboratory develops and deploys advanced self-driving systems that embody the latest breakthroughs in deep learning, computer vision, and robotics.
+
+At the core of our autonomous platforms is an integrated AI pipeline that processes multi-modal sensor streams—LiDAR point clouds, camera images, and GNSS/INS data—through sophisticated neural network architectures. These systems learn to simultaneously perceive the environment, predict future trajectories, and execute driving maneuvers in real time, working toward human-level performance in complex urban scenarios.
+
+The autonomous driving vehicle operates under comprehensive CANBUS control integrated with ROS2 middleware. Our AI control stack communicates seamlessly with the vehicle's MCU, translating high-level neural network outputs into low-level CAN signals for precise actuation. This architecture enables full drive-by-wire control including:
+
+- **Longitudinal control**: Acceleration and braking commands derived from learned policies
+- **Lateral control**: Steering angles predicted by end-to-end neural networks
+- **Mode management**: Automated gear shifting (D/P/R/N) based on mission planning
+- **Safety systems**: AI-monitored lighting, indicators, and fail-safe mechanisms
+
+This platform serves as our testbed for advancing AI-powered autonomous driving, from imitation learning and reinforcement learning to vision-language models for natural language navigation.
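+
+A minimal sketch of how a high-level command could enter this stack (the topic name and message type are assumptions for illustration; the deployed bridge uses our own CAN message definitions):
+
+```python
+# Hypothetical ROS2 sketch: publish a high-level velocity command that a
+# downstream CAN bridge node would translate into CANBUS frames.
+import rclpy
+from geometry_msgs.msg import Twist
+
+rclpy.init()
+node = rclpy.create_node("ai_command_demo")
+pub = node.create_publisher(Twist, "/vehicle/cmd_vel", 10)
+cmd = Twist()
+cmd.linear.x = 2.0      # target forward speed [m/s]
+cmd.angular.z = 0.1     # yaw rate [rad/s], mapped to steering by the bridge
+pub.publish(cmd)
+rclpy.shutdown()
+```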
+
+## End-to-End AI Architecture Components
+
+Our autonomous driving system implements a comprehensive end-to-end AI architecture comprising the following key components:
+
+### 1. Multi-Modal Perception Network
+**Function**: Fuses data from cameras, LiDAR, and GNSS/INS into unified spatial-temporal representations
+
+**Architecture**: Vision backbone (ResNet, EfficientNet, or Vision Transformers) for image feature extraction; PointNet++/VoxelNet for 3D point cloud processing; Multi-scale feature pyramid networks for detecting objects at various distances; Temporal fusion modules (ConvLSTM, 3D CNNs) for motion prediction
+
+**Outputs**: Bird's-eye-view (BEV) semantic maps, 3D object detections, drivable area segmentation, lane boundary predictions
+
+### 2. World Model & Prediction
+**Function**: Learns predictive models of how the environment evolves over time
+
+**Architecture**: Recurrent neural networks (GRU/LSTM) or Transformers for sequential prediction; Probabilistic trajectory forecasting for surrounding vehicles and pedestrians; Occupancy grid prediction for future scene states; Attention mechanisms for modeling agent-agent interactions
+
+**Outputs**: Multi-modal future trajectory distributions, predicted collision risks, uncertainty estimates
-A critical aspect of autonomous vehicles is their ability to sense and localize themselves within their surroundings. This capability is essential for navigating complex environments, avoiding obstacles, and making real-time driving decisions. Accurate sensing and localization allow autonomous cars to interpret data from their surroundings and respond appropriately to dynamic conditions.
+### 3. Planning & Decision-Making Network
+**Function**: Generates safe, comfortable, and efficient driving trajectories
-The autonomous driving vehicle operates under the comprehensive control of a CANBUS system. The host computer establishes a connection with the MCU, which is equipped with integrated ROS messaging capabilities. This integration allows the system to convert ROS messages into CAN signals, which are then transmitted to the MCU.
+**Architecture**: Hierarchical planning with high-level route planning and low-level trajectory optimization; Imitation learning from expert demonstrations (Behavioral Cloning, GAIL, DAgger); Reinforcement learning for reward-driven policy optimization (PPO, SAC, TD3); Cost volume networks for evaluating trajectory candidates; Attention-based reasoning for traffic rule compliance
+
+**Outputs**: Reference trajectories (waypoints with velocity profiles), discrete actions (lane changes, stops)
+
+### 4. Control Network
+**Function**: Executes planned trajectories through precise vehicle control
+
+**Architecture**: PID controllers enhanced with learned gain scheduling; Model Predictive Control (MPC) with learned dynamics models; Direct end-to-end control networks (steering/throttle/brake prediction); Residual learning to compensate for model uncertainties
+
+**Outputs**: Low-level commands (steering angle, throttle percentage, brake pressure)
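+
+For intuition, the simplest member of this family is a gain-scheduled PID, where a learned or hand-tuned schedule adapts the gains to vehicle speed (an illustrative sketch; the gains and schedule are placeholders):
+
+```python
+# Sketch of a speed-scheduled PID for lateral control: the proportional gain
+# is reduced at higher speeds, mimicking a learned gain schedule.
+class ScheduledPID:
+    def __init__(self, kp0=0.8, ki=0.01, kd=0.1, dt=0.05):
+        self.kp0, self.ki, self.kd, self.dt = kp0, ki, kd, dt
+        self.integral = 0.0
+        self.prev_err = 0.0
+
+    def step(self, err, speed):
+        kp = self.kp0 / (1.0 + 0.1 * max(speed, 0.0))  # schedule gain on speed
+        self.integral += err * self.dt
+        deriv = (err - self.prev_err) / self.dt
+        self.prev_err = err
+        return kp * err + self.ki * self.integral + self.kd * deriv
+
+pid = ScheduledPID()
+steer = pid.step(err=0.2, speed=8.0)   # cross-track error [m], speed [m/s]
+```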
+
+### 5. Safety & Verification Layer
+**Function**: Ensures AI decisions meet safety constraints and override when necessary
+
+**Components**: Learned safety filters using reachability analysis; Rule-based fallback systems for edge cases; Uncertainty-aware decision-making (epistemic and aleatoric uncertainty); Real-time monitoring and anomaly detection; Redundant sensor validation and fault diagnosis
+
+**Outputs**: Safety scores, intervention flags, fail-safe commands
+
+### 6. Continuous Learning Pipeline
+**Function**: Enables the system to improve from real-world deployment data
+
+**Components**: On-vehicle data logging (sensor streams, AI decisions, interventions); Offline reinforcement learning from logged experience; Active learning for identifying informative scenarios; Sim-to-real transfer learning using domain adaptation; Federated learning across vehicle fleet
+
+**Outputs**: Updated model weights, identified edge cases, performance metrics
-This architecture provides us with extensive access to the vehicle's functionalities. We can not only relay vital velocity information but also manage gear settings, including Drive (D), Park (P), Reverse (R), and Neutral (N). Additionally, the system enables control of various lighting functions, enhancing both safety and operational efficiency. Overall, this setup ensures seamless communication between components, facilitating precise control and monitoring of the vehicle’s performance.
## Sensor Platform
-Currently, our lab has two autonomous vehicles deployed on the PolyU Main Campus and the PolyU-Wuxi Research Institute. Both vehicles are equipped with unique sensors, including LiDAR, cameras, and integrated GNSS/INS, for localization and navigation.
+Our laboratory operates two autonomous vehicle testbeds—one at PolyU Main Campus and another at PolyU-Wuxi Research Institute—both equipped with production-grade sensor suites for multi-modal AI training and validation.
+
+The sensor configuration enables comprehensive environmental perception:
-Here is the sensor suite:
+| Sensor Type | Brand/Model | Specifications | AI Application |
+|-------------|-------------|----------------|----------------|
+| **LiDAR** | Robosense RS-LiDAR-32 | 32 channels, 200m range, 360° FOV, 30° vertical FOV, 10-20Hz | 3D point cloud processing for obstacle detection, semantic segmentation, and occupancy prediction |
+| **Cameras** | HikRobot Event Camera | 1280×720 resolution, 120dB HDR, 60fps, global shutter | Vision-based perception, lane detection, traffic sign recognition, end-to-end driving policy learning |
+| **GNSS/INS**| CHCNav GNSS/INS | Dual-frequency RTK, integrated IMU, cm-level accuracy | Ground-truth localization for supervised learning, map-based planning, sensor fusion validation |
-| Sensor Type | Brand/Model | Parameters |
-|-------------|-------------|------------|
-| **LiDAR** | Robosense RS-LiDAR-32 | 32 laser channels, 200m range, 360° horizontal FOV, 30° vertical FOV, 10Hz-20Hz scanning frequency |
-| **Cameras** | HikRobot Event camera | 1280x720 resolution, 120dB dynamic range, 60fps frame rate, global shutter |
-| **GNSS/INS**| CHCNav GNSS/INS | Dual-frequency GNSS receiver, integrated IMU, centimeter-level accuracy, real-time kinematic (RTK) support |
+This sensor fusion architecture provides redundant, complementary data streams that feed our end-to-end AI models, enabling robust perception under diverse weather and lighting conditions.
+## AI-Driven Autonomous Driving Demonstrations
-## ADV Demo Video
+### Real-World Testing: Campus Deployment
-### Testing
-
-[video] ADV in PolyU Campus
-
-[video] ADV in PolyU-Wuxi Research Institute
+
+[video] End-to-End AI Navigation — PolyU Campus
+
+[video] Autonomous Operation — PolyU-Wuxi Research Institute
+### AI Training Pipeline: CARLA Simulation
-
-### Carla Simulation Video
+Our AI models are pre-trained and validated in high-fidelity simulation environments before real-world deployment. Using CARLA simulator, we generate diverse driving scenarios for imitation learning, reinforcement learning, and domain adaptation research.
-
-[video] Carla Simulation
+
+[video] CARLA Simulation Environment — End-to-End AI Policy Learning
+
+
+
+## Research Team
+
+**Principal Investigator:**
+[Dr. Wen Weisong](https://polyu-taslab.github.io/members/Wen_Weisong.html) — Assistant Professor, Department of Aeronautical and Aviation Engineering, The Hong Kong Polytechnic University
-### Researcher
+**Core Researchers:**
+[Mr. Zhang Ziqi](https://polyu-taslab.github.io/members/Zhang_Ziqi.html) — PhD Student, End-to-End Learning & Sensor Fusion
+[Dr. Huang Feng](https://polyu-taslab.github.io/members/Huang_Feng.html) — Postdoctoral Researcher, Navigation & Localization
+
+---
-[Dr. Weisong Wen](https://polyu-taslab.github.io/members/Wen_Weisong.html), [Mr. Zhang Ziqi](https://polyu-taslab.github.io/members/Zhang_Ziqi.html), [Mr. Huang Feng](https://polyu-taslab.github.io/members/Huang_Feng.html)
+**Research Focus:** End-to-End Deep Learning, Vision-Language Navigation, Multi-Modal Sensor Fusion, Sim-to-Real Transfer, Safe Reinforcement Learning, Imitation Learning, World Models for Autonomous Driving
\ No newline at end of file
diff --git a/_sass/custom.scss b/_sass/custom.scss
new file mode 100644
index 00000000..c8f75f41
--- /dev/null
+++ b/_sass/custom.scss
@@ -0,0 +1,179 @@
+---
+title: Projects
+nav:
+ order: 3
+ tooltip:
+---
+
+# {% include icon.html icon="fa-solid fa-wrench" %}Projects
+
+## Our Projects
+
+Explore our latest projects and initiatives.
+
+{% include section.html %}
+
+{% include search-box.html %}
+
+{% include tags.html tags=site.tags %}
+
+{% include search-info.html %}
+
+{% include list.html data="posts" component="post-excerpt" %}
+
+
+
+
\ No newline at end of file
diff --git a/_styles/code.scss b/_styles/code.scss
index 4a50657e..cdcf4a0d 100644
--- a/_styles/code.scss
+++ b/_styles/code.scss
@@ -1,11 +1,28 @@
---
---
+@import url('https://fonts.googleapis.com/css2?family=Noto+Sans:wght@400;700&family=Roboto:wght@400;700&display=swap');
+
+body {
+ font-family: 'Noto Sans', 'Roboto', Arial, Helvetica, sans-serif;
+ font-size: 1.08rem;
+ color: #222;
+ background: #fafbfc;
+ line-height: 1.7;
+}
+
+h1, h2, h3, h4, h5, h6 {
+ font-family: 'Roboto', 'Noto Sans', Arial, Helvetica, sans-serif;
+ font-weight: 700;
+ letter-spacing: 0.01em;
+ color: #1a1a1a;
+}
+
pre,
code,
pre *,
code * {
- font-family: var(--code);
+ font-family: 'Fira Mono', 'Menlo', 'Monaco', 'Consolas', 'Liberation Mono', 'Courier New', monospace;
}
// inline code
diff --git a/_styles/search-box.scss b/_styles/search-box.scss
index 5f20a783..091d0dc2 100644
--- a/_styles/search-box.scss
+++ b/_styles/search-box.scss
@@ -24,3 +24,88 @@
color: var(--black);
border: none;
}
+
+/* Smart search suggestion styles */
+.search-suggestions {
+ position: absolute;
+ top: 100%;
+ left: 0;
+ width: 100%;
+ background: #fff;
+ border: 1px solid #ccc;
+ border-radius: 0 0 8px 8px;
+ box-shadow: 0 2px 8px rgba(0,0,0,0.08);
+ z-index: 10;
+ max-height: 220px;
+ overflow-y: auto;
+ display: none;
+ font-size: 1rem;
+}
+
+.suggestion-item {
+ display: flex;
+ flex-direction: column;
+ gap: 2px;
+ padding: 12px 18px;
+ cursor: pointer;
+ color: var(--black);
+ transition: background 0.2s;
+ border-bottom: 1px solid #f0f0f0;
+}
+.suggestion-item:last-child {
+ border-bottom: none;
+}
+.suggestion-item:hover {
+ background: var(--light-gray, #f5f5f5);
+}
+.suggestion-title {
+ font-weight: 600;
+ color: var(--primary, #1a73e8);
+ font-size: 1.05em;
+}
+.suggestion-tags {
+ font-size: 0.92em;
+ color: var(--secondary, #666);
+ margin-top: 2px;
+ display: flex;
+ flex-wrap: wrap;
+ gap: 6px;
+}
+.suggestion-tag {
+ background: var(--light-gray, #f5f5f5);
+ color: var(--primary, #1a73e8);
+ border-radius: 999px;
+ padding: 2px 10px;
+ font-size: 0.9em;
+}
+
+.search-box .search-input {
+ border-radius: 8px;
+ border: 1.5px solid #bdbdbd;
+ padding: 10px 14px;
+ font-size: 1.08rem;
+ box-sizing: border-box;
+ outline: none;
+ transition: border-color 0.2s, box-shadow 0.2s;
+}
+.search-box .search-input:focus {
+ border-color: var(--primary, #1a73e8);
+ box-shadow: 0 0 0 2px rgba(26,115,232,0.08);
+}
+
+.search-box button {
+ border-radius: 8px;
+ border: 1.5px solid #bdbdbd;
+ background: #f5f5f5;
+ color: var(--black);
+ font-size: 1.08rem;
+ cursor: pointer;
+ padding: 0 16px;
+ height: 38px;
+ margin-left: 8px;
+ transition: background 0.2s, border-color 0.2s;
+}
+.search-box button:hover {
+ background: var(--light-gray, #eaeaea);
+ border-color: var(--primary, #1a73e8);
+}
diff --git a/blog/index.md b/blog/index.md
index 1f1ce0d9..67ed8017 100644
--- a/blog/index.md
+++ b/blog/index.md
@@ -1,8 +1,8 @@
---
title: Blog
-nav:
- order: 5
- tooltip: Knowledge sharing
+# nav:
+# order: 5
+# tooltip: Knowledge sharing
---
# {% include icon.html icon="fa-solid fa-feather-pointed" %}Blog
diff --git a/contact/index.md b/contact/index.md
index 45849407..5c0d1ae9 100644
--- a/contact/index.md
+++ b/contact/index.md
@@ -10,11 +10,66 @@ nav:
### Openings
-We regularly have multiple openings for Postdoc/PhD/MPhil/RA/Internships (All year round) to work on research related to trustworthy autonomous systems in general, including UAV and self-driving cars. If you are a PolyU student (Undergraduate and MSc students seeking URIS or dissertation supervision) interested in working with me, feel free to drop me an email (together with your transcript and brief introduction) or walk into my office at room R820!
+
+We regularly have multiple openings for Postdoc/PhD/MPhil/RA/Internships (all year round) to work on research related to **AI-driven trustworthy autonomous systems**, with a focus on **end-to-end autonomous UAVs** and **end-to-end self-driving cars**. If you are a PolyU student (Undergraduate and MSc students seeking URIS or dissertation supervision) interested in working with me, feel free to drop me an email at **welson.wen@polyu.edu.hk** (together with your transcript and brief introduction) or walk into my office at **room R820**!
+
+---
+
+#### Postdoc/RA Positions (Regular Quotas)
+
+**Research areas include:**
+- Large AI models for autonomous systems
+- Foundation models for robotics
+- End-to-end learning for UAVs and self-driving cars
+- AI-enabled perception and control
+- Multimodal sensor fusion (LiDAR/Camera/IMU/GNSS)
+- Trustworthy AI for navigation and control
+- Vision-language models for drone perception
+- Deep reinforcement learning for UAV navigation
+- Semantic-aided positioning
+- Hardware-software co-design for next-generation navigation chips
+- Urban GNSS positioning (RTK, PPP, PPP-RTK)
+- Multi-agent collaborative positioning and control
+- Safety-certified AI systems
+
+**Application requirements:**
+For those interested, please send us your CV, representative publications list, and research statement/proposal to **welson.wen@polyu.edu.hk**. (We will reply to you within one week if you are shortlisted for an interview).
+
+**For any candidate, you MUST have at least one of the following:**
+1. A strong publication record in top-tier AI/robotics venues (e.g., NeurIPS, ICML, ICRA, IROS, CoRL, CVPR, ICCV); OR
+2. Strong capabilities in coding (proficient in C++ and/or Python, experience with PyTorch/TensorFlow/ROS); OR
+3. Strong capabilities in preparing competitive research proposals and securing funding.
+
+---
+
+#### PhD/MPhil Topics (Regular Quotas)
+
+**Core research themes:**
+- End-to-end learning for autonomous UAVs and self-driving cars
+- Safety-certifiable AI for perception, positioning, and control
+- Foundation models and large vision-language models for robotics
+- Multimodal learning for autonomous systems
+- AI-driven multi-sensor fusion and navigation
+- Deep reinforcement learning for UAV swarm coordination
+- Trustworthy AI for safety-critical applications
+
+For more specific topics we are working on, please refer to our [TAS Lab website](https://polyu-taslab.github.io/) and [project page](https://polyu-taslab.github.io/projects/).
+
+**Application requirements:**
+For those interested, please send us your CV, representative publications list (if any), and a brief research proposal/statement of research interests to **welson.wen@polyu.edu.hk**. (We will reply to you within one week if you are shortlisted for an interview).
+
+---
+
+#### What We Offer
-**Postdoc/RA positions** (Regular quotas): High precise perception positioning control with multi-sensory integration, autonomous systems, unmanned aerial vehicles (UAV), semantic aided positioning, map update and qualification, hardware-software co-design for next-generation navigation chips, urban GNSS positioning, GNSS RTK, PPP, PPP-RTK, multi-agent collaborative positioning. For those interested, please send us your CV, representative publications list, and research statement/proposal. (We will reply to you within one week if you are shortlisted for an interview). For any candidate, you MUST have at least one of the following properties: (1) a strong publication record! Or (2) strong capabilities in coding (at least C++ or Python) or hardware. Or (3) strong capabilities in preparing research proposals.
+- Access to cutting-edge UAV platforms, self-driving car testbeds, and GPU computing clusters
+- Collaboration with leading industry partners (Huawei, Tencent, Meituan, HONOR)
+- Opportunities to publish in top AI/robotics conferences and journals
+- A vibrant, diverse, and inclusive research environment with 30+ lab members
+- Funding support for conference travel and research equipment
-**PhD/MPhil topics** (Regular quotas): Safety-certifiable positioning, control, and perception for autonomous systems. For more topics we are working on, please refer to our project page. For those interested, please send us your CV, representative publications list, and research proposal. (We will reply to you within one week if you are shortlisted for an interview).
+**Application materials:** CV + Publications/Coding portfolio + Research statement → **welson.wen@polyu.edu.hk**
{%
diff --git a/images/news/0116_linxai/1.jpg b/images/news/0116_linxai/1.jpg
new file mode 100644
index 00000000..d5c5ca2a
Binary files /dev/null and b/images/news/0116_linxai/1.jpg differ
diff --git a/images/news/0116_linxai/2.jpg b/images/news/0116_linxai/2.jpg
new file mode 100644
index 00000000..2c145475
Binary files /dev/null and b/images/news/0116_linxai/2.jpg differ
diff --git a/images/news/0116_linxai/f96fcea426e5e7dc867a622af30f2335.jpg b/images/news/0116_linxai/f96fcea426e5e7dc867a622af30f2335.jpg
new file mode 100644
index 00000000..d5c5ca2a
Binary files /dev/null and b/images/news/0116_linxai/f96fcea426e5e7dc867a622af30f2335.jpg differ
diff --git a/images/news/0116_linxai/readme.md b/images/news/0116_linxai/readme.md
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/images/news/0116_linxai/readme.md
@@ -0,0 +1 @@
+
diff --git a/images/news/0119_simpleai/1.png b/images/news/0119_simpleai/1.png
new file mode 100644
index 00000000..f0df8d8a
Binary files /dev/null and b/images/news/0119_simpleai/1.png differ
diff --git a/images/news/0119_simpleai/2.png b/images/news/0119_simpleai/2.png
new file mode 100644
index 00000000..8d19c5d2
Binary files /dev/null and b/images/news/0119_simpleai/2.png differ
diff --git a/images/news/0119_simpleai/3.png b/images/news/0119_simpleai/3.png
new file mode 100644
index 00000000..e9dbe8eb
Binary files /dev/null and b/images/news/0119_simpleai/3.png differ
diff --git a/images/news/0119_simpleai/4.png b/images/news/0119_simpleai/4.png
new file mode 100644
index 00000000..fe07a83f
Binary files /dev/null and b/images/news/0119_simpleai/4.png differ
diff --git a/images/news/0119_simpleai/readme.md b/images/news/0119_simpleai/readme.md
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/images/news/0119_simpleai/readme.md
@@ -0,0 +1 @@
+
diff --git a/images/news/0914MarsTalk/Chen.jpg b/images/news/0914MarsTalk/Chen.jpg
new file mode 100644
index 00000000..5a107f4e
Binary files /dev/null and b/images/news/0914MarsTalk/Chen.jpg differ
diff --git a/images/news/0914MarsTalk/GroupTas.jpg b/images/news/0914MarsTalk/GroupTas.jpg
new file mode 100644
index 00000000..a5071388
Binary files /dev/null and b/images/news/0914MarsTalk/GroupTas.jpg differ
diff --git a/images/news/0914MarsTalk/GroupTas2.jpg b/images/news/0914MarsTalk/GroupTas2.jpg
new file mode 100644
index 00000000..986b022f
Binary files /dev/null and b/images/news/0914MarsTalk/GroupTas2.jpg differ
diff --git a/images/news/0914MarsTalk/WenWeisong.jpg b/images/news/0914MarsTalk/WenWeisong.jpg
new file mode 100644
index 00000000..668ee6e0
Binary files /dev/null and b/images/news/0914MarsTalk/WenWeisong.jpg differ
diff --git a/images/news/0914MarsTalk/marstalk.jpg b/images/news/0914MarsTalk/marstalk.jpg
new file mode 100644
index 00000000..a724b6a0
Binary files /dev/null and b/images/news/0914MarsTalk/marstalk.jpg differ
diff --git a/images/news/0916TALK/image1.png b/images/news/0916TALK/image1.png
new file mode 100755
index 00000000..982a2481
Binary files /dev/null and b/images/news/0916TALK/image1.png differ
diff --git a/images/news/0916TALK/image2.png b/images/news/0916TALK/image2.png
new file mode 100755
index 00000000..66c2207d
Binary files /dev/null and b/images/news/0916TALK/image2.png differ
diff --git a/images/news/0916TALK/image3.jpg b/images/news/0916TALK/image3.jpg
new file mode 100755
index 00000000..cbeb98ae
Binary files /dev/null and b/images/news/0916TALK/image3.jpg differ
diff --git a/images/news/0916TALK/image4.png b/images/news/0916TALK/image4.png
new file mode 100644
index 00000000..250f80ba
Binary files /dev/null and b/images/news/0916TALK/image4.png differ
diff --git a/images/news/0929CampusFlight/CAD_exam.jpg b/images/news/0929CampusFlight/CAD_exam.jpg
new file mode 100644
index 00000000..4754f252
Binary files /dev/null and b/images/news/0929CampusFlight/CAD_exam.jpg differ
diff --git a/images/news/0929CampusFlight/Drone_Flight.jpg b/images/news/0929CampusFlight/Drone_Flight.jpg
new file mode 100644
index 00000000..1b7cad41
Binary files /dev/null and b/images/news/0929CampusFlight/Drone_Flight.jpg differ
diff --git a/images/news/0929CampusFlight/Drone_Flight_2.jpg b/images/news/0929CampusFlight/Drone_Flight_2.jpg
new file mode 100644
index 00000000..8d8440f0
Binary files /dev/null and b/images/news/0929CampusFlight/Drone_Flight_2.jpg differ
diff --git a/images/news/0929CampusFlight/Examine_drone.jpg b/images/news/0929CampusFlight/Examine_drone.jpg
new file mode 100644
index 00000000..d399a507
Binary files /dev/null and b/images/news/0929CampusFlight/Examine_drone.jpg differ
diff --git a/images/news/0930SouthernPower/image.png b/images/news/0930SouthernPower/image.png
new file mode 100644
index 00000000..7d513a0f
Binary files /dev/null and b/images/news/0930SouthernPower/image.png differ
diff --git a/images/news/1010ZYYTITIS/1.png b/images/news/1010ZYYTITIS/1.png
new file mode 100644
index 00000000..0a2571ac
Binary files /dev/null and b/images/news/1010ZYYTITIS/1.png differ
diff --git a/images/news/1010ZYYTITIS/framework.png b/images/news/1010ZYYTITIS/framework.png
new file mode 100644
index 00000000..df2327d8
Binary files /dev/null and b/images/news/1010ZYYTITIS/framework.png differ
diff --git a/images/news/1010ZYYTITIS/test.png b/images/news/1010ZYYTITIS/test.png
new file mode 100644
index 00000000..47a985e7
Binary files /dev/null and b/images/news/1010ZYYTITIS/test.png differ
diff --git a/images/news/1017NeiMengGuVisit/image1.jpg b/images/news/1017NeiMengGuVisit/image1.jpg
new file mode 100644
index 00000000..7139e240
Binary files /dev/null and b/images/news/1017NeiMengGuVisit/image1.jpg differ
diff --git a/images/news/1017NeiMengGuVisit/image2.jpg b/images/news/1017NeiMengGuVisit/image2.jpg
new file mode 100644
index 00000000..08ef9a9e
Binary files /dev/null and b/images/news/1017NeiMengGuVisit/image2.jpg differ
diff --git a/images/news/1017NeiMengGuVisit/image3.jpg b/images/news/1017NeiMengGuVisit/image3.jpg
new file mode 100644
index 00000000..1c03fa2b
Binary files /dev/null and b/images/news/1017NeiMengGuVisit/image3.jpg differ
diff --git a/images/news/1017NeiMengGuVisit/image4.jpg b/images/news/1017NeiMengGuVisit/image4.jpg
new file mode 100644
index 00000000..96fc336c
Binary files /dev/null and b/images/news/1017NeiMengGuVisit/image4.jpg differ
diff --git a/images/news/1017NeiMengGuVisit/image5.jpg b/images/news/1017NeiMengGuVisit/image5.jpg
new file mode 100644
index 00000000..89cacf18
Binary files /dev/null and b/images/news/1017NeiMengGuVisit/image5.jpg differ
diff --git a/images/news/1017NeiMengGuVisit/image6.jpg b/images/news/1017NeiMengGuVisit/image6.jpg
new file mode 100644
index 00000000..f3605a69
Binary files /dev/null and b/images/news/1017NeiMengGuVisit/image6.jpg differ
diff --git a/images/news/1103fangwu/fangwu1.jpg b/images/news/1103fangwu/fangwu1.jpg
new file mode 100644
index 00000000..ffcd8f3b
Binary files /dev/null and b/images/news/1103fangwu/fangwu1.jpg differ
diff --git a/images/news/1103fangwu/fangwu2.jpg b/images/news/1103fangwu/fangwu2.jpg
new file mode 100644
index 00000000..c6262d2a
Binary files /dev/null and b/images/news/1103fangwu/fangwu2.jpg differ
diff --git a/images/news/1110_heu/heu1.jpg b/images/news/1110_heu/heu1.jpg
new file mode 100644
index 00000000..7995277a
Binary files /dev/null and b/images/news/1110_heu/heu1.jpg differ
diff --git a/images/news/1110_heu/heu2.jpg b/images/news/1110_heu/heu2.jpg
new file mode 100644
index 00000000..7ff7f737
Binary files /dev/null and b/images/news/1110_heu/heu2.jpg differ
diff --git a/images/news/1110_heu/heu3.jpg b/images/news/1110_heu/heu3.jpg
new file mode 100644
index 00000000..028cbaff
Binary files /dev/null and b/images/news/1110_heu/heu3.jpg differ
diff --git a/images/news/1110_heu/heu4.jpg b/images/news/1110_heu/heu4.jpg
new file mode 100644
index 00000000..e7b5307b
Binary files /dev/null and b/images/news/1110_heu/heu4.jpg differ
diff --git a/images/news/1110_heu/heu5.jpg b/images/news/1110_heu/heu5.jpg
new file mode 100644
index 00000000..dd752cd9
Binary files /dev/null and b/images/news/1110_heu/heu5.jpg differ
diff --git a/images/news/1112HK_BJ_sym/1.jpg b/images/news/1112HK_BJ_sym/1.jpg
new file mode 100644
index 00000000..067627ed
Binary files /dev/null and b/images/news/1112HK_BJ_sym/1.jpg differ
diff --git a/images/news/1112HK_BJ_sym/2.jpg b/images/news/1112HK_BJ_sym/2.jpg
new file mode 100644
index 00000000..5ae6a5f6
Binary files /dev/null and b/images/news/1112HK_BJ_sym/2.jpg differ
diff --git a/images/news/1112HK_BJ_sym/3.jpg b/images/news/1112HK_BJ_sym/3.jpg
new file mode 100644
index 00000000..9c27490f
Binary files /dev/null and b/images/news/1112HK_BJ_sym/3.jpg differ
diff --git a/images/news/1112HK_BJ_sym/4.jpg b/images/news/1112HK_BJ_sym/4.jpg
new file mode 100644
index 00000000..c7c2626d
Binary files /dev/null and b/images/news/1112HK_BJ_sym/4.jpg differ
diff --git a/images/news/1213Jinjiang/1.jpg b/images/news/1213Jinjiang/1.jpg
new file mode 100644
index 00000000..555ad4c7
Binary files /dev/null and b/images/news/1213Jinjiang/1.jpg differ
diff --git a/images/news/1213Jinjiang/2.jpg b/images/news/1213Jinjiang/2.jpg
new file mode 100644
index 00000000..5283ce38
Binary files /dev/null and b/images/news/1213Jinjiang/2.jpg differ
diff --git a/images/news/1213Jinjiang/3.jpg b/images/news/1213Jinjiang/3.jpg
new file mode 100644
index 00000000..e0a6f840
Binary files /dev/null and b/images/news/1213Jinjiang/3.jpg differ
diff --git a/images/news/1213Jinjiang/4.jpg b/images/news/1213Jinjiang/4.jpg
new file mode 100644
index 00000000..c682cb9f
Binary files /dev/null and b/images/news/1213Jinjiang/4.jpg differ
diff --git a/images/news/1213Jinjiang/5.jpg b/images/news/1213Jinjiang/5.jpg
new file mode 100644
index 00000000..9001e8f8
Binary files /dev/null and b/images/news/1213Jinjiang/5.jpg differ
diff --git a/images/news/1213Jinjiang/readme.md b/images/news/1213Jinjiang/readme.md
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/images/news/1213Jinjiang/readme.md
@@ -0,0 +1 @@
+
diff --git a/images/news/1218/demo.mp4 b/images/news/1218/demo.mp4
new file mode 100644
index 00000000..aa725aee
Binary files /dev/null and b/images/news/1218/demo.mp4 differ
diff --git a/images/news/1218/silent.jpg b/images/news/1218/silent.jpg
new file mode 100644
index 00000000..3ce53e47
Binary files /dev/null and b/images/news/1218/silent.jpg differ
diff --git a/images/news/1218/silent2.jpg b/images/news/1218/silent2.jpg
new file mode 100644
index 00000000..98ad79b7
Binary files /dev/null and b/images/news/1218/silent2.jpg differ
diff --git a/images/news/1219/image1.jpg b/images/news/1219/image1.jpg
new file mode 100644
index 00000000..14f51292
Binary files /dev/null and b/images/news/1219/image1.jpg differ
diff --git a/images/news/1219/image2.jpg b/images/news/1219/image2.jpg
new file mode 100644
index 00000000..fa37f9f6
Binary files /dev/null and b/images/news/1219/image2.jpg differ
diff --git a/images/news/1219/image3.jpg b/images/news/1219/image3.jpg
new file mode 100644
index 00000000..14f51292
Binary files /dev/null and b/images/news/1219/image3.jpg differ
diff --git a/images/news/1219/image4.jpg b/images/news/1219/image4.jpg
new file mode 100644
index 00000000..fc20f3b7
Binary files /dev/null and b/images/news/1219/image4.jpg differ
diff --git a/images/news/20251103_Zhengxi/zhengxi_oral_defense.jpg b/images/news/20251103_Zhengxi/zhengxi_oral_defense.jpg
new file mode 100644
index 00000000..8686e034
Binary files /dev/null and b/images/news/20251103_Zhengxi/zhengxi_oral_defense.jpg differ
diff --git a/images/news/20260113_RinoaiMoU/MoU.png b/images/news/20260113_RinoaiMoU/MoU.png
new file mode 100644
index 00000000..8ee06d8f
Binary files /dev/null and b/images/news/20260113_RinoaiMoU/MoU.png differ
diff --git a/images/news/IROS2025/oral_present.jpg b/images/news/IROS2025/oral_present.jpg
new file mode 100644
index 00000000..44f6dba4
Binary files /dev/null and b/images/news/IROS2025/oral_present.jpg differ
diff --git a/images/news/IROS2025/poster.jpg b/images/news/IROS2025/poster.jpg
new file mode 100644
index 00000000..9fd429a1
Binary files /dev/null and b/images/news/IROS2025/poster.jpg differ
diff --git a/images/news/IROS2025/poster_present.jpg b/images/news/IROS2025/poster_present.jpg
new file mode 100644
index 00000000..4d7c00d7
Binary files /dev/null and b/images/news/IROS2025/poster_present.jpg differ
diff --git a/images/news/IROS2025/robot.jpg b/images/news/IROS2025/robot.jpg
new file mode 100644
index 00000000..b125de46
Binary files /dev/null and b/images/news/IROS2025/robot.jpg differ
diff --git a/images/news/ITSC2025/group_photo.jpg b/images/news/ITSC2025/group_photo.jpg
new file mode 100644
index 00000000..56cdf628
Binary files /dev/null and b/images/news/ITSC2025/group_photo.jpg differ
diff --git a/images/news/ITSC2025/hsu.png b/images/news/ITSC2025/hsu.png
new file mode 100644
index 00000000..0cc7cb67
Binary files /dev/null and b/images/news/ITSC2025/hsu.png differ
diff --git a/images/news/ITSC2025/join.JPG b/images/news/ITSC2025/join.JPG
new file mode 100644
index 00000000..27f57ac0
Binary files /dev/null and b/images/news/ITSC2025/join.JPG differ
diff --git a/images/news/ITSC2025/kousik.jpg b/images/news/ITSC2025/kousik.jpg
new file mode 100644
index 00000000..97de3547
Binary files /dev/null and b/images/news/ITSC2025/kousik.jpg differ
diff --git a/images/news/ITSC2025/kousik.png b/images/news/ITSC2025/kousik.png
new file mode 100644
index 00000000..0f2893c2
Binary files /dev/null and b/images/news/ITSC2025/kousik.png differ
diff --git a/images/news/ITSC2025/shan.JPG b/images/news/ITSC2025/shan.JPG
new file mode 100644
index 00000000..86942ad4
Binary files /dev/null and b/images/news/ITSC2025/shan.JPG differ
diff --git a/images/news/ITSC2025/tim.png b/images/news/ITSC2025/tim.png
new file mode 100644
index 00000000..c4d33ff4
Binary files /dev/null and b/images/news/ITSC2025/tim.png differ
diff --git a/images/news/ITSC2025/zhangfu.png b/images/news/ITSC2025/zhangfu.png
new file mode 100644
index 00000000..ba41c3ad
Binary files /dev/null and b/images/news/ITSC2025/zhangfu.png differ
diff --git a/images/news/ITSC2025/zhou.JPG b/images/news/ITSC2025/zhou.JPG
new file mode 100644
index 00000000..75cd7338
Binary files /dev/null and b/images/news/ITSC2025/zhou.JPG differ
diff --git a/images/news/Shougang/shougang1.png b/images/news/Shougang/shougang1.png
new file mode 100644
index 00000000..ddba5bf2
Binary files /dev/null and b/images/news/Shougang/shougang1.png differ
diff --git a/images/news/Shougang/shougang2.jpg b/images/news/Shougang/shougang2.jpg
new file mode 100644
index 00000000..6cda0e1b
Binary files /dev/null and b/images/news/Shougang/shougang2.jpg differ
diff --git a/images/news/Shougang/shougang3.jpg b/images/news/Shougang/shougang3.jpg
new file mode 100644
index 00000000..6d502d43
Binary files /dev/null and b/images/news/Shougang/shougang3.jpg differ
diff --git a/images/news/Shougang/shougang4.jpg b/images/news/Shougang/shougang4.jpg
new file mode 100644
index 00000000..a8b419e7
Binary files /dev/null and b/images/news/Shougang/shougang4.jpg differ
diff --git a/images/news/Ubeat/image1.png b/images/news/Ubeat/image1.png
new file mode 100644
index 00000000..7490ab2c
Binary files /dev/null and b/images/news/Ubeat/image1.png differ
diff --git a/images/news/Ubeat/image2.png b/images/news/Ubeat/image2.png
new file mode 100644
index 00000000..bd484ea5
Binary files /dev/null and b/images/news/Ubeat/image2.png differ
diff --git a/images/news/Ubeat/image3.png b/images/news/Ubeat/image3.png
new file mode 100644
index 00000000..0923d9f3
Binary files /dev/null and b/images/news/Ubeat/image3.png differ
diff --git a/images/news/nanjingjiangning/1.jpg b/images/news/nanjingjiangning/1.jpg
new file mode 100644
index 00000000..fbdf4a3e
Binary files /dev/null and b/images/news/nanjingjiangning/1.jpg differ
diff --git a/images/news/nanjingjiangning/2.jpg b/images/news/nanjingjiangning/2.jpg
new file mode 100644
index 00000000..a88a915e
Binary files /dev/null and b/images/news/nanjingjiangning/2.jpg differ
diff --git a/images/news/ruijie_rttlio.png b/images/news/ruijie_rttlio.png
new file mode 100644
index 00000000..09d9cc5c
Binary files /dev/null and b/images/news/ruijie_rttlio.png differ
diff --git a/images/opensource/HDMap/garage.png b/images/opensource/HDMap/garage.png
new file mode 100644
index 00000000..46bae70d
Binary files /dev/null and b/images/opensource/HDMap/garage.png differ
diff --git a/images/opensource/HDMap/garage_half.gif b/images/opensource/HDMap/garage_half.gif
new file mode 100644
index 00000000..3355baca
Binary files /dev/null and b/images/opensource/HDMap/garage_half.gif differ
diff --git a/images/opensource/TDL-GNSS/TDL_structure.png b/images/opensource/TDL-GNSS/TDL_structure.png
new file mode 100644
index 00000000..cf5b2a93
Binary files /dev/null and b/images/opensource/TDL-GNSS/TDL_structure.png differ
diff --git a/images/opensource/TasFusion/board.png b/images/opensource/TasFusion/board.png
new file mode 100644
index 00000000..96d1abe2
Binary files /dev/null and b/images/opensource/TasFusion/board.png differ
diff --git a/images/opensource/TasFusion/demo.gif b/images/opensource/TasFusion/demo.gif
new file mode 100644
index 00000000..5545affe
Binary files /dev/null and b/images/opensource/TasFusion/demo.gif differ
diff --git a/images/opensource/TasFusion/longdata.png b/images/opensource/TasFusion/longdata.png
new file mode 100644
index 00000000..83913d9d
Binary files /dev/null and b/images/opensource/TasFusion/longdata.png differ
diff --git a/images/opensource/kltdataset/NLOS_crop.gif b/images/opensource/kltdataset/NLOS_crop.gif
new file mode 100644
index 00000000..8713c039
Binary files /dev/null and b/images/opensource/kltdataset/NLOS_crop.gif differ
diff --git a/images/opensource/zhengxi/framework.png b/images/opensource/zhengxi/framework.png
new file mode 100644
index 00000000..2836ccd9
Binary files /dev/null and b/images/opensource/zhengxi/framework.png differ
diff --git a/images/opensource/zhengxi/framework2.png b/images/opensource/zhengxi/framework2.png
new file mode 100644
index 00000000..906fb328
Binary files /dev/null and b/images/opensource/zhengxi/framework2.png differ
diff --git a/images/team/Akida.jpg b/images/team/Akida.jpg
new file mode 100644
index 00000000..66ae6dc4
Binary files /dev/null and b/images/team/Akida.jpg differ
diff --git a/images/team/LiHeng.jpg b/images/team/LiHeng.jpg
new file mode 100644
index 00000000..0528a31b
Binary files /dev/null and b/images/team/LiHeng.jpg differ
diff --git a/images/team/fengchiZHU.jpg b/images/team/fengchiZHU.jpg
new file mode 100644
index 00000000..9c64422f
Binary files /dev/null and b/images/team/fengchiZHU.jpg differ
diff --git a/images/team/guangyanGuo.jpg b/images/team/guangyanGuo.jpg
new file mode 100644
index 00000000..e04da31d
Binary files /dev/null and b/images/team/guangyanGuo.jpg differ
diff --git a/images/team/hongchang.jpg b/images/team/hongchang.jpg
new file mode 100644
index 00000000..19009001
Binary files /dev/null and b/images/team/hongchang.jpg differ
diff --git a/images/team/jianhaojiao_pict_2023.jpg b/images/team/jianhaojiao_pict_2023.jpg
new file mode 100644
index 00000000..947ca729
Binary files /dev/null and b/images/team/jianhaojiao_pict_2023.jpg differ
diff --git a/images/team/wang_junzhe.png b/images/team/wang_junzhe.png
new file mode 100644
index 00000000..b887d04d
Binary files /dev/null and b/images/team/wang_junzhe.png differ
diff --git a/images/team/wang_xiangru.jpg b/images/team/wang_xiangru.jpg
index e4fa8a6e..4acbffad 100644
Binary files a/images/team/wang_xiangru.jpg and b/images/team/wang_xiangru.jpg differ
diff --git a/images/team/yang_mokui.jpg b/images/team/yang_mokui.jpg
new file mode 100644
index 00000000..94406e69
Binary files /dev/null and b/images/team/yang_mokui.jpg differ
diff --git a/images/team/zhongqi_wang.jpg b/images/team/zhongqi_wang.jpg
new file mode 100644
index 00000000..eb14d4df
Binary files /dev/null and b/images/team/zhongqi_wang.jpg differ
diff --git a/index.md b/index.md
index fe5bff19..33624e51 100644
--- a/index.md
+++ b/index.md
@@ -3,19 +3,15 @@
# PolyU TAS LAB's Website
-
+
The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront of pioneering advancements in autonomous systems (such as UAVs and self-driving cars) technology, emphasizing the importance of safety, reliability, and ethical standards. Our laboratory is home to a diverse group of researchers and engineers who specialize in artificial intelligence, robotics, cybersecurity, and human-system interaction. Together, we are committed to developing autonomous systems that inspire confidence and trust among users and stakeholders. Through collaborative efforts with industry partners, academic institutions, and policymakers, our team addresses the complex challenges of integrating autonomous systems into society, ensuring they operate transparently and responsibly.
-
{% include section.html %}
## Highlights
{% capture text %}
-
-
-
{%
include button.html
link="research"
@@ -24,7 +20,6 @@ The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront o
flip=true
style="bare"
%}
-
{% endcapture %}
{%
@@ -36,9 +31,6 @@ The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront o
%}
{% capture text %}
-
-
-
{%
include button.html
link="projects"
@@ -47,7 +39,6 @@ The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront o
flip=true
style="bare"
%}
-
{% endcapture %}
{%
@@ -61,9 +52,6 @@ The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront o
%}
{% capture text %}
-
-
-
{%
include button.html
link="team"
@@ -72,7 +60,6 @@ The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront o
flip=true
style="bare"
%}
-
{% endcapture %}
{%
@@ -81,6 +68,4 @@ The Trustworthy AI and Autonomous Systems (TAS) Laboratory is at the forefront o
link="team"
title="Our Team"
text=text
-%}
-
-
\ No newline at end of file
+%}
\ No newline at end of file
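For context, the index.md hunks above only trim stray blank lines inside the homepage's capture/section pattern. Reassembled from the fragments visible in the hunks, each highlight block looks roughly like the sketch below; parameters not shown in the hunks are omitted, so this is an approximation of the pattern, not the file's exact content:

    {% capture text %}
    {%
      include button.html
      link="team"
      flip=true
      style="bare"
    %}
    {% endcapture %}
    {%
      include section.html
      link="team"
      title="Our Team"
      text=text
    %}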
diff --git a/news/index.md b/news/index.md
index a0e2d9df..6e5eba24 100644
--- a/news/index.md
+++ b/news/index.md
@@ -5,7 +5,7 @@ nav:
tooltip: Recent News
---
-# {% include icon.html icon="fa-light fa-bullhorn" %}Events & News
+# {% include icon.html icon="fa-solid fa-wrench" %}Events & News
diff --git a/opensource/index.md b/opensource/index.md
new file mode 100644
index 00000000..2f9a29d7
--- /dev/null
+++ b/opensource/index.md
@@ -0,0 +1,21 @@
+---
+title: Open Source
+nav:
+ order: 5
+ tooltip: Dataset and code sharing
+---
+
+# {% include icon.html icon="fa-solid fa-code" %}Dataset and Code
+
+
+
+
+{% include section.html %}
+
+{% include search-box.html %}
+
+
+
+{% include search-info.html %}
+
+{% include list.html data="opensource" component="post-excerpt" style="rich" %}
\ No newline at end of file
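For context, the `list.html` include above renders entries from an `opensource` collection, so each dataset or code release presumably lives as a markdown file under `_opensource/` with post-style front matter. A minimal hypothetical entry, modeled on the site's conventions — the filename `_opensource/tdl-gnss.md` and all front-matter fields here are assumptions, though the image path is added elsewhere in this diff:

    ---
    title: TDL-GNSS
    image: images/opensource/TDL-GNSS/TDL_structure.png
    tags:
      - GNSS
      - deep learning
    ---

    Short description of the release, with a link to the code or dataset repository.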
diff --git a/projects/index.md b/projects/index.md
index 9084d468..9404ff7f 100644
--- a/projects/index.md
+++ b/projects/index.md
@@ -15,7 +15,41 @@ nav:
{% include search-box.html %}
-{% include tags.html tags=site.tags %}
+
+
+
+
+ {% include tags.html tags=site.tags %}
+
+
+
{% include search-info.html %}
diff --git a/research/index.md b/research/index.md
index 4f3eaf54..a8620dd0 100644
--- a/research/index.md
+++ b/research/index.md
@@ -5,7 +5,7 @@ nav:
tooltip: Published works
---
-# {% include icon.html icon="fa-solid fa-microscope" %}Publications
+# {% include icon.html icon="fa-solid fa-wrench" %}Publications
@@ -16,14 +16,13 @@ nav:
-
+
-
-
+
+
-
{% include section.html %}
## All
diff --git a/research/papers/2509.17198v1.pdf b/research/papers/2509.17198v1.pdf
new file mode 100644
index 00000000..869bda7e
Binary files /dev/null and b/research/papers/2509.17198v1.pdf differ
diff --git a/research/papers/2509.21496v1.pdf b/research/papers/2509.21496v1.pdf
new file mode 100644
index 00000000..2875f164
Binary files /dev/null and b/research/papers/2509.21496v1.pdf differ
diff --git a/research/papers/2510.00524v1.pdf b/research/papers/2510.00524v1.pdf
new file mode 100644
index 00000000..d1612079
Binary files /dev/null and b/research/papers/2510.00524v1.pdf differ
diff --git a/research/papers/2510.04278v1.pdf b/research/papers/2510.04278v1.pdf
new file mode 100644
index 00000000..9cee2f9d
Binary files /dev/null and b/research/papers/2510.04278v1.pdf differ
diff --git a/research/papers/2510.08880v1.pdf b/research/papers/2510.08880v1.pdf
new file mode 100644
index 00000000..2253c3b7
Binary files /dev/null and b/research/papers/2510.08880v1.pdf differ
diff --git a/research/papers/2512.20224v1.pdf b/research/papers/2512.20224v1.pdf
new file mode 100644
index 00000000..e44d1102
Binary files /dev/null and b/research/papers/2512.20224v1.pdf differ
diff --git a/research/papers/Learning_Safe_Optimal_and_Real-Time_Flight_Interaction_With_Deep_Confidence-Enhanced_Reachability_Guarantee.pdf b/research/papers/Learning_Safe_Optimal_and_Real-Time_Flight_Interaction_With_Deep_Confidence-Enhanced_Reachability_Guarantee.pdf
new file mode 100644
index 00000000..7ec3e2c8
Binary files /dev/null and b/research/papers/Learning_Safe_Optimal_and_Real-Time_Flight_Interaction_With_Deep_Confidence-Enhanced_Reachability_Guarantee.pdf differ
diff --git a/team/index.md b/team/index.md
index df8b51fa..9ac1387b 100644
--- a/team/index.md
+++ b/team/index.md
@@ -5,63 +5,141 @@ nav:
tooltip: About our team
---
-# {% include icon.html icon="fa-solid fa-users" %}Team
+# {% include icon.html icon="fa-solid fa-wrench" %}Team
-
+
+
+
Our lab is made up of a highly engaged and collaborative team of researchers. We recognize that diverse teams do better research. We foster an environment where team members are treated equally, and where we respect and admire our differences. The team includes postdocs, students at all levels, staff, and our lab mascots.
+---
-
-
-
+
+
+
+ Faculty (Principal Investigator)
+
+ {% include list_pi.html data="members" component="portrait_pi" filters="role == 'pi'" %}
-#### Faculty (Principal Investigator)
-{% include list_pi.html data="members" component="portrait_pi" filters="role == 'pi'" %}
-#### Postdoctoral Fellows
-{% include list_students.html data="members" component="portrait_students" filters="role == 'postdoc'" %}
-#### Ph.D./MPhil Students
-{% include list_students.html data="members" component="portrait_students" filters="role == 'phd'" %}
-
-{% include list_students.html data="members" component="portrait_students" filters="role == 'ms'" %}
-#### Research/Project Assistant
-{% include list_students.html data="members" component="portrait_students" filters="role == 'ra'" %}
-#### Undergraduate Students
-{% include list_students.html data="members" component="portrait_students" filters="role == 'under'" %}
-#### Visiting Scholar/Students
-{% include list_students.html data="members" component="portrait_students" filters="role == 'visiting'" %}
-#### Alumni
-{% include list_students.html data="members" component="portrait_students" filters="role == 'alumni'" %}
+
-To fulfill our mission to advance collaborative approaches and practical solutions to global poverty challenges, PolyU TAS Lab strives to foster diversity, equity, inclusion, and belonging in all we do.
+
-We strive to do so as a moral imperative and also because:
+---
-- Diversity drives richer ideas and solutions.
+#### Inclusion and Diversity
-- Equity ensures that all voices are heard and valued.
+To fulfill our mission to advance collaborative approaches and practical solutions to global poverty challenges, PolyU TAS Lab strives to foster diversity, equity, inclusion, and belonging in all we do.
-- Inclusion results in a seat at the decision-making table.
+We strive to do so as a moral imperative and also because:
-- Belonging means that we all feel welcome and confident in our roles.
+- Diversity drives richer ideas and solutions.
+- Equity ensures that all voices are heard and valued.
+- Inclusion results in a seat at the decision-making table.
+- Belonging means that we all feel welcome and confident in our roles.
As such, TAS Lab is committed to:
-- Dedicating time and creating safe spaces for people to voice diverse perspectives in decision making, teaching, research, and in our work with community partners.
+- Dedicating time and creating safe spaces for people to voice diverse perspectives in decision making, teaching, research, and in our work with community partners.
+- Acknowledging, working to understand, and repairing the power imbalances that have historically marginalized many voices, including in the field of international development.
+- Progressively becoming more diverse, equitable, and inclusive, and ultimately becoming an anti-racist organization.
-- Acknowledging, working to understand, and repairing the power imbalances that have historically marginalized many voices, including in the field of international development.
-
-- Progressively becoming more diverse, equitable, and inclusive, and ultimately becoming an anti-racist organization.
In this way, we aim for TAS Lab staff, students, and collaborators around the world to be able to design for a more equitable world.
-
+---
#### We are grateful for the continued support we receive from:
@@ -75,7 +153,6 @@ In this way, we aim for TAS Lab staff, students, and collaborators around the wo
-
@@ -85,9 +162,7 @@ In this way, we aim for TAS Lab staff, students, and collaborators around the wo
style="width: 100%; height: auto; object-fit: cover; max-width: 250px; margin: 30px auto; vertical-align: middle;">
-