
Commit 6c4711a
Update Ranran and favicon
Parent: 6d56e7b

7 files changed, 13 additions & 17 deletions

_bibliography/biblio.bib (9 additions, 7 deletions)

@@ -2,18 +2,20 @@
 ---
 @article{ranran2025nopose,
   title = {No Pose at All: Self-Supervised Pose-Free 3D Gaussian Splatting from Sparse Views},
+  url = {https://ranrhuang.github.io/spfsplat/},
+  paper_url = {https://www.arxiv.org/pdf/2508.01171},
   journal = {International Conference on Computer Vision},
   customauthor = {Ranran Huang, and Krystian Mikolajczyk},
   year = {2025},
   volume = {ICCV},
-  image = {/assets/images/publications/no_pose_at_all.png},
-  abstract = {We introduce SPFSplat, an efficient framework for 3D Gaussian splatting from sparse multi-view images, requiring \textbf{no ground-truth poses} during both training and inference. Our method simultaneously predicts Gaussians and camera poses from unposed images in a canonical space within a single feed-forward step. During training, the pose head estimates the poses at target views, which are supervised through the image rendering loss. Additionally, a reprojection loss is introduced to ensure alignment between Gaussians and the estimated poses of input views, reinforcing geometric consistency. This pose-free training paradigm and efficient one-step feed-forward inference makes SPFSplat well-suited for practical applications. Despite the absence of pose supervision, our self-supervised SPFSplat achieves state-of-the-art performance in novel view synthesis, even under significant viewpoint changes. Furthermore, it surpasses recent methods trained with geometry priors in relative pose estimation, demonstrating its effectiveness in both 3D scene reconstruction and camera pose learning.},
+  image = {/assets/images/publications/no_pose_at_all.gif},
+  abstract = {We introduce SPFSplat, an efficient framework for 3D Gaussian splatting from sparse multi-view images, requiring no ground-truth poses during training or inference. It employs a shared feature extraction backbone, enabling simultaneous prediction of 3D Gaussian primitives and camera poses in a canonical space from unposed inputs within a single feed-forward step. Alongside the rendering loss based on estimated novel-view poses, a reprojection loss is integrated to enforce the learning of pixel-aligned Gaussian primitives for enhanced geometric constraints. This pose-free training paradigm and efficient one-step feedforward design make SPFSplat well-suited for practical applications. Remarkably, despite the absence of pose supervision, SPFSplat achieves state-of-the-art performance in novel view synthesis even under significant viewpoint changes and limited image overlap. It also surpasses recent methods trained with geometry priors in relative pose estimation.},
 }

 @article{jing2025stereo,
   title={Stereo Any Video: Temporally Consistent Stereo Matching},
   url = {https://tomtomtommi.github.io/StereoAnyVideo/},
-  paper_url = {https://arxiv.org/abs/2503.05549},
+  paper_url = {https://arxiv.org/pdf/2503.05549},
   journal={International Conference on Computer Vision},
   customauthor = {Junpeng Jing, Weixun Luo, Ye Mao, and Krystian Mikolajczyk},
   year={2025},

@@ -25,7 +27,7 @@ @article{jing2025stereo
 @article{ye2025hypo,
   title = {Hypo3D: Exploring Hypothetical Reasoning in 3D},
   url = {https://matchlab-imperial.github.io/Hypo3D/},
-  paper_url = {https://arxiv.org/abs/2502.00954},
+  paper_url = {https://arxiv.org/pdf/2502.00954},
   abstract = {The rise of vision-language foundation models marks an advancement in bridging the gap between human and machine capabilities in 3D scene reasoning. Existing 3D reasoning benchmarks assume real-time scene accessibility, which is impractical due to the high cost of frequent scene updates. To this end, we introduce Hypothetical 3D Reasoning, namely Hypo3D, a benchmark designed to evaluate models' ability to reason without access to real-time scene data. Models need to imagine the scene state based on a provided change description before reasoning. Hypo3D is formulated as a 3D Visual Question Answering (VQA) benchmark, comprising 7,727 context changes across 700 indoor scenes, resulting in 14,885 question-answer pairs. An anchor-based world frame is established for all scenes, ensuring consistent reference to a global frame for directional terms in context changes and QAs. Extensive experiments show that state-of-the-art foundation models struggle to reason in hypothetically changed scenes. This reveals a substantial performance gap compared to humans, particularly in scenarios involving movement changes and directional reasoning. Even when the context change is irrelevant to the question, models often incorrectly adjust their answers.},
   journal = {International Conference on Machine Learning},
   customauthor = {Ye Mao, Weixun Luo, Junpeng Jing, Anlan Qiu, and Krystian Mikolajczyk},

@@ -37,7 +39,7 @@ @article{ye2025hypo
 @article{ye2024open,
   title = {OpenDlign: Open-World Point Cloud Understanding with Depth-Aligned Images},
   url = {https://yebulabula.github.io/OpenDlign/},
-  paper_url = {https://arxiv.org/abs/2404.16538},
+  paper_url = {https://arxiv.org/pdf/2404.16538},
   customauthor = {Ye Mao, Junpeng Jing, and Krystian Mikolajczyk},
   journal = {Advances in Neural Information Processing Systems},
   year = {2024},

@@ -50,7 +52,7 @@ @article{ye2024open
 @article{jing2024bidavideo,
   title = {Match Stereo Videos via Bidirectional Alignment},
   url = {https://tomtomtommi.github.io/BiDAVideo/},
-  paper_url = {https://arxiv.org/abs/2409.20283},
+  paper_url = {https://arxiv.org/pdf/2409.20283},
   customauthor = {Junpeng Jing, Ye Mao, Anlan Qiu, and Krystian Mikolajczyk},
   journal = {arXiv preprint},
   volume = {arXiv:2409.20283},

@@ -62,7 +64,7 @@ @article{jing2024bidavideo
 @article{jing2024bidastereo,
   title = {Match-Stereo-Videos: Bidirectional Alignment for Consistent Dynamic Stereo Matching},
   url = {https://tomtomtommi.github.io/BiDAStereo/},
-  paper_url = {https://arxiv.org/abs/2403.10755},
+  paper_url = {https://arxiv.org/pdf/2403.10755},
   customauthor = {Junpeng Jing, Ye Mao, and Krystian Mikolajczyk},
   journal = {European Conference on Computer Vision},
   year = {2024},

_data/site_nav.yml (0 additions, 4 deletions)

@@ -1,13 +1,9 @@
 # Site wide navigation
-# - name: Research
-#   url: "/research"
 - name: Publications
   url: "/publications"
 - name: Team
   url: "/team"
 - name: Teaching
   url: "/teaching"
-# - name: Resources
-#   url: "/resources"
 - name: Join
   url: "/join"

_includes/head.html (1 addition, 3 deletions)

@@ -2,9 +2,7 @@
 <meta charset="utf-8" />
 <meta http-equiv="x-ua-compatible" content="ie=edge">
 <meta name="viewport" content="width=device-width, initial-scale=1.0" />
-{% comment %}
-{%- seo -%}
-{% endcomment %}
+<link rel="icon" href="{{ '/assets/images/icons/matchlab.png' | relative_url }}" type="image/png">
 <link rel="stylesheet" href="{{ '/assets/css/app.css' | relative_url }}">
 <link rel="stylesheet" href="{{ '/assets/fonts/foundation-icons/foundation-icons.css' | relative_url }}">
 <link rel="preconnect" href="https://fonts.googleapis.com">

_layouts/biblio.html (2 additions, 2 deletions)

@@ -1,10 +1,10 @@
 ---
 ---
 <div class="grid-x grid-margin-x grid-margin-y">
-  <div class="cell small-12 medium-4 text-center">
+  <div class="cell small-12 medium-5 text-center">
     <img alt="{{ entry.title }}" src="{% if entry.image %}{% if entry.image contains '://' %}{{ entry.image }}{% else %}{{ site.url }}{{ entry.image }}{% endif %}{% else %}{% if page.publication_photo contains '://' %}{{ page.publication_photo }}{% else %}{{ site.url }}{{ page.publication_photo }}{% endif %}{% endif %}" style="height: 200px; width: auto;">
   </div>
-  <div class="cell small-12 medium-8">
+  <div class="cell small-12 medium-7">
     <div class="margin-bottom-3">
       {% if entry.abbr %}
         {% if site.data.venues[entry.abbr] %}

_team/ranran.md (1 addition, 1 deletion)

@@ -2,7 +2,7 @@
 name: Ranran Huang
 designation: Ph.D. (2024 ~ Present)
 photo: /assets/images/team/ranran_huang.jpeg
-website:
+website: https://ranrhuang.github.io/
 email: r.huang24@imperial.ac.uk
 order: 4
 ---

assets/images/icons/matchlab.png (binary, 1.14 MB)

Unnamed binary file (29.1 MB); from the biblio.bib change above, presumably assets/images/publications/no_pose_at_all.gif.
