Skip to content

Commit f0f2ea3

Browse files
committed
fixing error
1 parent b7ed51c commit f0f2ea3

1 file changed

Lines changed: 3 additions & 9 deletions

File tree

src/constants.ts

Lines changed: 3 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ export const RESEARCH_DATA: ProjectData = {
55
title: "AsyncEvGS: Asynchronous Event-Assisted Gaussian Splatting for Handheld Motion-Blurred Scenes",
66
conference: "Conference Name 202X (Oral/Poster)",
77
authors: [
8-
{ name: "Jun Dai", affiliation: "Shanghai AI Laboratory", url: "https://daijun10086.github.io/", isEqualContribution: true },
8+
{ name: "Jun Dai", affiliation: "Shanghai AI Laboratory", url: "https://daijun10086.github.io/", isEqualContribution: false },
99
{ name: "Renbiao Jin", affiliation: "Shanghai Jiao Tong University", url: "#", isEqualContribution: false },
1010
{ name: "Bo Xu", affiliation: "Shanghai Jiao Tong University", url: "#" },
1111
{ name: "Yutian Chen", affiliation: "CUHK", url: "#" },
@@ -14,21 +14,15 @@ export const RESEARCH_DATA: ProjectData = {
1414
{ name: "Tianfan Xue", affiliation: "CUHK", url: "#" },
1515
{ name: "Shi Guo", affiliation: "Shanghai AI Laboratory", url: "#" }
1616
],
17-
abstract: "3D reconstruction methods such as 3D Gaussian Splatting (3DGS) and Neural Radiance Fields (NeRF) achieve impressive photorealism but fail when input images suffer from severe motion blur. While event cameras provide high-temporal-resolution motion cues, existing event-assisted approaches rely on low-resolution sensors and strict synchronization, limiting their practicality for handheld 3D capture on common devices, such as smartphones. We introduce a flexible, high-resolution asynchronous RGB–Event dual-camera system and a corresponding reconstruction framework. Our approach first reconstructs sharp images from the event data and then employs a cross-domain pose estimation module based on the Visual Geometry Transformer (VGGT) to obtain robust initialization for 3DGS. During optimization, we employ a structure-driven event loss and view-specific consistency regularizers to mitigate the ill-posed behavior of traditional event losses and deblurring losses, ensuring both stable and high-fidelity reconstruction. We further contribute AsyncEv-Deblur, a new high-resolution RGB–Event dataset captured with our asynchronous system. Experiments demonstrate that our method achieves state-of-the-art performance on both our challenging dataset and existing benchmarks, substantially improving reconstruction robustness under severe motion blur.",
17+
abstract: `3D reconstruction methods such as 3D Gaussian Splatting (3DGS) and Neural Radiance Fields (NeRF) achieve impressive photorealism but fail when input images suffer from severe motion blur. While event cameras provide high-temporal-resolution motion cues, existing event-assisted approaches rely on low-resolution sensors and strict synchronization, limiting their practicality for handheld 3D capture on common devices, such as smartphones. We introduce a flexible, high-resolution asynchronous RGB–Event dual-camera system and a corresponding reconstruction framework. Our approach first reconstructs sharp images from the event data and then employs a cross-domain pose estimation module based on the Visual Geometry Transformer (VGGT) to obtain robust initialization for 3DGS. During optimization, we employ a structure-driven event loss and view-specific consistency regularizers to mitigate the ill-posed behavior of traditional event losses and deblurring losses, ensuring both stable and high-fidelity reconstruction. We further contribute AsyncEv-Deblur, a new high-resolution RGB–Event dataset captured with our asynchronous system. Experiments demonstrate that our method achieves state-of-the-art performance on both our challenging dataset and existing benchmarks, substantially improving reconstruction robustness under severe motion blur.`,
1818
links: [
1919
{ label: "Paper", url: "#", icon: "pdf" },
2020
{ label: "Code", url: "#", icon: "github" },
2121
{ label: "Video", url: "#", icon: "youtube" },
2222
{ label: "Dataset", url: "#", icon: "database" }
2323
],
2424
heroVideoUrl: "https://storage.googleapis.com/gtv-videos-bucket/sample/BigBuckBunny.mp4", // Replace with your project's teaser video URL
25-
methodDescription: `An overview of our proposed reconstruction pipeline. Our method takes blurred RGB images and sharp event streams as input. We first employ VGGT [1] to process both RGB and event images, providing robust initial camera poses and 3DGS points. The 3DGS representation is then jointly optimized using five key losses, broadly categorized into three groups:
26-
27-
** (1) Deblurring Losses:** The blur synthesis loss ($\mathcal{L}_{\text{blur}}$) matches the synthesized blur to the input, while an RGB consistency regularizer ($\mathcal{L}_{\text{reg-r}}$) prevents degradation of the sharp neighboring views.
28-
29-
** (2) Event-Guided Losses:** We augment the traditional photometric loss ($\mathcal{L}_{\text{evs}}$), with our novel structure loss ($\mathcal{L}_{\text{struct}}$) to robustly leverage high-frequency event details.
30-
31-
** (3) Consistency Loss ($\mathcal{L}_{\text{reg-e}}$):** A color distillation loss ensures that event views match the colors learned from a coarse (Stage 1) 3DGS copy.`,
25+
methodDescription: `An overview of our proposed reconstruction pipeline. Our method takes blurred RGB images and sharp event streams as input. We first employ VGGT [1] to process both RGB and event images, providing robust initial camera poses and 3DGS points. The 3DGS representation is then jointly optimized using five key losses, broadly categorized into three groups: **(1) Deblurring Losses:** The blur synthesis loss ($\\mathcal{L}_{\\text{blur}}$) matches the synthesized blur to the input, while an RGB consistency regularizer ($\\mathcal{L}_{\\text{reg-r}}$) prevents degradation of the sharp neighboring views. **(2) Event-Guided Losses:** We augment the traditional photometric loss ($\\mathcal{L}_{\\text{evs}}$) with our novel structure loss ($\\mathcal{L}_{\\text{struct}}$) to robustly leverage high-frequency event details. **(3) Consistency Loss ($\\mathcal{L}_{\\text{reg-e}}$):** A color distillation loss ensures that event views match the colors learned from a coarse (Stage 1) 3DGS copy.`,
3226
methodImageUrl: pipelineImg, // Replace with your pipeline diagram URL
3327
comparisons: [
3428
{

0 commit comments

Comments
 (0)