@inproceedings{e2956f60a9854a1d83d5f1f148c06d35,
  author    = {Fu, Zhiheng and Wang, Longguang and Xu, Lian and Wang, Zhiyong and Laga, Hamid and Guo, Yulan and Boussaid, Farid and Bennamoun, Mohammed},
  title     = {{VAPCNet}: Viewpoint-Aware {3D} Point Cloud Completion},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision},
  year      = {2023},
  pages     = {12074--12084},
  publisher = {IEEE},
  language  = {English},
  abstract  = {Most existing learning-based 3D point cloud completion methods ignore the fact that the completion process is highly coupled with the viewpoint of a partial scan. However, the various viewpoints of incompletely scanned objects in real-world applications are normally unknown and directly estimating the viewpoint of each incomplete object is usually time-consuming and leads to huge annotation cost. In this paper, we thus propose an unsupervised viewpoint representation learning scheme for 3D point cloud completion without explicit viewpoint estimation. To be specific, we learn abstract representations of partial scans to distinguish various viewpoints in the representation space rather than the explicit estimation in the 3D space. We also introduce a Viewpoint-Aware Point cloud Completion Network (VAPCNet) with flexible adaption to various viewpoints based on the learned representations. The proposed viewpoint representation learning scheme can extract discriminative representations to obtain accurate viewpoint information. Reported experiments on two popular public datasets show that our VAPCNet achieves state-of-the-art performance for the point cloud completion task. Source code is available at https://github.com/FZH92128/VAPCNet.},
  note      = {2023 International Conference on Computer Vision : ICCV2023 ; Conference date: 04-10-2023 Through 06-10-2023},
  url       = {https://iccv2023.thecvf.com/paris.convention.center-36700-3-13-7.php},
}