
Commit 03562d8

davidsonic authored and facebook-github-bot committed
Benchmark Cameras
Summary: Addresses review comments by adding benchmarks for the existing cameras and the new fisheye cameras. The dependency functions in test_cameras were updated in Diff 1. The following two snapshots show the benchmarking results.

Reviewed By: kjchalup

Differential Revision: D38991914

fbshipit-source-id: 51fe9bb7237543e4ee112c9f5068a4cf12a9d482
1 parent 2283c29 commit 03562d8

File tree: 2 files changed (+105, −6 lines)


tests/benchmarks/bm_cameras.py

Lines changed: 59 additions & 0 deletions
@@ -0,0 +1,59 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import itertools

from fvcore.common.benchmark import benchmark
from tests.test_cameras import TestCamerasCommon


def _setUp():
    case_grid = {
        "cam_type": [
            "OpenGLOrthographicCameras",
            "OpenGLPerspectiveCameras",
            "SfMOrthographicCameras",
            "SfMPerspectiveCameras",
            "FoVOrthographicCameras",
            "FoVPerspectiveCameras",
            "OrthographicCameras",
            "PerspectiveCameras",
            "FishEyeCameras",
        ],
        "batch_size": [1, 10],
        "num_points": [10, 100],
        "device": ["cpu", "cuda:0"],
    }
    test_cases = itertools.product(*case_grid.values())
    kwargs_list = [dict(zip(case_grid.keys(), case)) for case in test_cases]
    return kwargs_list


def _bm_cameras_project() -> None:
    kwargs_list = _setUp()
    benchmark(
        TestCamerasCommon.transform_points,
        "TEST_TRANSFORM_POINTS",
        kwargs_list,
    )


def _bm_cameras_unproject() -> None:
    kwargs_list = _setUp()
    benchmark(
        TestCamerasCommon.unproject_points,
        "TEST_UNPROJECT_POINTS",
        kwargs_list,
    )


def bm_cameras() -> None:
    _bm_cameras_project()
    _bm_cameras_unproject()


if __name__ == "__main__":
    bm_cameras()
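
For context (not part of the commit), here is a minimal sketch of what _setUp() produces: the full grid expands to 9 x 2 x 2 x 2 = 72 kwargs dicts, and fvcore's benchmark helper passes each dict as keyword arguments to the target, which is expected to return the closure that actually gets timed. The reduced case_grid below is an illustrative subset, not the one used above.

    # Illustrative sketch (assumption: mirrors _setUp above with a smaller grid).
    import itertools

    case_grid = {
        "cam_type": ["PerspectiveCameras", "FishEyeCameras"],
        "batch_size": [1, 10],
        "device": ["cpu", "cuda:0"],
    }
    kwargs_list = [
        dict(zip(case_grid.keys(), case))
        for case in itertools.product(*case_grid.values())
    ]
    print(len(kwargs_list))  # 2 * 2 * 2 = 8 benchmark cases
    print(kwargs_list[0])
    # {'cam_type': 'PerspectiveCameras', 'batch_size': 1, 'device': 'cpu'}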

tests/test_cameras.py

Lines changed: 46 additions & 6 deletions
@@ -36,6 +36,7 @@

 import numpy as np
 import torch
+from pytorch3d.common.datatypes import Device
 from pytorch3d.renderer.camera_utils import join_cameras_as_batch
 from pytorch3d.renderer.cameras import (
     camera_position_from_spherical_angles,
@@ -149,14 +150,17 @@ def ndc_to_screen_points_naive(points, imsize):


 def init_random_cameras(
-    cam_type: typing.Type[CamerasBase], batch_size: int, random_z: bool = False
+    cam_type: typing.Type[CamerasBase],
+    batch_size: int,
+    random_z: bool = False,
+    device: Device = "cpu",
 ):
     cam_params = {}
     T = torch.randn(batch_size, 3) * 0.03
     if not random_z:
         T[:, 2] = 4
     R = so3_exp_map(torch.randn(batch_size, 3) * 3.0)
-    cam_params = {"R": R, "T": T}
+    cam_params = {"R": R, "T": T, "device": device}
     if cam_type in (OpenGLPerspectiveCameras, OpenGLOrthographicCameras):
         cam_params["znear"] = torch.rand(batch_size) * 10 + 0.1
         cam_params["zfar"] = torch.rand(batch_size) * 4 + 1 + cam_params["znear"]
@@ -613,15 +617,33 @@ def test_unproject_points(self, batch_size=50, num_points=100):
         self.assertTrue(torch.allclose(xyz_unproj, matching_xyz, atol=1e-4))

     @staticmethod
-    def unproject_points(cam_type, batch_size=50, num_points=100):
+    def unproject_points(
+        cam_type, batch_size=50, num_points=100, device: Device = "cpu"
+    ):
         """
         Checks that an unprojection of a randomly projected point cloud
         stays the same.
         """
+        if device == "cuda":
+            device = torch.device("cuda:0")
+        else:
+            device = torch.device("cpu")
+
+        str2cls = {  # noqa
+            "OpenGLOrthographicCameras": OpenGLOrthographicCameras,
+            "OpenGLPerspectiveCameras": OpenGLPerspectiveCameras,
+            "SfMOrthographicCameras": SfMOrthographicCameras,
+            "SfMPerspectiveCameras": SfMPerspectiveCameras,
+            "FoVOrthographicCameras": FoVOrthographicCameras,
+            "FoVPerspectiveCameras": FoVPerspectiveCameras,
+            "OrthographicCameras": OrthographicCameras,
+            "PerspectiveCameras": PerspectiveCameras,
+            "FishEyeCameras": FishEyeCameras,
+        }

         def run_cameras():
             # init the cameras
-            cameras = init_random_cameras(cam_type, batch_size)
+            cameras = init_random_cameras(str2cls[cam_type], batch_size, device=device)
             # xyz - the ground truth point cloud
             xyz = torch.randn(num_points, 3) * 0.3
             xyz = cameras.unproject_points(xyz, scaled_depth_input=True)
@@ -666,15 +688,33 @@ def test_project_points_screen(self, batch_size=50, num_points=100):
         self.assertClose(xyz_project_screen, xyz_project_screen_naive, atol=1e-4)

     @staticmethod
-    def transform_points(cam_type, batch_size=50, num_points=100):
+    def transform_points(
+        cam_type, batch_size=50, num_points=100, device: Device = "cpu"
+    ):
         """
         Checks that an unprojection of a randomly projected point cloud
         stays the same.
         """

+        if device == "cuda":
+            device = torch.device("cuda:0")
+        else:
+            device = torch.device("cpu")
+        str2cls = {  # noqa
+            "OpenGLOrthographicCameras": OpenGLOrthographicCameras,
+            "OpenGLPerspectiveCameras": OpenGLPerspectiveCameras,
+            "SfMOrthographicCameras": SfMOrthographicCameras,
+            "SfMPerspectiveCameras": SfMPerspectiveCameras,
+            "FoVOrthographicCameras": FoVOrthographicCameras,
+            "FoVPerspectiveCameras": FoVPerspectiveCameras,
+            "OrthographicCameras": OrthographicCameras,
+            "PerspectiveCameras": PerspectiveCameras,
+            "FishEyeCameras": FishEyeCameras,
+        }
+
         def run_cameras():
             # init the cameras
-            cameras = init_random_cameras(cam_type, batch_size)
+            cameras = init_random_cameras(str2cls[cam_type], batch_size, device=device)
             # xyz - the ground truth point cloud
             xy = torch.randn(num_points, 2) * 2.0 - 1.0
             z = torch.randn(num_points, 1) * 3.0 + 1.0
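
Finally, a hedged sketch of how the benchmark drives the updated static methods (not part of the commit): cam_type now arrives as a string and is resolved through str2cls, and, following the usual fvcore pattern in this file, the method is assumed to return the inner run_cameras closure that fvcore times. The FishEyeCameras case assumes the init_random_cameras support added in the earlier diff is in place.

    # Hypothetical call mirroring one entry of the benchmark's kwargs_list.
    from tests.test_cameras import TestCamerasCommon

    run = TestCamerasCommon.transform_points(
        cam_type="FishEyeCameras", batch_size=1, num_points=10, device="cpu"
    )
    run()  # fvcore's benchmark would invoke this closure repeatedly and time it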
