def packed_to_padded_python(inputs, first_idxs, max_size, device):
    """
    PyTorch reference implementation of packed_to_padded.

    Scatters a packed tensor of per-element values into a zero-padded
    per-mesh layout of shape (num_meshes, max_size, *inputs.shape[1:]).
    """
    num_meshes = first_idxs.size(0)
    # Trailing feature dims; an empty tuple for flat (1-D) packed inputs,
    # which makes the flat and multi-dim cases share one zeros() call.
    feature_dims = tuple(inputs.shape[1:])
    inputs_padded = torch.zeros((num_meshes, max_size) + feature_dims, device=device)
    total = inputs.shape[0]
    for m in range(num_meshes):
        start = first_idxs[m]
        # The last mesh's slice runs to the end of the packed tensor.
        end = total if m == num_meshes - 1 else first_idxs[m + 1]
        inputs_padded[m, : end - start] = inputs[start:end]

    return inputs_padded
62
63
def padded_to_packed_python(inputs, first_idxs, num_inputs, device):
    """
    PyTorch reference implementation of padded_to_packed.

    Gathers the valid leading rows of each mesh from a padded tensor of
    shape (num_meshes, max_size, ...) into a single packed tensor of
    shape (num_inputs, *inputs.shape[2:]).
    """
    num_meshes = inputs.size(0)
    # Trailing feature dims; an empty tuple for 2-D padded inputs,
    # which makes the flat and multi-dim cases share one zeros() call.
    feature_dims = tuple(inputs.shape[2:])
    inputs_packed = torch.zeros((num_inputs,) + feature_dims, device=device)
    for m in range(num_meshes):
        start = first_idxs[m]
        # The last mesh fills the remainder of the packed tensor.
        end = num_inputs if m == num_meshes - 1 else first_idxs[m + 1]
        inputs_packed[start:end] = inputs[m, : end - start]

    return inputs_packed
83
83
84
- def _test_packed_to_padded_helper (self , D , device ):
84
+ def _test_packed_to_padded_helper (self , dims , device ):
85
85
"""
86
86
Check the results from packed_to_padded and PyTorch implementations
87
87
are the same.
@@ -91,10 +91,12 @@ def _test_packed_to_padded_helper(self, D, device):
91
91
mesh_to_faces_packed_first_idx = meshes .mesh_to_faces_packed_first_idx ()
92
92
max_faces = meshes .num_faces_per_mesh ().max ().item ()
93
93
94
- if D == 0 :
94
+ if len ( dims ) == 0 :
95
95
values = torch .rand ((faces .shape [0 ],), device = device , requires_grad = True )
96
96
else :
97
- values = torch .rand ((faces .shape [0 ], D ), device = device , requires_grad = True )
97
+ values = torch .rand (
98
+ (faces .shape [0 ], * dims ), device = device , requires_grad = True
99
+ )
98
100
values_torch = values .detach ().clone ()
99
101
values_torch .requires_grad = True
100
102
values_padded = packed_to_padded (
@@ -107,10 +109,10 @@ def _test_packed_to_padded_helper(self, D, device):
107
109
self .assertClose (values_padded , values_padded_torch )
108
110
109
111
# check backward
110
- if D == 0 :
112
+ if len ( dims ) == 0 :
111
113
grad_inputs = torch .rand ((len (meshes ), max_faces ), device = device )
112
114
else :
113
- grad_inputs = torch .rand ((len (meshes ), max_faces , D ), device = device )
115
+ grad_inputs = torch .rand ((len (meshes ), max_faces , * dims ), device = device )
114
116
values_padded .backward (grad_inputs )
115
117
grad_outputs = values .grad
116
118
values_padded_torch .backward (grad_inputs )
@@ -122,27 +124,41 @@ def _test_packed_to_padded_helper(self, D, device):
122
124
self .assertClose (grad_outputs , grad_outputs_torch2 )
123
125
124
126
def test_packed_to_padded_flat_cpu(self):
    """packed_to_padded on CPU with flat (no trailing dims) values."""
    self._test_packed_to_padded_helper(dims=[], device="cpu")
126
128
127
129
def test_packed_to_padded_D1_cpu(self):
    """packed_to_padded on CPU with a single trailing dim of size 1."""
    self._test_packed_to_padded_helper(dims=[1], device="cpu")
129
131
130
132
def test_packed_to_padded_D16_cpu(self):
    """packed_to_padded on CPU with a single trailing dim of size 16."""
    self._test_packed_to_padded_helper(dims=[16], device="cpu")
134
+
135
def test_packed_to_padded_D16_9_cpu(self):
    """packed_to_padded on CPU with two trailing dims (16, 9)."""
    self._test_packed_to_padded_helper(dims=[16, 9], device="cpu")
137
+
138
def test_packed_to_padded_D16_3_2_cpu(self):
    """packed_to_padded on CPU with three trailing dims (16, 3, 2)."""
    self._test_packed_to_padded_helper(dims=[16, 3, 2], device="cpu")
132
140
133
141
def test_packed_to_padded_flat_cuda(self):
    """packed_to_padded on a random CUDA device with flat values."""
    cuda_device = get_random_cuda_device()
    self._test_packed_to_padded_helper(dims=[], device=cuda_device)
136
144
137
145
def test_packed_to_padded_D1_cuda(self):
    """packed_to_padded on a random CUDA device, trailing dim (1,)."""
    cuda_device = get_random_cuda_device()
    self._test_packed_to_padded_helper(dims=[1], device=cuda_device)
140
148
141
149
def test_packed_to_padded_D16_cuda(self):
    """packed_to_padded on a random CUDA device, trailing dim (16,)."""
    cuda_device = get_random_cuda_device()
    self._test_packed_to_padded_helper(dims=[16], device=cuda_device)
152
+
153
def test_packed_to_padded_D16_9_cuda(self):
    """packed_to_padded on a random CUDA device, trailing dims (16, 9)."""
    cuda_device = get_random_cuda_device()
    self._test_packed_to_padded_helper(dims=[16, 9], device=cuda_device)
156
+
157
def test_packed_to_padded_D16_3_2_cuda(self):
    """packed_to_padded on a random CUDA device, trailing dims (16, 3, 2)."""
    cuda_device = get_random_cuda_device()
    self._test_packed_to_padded_helper(dims=[16, 3, 2], device=cuda_device)
144
160
145
- def _test_padded_to_packed_helper (self , D , device ):
161
+ def _test_padded_to_packed_helper (self , dims , device ):
146
162
"""
147
163
Check the results from packed_to_padded and PyTorch implementations
148
164
are the same.
@@ -151,10 +167,10 @@ def _test_padded_to_packed_helper(self, D, device):
151
167
mesh_to_faces_packed_first_idx = meshes .mesh_to_faces_packed_first_idx ()
152
168
num_faces_per_mesh = meshes .num_faces_per_mesh ()
153
169
max_faces = num_faces_per_mesh .max ().item ()
154
- if D == 0 :
170
+ if len ( dims ) == 0 :
155
171
values = torch .rand ((len (meshes ), max_faces ), device = device )
156
172
else :
157
- values = torch .rand ((len (meshes ), max_faces , D ), device = device )
173
+ values = torch .rand ((len (meshes ), max_faces , * dims ), device = device )
158
174
for i , num in enumerate (num_faces_per_mesh ):
159
175
values [i , num :] = 0
160
176
values .requires_grad = True
@@ -173,11 +189,11 @@ def _test_padded_to_packed_helper(self, D, device):
173
189
self .assertClose (values_packed , values_packed_torch )
174
190
175
191
# check backward
176
- if D == 0 :
192
+ if len ( dims ) == 0 :
177
193
grad_inputs = torch .rand ((num_faces_per_mesh .sum ().item ()), device = device )
178
194
else :
179
195
grad_inputs = torch .rand (
180
- (num_faces_per_mesh .sum ().item (), D ), device = device
196
+ (num_faces_per_mesh .sum ().item (), * dims ), device = device
181
197
)
182
198
values_packed .backward (grad_inputs )
183
199
grad_outputs = values .grad
@@ -190,41 +206,39 @@ def _test_padded_to_packed_helper(self, D, device):
190
206
self .assertClose (grad_outputs , grad_outputs_torch2 )
191
207
192
208
def test_padded_to_packed_flat_cpu(self):
    """padded_to_packed on CPU with flat (no trailing dims) values."""
    self._test_padded_to_packed_helper(dims=[], device="cpu")
194
210
195
211
def test_padded_to_packed_D1_cpu(self):
    """padded_to_packed on CPU with a single trailing dim of size 1."""
    self._test_padded_to_packed_helper(dims=[1], device="cpu")
197
213
198
214
def test_padded_to_packed_D16_cpu(self):
    """padded_to_packed on CPU with a single trailing dim of size 16."""
    self._test_padded_to_packed_helper(dims=[16], device="cpu")
216
+
217
def test_padded_to_packed_D16_9_cpu(self):
    """padded_to_packed on CPU with two trailing dims (16, 9)."""
    self._test_padded_to_packed_helper(dims=[16, 9], device="cpu")
219
+
220
def test_padded_to_packed_D16_3_2_cpu(self):
    """padded_to_packed on CPU with three trailing dims (16, 3, 2)."""
    self._test_padded_to_packed_helper(dims=[16, 3, 2], device="cpu")
200
222
201
223
def test_padded_to_packed_flat_cuda(self):
    """padded_to_packed on a random CUDA device with flat values."""
    cuda_device = get_random_cuda_device()
    self._test_padded_to_packed_helper(dims=[], device=cuda_device)
204
226
205
227
def test_padded_to_packed_D1_cuda(self):
    """padded_to_packed on a random CUDA device, trailing dim (1,)."""
    cuda_device = get_random_cuda_device()
    self._test_padded_to_packed_helper(dims=[1], device=cuda_device)
208
230
209
231
def test_padded_to_packed_D16_cuda(self):
    """padded_to_packed on a random CUDA device, trailing dim (16,)."""
    cuda_device = get_random_cuda_device()
    self._test_padded_to_packed_helper(dims=[16], device=cuda_device)
234
+
235
def test_padded_to_packed_D16_9_cuda(self):
    """padded_to_packed on a random CUDA device, trailing dims (16, 9)."""
    cuda_device = get_random_cuda_device()
    self._test_padded_to_packed_helper(dims=[16, 9], device=cuda_device)
238
+
239
def test_padded_to_packed_D16_3_2_cuda(self):
    """padded_to_packed on a random CUDA device, trailing dims (16, 3, 2)."""
    cuda_device = get_random_cuda_device()
    self._test_padded_to_packed_helper(dims=[16, 3, 2], device=cuda_device)
228
242
229
243
@staticmethod
230
244
def packed_to_padded_with_init (
0 commit comments