Commit c54e048

bottler authored and facebook-github-bot committed
more readthedocs
Summary: Quote formats, spelling
Reviewed By: shapovalov
Differential Revision: D40913734
fbshipit-source-id: d6dea65d5204b3c463c656a07ef9b447b7be6a0a
1 parent f7ac7b6 commit c54e048
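Beyond the spelling fixes ("impliciton" to "implicitron"), most of the diff converts Markdown-style triple-backtick fences inside docstrings into reStructuredText literal blocks: the introducing line ends with a double colon, a blank line follows, and the block body is indented, which is what Sphinx needs to render preformatted text on readthedocs. A minimal sketch of the convention, using a made-up class rather than code from this commit:

    class Example:
        """
        A made-up class used only to illustrate the docstring convention.

        Usage::

            ex = Example()
            ex.run()

        The trailing double colon plus a blank line and an indented body is
        what Sphinx turns into a literal block; a Markdown-style
        triple-backtick fence would just appear verbatim in the rendered docs.
        """

        def run(self) -> None:
            print("running")

The renamed .rst page titles also get matching underline lengths, since Sphinx warns when a section underline is shorter than its title.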

File tree

12 files changed, +83 -86 lines changed


docs/generate_implicitron_stubs.py

Lines changed: 4 additions & 4 deletions
@@ -49,7 +49,7 @@ def iterate_directory(directory_path, dest):
     toc = []
     if not dest.exists():
         dest.mkdir()
-    for file in directory_path.glob("*.py"):
+    for file in sorted(directory_path.glob("*.py")):
         if file.stem.startswith("_"):
             continue
         module = paths_to_modules([file])
@@ -121,7 +121,7 @@ def make_directory_index(title: str, directory_path: Path):
 ]
 basic_dataset_modules = [f"pytorch3d.implicitron.dataset.{i}" for i in basic_dataset]
 create_one_file(
-    "pytorch3d.implicitron.dataset",
+    "pytorch3d.implicitron.dataset in general",
     "Basics of data for implicitron",
     basic_dataset_modules,
     DEST_DIR / "data_basics.rst",
@@ -131,15 +131,15 @@ def make_directory_index(title: str, directory_path: Path):
     i for i in dataset_files if i.stem.find("_dataset_map_provider") != -1
 ]
 create_one_file(
-    "pytorch3d.impliciton.dataset",
+    "pytorch3d.implicitron.dataset specific datasets",
     "specific datasets",
     paths_to_modules(specific_dataset_files),
     DEST_DIR / "datasets.rst",
 )
 
 evaluation_files = sorted(ROOT_DIR.glob("pytorch3d/implicitron/evaluation/*.py"))
 create_one_file(
-    "pytorch3d.impliciton.evaluation",
+    "pytorch3d.implicitron.evaluation",
     "evaluation",
     paths_to_modules(evaluation_files),
     DEST_DIR / "evaluation.rst",

docs/modules/implicitron/data_basics.rst

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
-pytorch3d.implicitron.dataset
-=============================
+pytorch3d.implicitron.dataset in general
+========================================
 
 Basics of data for implicitron
 

docs/modules/implicitron/datasets.rst

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
-pytorch3d.impliciton.dataset
-============================
+pytorch3d.implicitron.dataset specific datasets
+===============================================
 
 specific datasets
 

docs/modules/implicitron/evaluation.rst

Lines changed: 2 additions & 2 deletions
@@ -1,5 +1,5 @@
-pytorch3d.impliciton.evaluation
-===============================
+pytorch3d.implicitron.evaluation
+================================
 
 evaluation
 

docs/modules/implicitron/models/implicit_function/index.rst

Lines changed: 1 addition & 1 deletion
@@ -4,10 +4,10 @@ pytorch3d.implicitron.models.implicit_function
 .. toctree::
 
    base
+   decoding_functions
    idr_feature_field
    neural_radiance_field
    scene_representation_networks
    utils
-   decoding_functions
    voxel_grid
    voxel_grid_implicit_function

pytorch3d/implicitron/dataset/json_index_dataset_map_provider_v2.py

Lines changed: 10 additions & 12 deletions
@@ -57,8 +57,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
     Generates the training, validation, and testing dataset objects for
     a dataset laid out on disk like CO3Dv2, with annotations in gzipped json files.
 
-    The dataset is organized in the filesystem as follows:
-    ```
+    The dataset is organized in the filesystem as follows::
+
     self.dataset_root
         ├── <category_0>
         │   ├── <sequence_name_0>
@@ -90,7 +90,6 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
         ├── <category_1>
         ├── ...
         ├── <category_K>
-    ```
 
     The dataset contains sequences named `<sequence_name_i>` from `K` categories with
     names `<category_j>`. Each category comprises sequence folders
@@ -106,8 +105,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
     the list of all frames and sequences of the given category stored as lists of
     `FrameAnnotation` and `SequenceAnnotation` objects respectivelly.
 
-    Each `set_lists_<subset_name_l>.json` file contains the following dictionary:
-    ```
+    Each `set_lists_<subset_name_l>.json` file contains the following dictionary::
+
     {
         "train": [
             (sequence_name: str, frame_number: int, image_path: str),
@@ -122,7 +121,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
             ...
         ],
     ]
-    ```
+
     defining the list of frames (identified with their `sequence_name` and `frame_number`)
     in the "train", "val", and "test" subsets of the dataset.
     Note that `frame_number` can be obtained only from `frame_annotations.jgz` and
@@ -131,8 +130,8 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
     have its frame number set to `20`, not 5).
 
     Each `eval_batches_<subset_name_l>.json` file contains a list of evaluation examples
-    in the following form:
-    ```
+    in the following form::
+
     [
         [  # batch 1
             (sequence_name: str, frame_number: int, image_path: str),
@@ -143,7 +142,7 @@ class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): # pyre-ignore [13]
             ...
        ],
    ]
-    ```
+
     Note that the evaluation examples always come from the `"test"` subset of the dataset.
     (test frames can repeat across batches).
 
@@ -341,14 +340,13 @@ def get_category_to_subset_name_list(self) -> Dict[str, List[str]]:
 
         Returns:
            category_to_subset_name_list: A dictionary containing subset names available
-                per category of the following form:
-                ```
+                per category of the following form::
+
                 {
                     category_0: [category_0_subset_name_0, category_0_subset_name_1, ...],
                     category_1: [category_1_subset_name_0, category_1_subset_name_1, ...],
                     ...
                 }
-                ```
 
         """
         category_to_subset_name_list_json = "category_to_subset_name_list.json"
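For orientation, a rough sketch of reading one of the `set_lists_<subset_name_l>.json` files described above and grouping its entries by split. The file name below is a placeholder, and the real provider class does this parsing (plus caching and validation) internally:

    import json
    from collections import defaultdict

    # Placeholder path; real files follow the set_lists_<subset_name_l>.json
    # naming described in the docstring above.
    with open("set_lists_example.json") as f:
        set_lists = json.load(f)

    frames_by_split = defaultdict(list)
    for split in ("train", "val", "test"):
        # Each entry is a (sequence_name, frame_number, image_path) triple.
        for sequence_name, frame_number, _image_path in set_lists[split]:
            frames_by_split[split].append((sequence_name, frame_number))

    print({split: len(frames) for split, frames in frames_by_split.items()})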

pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py

Lines changed: 10 additions & 8 deletions
@@ -554,8 +554,8 @@ def _get_flat_nvs_metric_key(result, metric_name) -> str:
 
 def flatten_nvs_results(results):
     """
-    Takes input `results` list of dicts of the form:
-    ```
+    Takes input `results` list of dicts of the form::
+
     [
         {
             'subset':'train/test/...',
@@ -564,12 +564,14 @@ def flatten_nvs_results(results):
         },
         ...
     ]
-    ```
-    And converts to a flat dict as follows:
-    {
-        'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
-        ...
-    }
+
+    And converts to a flat dict as follows::
+
+        {
+            'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics,
+            ...
+        }
+
     """
     results_flat = {}
     for result in results:
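A toy re-implementation of the flattening that docstring describes, for illustration only; the key format and the name of the metrics field are assumptions, and the module's own flatten_nvs_results and _get_flat_nvs_metric_key remain the authoritative versions:

    def flatten_nvs_results_sketch(results):
        # Illustrative only: assumes each result dict carries its metrics
        # under a "metrics" key, which the real code may name differently.
        results_flat = {}
        for result in results:
            key = f"subset={result['subset']}|subsubset={result['subsubset']}"
            results_flat[key] = result["metrics"]
        return results_flat

    example = [
        {"subset": "train", "subsubset": "src=1", "metrics": {"psnr": 24.1}},
        {"subset": "test", "subsubset": "src=2", "metrics": {"psnr": 21.7}},
    ]
    print(flatten_nvs_results_sketch(example))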

pytorch3d/implicitron/models/renderer/multipass_ea.py

Lines changed: 8 additions & 8 deletions
@@ -29,21 +29,21 @@ class MultiPassEmissionAbsorptionRenderer( # pyre-ignore: 13
     During each ray marching pass, features, depth map, and masks
     are integrated: Let o_i be the opacity estimated by the implicit function,
     and d_i be the offset between points `i` and `i+1` along the respective ray.
-    Ray marching is performed using the following equations:
-    ```
-    ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
-    weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),
-    ```
+    Ray marching is performed using the following equations::
+
+        ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
+        weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),
+
     and the final rendered quantities are computed by a dot-product of ray values
     with the weights, e.g. `features = sum_n(weight_n * ray_features_n)`.
 
     By default, for the EA raymarcher from [1] (
     activated with `self.raymarcher_class_type="EmissionAbsorptionRaymarcher"`
-    ):
-    ```
+    )::
+
     cap_fn(x) = 1 - exp(-x),
     weight_fn(x) = w * x.
-    ```
+
     Note that the latter can altered by changing `self.raymarcher_class_type`,
     e.g. to "CumsumRaymarcher" which implements the cumulative-sum raymarcher
     from NeuralVolumes [2].
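A toy numerical reading of those equations over a single ray, with made-up opacities and spacings. This is illustration only, not the renderer's actual implementation; it just evaluates cap_fn(x) = 1 - exp(-x) and weight_fn(w, x) = w * x as written in the docstring:

    import torch

    o = torch.tensor([0.2, 0.8, 1.5, 0.1])              # opacity o_i at each sample
    d = torch.tensor([0.1, 0.1, 0.1, 0.1])              # spacing d_i between samples
    feats = torch.tensor([[1.0], [0.5], [0.2], [0.0]])  # per-sample ray features

    cap = lambda x: 1.0 - torch.exp(-x)
    absorbed = cap(d * o)                               # cap_fn(d_i * o_i)
    ray_opacity = cap(torch.cumsum(absorbed, dim=0))    # cap_fn(sum_{i<=n} cap_fn(d_i * o_i))
    prev_transparency = torch.cat([torch.ones(1), 1.0 - ray_opacity[:-1]])
    weights = absorbed * prev_transparency              # weight_fn(w, x) = w * x
    features = (weights[:, None] * feats).sum(dim=0)    # sum_n(weight_n * ray_features_n)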

pytorch3d/implicitron/models/view_pooler/feature_aggregator.py

Lines changed: 10 additions & 12 deletions
@@ -250,12 +250,11 @@ class AngleWeightedReductionFeatureAggregator(torch.nn.Module, FeatureAggregato
     Performs a weighted aggregation using a set of predefined `reduction_functions`
     and concatenates the results of each aggregation function along the
     channel dimension. The weights are proportional to the cosine of the
-    angle between the target ray and the source ray:
-    ```
-    weight = (
-        dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
-    )**self.weight_by_ray_angle_gamma
-    ```
+    angle between the target ray and the source ray::
+
+        weight = (
+            dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
+        )**self.weight_by_ray_angle_gamma
 
     The reduction functions singularize the second dimension
     of the sampled features which stacks the source views.
@@ -359,12 +358,11 @@ class AngleWeightedIdentityFeatureAggregator(torch.nn.Module, FeatureAggregatorB
     """
     This aggregator does not perform any feature aggregation. It only weights
     the features by the weights proportional to the cosine of the
-    angle between the target ray and the source ray:
-    ```
-    weight = (
-        dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
-    )**self.weight_by_ray_angle_gamma
-    ```
+    angle between the target ray and the source ray::
+
+        weight = (
+            dot(target_ray, source_ray) * 0.5 + 0.5 + self.min_ray_angle_weight
+        )**self.weight_by_ray_angle_gamma
 
     Settings:
         min_ray_angle_weight: The minimum possible aggregation weight
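A toy evaluation of that weighting formula with made-up ray directions and settings, purely to make the behaviour concrete; the aggregators compute the same quantity over batched tensors of rays:

    import torch
    import torch.nn.functional as F

    # Hypothetical values for illustration.
    target_ray = F.normalize(torch.tensor([0.0, 0.0, 1.0]), dim=0)
    source_ray = F.normalize(torch.tensor([0.3, 0.0, 1.0]), dim=0)
    min_ray_angle_weight = 0.1
    weight_by_ray_angle_gamma = 1.0

    weight = (
        torch.dot(target_ray, source_ray) * 0.5 + 0.5 + min_ray_angle_weight
    ) ** weight_by_ray_angle_gamma
    # ~1.08 here; aligned rays get the largest weight, while rays pointing in
    # opposite directions fall to min_ray_angle_weight.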

pytorch3d/implicitron/models/view_pooler/view_sampler.py

Lines changed: 8 additions & 9 deletions
@@ -218,8 +218,8 @@ def cameras_points_cartesian_product(
 ) -> Tuple[CamerasBase, torch.Tensor]:
     """
     Generates all pairs of pairs of elements from 'camera' and 'pts' and returns
-    `camera_rep` and `pts_rep` such that:
-    ```
+    `camera_rep` and `pts_rep` such that::
+
         camera_rep = [                 pts_rep = [
             camera[0]                      pts[0],
             camera[0]                      pts[1],
@@ -235,15 +235,14 @@ def cameras_points_cartesian_product(
             camera[n_cameras-1]            ...,
             ...                            pts[batch_pts-1],
         ]                              ]
-    ```
 
     Args:
         camera: A batch of `n_cameras` cameras.
         pts: A batch of `batch_pts` points of shape `(batch_pts, ..., dim)`
 
     Returns:
-        camera_rep: A batch of batch_pts*n_cameras cameras such that:
-            ```
+        camera_rep: A batch of batch_pts*n_cameras cameras such that::
+
             camera_rep = [
                 camera[0]
                 camera[0]
@@ -258,11 +257,11 @@ def cameras_points_cartesian_product(
                 camera[n_cameras-1]
                 camera[n_cameras-1]
             ]
-            ```
+
 
         pts_rep: Repeated `pts` of shape `(batch_pts*n_cameras, ..., dim)`,
-            such that:
-            ```
+            such that::
+
             pts_rep = [
                 pts[0],
                 pts[1],
@@ -278,7 +277,7 @@ def cameras_points_cartesian_product(
                 ...,
                 pts[batch_pts-1],
             ]
-            ```
+
     """
     n_cameras = camera.R.shape[0]
     batch_pts = pts.shape[0]
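The repetition pattern documented above is a plain cartesian product: each camera is repeated `batch_pts` times consecutively, while the whole `pts` batch is tiled `n_cameras` times. A shape-only sketch with stand-in tensors (the real function operates on a `CamerasBase` batch rather than integer ids):

    import torch

    n_cameras, batch_pts, dim = 3, 4, 3
    camera_ids = torch.arange(n_cameras)  # stand-in for the camera batch
    pts = torch.randn(batch_pts, 2, dim)

    # camera_rep: [cam0, cam0, cam0, cam0, cam1, cam1, ...]
    camera_rep_ids = camera_ids.repeat_interleave(batch_pts)
    # pts_rep: [pts[0], pts[1], pts[2], pts[3], pts[0], pts[1], ...]
    pts_rep = pts.repeat(n_cameras, *([1] * (pts.ndim - 1)))

    assert camera_rep_ids.shape[0] == pts_rep.shape[0] == n_cameras * batch_pts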

pytorch3d/implicitron/tools/stats.py

Lines changed: 23 additions & 23 deletions
@@ -73,29 +73,29 @@ class Stats(object):
     # TODO: update this with context manager
     """
     stats logging object useful for gathering statistics of training a deep net in pytorch
-    Example:
-    ```
-    # init stats structure that logs statistics 'objective' and 'top1e'
-    stats = Stats( ('objective','top1e') )
-    network = init_net()  # init a pytorch module (=nueral network)
-    dataloader = init_dataloader()  # init a dataloader
-    for epoch in range(10):
-        # start of epoch -> call new_epoch
-        stats.new_epoch()
-
-        # iterate over batches
-        for batch in dataloader:
-
-            output = network(batch)  # run and save into a dict of output variables "output"
-
-            # stats.update() automatically parses the 'objective' and 'top1e' from
-            # the "output" dict and stores this into the db
-            stats.update(output)
-            stats.print()  # prints the averages over given epoch
-        # stores the training plots into '/tmp/epoch_stats.pdf'
-        # and plots into a visdom server running at localhost (if running)
-        stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
-    ```
+    Example::
+
+        # init stats structure that logs statistics 'objective' and 'top1e'
+        stats = Stats( ('objective','top1e') )
+        network = init_net()  # init a pytorch module (=nueral network)
+        dataloader = init_dataloader()  # init a dataloader
+        for epoch in range(10):
+            # start of epoch -> call new_epoch
+            stats.new_epoch()
+
+            # iterate over batches
+            for batch in dataloader:
+
+                output = network(batch)  # run and save into a dict of output variables
+
+                # stats.update() automatically parses the 'objective' and 'top1e' from
+                # the "output" dict and stores this into the db
+                stats.update(output)
+                stats.print()  # prints the averages over given epoch
+            # stores the training plots into '/tmp/epoch_stats.pdf'
+            # and plots into a visdom server running at localhost (if running)
+            stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')
+
     """
 
     def __init__(

pytorch3d/implicitron/tools/utils.py

Lines changed: 3 additions & 3 deletions
@@ -181,11 +181,11 @@ class Timer:
     """
     A simple class for timing execution.
 
-    Example:
-    ```
+    Example::
+
         with Timer():
            print("This print statement is timed.")
-    ```
+
     """
 
     def __init__(self, name="timer", quiet=False):
