
Commit 59644c2

Create examples showing ndd usage (#6140)
* Add tabs on the front documentation page to show a dynamic mode snippet
* Rework the getting started page for dynamic mode
* Add a way to have dynamic mode variants for notebooks.
* Translate image processing examples to dynamic mode.
* Make pipeline/dynamic mode examples subpages instead of using a button
* Fix links to documentation pages with aliased names
* Move the augmentation gallery dynamic/pipeline mode under the same section
* Fix see also references for dynamic mode
* Translate data loading examples to dynamic mode
* Translate audio processing examples to dynamic mode
* Translate video examples to dynamic mode
* Fix formatting issues
* Fix links to documentation
* Fix copyright headers
* Remove an unused import
* Rework tabs to also support github rendering
* Try the .. container directive to see if GitHub supports it
* Make the dali_tabs extension generate reStructuredText instead of HTML
* Reduce the size of figures in getting started notebooks
* Fix a formatting issue
* Fix an issue with relative synsets imports
* Use absolute paths for operator cross references
* Remove references to pipelines in dynamic mode notebooks
* Fix references to ndd.readers.Numpy in numpy reader example
* Fix the exclusion/inclusion of numpy reader in tests
* Fix title of dynamic/pipeline mode-specific pages and add a badge

---------

Signed-off-by: Rostan Tabet <[email protected]>
1 parent 0d773ee commit 59644c2

File tree: 86 files changed (+9832 / -1357 lines)


.gitignore

Lines changed: 4 additions & 4 deletions
@@ -28,16 +28,16 @@ docs/dali.png
 docs/nvidia.ico
 docs/operations/*
 docs/dali_dynamic/operations/*
-docs/examples/audio_processing/index.rst
+docs/examples/audio_processing/**/index.rst
 docs/examples/custom_operations/index.rst
-docs/examples/general/data_loading/index.rst
+docs/examples/general/data_loading/**/index.rst
 docs/examples/general/expressions/index.rst
 docs/examples/general/general_ops_index.rst
-docs/examples/image_processing/index.rst
+docs/examples/image_processing/**/index.rst
 docs/examples/index.rst
 docs/examples/operations_index.rst
 docs/examples/other_index.rst
-docs/examples/sequence_processing/index.rst
+docs/examples/sequence_processing/**/index.rst
 docs/examples/use_cases/index.rst
 docs/examples/use_cases/paddle/index.rst
 .DS_Store

README.rst

Lines changed: 96 additions & 43 deletions
@@ -32,63 +32,116 @@ can easily be retargeted to TensorFlow, PyTorch, and PaddlePaddle.
 
 DALI in action:
 
-.. code-block:: python
+.. container:: dali-tabs
 
-   from nvidia.dali.pipeline import pipeline_def
-   import nvidia.dali.types as types
-   import nvidia.dali.fn as fn
-   from nvidia.dali.plugin.pytorch import DALIGenericIterator
-   import os
+   **Pipeline mode:**
 
-   # To run with different data, see documentation of nvidia.dali.fn.readers.file
-   # points to https://github.com/NVIDIA/DALI_extra
-   data_root_dir = os.environ['DALI_EXTRA_PATH']
-   images_dir = os.path.join(data_root_dir, 'db', 'single', 'jpeg')
+   .. code-block:: python
 
+      from nvidia.dali.pipeline import pipeline_def
+      import nvidia.dali.types as types
+      import nvidia.dali.fn as fn
+      from nvidia.dali.plugin.pytorch import DALIGenericIterator
+      import os
 
-   def loss_func(pred, y):
-       pass
+      # To run with different data, see documentation of nvidia.dali.fn.readers.file
+      # points to https://github.com/NVIDIA/DALI_extra
+      data_root_dir = os.environ['DALI_EXTRA_PATH']
+      images_dir = os.path.join(data_root_dir, 'db', 'single', 'jpeg')
 
 
-   def model(x):
-       pass
+      def loss_func(pred, y):
+          pass
 
 
-   def backward(loss, model):
-       pass
+      def model(x):
+          pass
 
 
-   @pipeline_def(num_threads=4, device_id=0)
-   def get_dali_pipeline():
-       images, labels = fn.readers.file(
-           file_root=images_dir, random_shuffle=True, name="Reader")
-       # decode data on the GPU
-       images = fn.decoders.image_random_crop(
-           images, device="mixed", output_type=types.RGB)
-       # the rest of processing happens on the GPU as well
-       images = fn.resize(images, resize_x=256, resize_y=256)
-       images = fn.crop_mirror_normalize(
-           images,
-           crop_h=224,
-           crop_w=224,
-           mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
-           std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
-           mirror=fn.random.coin_flip())
-       return images, labels
+      def backward(loss, model):
+          pass
 
 
-   train_data = DALIGenericIterator(
-       [get_dali_pipeline(batch_size=16)],
-       ['data', 'label'],
-       reader_name='Reader'
-   )
+      @pipeline_def(num_threads=4, device_id=0)
+      def get_dali_pipeline():
+          images, labels = fn.readers.file(
+              file_root=images_dir, random_shuffle=True, name="Reader")
+          # decode data on the GPU
+          images = fn.decoders.image_random_crop(
+              images, device="mixed", output_type=types.RGB)
+          # the rest of processing happens on the GPU as well
+          images = fn.resize(images, resize_x=256, resize_y=256)
+          images = fn.crop_mirror_normalize(
+              images,
+              crop_h=224,
+              crop_w=224,
+              mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+              std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
+              mirror=fn.random.coin_flip())
+          return images, labels
 
 
-   for i, data in enumerate(train_data):
-       x, y = data[0]['data'], data[0]['label']
-       pred = model(x)
-       loss = loss_func(pred, y)
-       backward(loss, model)
+      train_data = DALIGenericIterator(
+          [get_dali_pipeline(batch_size=16)],
+          ['data', 'label'],
+          reader_name='Reader'
+      )
+
+
+      for i, data in enumerate(train_data):
+          x, y = data[0]['data'], data[0]['label']
+          pred = model(x)
+          loss = loss_func(pred, y)
+          backward(loss, model)
+
+   **Dynamic mode:**
+
+   .. code-block:: python
+
+      import os
+      import nvidia.dali.types as types
+      import nvidia.dali.experimental.dynamic as ndd
+      import torch
+
+      # To run with different data, see documentation of ndd.readers.File
+      # points to https://github.com/NVIDIA/DALI_extra
+      data_root_dir = os.environ['DALI_EXTRA_PATH']
+      images_dir = os.path.join(data_root_dir, 'db', 'single', 'jpeg')
+
+
+      def loss_func(pred, y):
+          pass
+
+
+      def model(x):
+          pass
+
+
+      def backward(loss, model):
+          pass
+
+
+      reader = ndd.readers.File(file_root=images_dir, random_shuffle=True)
+
+      for images, labels in reader.next_epoch(batch_size=16):
+          images = ndd.decoders.image_random_crop(images, device="gpu", output_type=types.RGB)
+          # the rest of processing happens on the GPU as well
+          images = ndd.resize(images, resize_x=256, resize_y=256)
+          images = ndd.crop_mirror_normalize(
+              images,
+              crop_h=224,
+              crop_w=224,
+              mean=[0.485 * 255, 0.456 * 255, 0.406 * 255],
+              std=[0.229 * 255, 0.224 * 255, 0.225 * 255],
+              mirror=ndd.random.coin_flip(),
+          )
+
+          x = torch.as_tensor(images)
+          y = torch.as_tensor(labels.gpu())
+
+          pred = model(x)
+          loss = loss_func(pred, y)
+          backward(loss, model)
 
 
 Highlights

docs/_extensions/dali_tabs.py

Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
+# Copyright (c) 2026, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import re
+
+from sphinx.application import Sphinx
+
+# Pattern to match container:: dali-tabs blocks
+CONTAINER_PATTERN = re.compile(
+    r"^\.\. container:: dali-tabs\n((?:[ ]{3,}[^\n]*\n|\n)*)",
+    re.MULTILINE,
+)
+
+# Pattern to match **Tab Name:** headers
+TAB_HEADER_PATTERN = re.compile(r"^[ ]*\*\*([^*]+):\*\*\s*$", re.MULTILINE)
+
+
+def _transform_container_to_tabset(match: re.Match) -> str:
+    """
+    Transform a ``.. container:: dali-tabs`` block into ``.. tab-set::`` RST.
+    See README.rst for an example of usage.
+    """
+    content = match.group(1)
+
+    # Split by tab headers: [before, name1, content1, name2, content2, ...]
+    parts = TAB_HEADER_PATTERN.split(content)
+    if len(parts) < 3:
+        return match.group(0)  # No tabs found, return unchanged
+
+    lines = [".. tab-set::", "   :sync-group: dali-mode", ""]
+
+    for i in range(1, len(parts), 2):
+        tab_name = parts[i].strip()
+        tab_content = parts[i + 1] if i + 1 < len(parts) else ""
+        sync_key = tab_name.lower().replace(" ", "-")
+
+        lines.append(f"   .. tab-item:: {tab_name}")
+        lines.append(f"      :sync: {sync_key}")
+        lines.append("")
+
+        # Re-indent: content has 3-space indent from container, add 3 more for tab-item
+        for line in tab_content.rstrip().split("\n"):
+            if line.strip():
+                lines.append(f"   {line}")
+            else:
+                lines.append("")
+        lines.append("")
+
+    return "\n".join(lines)
+
+
+def include_read_handler(
+    app: Sphinx,
+    relative_path: str,
+    parent_docname: str,
+    content: list[str],
+) -> None:
+    """Transform container:: dali-tabs in included files."""
+    if not content:
+        return
+
+    text = content[0]
+    if "dali-tabs" not in text:
+        return
+    transformed = CONTAINER_PATTERN.sub(_transform_container_to_tabset, text)
+    if transformed != text:
+        content[0] = transformed
+
+
+def setup(app: Sphinx):
+    app.connect("include-read", include_read_handler)
+    return {
+        "version": "1.0",
+        "parallel_read_safe": True,
+        "parallel_write_safe": True,
+    }
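For reference, the sketch below is not part of the commit: it feeds a small, made-up dali-tabs container through the same regex substitution that include_read_handler applies, assuming docs/_extensions is on sys.path and Sphinx is installed so that dali_tabs imports directly.

# Minimal sketch (assumes the dali_tabs module above is importable);
# the RST input is a made-up example, not taken from the repository.
import dali_tabs

source = """\
.. container:: dali-tabs

   **Pipeline mode:**

   .. code-block:: python

      print("pipeline")

   **Dynamic mode:**

   .. code-block:: python

      print("dynamic")
"""

# Apply the same substitution that include_read_handler performs on included files.
rewritten = dali_tabs.CONTAINER_PATTERN.sub(
    dali_tabs._transform_container_to_tabset, source
)
print(rewritten)
# Produces a ".. tab-set::" block with ":sync-group: dali-mode", containing one
# ".. tab-item::" per tab ("Pipeline mode", "Dynamic mode") whose ":sync:" keys
# are "pipeline-mode" and "dynamic-mode", with each tab's body re-indented
# underneath its tab-item.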

docs/autodoc_submodules.py

Lines changed: 6 additions & 5 deletions
@@ -12,13 +12,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import nvidia.dali.plugin.pytorch
-import nvidia.dali.plugin.numba
-import nvidia.dali.plugin.jax
-import nvidia.dali.experimental.dynamic
 import inspect
 import sys
 
+import nvidia.dali.experimental.dynamic
+import nvidia.dali.plugin.jax
+import nvidia.dali.plugin.numba
+import nvidia.dali.plugin.pytorch
+
 try:
     import nvidia.dali.plugin.video
 except ImportError:
@@ -154,7 +155,7 @@ def get_references(name, references)
     if name in references:
         result += ".. seealso::\n"
         for desc, url in references[name]:
-            result += f" * `{desc} <../{url}>`_\n"
+            result += f" * `{desc} </{url}>`_\n"
     return result
 
 
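For context, a short sketch (not part of the commit) of the seealso block that get_references emits after this change: the references mapping, the operator name, and the result initialization are hypothetical; only the two "result +=" lines mirror the diff.

# Sketch of the ".. seealso::" cross-reference block built after this change.
references = {
    "fn.resize": [("Resize example", "examples/image_processing/resize.html")],
}


def get_references_sketch(name, references):
    result = ""
    if name in references:
        result += ".. seealso::\n"
        for desc, url in references[name]:
            # Links are now absolute ("/{url}") rather than relative ("../{url}").
            result += f" * `{desc} </{url}>`_\n"
    return result


print(get_references_sketch("fn.resize", references))
# .. seealso::
#  * `Resize example </examples/image_processing/resize.html>`_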
