Useful Snippets

Various Isaac Sim Replicator snippets that can be run as standalone applications or from the UI using the Script Editor. Each example below is provided in both variants.
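
The two variants share the same Replicator logic and differ only in their entry-point boilerplate. A minimal sketch of the standalone skeleton used throughout this page (the example body is a placeholder):

from isaacsim import SimulationApp

# Standalone variant: create the SimulationApp before importing other omni.* modules
simulation_app = SimulationApp({"headless": False})

import omni.replicator.core as rep  # imported only after the app exists

# ... example body goes here ...

simulation_app.close()

The Script Editor variants skip this boilerplate and instead wrap any stepping logic in an async function dispatched with asyncio.ensure_future(), as shown in the second listing of each example.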

Annotator and Custom Writer Data from Multiple Cameras

Example of how to access data from multiple cameras in a scene using annotators or custom writers. The standalone example can also be found at: standalone_examples/api/omni.replicator.isaac/multi-camera.py.
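
The core pattern in the listings below is to create one render product per camera, attach an annotator (or a writer) to each, and read the data after stepping the orchestrator. A minimal sketch of that pattern (camera prim paths and resolutions are illustrative placeholders):

import omni.replicator.core as rep

# One render product per camera (placeholder prim paths and resolutions)
rp1 = rep.create.render_product("/World/Camera1", resolution=(320, 320))
rp2 = rep.create.render_product("/World/Camera2", resolution=(640, 640))

# Attach an rgb annotator to each render product
annotators = []
for rp in [rp1, rp2]:
    rgb = rep.AnnotatorRegistry.get_annotator("rgb")
    rgb.attach(rp)
    annotators.append(rgb)

# Capture manually instead of on every rendered frame
rep.orchestrator.set_capture_on_play(False)
rep.orchestrator.step()

# Per-camera data, in the same order as the render products
per_camera_rgb = [annot.get_data() for annot in annotators]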

Annotator and Custom Writer Data from Multiple Cameras (Standalone Application)
  1from isaacsim import SimulationApp
  2
  3simulation_app = SimulationApp(launch_config={"headless": False})
  4
  5import os
  6import omni.usd
  7import omni.kit
  8import omni.replicator.core as rep
  9from omni.replicator.core import AnnotatorRegistry, Writer
 10from PIL import Image
 11from pxr import UsdGeom, Sdf
 12
 13NUM_FRAMES = 5
 14
 15# Save rgb image to file
 16def save_rgb(rgb_data, file_name):
 17    rgb_img = Image.fromarray(rgb_data, "RGBA")
 18    rgb_img.save(file_name + ".png")
 19
 20
 21# Randomize cube color every frame using a replicator randomizer
 22def cube_color_randomizer():
 23    cube_prims = rep.get.prims(path_pattern="Cube")
 24    with cube_prims:
 25        rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
 26    return cube_prims.node
 27
 28
 29# Access data through a custom replicator writer
 30class MyWriter(Writer):
 31    def __init__(self, rgb: bool = True):
 32        self._frame_id = 0
 33        if rgb:
 34            self.annotators.append(AnnotatorRegistry.get_annotator("rgb"))
 35        # Create writer output directory
 36        self.file_path = os.path.join(os.getcwd(), "_out_mc_writer", "")
 37        print(f"Writing writer data to {self.file_path}")
 38        dir = os.path.dirname(self.file_path)
 39        os.makedirs(dir, exist_ok=True)
 40
 41    def write(self, data):
 42        for annotator in data.keys():
 43            annotator_split = annotator.split("-")
 44            if len(annotator_split) > 1:
 45                render_product_name = annotator_split[-1]
 46            if annotator.startswith("rgb"):
 47                save_rgb(data[annotator], f"{self.file_path}/{render_product_name}_frame_{self._frame_id}")
 48        self._frame_id += 1
 49
 50
 51rep.WriterRegistry.register(MyWriter)
 52
 53# Create a new stage with a dome light
 54omni.usd.get_context().new_stage()
 55stage = omni.usd.get_context().get_stage()
 56dome_light = stage.DefinePrim("/World/DomeLight", "DomeLight")
 57dome_light.CreateAttribute("inputs:intensity", Sdf.ValueTypeNames.Float).Set(900.0)
 58
 59# Create cube
 60cube_prim = stage.DefinePrim("/World/Cube", "Cube")
 61UsdGeom.Xformable(cube_prim).AddTranslateOp().Set((0.0, 5.0, 1.0))
 62
 63# Register cube color randomizer to trigger on every frame
 64rep.randomizer.register(cube_color_randomizer)
 65with rep.trigger.on_frame():
 66    rep.randomizer.cube_color_randomizer()
 67
 68# Create cameras
 69camera_prim1 = stage.DefinePrim("/World/Camera1", "Camera")
 70UsdGeom.Xformable(camera_prim1).AddTranslateOp().Set((0.0, 10.0, 20.0))
 71UsdGeom.Xformable(camera_prim1).AddRotateXYZOp().Set((-15.0, 0.0, 0.0))
 72
 73camera_prim2 = stage.DefinePrim("/World/Camera2", "Camera")
 74UsdGeom.Xformable(camera_prim2).AddTranslateOp().Set((-10.0, 15.0, 15.0))
 75UsdGeom.Xformable(camera_prim2).AddRotateXYZOp().Set((-45.0, 0.0, 45.0))
 76
 77# Create render products
 78rp1 = rep.create.render_product(str(camera_prim1.GetPrimPath()), resolution=(320, 320))
 79rp2 = rep.create.render_product(str(camera_prim2.GetPrimPath()), resolution=(640, 640))
 80rp3 = rep.create.render_product("/OmniverseKit_Persp", (1024, 1024))
 81
 82# Access the data through a custom writer
 83writer = rep.WriterRegistry.get("MyWriter")
 84writer.initialize(rgb=True)
 85writer.attach([rp1, rp2, rp3])
 86
 87# Access the data through annotators
 88rgb_annotators = []
 89for rp in [rp1, rp2, rp3]:
 90    rgb = rep.AnnotatorRegistry.get_annotator("rgb")
 91    rgb.attach(rp)
 92    rgb_annotators.append(rgb)
 93
 94# Create annotator output directory
 95file_path = os.path.join(os.getcwd(), "_out_mc_annot", "")
 96print(f"Writing annotator data to {file_path}")
 97dir = os.path.dirname(file_path)
 98os.makedirs(dir, exist_ok=True)
 99
100# Data will be captured manually using step
101rep.orchestrator.set_capture_on_play(False)
102
103for i in range(NUM_FRAMES):
104    # The step function provides new data to the annotators, triggers the randomizers and the writer
105    rep.orchestrator.step(rt_subframes=4)
106    for j, rgb_annot in enumerate(rgb_annotators):
107        save_rgb(rgb_annot.get_data(), f"{dir}/rp{j}_step_{i}")
108
109simulation_app.close()
Annotator and Custom Writer Data from Multiple Cameras (Script Editor)
  1import asyncio
  2import os
  3import omni.usd
  4import omni.kit
  5import omni.replicator.core as rep
  6from omni.replicator.core import AnnotatorRegistry, Writer
  7from PIL import Image
  8from pxr import UsdGeom, Sdf
  9
 10NUM_FRAMES = 5
 11
 12# Save rgb image to file
 13def save_rgb(rgb_data, file_name):
 14    rgb_img = Image.fromarray(rgb_data, "RGBA")
 15    rgb_img.save(file_name + ".png")
 16
 17
 18# Randomize cube color every frame using a replicator randomizer
 19def cube_color_randomizer():
 20    cube_prims = rep.get.prims(path_pattern="Cube")
 21    with cube_prims:
 22        rep.randomizer.color(colors=rep.distribution.uniform((0, 0, 0), (1, 1, 1)))
 23    return cube_prims.node
 24
 25
 26# Access data through a custom replicator writer
 27class MyWriter(Writer):
 28    def __init__(self, rgb: bool = True):
 29        self._frame_id = 0
 30        if rgb:
 31            self.annotators.append(AnnotatorRegistry.get_annotator("rgb"))
 32        # Create writer output directory
 33        self.file_path = os.path.join(os.getcwd(), "_out_mc_writer", "")
 34        print(f"Writing writer data to {self.file_path}")
 35        dir = os.path.dirname(self.file_path)
 36        os.makedirs(dir, exist_ok=True)
 37
 38    def write(self, data):
 39        for annotator in data.keys():
 40            annotator_split = annotator.split("-")
 41            if len(annotator_split) > 1:
 42                render_product_name = annotator_split[-1]
 43            if annotator.startswith("rgb"):
 44                save_rgb(data[annotator], f"{self.file_path}/{render_product_name}_frame_{self._frame_id}")
 45        self._frame_id += 1
 46
 47
 48rep.WriterRegistry.register(MyWriter)
 49
 50# Create a new stage with a dome light
 51omni.usd.get_context().new_stage()
 52stage = omni.usd.get_context().get_stage()
 53dome_light = stage.DefinePrim("/World/DomeLight", "DomeLight")
 54dome_light.CreateAttribute("inputs:intensity", Sdf.ValueTypeNames.Float).Set(900.0)
 55
 56# Create cube
 57cube_prim = stage.DefinePrim("/World/Cube", "Cube")
 58UsdGeom.Xformable(cube_prim).AddTranslateOp().Set((0.0, 5.0, 1.0))
 59
 60# Register cube color randomizer to trigger on every frame
 61rep.randomizer.register(cube_color_randomizer)
 62with rep.trigger.on_frame():
 63    rep.randomizer.cube_color_randomizer()
 64
 65# Create cameras
 66camera_prim1 = stage.DefinePrim("/World/Camera1", "Camera")
 67UsdGeom.Xformable(camera_prim1).AddTranslateOp().Set((0.0, 10.0, 20.0))
 68UsdGeom.Xformable(camera_prim1).AddRotateXYZOp().Set((-15.0, 0.0, 0.0))
 69
 70camera_prim2 = stage.DefinePrim("/World/Camera2", "Camera")
 71UsdGeom.Xformable(camera_prim2).AddTranslateOp().Set((-10.0, 15.0, 15.0))
 72UsdGeom.Xformable(camera_prim2).AddRotateXYZOp().Set((-45.0, 0.0, 45.0))
 73
 74# Create render products
 75rp1 = rep.create.render_product(str(camera_prim1.GetPrimPath()), resolution=(320, 320))
 76rp2 = rep.create.render_product(str(camera_prim2.GetPrimPath()), resolution=(640, 640))
 77rp3 = rep.create.render_product("/OmniverseKit_Persp", (1024, 1024))
 78
 79# Access the data through a custom writer
 80writer = rep.WriterRegistry.get("MyWriter")
 81writer.initialize(rgb=True)
 82writer.attach([rp1, rp2, rp3])
 83
 84# Access the data through annotators
 85rgb_annotators = []
 86for rp in [rp1, rp2, rp3]:
 87    rgb = rep.AnnotatorRegistry.get_annotator("rgb")
 88    rgb.attach(rp)
 89    rgb_annotators.append(rgb)
 90
 91# Create annotator output directory
 92file_path = os.path.join(os.getcwd(), "_out_mc_annot", "")
 93print(f"Writing annotator data to {file_path}")
 94dir = os.path.dirname(file_path)
 95os.makedirs(dir, exist_ok=True)
 96
 97# Data will be captured manually using step
 98rep.orchestrator.set_capture_on_play(False)
 99
100async def run_example_async():
101    for i in range(NUM_FRAMES):
102        # The step function provides new data to the annotators, triggers the randomizers and the writer
103        await rep.orchestrator.step_async(rt_subframes=4)
104        for j, rgb_annot in enumerate(rgb_annotators):
105            save_rgb(rgb_annot.get_data(), f"{dir}/rp{j}_step_{i}")
106
107
108asyncio.ensure_future(run_example_async())

Synthetic Data Access at Specific Simulation Timepoints

Example of how to access synthetic data (RGB, semantic segmentation) from a camera in a simulation scene at specific simulation events (here, whenever a dropped cube stops moving) using annotators or writers. The standalone example can also be found at: standalone_examples/api/omni.replicator.isaac/simulation_get_data.py.
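
The listings below step the physics simulation without rendering and only capture when a condition is met. A condensed sketch of that event-driven capture loop, assuming world, cuboid, and rgb_annot have been created as in the full listings:

import numpy as np
import omni.replicator.core as rep

# Assumed to exist (created as in the listings below): world, cuboid, rgb_annot
for _ in range(500):
    world.step(render=False)  # advance physics only, no rendering
    if np.linalg.norm(cuboid.get_linear_velocity()) < 0.1:
        # Trigger the attached writer(s) and refresh the annotator data at this event
        rep.orchestrator.step(rt_subframes=4, delta_time=0.0, pause_timeline=False)
        rgb_data = rgb_annot.get_data()
        break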

Synthetic Data Access at Specific Simulation Timepoints (Standalone Application)
 1from isaacsim import SimulationApp
 2
 3simulation_app = SimulationApp(launch_config={"renderer": "RayTracedLighting", "headless": False})
 4
 5import json
 6import os
 7
 8import carb.settings
 9import numpy as np
10import omni
11import omni.replicator.core as rep
12from omni.isaac.core import World
13from omni.isaac.core.objects import DynamicCuboid
14from omni.isaac.core.utils.semantics import add_update_semantics
15from PIL import Image
16
17
18# Util function to save rgb annotator data
19def write_rgb_data(rgb_data, file_path):
20    rgb_img = Image.fromarray(rgb_data, "RGBA")
21    rgb_img.save(file_path + ".png")
22
23
24# Util function to save semantic segmentation annotator data
25def write_sem_data(sem_data, file_path):
26    id_to_labels = sem_data["info"]["idToLabels"]
27    with open(file_path + ".json", "w") as f:
28        json.dump(id_to_labels, f)
29    sem_image_data = np.frombuffer(sem_data["data"], dtype=np.uint8).reshape(*sem_data["data"].shape, -1)
30    sem_img = Image.fromarray(sem_image_data, "RGBA")
31    sem_img.save(file_path + ".png")
32
33
34# Create a new stage with the default ground plane
35omni.usd.get_context().new_stage()
36
37# Setup the simulation world
38world = World()
39world.scene.add_default_ground_plane()
40world.reset()
41
42# Setting capture on play to False will prevent the replicator from capturing data each frame
43carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
44
45# Create a camera and render product to collect the data from
46cam = rep.create.camera(position=(5, 5, 5), look_at=(0, 0, 0))
47rp = rep.create.render_product(cam, (512, 512))
48
49# Set the output directory for the data
50out_dir = os.getcwd() + "/_out_sim_event"
51os.makedirs(out_dir, exist_ok=True)
52print(f"Outputting data to {out_dir}..")
53
54# Example of using a writer to save the data
55writer = rep.WriterRegistry.get("BasicWriter")
56writer.initialize(
57    output_dir=f"{out_dir}/writer", rgb=True, semantic_segmentation=True, colorize_semantic_segmentation=True
58)
59writer.attach(rp)
60
61# Run a preview to ensure the replicator graph is initialized
62rep.orchestrator.preview()
63
64# Example of accessing the data directly from annotators
65rgb_annot = rep.AnnotatorRegistry.get_annotator("rgb")
66rgb_annot.attach(rp)
67sem_annot = rep.AnnotatorRegistry.get_annotator("semantic_segmentation", init_params={"colorize": True})
68sem_annot.attach(rp)
69
70# Spawn and drop a few cubes, capture data when they stop moving
71for i in range(5):
72    cuboid = world.scene.add(DynamicCuboid(prim_path=f"/World/Cuboid_{i}", name=f"Cuboid_{i}", position=(0, 0, 10 + i)))
73    add_update_semantics(cuboid.prim, "Cuboid")
74
75    for s in range(500):
76        world.step(render=False)
77        vel = np.linalg.norm(cuboid.get_linear_velocity())
78        if vel < 0.1:
79            print(f"Cube_{i} stopped moving after {s} simulation steps, writing data..")
80            # Trigger the writer and update the annotators with new data
81            rep.orchestrator.step(rt_subframes=4, delta_time=0.0, pause_timeline=False)
82            write_rgb_data(rgb_annot.get_data(), f"{out_dir}/Cube_{i}_step_{s}_rgb")
83            write_sem_data(sem_annot.get_data(), f"{out_dir}/Cube_{i}_step_{s}_sem")
84            break
85
86simulation_app.close()
Synthetic Data Access at Specific Simulation Timepoints (Script Editor)
 1import asyncio
 2import json
 3import os
 4
 5import carb.settings
 6import numpy as np
 7import omni
 8import omni.replicator.core as rep
 9from omni.isaac.core import World
10from omni.isaac.core.objects import DynamicCuboid
11from omni.isaac.core.utils.semantics import add_update_semantics
12from PIL import Image
13
14
15# Util function to save rgb annotator data
16def write_rgb_data(rgb_data, file_path):
17    rgb_img = Image.fromarray(rgb_data, "RGBA")
18    rgb_img.save(file_path + ".png")
19
20
21# Util function to save semantic segmentation annotator data
22def write_sem_data(sem_data, file_path):
23    id_to_labels = sem_data["info"]["idToLabels"]
24    with open(file_path + ".json", "w") as f:
25        json.dump(id_to_labels, f)
26    sem_image_data = np.frombuffer(sem_data["data"], dtype=np.uint8).reshape(*sem_data["data"].shape, -1)
27    sem_img = Image.fromarray(sem_image_data, "RGBA")
28    sem_img.save(file_path + ".png")
29
30
31# Create a new stage with the default ground plane
32omni.usd.get_context().new_stage()
33
34# Setup the simulation world
35world = World()
36world.scene.add_default_ground_plane()
37
38
39# Setting capture on play to False will prevent the replicator from capturing data each frame
40carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
41
42# Create a camera and render product to collect the data from
43cam = rep.create.camera(position=(5, 5, 5), look_at=(0, 0, 0))
44rp = rep.create.render_product(cam, (512, 512))
45
46# Set the output directory for the data
47out_dir = os.getcwd() + "/_out_sim_event"
48os.makedirs(out_dir, exist_ok=True)
49print(f"Outputting data to {out_dir}..")
50
51# Example of using a writer to save the data
52writer = rep.WriterRegistry.get("BasicWriter")
53writer.initialize(
54    output_dir=f"{out_dir}/writer", rgb=True, semantic_segmentation=True, colorize_semantic_segmentation=True
55)
56writer.attach(rp)
57
58# Run a preview to ensure the replicator graph is initialized
59rep.orchestrator.preview()
60
61# Example of accessing the data directly from annotators
62rgb_annot = rep.AnnotatorRegistry.get_annotator("rgb")
63rgb_annot.attach(rp)
64sem_annot = rep.AnnotatorRegistry.get_annotator("semantic_segmentation", init_params={"colorize": True})
65sem_annot.attach(rp)
66
67
68async def run_example_async():
69    await world.initialize_simulation_context_async()
70    await world.reset_async()
71
72    # Spawn and drop a few cubes, capture data when they stop moving
73    for i in range(5):
74        cuboid = world.scene.add(
75            DynamicCuboid(prim_path=f"/World/Cuboid_{i}", name=f"Cuboid_{i}", position=(0, 0, 10 + i))
76        )
77        add_update_semantics(cuboid.prim, "Cuboid")
78
79        for s in range(500):
80            await omni.kit.app.get_app().next_update_async()
81            vel = np.linalg.norm(cuboid.get_linear_velocity())
82            if vel < 0.1:
83                print(f"Cube_{i} stopped moving after {s} simulation steps, writing data..")
84                # Trigger the writer and update the annotators with new data
85                await rep.orchestrator.step_async(rt_subframes=4, delta_time=0.0, pause_timeline=False)
86                write_rgb_data(rgb_annot.get_data(), f"{out_dir}/Cube_{i}_step_{s}_rgb")
87                write_sem_data(sem_annot.get_data(), f"{out_dir}/Cube_{i}_step_{s}_sem")
88                break
89
90
91asyncio.ensure_future(run_example_async())

Custom Event Randomization and Writing

The following example shows how custom events can be used to trigger randomizations and data writing at arbitrary points during the simulation. The standalone example can also be found at: standalone_examples/api/omni.replicator.isaac/custom_event_and_write.py.
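
The mechanism is a trigger registered with rep.trigger.on_custom_event, which only fires when an event with the matching name is sent. A minimal, self-contained sketch (the cube and event name are illustrative):

import omni.replicator.core as rep
import omni.usd

omni.usd.get_context().new_stage()
cube = rep.create.cube(position=(0, 0, 0))

# Randomization graph that only runs when the named event is received
with rep.trigger.on_custom_event(event_name="randomize_cube"):
    with cube:
        rep.randomizer.rotation()

# Fire the event, then capture a frame so the randomization is applied
rep.utils.send_og_event(event_name="randomize_cube")
rep.orchestrator.step(rt_subframes=8)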

Custom Event Randomization and Writing (Standalone Application)
 1from isaacsim import SimulationApp
 2
 3simulation_app = SimulationApp(launch_config={"headless": False})
 4
 5import os
 6
 7import omni.replicator.core as rep
 8import omni.usd
 9
10omni.usd.get_context().new_stage()
11distance_light = rep.create.light(rotation=(315, 0, 0), intensity=4000, light_type="distant")
12
13large_cube = rep.create.cube(scale=1.25, position=(1, 1, 0))
14small_cube = rep.create.cube(scale=0.75, position=(-1, -1, 0))
15large_cube_prim = large_cube.get_output_prims()["prims"][0]
16small_cube_prim = small_cube.get_output_prims()["prims"][0]
17
18rp = rep.create.render_product("/OmniverseKit_Persp", (512, 512))
19writer = rep.WriterRegistry.get("BasicWriter")
20out_dir = os.getcwd() + "/_out_custom_event"
21print(f"Writing data to {out_dir}")
22writer.initialize(output_dir=out_dir, rgb=True)
23writer.attach(rp)
24
25with rep.trigger.on_custom_event(event_name="randomize_large_cube"):
26    with large_cube:
27        rep.randomizer.rotation()
28
29with rep.trigger.on_custom_event(event_name="randomize_small_cube"):
30    with small_cube:
31        rep.randomizer.rotation()
32
33
34def run_example():
35    print(f"Randomizing small cube")
36    rep.utils.send_og_event(event_name="randomize_small_cube")
37    print("Capturing frame")
38    rep.orchestrator.step(rt_subframes=8)
39
40    print("Moving small cube")
41    small_cube_prim.GetAttribute("xformOp:translate").Set((-2, -2, 0))
42    print("Capturing frame")
43    rep.orchestrator.step(rt_subframes=8)
44
45    print(f"Randomizing large cube")
46    rep.utils.send_og_event(event_name="randomize_large_cube")
47    print("Capturing frame")
48    rep.orchestrator.step(rt_subframes=8)
49
50    print("Moving large cube")
51    large_cube_prim.GetAttribute("xformOp:translate").Set((2, 2, 0))
52    print("Capturing frame")
53    rep.orchestrator.step(rt_subframes=8)
54
55    # Wait until all the data is saved to disk
56    rep.orchestrator.wait_until_complete()
57
58
59run_example()
60
61simulation_app.close()
Custom Event Randomization and Writing (Script Editor)
 1import asyncio
 2import os
 3
 4import omni.replicator.core as rep
 5import omni.usd
 6
 7omni.usd.get_context().new_stage()
 8distance_light = rep.create.light(rotation=(315, 0, 0), intensity=4000, light_type="distant")
 9
10large_cube = rep.create.cube(scale=1.25, position=(1, 1, 0))
11small_cube = rep.create.cube(scale=0.75, position=(-1, -1, 0))
12large_cube_prim = large_cube.get_output_prims()["prims"][0]
13small_cube_prim = small_cube.get_output_prims()["prims"][0]
14
15rp = rep.create.render_product("/OmniverseKit_Persp", (512, 512))
16writer = rep.WriterRegistry.get("BasicWriter")
17out_dir = os.getcwd() + "/_out_custom_event"
18print(f"Writing data to {out_dir}")
19writer.initialize(output_dir=out_dir, rgb=True)
20writer.attach(rp)
21
22with rep.trigger.on_custom_event(event_name="randomize_large_cube"):
23    with large_cube:
24        rep.randomizer.rotation()
25
26with rep.trigger.on_custom_event(event_name="randomize_small_cube"):
27    with small_cube:
28        rep.randomizer.rotation()
29
30
31async def run_example_async():
32    print(f"Randomizing small cube")
33    rep.utils.send_og_event(event_name="randomize_small_cube")
34    print("Capturing frame")
35    await rep.orchestrator.step_async(rt_subframes=8)
36
37    print("Moving small cube")
38    small_cube_prim.GetAttribute("xformOp:translate").Set((-2, -2, 0))
39    print("Capturing frame")
40    await rep.orchestrator.step_async(rt_subframes=8)
41
42    print(f"Randomizing large cube")
43    rep.utils.send_og_event(event_name="randomize_large_cube")
44    print("Capturing frame")
45    await rep.orchestrator.step_async(rt_subframes=8)
46
47    print("Moving large cube")
48    large_cube_prim.GetAttribute("xformOp:translate").Set((2, 2, 0))
49    print("Capturing frame")
50    await rep.orchestrator.step_async(rt_subframes=8)
51
52    # Wait until all the data is saved to disk
53    await rep.orchestrator.wait_until_complete_async()
54
55
56asyncio.ensure_future(run_example_async())

Motion Blur

This example demonstrates how to capture motion blur data in the RTX - Real-Time and RTX - Interactive (Path Tracing) rendering modes. In RTX - Real-Time mode, motion blur is controlled through the renderer's post-processing motion blur parameters (see the /rtx/post/motionblur settings used in the listings below). In RTX - Interactive (Path Tracing) mode, motion blur is achieved by rendering multiple subframes (/omni/replicator/pathTracedMotionBlurSubSamples) and combining them to create the effect. The example uses animated and physics-enabled assets with synchronized motion. Keyframe-animated assets can be advanced at any custom delta time due to their interpolated motion, whereas physics-enabled assets require a sufficiently high physics FPS to provide motion samples at any custom delta time. The example shows how to compute the target physics FPS, change it if needed, and restore the original physics FPS after capturing the motion blur. The standalone example can also be found at: standalone_examples/api/omni.replicator.isaac/motion_blur.py.
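
The target physics FPS computation in the example reduces to a few lines; the sketch below mirrors it with illustrative values (a 1/240 s capture step and 8 path-traced subframes):

# Illustrative values (not fixed by the example)
custom_delta_time = 1 / 240   # capture step in seconds
pt_subsamples = 8             # /omni/replicator/pathTracedMotionBlurSubSamples
use_path_tracing = True

# At least one physics step is needed per rendered (sub)frame
target_physics_fps = 1 / custom_delta_time
if use_path_tracing:
    target_physics_fps *= pt_subsamples  # 240 * 8 = 1920 physics steps per second

# The example only raises the PhysX scene's TimeStepsPerSecond attribute if the
# target exceeds the current value, and restores the original value afterwards.
print(target_physics_fps)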

Motion Blur (Standalone Application)
  1from isaacsim import SimulationApp
  2
  3simulation_app = SimulationApp({"headless": False})
  4
  5import os
  6
  7import carb.settings
  8import omni.kit.app
  9import omni.replicator.core as rep
 10import omni.timeline
 11import omni.usd
 12from omni.isaac.nucleus import get_assets_root_path
 13from pxr import PhysxSchema, Sdf, UsdGeom, UsdPhysics
 14
 15# Paths to the animated and physics-ready assets
 16PHYSICS_ASSET_URL = "/Isaac/Props/YCB/Axis_Aligned_Physics/003_cracker_box.usd"
 17ANIM_ASSET_URL = "/Isaac/Props/YCB/Axis_Aligned/003_cracker_box.usd"
 18
 19# -z velocities and start locations of the animated (left side) and physics (right side) assets (stage units/s)
 20ASSET_VELOCITIES = [0, 5, 10]
 21ASSET_X_MIRRORED_LOCATIONS = [(0.5, 0, 0.3), (0.3, 0, 0.3), (0.1, 0, 0.3)]
 22
 23# Used to calculate how many frames to animate the assets to maintain the same velocity as the physics assets
 24ANIMATION_DURATION = 10
 25
 26# Create a new stage with animated and physics-enabled assets with synchronized motion
 27def setup_stage():
 28    # Create new stage
 29    omni.usd.get_context().new_stage()
 30    stage = omni.usd.get_context().get_stage()
 31    timeline = omni.timeline.get_timeline_interface()
 32    timeline.set_end_time(ANIMATION_DURATION)
 33
 34    # Create lights
 35    dome_light = stage.DefinePrim("/World/DomeLight", "DomeLight")
 36    dome_light.CreateAttribute("inputs:intensity", Sdf.ValueTypeNames.Float).Set(100.0)
 37    distant_light = stage.DefinePrim("/World/DistantLight", "DistantLight")
 38    if not distant_light.GetAttribute("xformOp:rotateXYZ"):
 39        UsdGeom.Xformable(distant_light).AddRotateXYZOp()
 40    distant_light.GetAttribute("xformOp:rotateXYZ").Set((-75, 0, 0))
 41    distant_light.CreateAttribute("inputs:intensity", Sdf.ValueTypeNames.Float).Set(2500)
 42
 43    # Setup the physics assets with gravity disabled and the requested velocity
 44    assets_root_path = get_assets_root_path()
 45    physics_asset_url = assets_root_path + PHYSICS_ASSET_URL
 46    for loc, vel in zip(ASSET_X_MIRRORED_LOCATIONS, ASSET_VELOCITIES):
 47        prim = stage.DefinePrim(f"/World/physics_asset_{int(abs(vel))}", "Xform")
 48        prim.GetReferences().AddReference(physics_asset_url)
 49        if not prim.GetAttribute("xformOp:translate"):
 50            UsdGeom.Xformable(prim).AddTranslateOp()
 51        prim.GetAttribute("xformOp:translate").Set(loc)
 52        prim.GetAttribute("physxRigidBody:disableGravity").Set(True)
 53        prim.GetAttribute("physxRigidBody:angularDamping").Set(0.0)
 54        prim.GetAttribute("physxRigidBody:linearDamping").Set(0.0)
 55        prim.GetAttribute("physics:velocity").Set((0, 0, -vel))
 56
 57    # Setup animated assets maintaining the same velocity as the physics assets
 58    anim_asset_url = assets_root_path + ANIM_ASSET_URL
 59    for loc, vel in zip(ASSET_X_MIRRORED_LOCATIONS, ASSET_VELOCITIES):
 60        start_loc = (-loc[0], loc[1], loc[2])
 61        prim = stage.DefinePrim(f"/World/anim_asset_{int(abs(vel))}", "Xform")
 62        prim.GetReferences().AddReference(anim_asset_url)
 63        if not prim.GetAttribute("xformOp:translate"):
 64            UsdGeom.Xformable(prim).AddTranslateOp()
 65        anim_distance = vel * ANIMATION_DURATION
 66        end_loc = (start_loc[0], start_loc[1], start_loc[2] - anim_distance)
 67        end_keyframe = timeline.get_time_codes_per_seconds() * ANIMATION_DURATION
 68        # Timesampled keyframe (animated) translation
 69        prim.GetAttribute("xformOp:translate").Set(start_loc, time=0)
 70        prim.GetAttribute("xformOp:translate").Set(end_loc, time=end_keyframe)
 71
 72
 73# Capture motion blur frames with the given delta time step and render mode
 74def run_motion_blur_example(num_frames=3, custom_delta_time=None, use_path_tracing=True, pt_subsamples=8, pt_spp=64):
 75    # Create a new stage with the assets
 76    setup_stage()
 77    stage = omni.usd.get_context().get_stage()
 78
 79    # Set replicator settings (capture only on request and enable motion blur)
 80    carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
 81    carb.settings.get_settings().set("/omni/replicator/captureMotionBlur", True)
 82
 83    # Set motion blur settings based on the render mode
 84    if use_path_tracing:
 85        print(f"[MotionBlur] Setting PathTracing render mode motion blur settings")
 86        carb.settings.get_settings().set("/rtx/rendermode", "PathTracing")
 87        # (int): Total number of samples for each rendered pixel, per frame.
 88        carb.settings.get_settings().set("/rtx/pathtracing/spp", pt_spp)
 89        # (int): Maximum number of samples to accumulate per pixel. When this count is reached the rendering stops until a scene or setting change is detected, restarting the rendering process. Set to 0 to remove this limit.
 90        carb.settings.get_settings().set("/rtx/pathtracing/totalSpp", pt_spp)
 91        carb.settings.get_settings().set("/rtx/pathtracing/optixDenoiser/enabled", 0)
 92        # Number of sub samples to render if in PathTracing render mode and motion blur is enabled.
 93        carb.settings.get_settings().set("/omni/replicator/pathTracedMotionBlurSubSamples", pt_subsamples)
 94    else:
 95        print(f"[MotionBlur] Setting RayTracedLighting render mode motion blur settings")
 96        carb.settings.get_settings().set("/rtx/rendermode", "RayTracedLighting")
 97        # 0: Disabled, 1: TAA, 2: FXAA, 3: DLSS, 4:RTXAA
 98        carb.settings.get_settings().set("/rtx/post/aa/op", 2)
 99        # (float): The fraction of the largest screen dimension to use as the maximum motion blur diameter.
100        carb.settings.get_settings().set("/rtx/post/motionblur/maxBlurDiameterFraction", 0.02)
101        # (float): Exposure time fraction in frames (1.0 = one frame duration) to sample.
102        carb.settings.get_settings().set("/rtx/post/motionblur/exposureFraction", 1.0)
103        # (int): Number of samples to use in the filter. A higher number improves quality at the cost of performance.
104        carb.settings.get_settings().set("/rtx/post/motionblur/numSamples", 8)
105
106    # Setup camera and writer
107    camera = rep.create.camera(position=(0, 1.5, 0), look_at=(0, 0, 0), name="MotionBlurCam")
108    render_product = rep.create.render_product(camera, (1920, 1080))
109    basic_writer = rep.WriterRegistry.get("BasicWriter")
110    delta_time_str = "None" if custom_delta_time is None else f"{custom_delta_time:.4f}"
111    render_mode_str = f"pt_subsamples_{pt_subsamples}_spp_{pt_spp}" if use_path_tracing else "rt"
112    output_directory = os.getcwd() + f"/_out_motion_blur_dt_{delta_time_str}_{render_mode_str}"
113    print(f"[MotionBlur] Output directory: {output_directory}")
114    basic_writer.initialize(output_dir=output_directory, rgb=True)
115    basic_writer.attach(render_product)
116
117    # Run a few updates to make sure all materials are fully loaded for capture
118    for _ in range(50):
119        simulation_app.update()
120
121    # Use the physics scene to modify the physics FPS (if needed) to guarantee motion samples at any custom delta time
122    physx_scene = None
123    for prim in stage.Traverse():
124        if prim.IsA(UsdPhysics.Scene):
125            physx_scene = PhysxSchema.PhysxSceneAPI.Apply(prim)
126            break
127    if physx_scene is None:
128        print(f"[MotionBlur] Creating a new PhysicsScene")
129        physics_scene = UsdPhysics.Scene.Define(stage, "/PhysicsScene")
130        physx_scene = PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/PhysicsScene"))
131
132    # Check the target physics depending on the custom delta time and the render mode
133    target_physics_fps = stage.GetTimeCodesPerSecond() if custom_delta_time is None else 1 / custom_delta_time
134    if use_path_tracing:
135        target_physics_fps *= pt_subsamples
136
137    # Check if the physics FPS needs to be increased to match the custom delta time
138    orig_physics_fps = physx_scene.GetTimeStepsPerSecondAttr().Get()
139    if target_physics_fps > orig_physics_fps:
140        print(f"[MotionBlur] Changing physics FPS from {orig_physics_fps} to {target_physics_fps}")
141        physx_scene.GetTimeStepsPerSecondAttr().Set(target_physics_fps)
142
143    # Start the timeline for physics updates in the step function
144    timeline = omni.timeline.get_timeline_interface()
145    timeline.play()
146
147    # Capture frames
148    for i in range(num_frames):
149        print(f"[MotionBlur] \tCapturing frame {i}")
150        rep.orchestrator.step(delta_time=custom_delta_time)
151
152    # Restore the original physics FPS
153    if target_physics_fps > orig_physics_fps:
154        print(f"[MotionBlur] Restoring physics FPS from {target_physics_fps} to {orig_physics_fps}")
155        physx_scene.GetTimeStepsPerSecondAttr().Set(orig_physics_fps)
156
157    # Switch back to the raytracing render mode
158    if use_path_tracing:
159        print(f"[MotionBlur] Restoring render mode to RayTracedLighting")
160        carb.settings.get_settings().set("/rtx/rendermode", "RayTracedLighting")
161
162
163def run_motion_blur_examples():
164    motion_blur_step_duration = [None, 1 / 30, 1 / 60, 1 / 240]
165    for custom_delta_time in motion_blur_step_duration:
166        # RayTracing examples
167        run_motion_blur_example(custom_delta_time=custom_delta_time, use_path_tracing=False)
168        # PathTracing examples
169        spps = [32, 128]
170        motion_blur_sub_samples = [4, 16]
171        for motion_blur_sub_sample in motion_blur_sub_samples:
172            for spp in spps:
173                run_motion_blur_example(
174                    custom_delta_time=custom_delta_time,
175                    use_path_tracing=True,
176                    pt_subsamples=motion_blur_sub_sample,
177                    pt_spp=spp,
178                )
179
180
181run_motion_blur_examples()
182
183simulation_app.close()
Motion Blur (Script Editor)
  1import asyncio
  2import os
  3
  4import carb.settings
  5import omni.kit.app
  6import omni.replicator.core as rep
  7import omni.timeline
  8import omni.usd
  9from omni.isaac.nucleus import get_assets_root_path
 10from pxr import PhysxSchema, Sdf, UsdGeom, UsdPhysics
 11
 12# Paths to the animated and physics-ready assets
 13PHYSICS_ASSET_URL = "/Isaac/Props/YCB/Axis_Aligned_Physics/003_cracker_box.usd"
 14ANIM_ASSET_URL = "/Isaac/Props/YCB/Axis_Aligned/003_cracker_box.usd"
 15
 16# -z velocities and start locations of the animated (left side) and physics (right side) assets (stage units/s)
 17ASSET_VELOCITIES = [0, 5, 10]
 18ASSET_X_MIRRORED_LOCATIONS = [(0.5, 0, 0.3), (0.3, 0, 0.3), (0.1, 0, 0.3)]
 19
 20# Used to calculate how many frames to animate the assets to maintain the same velocity as the physics assets
 21ANIMATION_DURATION = 10
 22
 23# Create a new stage with animated and physics-enabled assets with synchronized motion
 24def setup_stage():
 25    # Create new stage
 26    omni.usd.get_context().new_stage()
 27    stage = omni.usd.get_context().get_stage()
 28    timeline = omni.timeline.get_timeline_interface()
 29    timeline.set_end_time(ANIMATION_DURATION)
 30
 31    # Create lights
 32    dome_light = stage.DefinePrim("/World/DomeLight", "DomeLight")
 33    dome_light.CreateAttribute("inputs:intensity", Sdf.ValueTypeNames.Float).Set(100.0)
 34    distant_light = stage.DefinePrim("/World/DistantLight", "DistantLight")
 35    if not distant_light.GetAttribute("xformOp:rotateXYZ"):
 36        UsdGeom.Xformable(distant_light).AddRotateXYZOp()
 37    distant_light.GetAttribute("xformOp:rotateXYZ").Set((-75, 0, 0))
 38    distant_light.CreateAttribute("inputs:intensity", Sdf.ValueTypeNames.Float).Set(2500)
 39
 40    # Setup the physics assets with gravity disabled and the requested velocity
 41    assets_root_path = get_assets_root_path()
 42    physics_asset_url = assets_root_path + PHYSICS_ASSET_URL
 43    for loc, vel in zip(ASSET_X_MIRRORED_LOCATIONS, ASSET_VELOCITIES):
 44        prim = stage.DefinePrim(f"/World/physics_asset_{int(abs(vel))}", "Xform")
 45        prim.GetReferences().AddReference(physics_asset_url)
 46        if not prim.GetAttribute("xformOp:translate"):
 47            UsdGeom.Xformable(prim).AddTranslateOp()
 48        prim.GetAttribute("xformOp:translate").Set(loc)
 49        prim.GetAttribute("physxRigidBody:disableGravity").Set(True)
 50        prim.GetAttribute("physxRigidBody:angularDamping").Set(0.0)
 51        prim.GetAttribute("physxRigidBody:linearDamping").Set(0.0)
 52        prim.GetAttribute("physics:velocity").Set((0, 0, -vel))
 53
 54    # Setup animated assets maintaining the same velocity as the physics assets
 55    anim_asset_url = assets_root_path + ANIM_ASSET_URL
 56    for loc, vel in zip(ASSET_X_MIRRORED_LOCATIONS, ASSET_VELOCITIES):
 57        start_loc = (-loc[0], loc[1], loc[2])
 58        prim = stage.DefinePrim(f"/World/anim_asset_{int(abs(vel))}", "Xform")
 59        prim.GetReferences().AddReference(anim_asset_url)
 60        if not prim.GetAttribute("xformOp:translate"):
 61            UsdGeom.Xformable(prim).AddTranslateOp()
 62        anim_distance = vel * ANIMATION_DURATION
 63        end_loc = (start_loc[0], start_loc[1], start_loc[2] - anim_distance)
 64        end_keyframe = timeline.get_time_codes_per_seconds() * ANIMATION_DURATION
 65        # Timesampled keyframe (animated) translation
 66        prim.GetAttribute("xformOp:translate").Set(start_loc, time=0)
 67        prim.GetAttribute("xformOp:translate").Set(end_loc, time=end_keyframe)
 68
 69
 70# Capture motion blur frames with the given delta time step and render mode
 71async def run_motion_blur_example_async(
 72    num_frames=3, custom_delta_time=None, use_path_tracing=True, pt_subsamples=8, pt_spp=64
 73):
 74    # Create a new stage with the assets
 75    setup_stage()
 76    stage = omni.usd.get_context().get_stage()
 77
 78    # Set replicator settings (capture only on request and enable motion blur)
 79    carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
 80    carb.settings.get_settings().set("/omni/replicator/captureMotionBlur", True)
 81
 82    # Set motion blur settings based on the render mode
 83    if use_path_tracing:
 84        print(f"[MotionBlur] Setting PathTracing render mode motion blur settings")
 85        carb.settings.get_settings().set("/rtx/rendermode", "PathTracing")
 86        # (int): Total number of samples for each rendered pixel, per frame.
 87        carb.settings.get_settings().set("/rtx/pathtracing/spp", pt_spp)
 88        # (int): Maximum number of samples to accumulate per pixel. When this count is reached the rendering stops until a scene or setting change is detected, restarting the rendering process. Set to 0 to remove this limit.
 89        carb.settings.get_settings().set("/rtx/pathtracing/totalSpp", pt_spp)
 90        carb.settings.get_settings().set("/rtx/pathtracing/optixDenoiser/enabled", 0)
 91        # Number of sub samples to render if in PathTracing render mode and motion blur is enabled.
 92        carb.settings.get_settings().set("/omni/replicator/pathTracedMotionBlurSubSamples", pt_subsamples)
 93    else:
 94        print(f"[MotionBlur] Setting RayTracedLighting render mode motion blur settings")
 95        carb.settings.get_settings().set("/rtx/rendermode", "RayTracedLighting")
 96        # 0: Disabled, 1: TAA, 2: FXAA, 3: DLSS, 4:RTXAA
 97        carb.settings.get_settings().set("/rtx/post/aa/op", 2)
 98        # (float): The fraction of the largest screen dimension to use as the maximum motion blur diameter.
 99        carb.settings.get_settings().set("/rtx/post/motionblur/maxBlurDiameterFraction", 0.02)
100        # (float): Exposure time fraction in frames (1.0 = one frame duration) to sample.
101        carb.settings.get_settings().set("/rtx/post/motionblur/exposureFraction", 1.0)
102        # (int): Number of samples to use in the filter. A higher number improves quality at the cost of performance.
103        carb.settings.get_settings().set("/rtx/post/motionblur/numSamples", 8)
104
105    # Setup camera and writer
106    camera = rep.create.camera(position=(0, 1.5, 0), look_at=(0, 0, 0), name="MotionBlurCam")
107    render_product = rep.create.render_product(camera, (1920, 1080))
108    basic_writer = rep.WriterRegistry.get("BasicWriter")
109    delta_time_str = "None" if custom_delta_time is None else f"{custom_delta_time:.4f}"
110    render_mode_str = f"pt_subsamples_{pt_subsamples}_spp_{pt_spp}" if use_path_tracing else "rt"
111    output_directory = os.getcwd() + f"/_out_motion_blur_dt_{delta_time_str}_{render_mode_str}"
112    print(f"[MotionBlur] Output directory: {output_directory}")
113    basic_writer.initialize(output_dir=output_directory, rgb=True)
114    basic_writer.attach(render_product)
115
116    # Run a few updates to make sure all materials are fully loaded for capture
117    for _ in range(50):
118        await omni.kit.app.get_app().next_update_async()
119
120    # Use the physics scene to modify the physics FPS (if needed) to guarantee motion samples at any custom delta time
121    physx_scene = None
122    for prim in stage.Traverse():
123        if prim.IsA(UsdPhysics.Scene):
124            physx_scene = PhysxSchema.PhysxSceneAPI.Apply(prim)
125            break
126    if physx_scene is None:
127        print(f"[MotionBlur] Creating a new PhysicsScene")
128        physics_scene = UsdPhysics.Scene.Define(stage, "/PhysicsScene")
129        physx_scene = PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/PhysicsScene"))
130
131    # Check the target physics depending on the custom delta time and the render mode
132    target_physics_fps = stage.GetTimeCodesPerSecond() if custom_delta_time is None else 1 / custom_delta_time
133    if use_path_tracing:
134        target_physics_fps *= pt_subsamples
135
136    # Check if the physics FPS needs to be increased to match the custom delta time
137    orig_physics_fps = physx_scene.GetTimeStepsPerSecondAttr().Get()
138    if target_physics_fps > orig_physics_fps:
139        print(f"[MotionBlur] Changing physics FPS from {orig_physics_fps} to {target_physics_fps}")
140        physx_scene.GetTimeStepsPerSecondAttr().Set(target_physics_fps)
141
142    # Start the timeline for physics updates in the step function
143    timeline = omni.timeline.get_timeline_interface()
144    timeline.play()
145
146    # Capture frames
147    for i in range(num_frames):
148        print(f"[MotionBlur] \tCapturing frame {i}")
149        await rep.orchestrator.step_async(delta_time=custom_delta_time)
150
151    # Restore the original physics FPS
152    if target_physics_fps > orig_physics_fps:
153        print(f"[MotionBlur] Restoring physics FPS from {target_physics_fps} to {orig_physics_fps}")
154        physx_scene.GetTimeStepsPerSecondAttr().Set(orig_physics_fps)
155
156    # Switch back to the raytracing render mode
157    if use_path_tracing:
158        print(f"[MotionBlur] Restoring render mode to RayTracedLighting")
159        carb.settings.get_settings().set("/rtx/rendermode", "RayTracedLighting")
160
161
162async def run_motion_blur_examples_async():
163    motion_blur_step_duration = [None, 1 / 30, 1 / 60, 1 / 240]
164    for custom_delta_time in motion_blur_step_duration:
165        # RayTracing examples
166        await run_motion_blur_example_async(custom_delta_time=custom_delta_time, use_path_tracing=False)
167        # PathTracing examples
168        spps = [32, 128]
169        motion_blur_sub_samples = [4, 16]
170        for motion_blur_sub_sample in motion_blur_sub_samples:
171            for spp in spps:
172                await run_motion_blur_example_async(
173                    custom_delta_time=custom_delta_time,
174                    use_path_tracing=True,
175                    pt_subsamples=motion_blur_sub_sample,
176                    pt_spp=spp,
177                )
178
179
180asyncio.ensure_future(run_motion_blur_examples_async())

Subscribers and Events at Custom FPS

Examples of subscribing to various events (such as timeline, physics, stage rendering, and app updates), setting custom update rates, and adjusting related settings. The standalone example can also be found at: standalone_examples/api/omni.replicator.isaac/subscribers_and_events.py.
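
All of the subscriptions below follow the same shape: obtain the relevant event stream or interface, register a callback, keep the returned subscription object alive, and call unsubscribe() when done. A minimal sketch using the app update stream (the callback body is illustrative):

import carb.events
import omni.kit.app

def on_app_update(event: carb.events.IEvent):
    # Called once per app update tick
    print(f"app update: type={event.type}")

# Keep a reference to the subscription object, otherwise it is garbage collected
app_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(on_app_update)

# ... later, stop receiving callbacks
app_sub.unsubscribe()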

Subscribers and Events at Custom FPS (Standalone Application)
  1from isaacsim import SimulationApp
  2
  3simulation_app = SimulationApp({"headless": False})
  4
  5import asyncio
  6import time
  7
  8import carb.events
  9import carb.settings
 10import omni.kit.app
 11import omni.physx
 12import omni.timeline
 13import omni.usd
 14from pxr import PhysxSchema, UsdPhysics
 15
 16# TIMELINE / STAGE
 17USE_CUSTOM_TIMELINE_SETTINGS = False
 18USE_FIXED_TIME_STEPPING = False
 19PLAY_EVERY_FRAME = True
 20PLAY_DELAY_COMPENSATION = 0.0
 21SUBSAMPLE_RATE = 1
 22STAGE_FPS = 30.0
 23
 24# PHYSX
 25USE_CUSTOM_PHYSX_FPS = False
 26PHYSX_FPS = 60.0
 27MIN_SIM_FPS = 30
 28
 29# Simulations can also be enabled/disabled at runtime
 30DISABLE_SIMULATIONS = False
 31
 32# APP / RENDER
 33LIMIT_APP_FPS = False
 34APP_FPS = 120
 35
 36# Duration after which to clear subscribers and print the cached events
 37MAX_DURATION = 3.0
 38PRINT_EVENTS = False
 39
 40
 41def on_timeline_event(event: omni.timeline.TimelineEventType):
 42    global timeline_sub
 43    global timeline_events
 44    global wall_start_time
 45    elapsed_wall_time = time.time() - wall_start_time
 46
 47    # Cache only time advance events
 48    if event.type == omni.timeline.TimelineEventType.CURRENT_TIME_TICKED.value:
 49        event_name = omni.timeline.TimelineEventType(event.type).name
 50        event_payload = event.payload
 51        timeline_events.append((elapsed_wall_time, event_name, event_payload))
 52
 53    # Clear subscriber and print cached events
 54    if elapsed_wall_time > MAX_DURATION:
 55        if timeline_sub is not None:
 56            timeline_sub.unsubscribe()
 57            timeline_sub = None
 58        num_events = len(timeline_events)
 59        fps = num_events / MAX_DURATION
 60        print(f"[timeline] captured {num_events} events with approx {fps} FPS")
 61        if PRINT_EVENTS:
 62            for i, (wall_time, event_name, payload) in enumerate(timeline_events):
 63                print(f"\t[timeline][{i}]\ttime={wall_time:.4f};\tevent={event_name};\tpayload={payload}")
 64
 65
 66def on_physics_step(dt: float):
 67    global physx_events
 68    global wall_start_time
 69    elapsed_wall_time = time.time() - wall_start_time
 70
 71    # Cache physics events
 72    physx_events.append((elapsed_wall_time, dt))
 73
 74    # Clear subscriber and print cached events
 75    if elapsed_wall_time > MAX_DURATION:
 76        # Physics unsubscription needs to be deferred from the callback function
 77        # see: '[Error] [omni.physx.plugin] Subscription cannot be changed during the event call'
 78        async def clear_physx_sub_async():
 79            global physx_sub
 80            if physx_sub is not None:
 81                physx_sub.unsubscribe()
 82                physx_sub = None
 83
 84        asyncio.ensure_future(clear_physx_sub_async())
 85        num_events = len(physx_events)
 86        fps = num_events / MAX_DURATION
 87        print(f"[physics] captured {num_events} events with approx {fps} FPS")
 88        if PRINT_EVENTS:
 89            for i, (wall_time, dt) in enumerate(physx_events):
 90                print(f"\t[physics][{i}]\ttime={wall_time:.4f};\tdt={dt};")
 91
 92
 93def on_stage_render_event(event: omni.usd.StageRenderingEventType):
 94    global stage_render_sub
 95    global stage_render_events
 96    global wall_start_time
 97    elapsed_wall_time = time.time() - wall_start_time
 98
 99    event_name = omni.usd.StageRenderingEventType(event.type).name
100    event_payload = event.payload
101    stage_render_events.append((elapsed_wall_time, event_name, event_payload))
102
103    if elapsed_wall_time > MAX_DURATION:
104        if stage_render_sub is not None:
105            stage_render_sub.unsubscribe()
106            stage_render_sub = None
107        num_events = len(stage_render_events)
108        fps = num_events / MAX_DURATION
109        print(f"[stage render] captured {num_events} events with approx {fps} FPS")
110        if PRINT_EVENTS:
111            for i, (wall_time, event_name, payload) in enumerate(stage_render_events):
112                print(f"\t[stage render][{i}]\ttime={wall_time:.4f};\tevent={event_name};\tpayload={payload}")
113
114
115def on_app_update(event: carb.events.IEvent):
116    global app_sub
117    global app_update_events
118    global wall_start_time
119    elapsed_wall_time = time.time() - wall_start_time
120
121    event_type = event.type
122    event_payload = event.payload
123    app_update_events.append((elapsed_wall_time, event_type, event_payload))
124
125    if elapsed_wall_time > MAX_DURATION:
126        if app_sub is not None:
127            app_sub.unsubscribe()
128            app_sub = None
129        num_events = len(app_update_events)
130        fps = num_events / MAX_DURATION
131        print(f"[app] captured {num_events} events with approx {fps} FPS")
132        if PRINT_EVENTS:
133            for i, (wall_time, event_type, payload) in enumerate(app_update_events):
134                print(f"\t[app][{i}]\ttime={wall_time:.4f};\tevent={event_type};\tpayload={payload}")
135
136
137stage = omni.usd.get_context().get_stage()
138timeline = omni.timeline.get_timeline_interface()
139
140
141if USE_CUSTOM_TIMELINE_SETTINGS:
142    # Ideal to make simulation and animation synchronized.
143    # Default: True in editor, False in standalone.
144    # NOTE:
145    # - It may limit the frame rate (see 'timeline.set_play_every_frame') such that the elapsed wall clock time matches the frame's delta time.
146    # - If the app runs slower than this, animation playback may slow down (see 'CompensatePlayDelayInSecs').
147    # - For performance benchmarks, turn this off or set a very high target in `timeline.set_target_framerate`
148    carb.settings.get_settings().set("/app/player/useFixedTimeStepping", USE_FIXED_TIME_STEPPING)
149
150    # This compensates for frames that require more computation time than the frame's fixed delta time, by temporarily speeding up playback.
151    # The parameter represents the length of these "faster" playback periods, which means that it must be larger than the fixed frame time to take effect.
152    # Default: 0.0
153    # NOTE:
154    # - only effective if `useFixedTimeStepping` is set to True
155    # - setting a large value results in long fast playback after a huge lag spike
156    carb.settings.get_settings().set("/app/player/CompensatePlayDelayInSecs", PLAY_DELAY_COMPENSATION)
157
158    # If set to True, no frames are skipped and in every frame time advances by `1 / TimeCodesPerSecond`.
159    # Default: False
160    # NOTE:
161    # - only effective if `useFixedTimeStepping` is set to True
162    # - simulation is usually faster than real-time and processing is only limited by the frame rate of the runloop
163    # - useful for recording
164    # - same as `carb.settings.get_settings().set("/app/player/useFastMode", PLAY_EVERY_FRAME)`
165    timeline.set_play_every_frame(PLAY_EVERY_FRAME)
166
167    # Timeline sub-stepping, i.e. how many times updates are called (update events are dispatched) each frame.
168    # Default: 1
169    # NOTE: same as `carb.settings.get_settings().set("/app/player/timelineSubsampleRate", SUBSAMPLE_RATE)`
170    timeline.set_ticks_per_frame(SUBSAMPLE_RATE)
171
172    # Time codes per second for the stage
173    # NOTE: same as `stage.SetTimeCodesPerSecond(STAGE_FPS)` and `carb.settings.get_settings().set("/app/stage/timeCodesPerSecond", STAGE_FPS)`
174    timeline.set_time_codes_per_second(STAGE_FPS)
175
176
177# Create a PhysX scene to set the physics time step
178if USE_CUSTOM_PHYSX_FPS:
179    physx_scene = None
180    for prim in stage.Traverse():
181        if prim.IsA(UsdPhysics.Scene):
182            physx_scene = PhysxSchema.PhysxSceneAPI.Apply(prim)
183            break
184    if physx_scene is None:
185        physics_scene = UsdPhysics.Scene.Define(stage, "/PhysicsScene")
186        physx_scene = PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/PhysicsScene"))
187
188    # Time step for the physics simulation
189    # Default: 60.0
190    physx_scene.GetTimeStepsPerSecondAttr().Set(PHYSX_FPS)
191
192    # Minimum simulation frequency to prevent clamping; if the frame rate drops below this,
193    # physics steps are discarded to avoid app slowdown if the overall frame rate is too low.
194    # Default: 30.0
195    # NOTE: Matching `minFrameRate` with `TimeStepsPerSecond` ensures a single physics step per update.
196    carb.settings.get_settings().set("/persistent/simulation/minFrameRate", MIN_SIM_FPS)
197
198
199# Throttle Render/UI/Main thread update rate
200if LIMIT_APP_FPS:
201    # Enable rate limiting of the main run loop (UI, rendering, etc.)
202    # Default: False
203    carb.settings.get_settings().set("/app/runLoops/main/rateLimitEnabled", LIMIT_APP_FPS)
204
205    # FPS limit of the main run loop (UI, rendering, etc.)
206    # Default: 120
207    # NOTE: disabled if `/app/player/useFixedTimeStepping` is False
208    carb.settings.get_settings().set("/app/runLoops/main/rateLimitFrequency", int(APP_FPS))
209
210
211# Simulations can be selectively disabled (or toggled at specific times)
212if DISABLE_SIMULATIONS:
213    carb.settings.get_settings().set("/app/player/playSimulations", False)
214
215
216# Start the timeline
217timeline.set_current_time(0)
218timeline.set_end_time(MAX_DURATION + 1)
219timeline.set_looping(False)
220timeline.play()
221timeline.commit()
222wall_start_time = time.time()
223
224# Subscribe and cache various events for a limited duration
225timeline_events = []
226timeline_sub = timeline.get_timeline_event_stream().create_subscription_to_pop(on_timeline_event)
227physx_events = []
228physx_sub = omni.physx.get_physx_interface().subscribe_physics_step_events(on_physics_step)
229stage_render_events = []
230stage_render_sub = omni.usd.get_context().get_rendering_event_stream().create_subscription_to_pop(on_stage_render_event)
231app_update_events = []
232app_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(on_app_update)
233
234# Keep the simulation running until the duration is passed
235while simulation_app.is_running():
236    if time.time() - wall_start_time > MAX_DURATION + 0.1:
237        break
238    simulation_app.update()
239
240simulation_app.close()
Subscribers and Events at Custom FPS (Script Editor)
  1import asyncio
  2import time
  3
  4import carb.events
  5import carb.settings
  6import omni.kit.app
  7import omni.physx
  8import omni.timeline
  9import omni.usd
 10from pxr import PhysxSchema, UsdPhysics
 11
 12# TIMELINE / STAGE
 13USE_CUSTOM_TIMELINE_SETTINGS = False
 14USE_FIXED_TIME_STEPPING = False
 15PLAY_EVERY_FRAME = True
 16PLAY_DELAY_COMPENSATION = 0.0
 17SUBSAMPLE_RATE = 1
 18STAGE_FPS = 30.0
 19
 20# PHYSX
 21USE_CUSTOM_PHYSX_FPS = False
 22PHYSX_FPS = 60.0
 23MIN_SIM_FPS = 30
 24
 25# Simulations can also be enabled/disabled at runtime
 26DISABLE_SIMULATIONS = False
 27
 28# APP / RENDER
 29LIMIT_APP_FPS = False
 30APP_FPS = 120
 31
 32# Duration after which to clear subscribers and print the cached events
 33MAX_DURATION = 3.0
 34PRINT_EVENTS = False
 35
 36
 37def on_timeline_event(event: omni.timeline.TimelineEventType):
 38    global timeline_sub
 39    global timeline_events
 40    global wall_start_time
 41    elapsed_wall_time = time.time() - wall_start_time
 42
 43    # Cache only time advance events
 44    if event.type == omni.timeline.TimelineEventType.CURRENT_TIME_TICKED.value:
 45        event_name = omni.timeline.TimelineEventType(event.type).name
 46        event_payload = event.payload
 47        timeline_events.append((elapsed_wall_time, event_name, event_payload))
 48
 49    # Clear subscriber and print cached events
 50    if elapsed_wall_time > MAX_DURATION:
 51        if timeline_sub is not None:
 52            timeline_sub.unsubscribe()
 53            timeline_sub = None
 54        num_events = len(timeline_events)
 55        fps = num_events / MAX_DURATION
 56        print(f"[timeline] captured {num_events} events with approx {fps} FPS")
 57        if PRINT_EVENTS:
 58            for i, (wall_time, event_name, payload) in enumerate(timeline_events):
 59                print(f"\t[timeline][{i}]\ttime={wall_time:.4f};\tevent={event_name};\tpayload={payload}")
 60
 61
 62def on_physics_step(dt: float):
 63    global physx_events
 64    global wall_start_time
 65    elapsed_wall_time = time.time() - wall_start_time
 66
 67    # Cache physics events
 68    physx_events.append((elapsed_wall_time, dt))
 69
 70    # Clear subscriber and print cached events
 71    if elapsed_wall_time > MAX_DURATION:
 72        # Physics unsubscription needs to be deferred from the callback function
 73        # see: '[Error] [omni.physx.plugin] Subscription cannot be changed during the event call'
 74        async def clear_physx_sub_async():
 75            global physx_sub
 76            if physx_sub is not None:
 77                physx_sub.unsubscribe()
 78                physx_sub = None
 79
 80        asyncio.ensure_future(clear_physx_sub_async())
 81        num_events = len(physx_events)
 82        fps = num_events / MAX_DURATION
 83        print(f"[physics] captured {num_events} events with approx {fps} FPS")
 84        if PRINT_EVENTS:
 85            for i, (wall_time, dt) in enumerate(physx_events):
 86                print(f"\t[physics][{i}]\ttime={wall_time:.4f};\tdt={dt};")
 87
 88
 89def on_stage_render_event(event: omni.usd.StageRenderingEventType):
 90    global stage_render_sub
 91    global stage_render_events
 92    global wall_start_time
 93    elapsed_wall_time = time.time() - wall_start_time
 94
 95    event_name = omni.usd.StageRenderingEventType(event.type).name
 96    event_payload = event.payload
 97    stage_render_events.append((elapsed_wall_time, event_name, event_payload))
 98
 99    if elapsed_wall_time > MAX_DURATION:
100        if stage_render_sub is not None:
101            stage_render_sub.unsubscribe()
102            stage_render_sub = None
103        num_events = len(stage_render_events)
104        fps = num_events / MAX_DURATION
105        print(f"[stage render] captured {num_events} events with approx {fps} FPS")
106        if PRINT_EVENTS:
107            for i, (wall_time, event_name, payload) in enumerate(stage_render_events):
108                print(f"\t[stage render][{i}]\ttime={wall_time:.4f};\tevent={event_name};\tpayload={payload}")
109
110
111def on_app_update(event: carb.events.IEvent):
112    global app_sub
113    global app_update_events
114    global wall_start_time
115    elapsed_wall_time = time.time() - wall_start_time
116
117    event_type = event.type
118    event_payload = event.payload
119    app_update_events.append((elapsed_wall_time, event_type, event_payload))
120
121    if elapsed_wall_time > MAX_DURATION:
122        if app_sub is not None:
123            app_sub.unsubscribe()
124            app_sub = None
125        num_events = len(app_update_events)
126        fps = num_events / MAX_DURATION
127        print(f"[app] captured {num_events} events at approx. {fps:.2f} FPS")
128        if PRINT_EVENTS:
129            for i, (wall_time, event_type, payload) in enumerate(app_update_events):
130                print(f"\t[app][{i}]\ttime={wall_time:.4f};\tevent={event_type};\tpayload={payload}")
131
132
133stage = omni.usd.get_context().get_stage()
134timeline = omni.timeline.get_timeline_interface()
135
136
137if USE_CUSTOM_TIMELINE_SETTINGS:
138    # Useful for keeping simulation and animation synchronized.
139    # Default: True in editor, False in standalone.
140    # NOTE:
141    # - It may limit the frame rate (see 'timeline.set_play_every_frame') such that the elapsed wall clock time matches the frame's delta time.
142    # - If the app runs slower than this, animation playback may slow down (see 'CompensatePlayDelayInSecs').
143    # - For performance benchmarks, turn this off or set a very high target in `timeline.set_target_framerate`
144    carb.settings.get_settings().set("/app/player/useFixedTimeStepping", USE_FIXED_TIME_STEPPING)
145
146    # This compensates for frames that require more computation time than the frame's fixed delta time, by temporarily speeding up playback.
147    # The parameter represents the length of these "faster" playback periods, which means that it must be larger than the fixed frame time to take effect.
148    # Default: 0.0
149    # NOTE:
150    # - only effective if `useFixedTimeStepping` is set to True
151    # - setting a large value results in long fast playback after a huge lag spike
152    carb.settings.get_settings().set("/app/player/CompensatePlayDelayInSecs", PLAY_DELAY_COMPENSATION)
153
154    # If set to True, no frames are skipped and time advances by `1 / TimeCodesPerSecond` every frame.
155    # Default: False
156    # NOTE:
157    # - only effective if `useFixedTimeStepping` is set to True
158    # - simulation is usually faster than real-time and processing is only limited by the frame rate of the runloop
159    # - useful for recording
160    # - same as `carb.settings.get_settings().set("/app/player/useFastMode", PLAY_EVERY_FRAME)`
161    timeline.set_play_every_frame(PLAY_EVERY_FRAME)
162
163    # Timeline sub-stepping, i.e. how many times updates are called (update events are dispatched) each frame.
164    # Default: 1
165    # NOTE: same as `carb.settings.get_settings().set("/app/player/timelineSubsampleRate", SUBSAMPLE_RATE)`
166    timeline.set_ticks_per_frame(SUBSAMPLE_RATE)
167
168    # Time codes per second for the stage
169    # NOTE: same as `stage.SetTimeCodesPerSecond(STAGE_FPS)` and `carb.settings.get_settings().set("/app/stage/timeCodesPerSecond", STAGE_FPS)`
170    timeline.set_time_codes_per_second(STAGE_FPS)
171
172
173# Get (or create) the physics scene to set the physics time step
174if USE_CUSTOM_PHYSX_FPS:
175    physx_scene = None
176    for prim in stage.Traverse():
177        if prim.IsA(UsdPhysics.Scene):
178            physx_scene = PhysxSchema.PhysxSceneAPI.Apply(prim)
179            break
180    if physx_scene is None:
181        physics_scene = UsdPhysics.Scene.Define(stage, "/PhysicsScene")
182        physx_scene = PhysxSchema.PhysxSceneAPI.Apply(stage.GetPrimAtPath("/PhysicsScene"))
183
184    # Time step for the physics simulation
185    # Default: 60.0
186    physx_scene.GetTimeStepsPerSecondAttr().Set(PHYSX_FPS)
187
188    # Minimum simulation frame rate; if the overall frame rate drops below this value,
189    # physics steps are discarded (simulation time is clamped) to avoid slowing the app down even further.
190    # Default: 30.0
191    # NOTE: Matching `minFrameRate` with `TimeStepsPerSecond` ensures a single physics step per update.
192    carb.settings.get_settings().set("/persistent/simulation/minFrameRate", MIN_SIM_FPS)
193
194
195# Throttle Render/UI/Main thread update rate
196if LIMIT_APP_FPS:
197    # Enable rate limiting of the main run loop (UI, rendering, etc.)
198    # Default: False
199    carb.settings.get_settings().set("/app/runLoops/main/rateLimitEnabled", LIMIT_APP_FPS)
200
201    # FPS limit of the main run loop (UI, rendering, etc.)
202    # Default: 120
203    # NOTE: disabled if `/app/player/useFixedTimeStepping` is False
204    carb.settings.get_settings().set("/app/runLoops/main/rateLimitFrequency", int(APP_FPS))
205
206
207# Simulations can be selectively disabled (or toggled at specific times)
208if DISABLE_SIMULATIONS:
209    carb.settings.get_settings().set("/app/player/playSimulations", False)
210
211
212# Start the timeline
213timeline.set_current_time(0)
214timeline.set_end_time(MAX_DURATION + 1)
215timeline.set_looping(False)
216timeline.play()
217timeline.commit()
218wall_start_time = time.time()
219
220# Subscribe and cache various events for a limited duration
221timeline_events = []
222timeline_sub = timeline.get_timeline_event_stream().create_subscription_to_pop(on_timeline_event)
223physx_events = []
224physx_sub = omni.physx.get_physx_interface().subscribe_physics_step_events(on_physics_step)
225stage_render_events = []
226stage_render_sub = omni.usd.get_context().get_rendering_event_stream().create_subscription_to_pop(on_stage_render_event)
227app_update_events = []
228app_sub = omni.kit.app.get_app().get_update_event_stream().create_subscription_to_pop(on_app_update)

Accessing Writer and Annotator Data at Custom FPS

Example of how to trigger a writer and access annotator data at a custom FPS, with render product updates disabled whenever the data is not needed. The standalone example can also be found at: standalone_examples/api/omni.replicator.isaac/custom_fps_writer_annotator.py.

Note

It is currently not possible to change the timeline (stage) FPS after the replicator graph has been created, as doing so causes a graph reset. This issue is being addressed. As a workaround, set the timeline (stage) parameters before creating the replicator graph.
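
The snippet below is a minimal sketch of that ordering only (the FPS value and output directory are placeholders; the full example that follows does the same): the timeline (stage) parameters are applied first, and the replicator graph (render product and writer) is created only afterwards.

import omni.replicator.core as rep
import omni.timeline

# 1. Set the timeline (stage) parameters first
timeline = omni.timeline.get_timeline_interface()
timeline.set_time_codes_per_second(60.0)  # placeholder stage FPS
timeline.commit()

# 2. Only then create the replicator graph (render products, writers, annotators)
rp = rep.create.render_product("/OmniverseKit_Persp", (512, 512))
writer = rep.WriterRegistry.get("BasicWriter")
writer.initialize(output_dir="_out_order_example", rgb=True)  # placeholder output directory
writer.attach(rp)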

Accessing Writer and Annotator Data at Custom FPS
 1from isaacsim import SimulationApp
 2
 3simulation_app = SimulationApp({"headless": False})
 4
 5import os
 6
 7import carb.settings
 8import omni.kit.app
 9import omni.replicator.core as rep
10import omni.timeline
11import omni.usd
12
13 # NOTE: To avoid FPS delta misses, make sure the timeline (stage) FPS is an integer multiple of the sensor FPS
14STAGE_FPS = 60.0
15SENSOR_FPS = 10.0
16SENSOR_DT = 1.0 / SENSOR_FPS
17
18
19def run_custom_fps_example(num_frames=10):
20    # Create a new stage
21    omni.usd.get_context().new_stage()
22
23    # Disable capture on play (data will only be accessed at custom times)
24    carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
25
26    # Set the timeline parameters
27    timeline = omni.timeline.get_timeline_interface()
28    timeline.set_looping(False)
29    timeline.set_current_time(0.0)
30    timeline.set_end_time(10)
31    timeline.set_time_codes_per_second(STAGE_FPS)
32    timeline.play()
33    timeline.commit()
34
35    # Create a light and a semantically annotated cube
36    rep.create.light()
37    rep.create.cube(semantics=[("class", "cube")])
38
39    # Create a render product and disable it (it will be re-enabled when data is needed)
40    rp = rep.create.render_product("/OmniverseKit_Persp", (512, 512), name="rp")
41    rp.hydra_texture.set_updates_enabled(False)
42
43    # Create a writer and an annotator as different ways to access the data
44    out_dir_rgb = os.getcwd() + "/_out_writer_fps_rgb"
45    print(f"Writer data will be written to: {out_dir_rgb}")
46    writer_rgb = rep.WriterRegistry.get("BasicWriter")
47    writer_rgb.initialize(output_dir=out_dir_rgb, rgb=True)
48    # NOTE: 'trigger=None' is needed to make sure the writer is only triggered at the custom schedule times
49    writer_rgb.attach(rp, trigger=None)
50    annot_depth = rep.AnnotatorRegistry.get_annotator("distance_to_camera")
51    annot_depth.attach(rp)
52
53    # Run the simulation for the given number of frames and access the data at the desired framerates
54    previous_time = timeline.get_current_time()
55    elapsed_time = 0.0
56    for i in range(num_frames):
57        current_time = timeline.get_current_time()
58        elapsed_time += current_time - previous_time
59        print(f"[{i}] current_time={current_time:.4f}, elapsed_time={elapsed_time:.4f}/{SENSOR_DT:.4f};")
60
61        # Check if enough time has passed to trigger the sensor
62        if elapsed_time >= SENSOR_DT:
 63            # Carry over any overshoot past the ideal trigger time (relevant when the timeline FPS is not an exact multiple of the sensor FPS)
64            elapsed_time = elapsed_time - SENSOR_DT
65
66            # Enable render products for data access
67            rp.hydra_texture.set_updates_enabled(True)
68
69            # Write will be scheduled at the next step call
70            writer_rgb.schedule_write()
71
72            # Step needs to be called after scheduling the write
73            rep.orchestrator.step(delta_time=0.0)
74
75            # After step, the annotator data is available and in sync with the stage
76            annot_data = annot_depth.get_data()
77            print(f"\tWriter triggered and annotator data shape={annot_data.shape};")
78
79            # Disable render products to avoid unnecessary rendering
80            rp.hydra_texture.set_updates_enabled(False)
81
82            # Restart the timeline if it has been paused by the replicator step function
83            if not timeline.is_playing():
84                timeline.play()
85
86        previous_time = current_time
87        # Advance the app (timeline) by one frame
88        simulation_app.update()
89
90    # Make sure the writer finishes writing the data to disk
91    rep.orchestrator.wait_until_complete()
92
93
94# Run the example for a given number of frames
95run_custom_fps_example(num_frames=50)
96
97# Close the application
98simulation_app.close()

The same snippet can also be run asynchronously, for example from the Script Editor:

Accessing Writer and Annotator Data at Custom FPS (Script Editor)
 1import asyncio
 2import os
 3
 4import carb.settings
 5import omni.kit.app
 6import omni.replicator.core as rep
 7import omni.timeline
 8import omni.usd
 9
10 # NOTE: To avoid FPS delta misses, make sure the timeline (stage) FPS is an integer multiple of the sensor FPS
11STAGE_FPS = 60.0
12SENSOR_FPS = 10.0
13SENSOR_DT = 1.0 / SENSOR_FPS
14
15
16async def run_custom_fps_example_async(num_frames=10):
17    # Create a new stage
18    await omni.usd.get_context().new_stage_async()
19
20    # Disable capture on play (data will only be accessed at custom times)
21    carb.settings.get_settings().set("/omni/replicator/captureOnPlay", False)
22
23    # Set the timeline parameters
24    timeline = omni.timeline.get_timeline_interface()
25    timeline.set_looping(False)
26    timeline.set_current_time(0.0)
27    timeline.set_end_time(10)
28    timeline.set_time_codes_per_second(STAGE_FPS)
29    timeline.play()
30    timeline.commit()
31
32    # Create a light and a semantically annotated cube
33    rep.create.light()
34    rep.create.cube(semantics=[("class", "cube")])
35
36    # Create a render product and disable it (it will be re-enabled when data is needed)
37    rp = rep.create.render_product("/OmniverseKit_Persp", (512, 512), name="rp")
38    rp.hydra_texture.set_updates_enabled(False)
39
40    # Create a writer and an annotator as different ways to access the data
41    out_dir_rgb = os.getcwd() + "/_out_writer_fps_rgb"
42    print(f"Writer data will be written to: {out_dir_rgb}")
43    writer_rgb = rep.WriterRegistry.get("BasicWriter")
44    writer_rgb.initialize(output_dir=out_dir_rgb, rgb=True)
45    # NOTE: 'trigger=None' is needed to make sure the writer is only triggered at the custom schedule times
46    writer_rgb.attach(rp, trigger=None)
47    annot_depth = rep.AnnotatorRegistry.get_annotator("distance_to_camera")
48    annot_depth.attach(rp)
49
50    # Run the simulation for the given number of frames and access the data at the desired framerates
51    previous_time = timeline.get_current_time()
52    elapsed_time = 0.0
53    for i in range(num_frames):
54        current_time = timeline.get_current_time()
55        elapsed_time += current_time - previous_time
56        print(f"[{i}] current_time={current_time:.4f}, elapsed_time={elapsed_time:.4f}/{SENSOR_DT:.4f};")
57
58        # Check if enough time has passed to trigger the sensor
59        if elapsed_time >= SENSOR_DT:
61            # Carry over any overshoot past the ideal trigger time (relevant when the timeline FPS is not an exact multiple of the sensor FPS)
61            elapsed_time = elapsed_time - SENSOR_DT
62
63            # Enable render products for data access
64            rp.hydra_texture.set_updates_enabled(True)
65
66            # Write will be scheduled at the next step call
67            writer_rgb.schedule_write()
68
69            # Step needs to be called after scheduling the write
70            await rep.orchestrator.step_async(delta_time=0.0)
71
72            # After step, the annotator data is available and in sync with the stage
73            annot_data = annot_depth.get_data()
74            print(f"\tWriter triggered and annotator data shape={annot_data.shape};")
75
76            # Disable render products to avoid unnecessary rendering
77            rp.hydra_texture.set_updates_enabled(False)
78
79            # Restart the timeline if it has been paused by the replicator step function
80            if not timeline.is_playing():
81                timeline.play()
82
83        previous_time = current_time
84        # Advance the app (timeline) by one frame
85        await omni.kit.app.get_app().next_update_async()
86
87    # Make sure the writer finishes writing the data to disk
88    await rep.orchestrator.wait_until_complete_async()
89
90
91# Run the example for a given number of frames
92asyncio.ensure_future(run_custom_fps_example_async(num_frames=50))
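
After either variant finishes, the output can be sanity-checked by listing the files in the writer's output directory. This is a minimal sketch that only assumes the `_out_writer_fps_rgb` directory used above; it makes no assumption about the BasicWriter's file naming scheme.

import os

out_dir = os.path.join(os.getcwd(), "_out_writer_fps_rgb")
# List whatever the BasicWriter produced; file names follow the writer's own naming scheme
for file_name in sorted(os.listdir(out_dir)):
    print(file_name)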