Skip to content
Open
27 changes: 27 additions & 0 deletions source/isaaclab/isaaclab/renderers/base_renderer.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,33 @@
class BaseRenderer(ABC):
"""Abstract base class for renderer implementations."""

@abstractmethod
def create_output_buffers(
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Creating a new method seems cleaner here; this logic should not be owned by Camera.

self,
data_types: list[str],
height: int,
width: int,
num_views: int,
device: torch.device | str,
) -> dict[str, torch.Tensor]:
"""Allocate output tensors for the supported subset of ``data_types``.

Implementations MUST omit any data-type names they cannot produce and
allocate on ``device``. They MAY include aliased entries that share
storage with another entry (e.g. ``rgb`` as a view into ``rgba``).

Args:
data_types: Names of the requested data types.
height: Image height in pixels.
width: Image width in pixels.
num_views: Number of camera views (batch dimension).
device: Torch device on which to allocate the buffers.

Returns:
Mapping from data-type name to a pre-allocated tensor.
"""
pass

@abstractmethod
def prepare_stage(self, stage: Any, num_envs: int) -> None:
"""Prepare the stage for rendering before create_render_data is called.
Expand Down
142 changes: 32 additions & 110 deletions source/isaaclab/isaaclab/sensors/camera/camera.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,21 +12,19 @@
import numpy as np
import torch
import warp as wp
from packaging import version

from pxr import Sdf, UsdGeom
from pxr import UsdGeom

import isaaclab.utils.sensors as sensor_utils
from isaaclab.app.settings_manager import get_settings_manager
from isaaclab.renderers import BaseRenderer, Renderer
from isaaclab.sim.views import FrameView
from isaaclab.utils import has_kit, to_camel_case
from isaaclab.utils import to_camel_case
from isaaclab.utils.math import (
convert_camera_frame_orientation_convention,
create_rotation_matrix_from_view,
quat_from_matrix,
)
from isaaclab.utils.version import get_isaac_sim_version

from ..sensor_base import SensorBase
from .camera_data import CameraData
Expand Down Expand Up @@ -94,12 +92,6 @@ class Camera(SensorBase):
}
"""The set of sensor types that are not supported by the camera class."""

SIMPLE_SHADING_TYPES: set[str] = {
"simple_shading_constant_diffuse",
"simple_shading_diffuse_mdl",
"simple_shading_full_mdl",
}

def __init__(self, cfg: CameraCfg):
"""Initializes the camera sensor.

Expand All @@ -115,10 +107,15 @@ def __init__(self, cfg: CameraCfg):
# initialize base class
super().__init__(cfg)

# toggle rendering of rtx sensors as True
# this flag is read by SimulationContext to determine if rtx sensors should be rendered
settings = get_settings_manager()
settings.set_bool("/isaaclab/render/rtx_sensors", True)
# TODO: Camera should not branch on a specific renderer_type string. Replace with a
# generic opt-in flag on RendererCfg (e.g. ``requires_kit_rtx_sensors_flag``) that
# RTX-family cfgs set to True, so this branch carries no renderer-specific knowledge.
# The flag must flip at scene-construction time (before sim.reset()) because
# SimulationContext.is_rendering and several env classes branch on it pre-reset;
# flipping inside the renderer's __init__ (which only runs at sim.reset()) would
# silently break that timing.
if self.cfg.renderer_cfg.renderer_type == "isaac_rtx":
get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", True)
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P1 /isaaclab/render/rtx_sensors not set for OVRTXRenderer

Before this PR, Camera.__init__ unconditionally set /isaaclab/render/rtx_sensors = True for every camera. After the refactor it is only set when renderer_type == "isaac_rtx", so OVRTXRenderer users (renderer_type = "ovrtx") will have this flag remain False.

This flag is read in three critical places:

  • SimulationContext.is_rendering (line 349 of simulation_context.py) — used to decide whether the simulation renders at all
  • ManagerBasedEnv.has_rtx_sensors / DirectRLEnv.has_rtx_sensors — gate the per-reset re-render loop and the wait_for_textures path

With OVRTXRenderer, both will behave as if no RTX sensor is present, silently producing stale camera frames after env resets.


# Compute camera orientation (convention conversion) and spawn
rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32, device="cpu").unsqueeze(0)
Expand All @@ -138,22 +135,6 @@ def __init__(self, cfg: CameraCfg):
self._renderer: BaseRenderer | None = None
self._render_data = None

if not has_kit():
return
# HACK: We need to disable instancing for semantic_segmentation and instance_segmentation_fast to work
# checks for Isaac Sim v4.5 as this issue exists there
if get_isaac_sim_version() == version.parse("4.5"):
if "semantic_segmentation" in self.cfg.data_types or "instance_segmentation_fast" in self.cfg.data_types:
logger.warning(
"Isaac Sim 4.5 introduced a bug in Camera and TiledCamera when outputting instance and semantic"
" segmentation outputs for instanceable assets. As a workaround, the instanceable flag on assets"
" will be disabled in the current workflow and may lead to longer load times and increased memory"
" usage."
)
with Sdf.ChangeBlock():
for prim in self.stage.Traverse():
prim.SetInstanceable(False)

def __del__(self):
"""Unsubscribes from callbacks and cleans up renderer resources."""
# unsubscribe callbacks
Expand All @@ -168,10 +149,6 @@ def __str__(self) -> str:
return (
f"Camera @ '{self.cfg.prim_path}': \n"
f"\tdata types : {list(self.data.output.keys())} \n"
f"\tsemantic filter : {self.cfg.semantic_filter}\n"
f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n"
f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n"
f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n"
f"\tupdate period (s): {self.cfg.update_period}\n"
f"\tshape : {self.image_shape}\n"
f"\tnumber of sensors : {self._view.count}"
Expand Down Expand Up @@ -385,16 +362,10 @@ def _initialize_impl(self):

Raises:
RuntimeError: If the number of camera prims in the view does not match the number of environments.
RuntimeError: If cameras are not enabled (missing ``--enable_cameras`` flag).
RuntimeError: Propagated from the renderer constructor when the active backend's
runtime requirements are not satisfied (e.g. the RTX backend requires the
simulation app to be launched with ``--enable_cameras``).
"""
renderer_type = getattr(self.cfg.renderer_cfg, "renderer_type", "default")
needs_kit_cameras = renderer_type in ("default", "isaac_rtx")
if needs_kit_cameras and not get_settings_manager().get("/isaaclab/cameras_enabled"):
raise RuntimeError(
"A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable"
" rendering."
)

# Initialize parent class
super()._initialize_impl()

Expand Down Expand Up @@ -485,73 +456,24 @@ def _create_buffers(self):
self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device)
self._update_poses(self._ALL_INDICES)
self._data.image_shape = self.image_shape
# -- output data (eagerly pre-allocated so renderer.set_outputs() can hold tensor references)
data_dict = dict()
if "rgba" in self.cfg.data_types or "rgb" in self.cfg.data_types:
data_dict["rgba"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8
).contiguous()
if "rgb" in self.cfg.data_types:
data_dict["rgb"] = data_dict["rgba"][..., :3]
if "albedo" in self.cfg.data_types:
data_dict["albedo"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8
).contiguous()
for data_type in self.SIMPLE_SHADING_TYPES:
if data_type in self.cfg.data_types:
data_dict[data_type] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.uint8
).contiguous()
if "distance_to_image_plane" in self.cfg.data_types:
data_dict["distance_to_image_plane"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32
).contiguous()
if "depth" in self.cfg.data_types:
data_dict["depth"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32
).contiguous()
if "distance_to_camera" in self.cfg.data_types:
data_dict["distance_to_camera"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32
).contiguous()
if "normals" in self.cfg.data_types:
data_dict["normals"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.float32
).contiguous()
if "motion_vectors" in self.cfg.data_types:
data_dict["motion_vectors"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 2), device=self.device, dtype=torch.float32
).contiguous()
if "semantic_segmentation" in self.cfg.data_types:
if self.cfg.colorize_semantic_segmentation:
data_dict["semantic_segmentation"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8
).contiguous()
else:
data_dict["semantic_segmentation"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32
).contiguous()
if "instance_segmentation_fast" in self.cfg.data_types:
if self.cfg.colorize_instance_segmentation:
data_dict["instance_segmentation_fast"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8
).contiguous()
else:
data_dict["instance_segmentation_fast"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32
).contiguous()
if "instance_id_segmentation_fast" in self.cfg.data_types:
if self.cfg.colorize_instance_id_segmentation:
data_dict["instance_id_segmentation_fast"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8
).contiguous()
else:
data_dict["instance_id_segmentation_fast"] = torch.zeros(
(self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32
).contiguous()

self._data.output = data_dict
self._data.info = {name: None for name in self.cfg.data_types}
# -- output data: ask the renderer to allocate buffers for the requested data types.
buffers = self._renderer.create_output_buffers(
self.cfg.data_types,
self.cfg.height,
self.cfg.width,
self._view.count,
self.device,
)
# Surface any requested data types the active renderer cannot produce.
unsupported = [name for name in self.cfg.data_types if name not in buffers]
if unsupported:
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We no longer skip unsupported data types silently; they are surfaced to the user as a warning.

logger.warning(
"Renderer %s does not support the following requested data types and will not produce them: %s",
type(self._renderer).__name__,
unsupported,
)
self._data.output = buffers
self._data.info = {name: None for name in buffers}
self._renderer.set_outputs(self._render_data, self._data.output)

def _update_intrinsic_matrices(self, env_ids: Sequence[int]):
Expand Down
66 changes: 66 additions & 0 deletions source/isaaclab/isaaclab/sensors/camera/camera_cfg.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@

from __future__ import annotations

import warnings
from dataclasses import MISSING, field
from typing import TYPE_CHECKING, Literal

Expand All @@ -19,6 +20,18 @@
if TYPE_CHECKING:
from .camera import Camera

# Default values for the RTX-flavored fields kept on :class:`CameraCfg` for
# backward compatibility. These mirror the defaults on
# :class:`~isaaclab_physx.renderers.IsaacRtxRendererCfg`.
_DEPRECATED_RENDERER_FIELD_DEFAULTS: dict = {
Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We decided to keep this here for backward compatibility; we can break it in a later version bump.

"semantic_filter": "*:*",
"colorize_semantic_segmentation": True,
"colorize_instance_id_segmentation": True,
"colorize_instance_segmentation": True,
"semantic_segmentation_mapping": {},
"depth_clipping_behavior": "none",
}


@configclass
class CameraCfg(SensorBaseCfg):
Expand Down Expand Up @@ -67,6 +80,11 @@ class OffsetCfg:
- ``"max"``: Values are clipped to the maximum value.
- ``"zero"``: Values are clipped to zero.
- ``"none``: No clipping is applied. Values will be returned as ``inf``.

.. deprecated:: 4.7.0
This field is RTX-specific. Set
:attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.depth_clipping_behavior`
on :attr:`renderer_cfg` instead.
"""

data_types: list[str] = ["rgb"]
Expand Down Expand Up @@ -108,27 +126,47 @@ class OffsetCfg:
For more information on the semantics filter, see the documentation on `Replicator Semantics Schema Editor`_.

.. _Replicator Semantics Schema Editor: https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/semantics_schema_editor.html#semantics-filtering

.. deprecated:: 4.7.0
This field is RTX-specific. Set
:attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.semantic_filter` on
:attr:`renderer_cfg` instead.
"""

colorize_semantic_segmentation: bool = True
"""Whether to colorize the semantic segmentation images. Defaults to True.

If True, semantic segmentation is converted to an image where semantic IDs are mapped to colors
and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.

.. deprecated:: 4.7.0
This field is RTX-specific. Set
:attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.colorize_semantic_segmentation`
on :attr:`renderer_cfg` instead.
"""

colorize_instance_id_segmentation: bool = True
"""Whether to colorize the instance ID segmentation images. Defaults to True.

If True, instance id segmentation is converted to an image where instance IDs are mapped to colors.
and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.

.. deprecated:: 4.7.0
This field is RTX-specific. Set
:attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.colorize_instance_id_segmentation`
on :attr:`renderer_cfg` instead.
"""

colorize_instance_segmentation: bool = True
"""Whether to colorize the instance ID segmentation images. Defaults to True.

If True, instance segmentation is converted to an image where instance IDs are mapped to colors.
and returned as a ``uint8`` 4-channel array. If False, the output is returned as a ``int32`` array.

.. deprecated:: 4.7.0
This field is RTX-specific. Set
:attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.colorize_instance_segmentation`
on :attr:`renderer_cfg` instead.
"""

semantic_segmentation_mapping: dict = {}
Expand All @@ -147,7 +185,35 @@ class OffsetCfg:
"class:robot": (61, 178, 255, 255),
}

.. deprecated:: 4.7.0
This field is RTX-specific. Set
:attr:`~isaaclab_physx.renderers.IsaacRtxRendererCfg.semantic_segmentation_mapping`
on :attr:`renderer_cfg` instead.
"""

renderer_cfg: RendererCfg = field(default_factory=IsaacRtxRendererCfg)
"""Renderer configuration for camera sensor."""

def __post_init__(self):
    """Forward deprecated RTX-flavored fields onto :attr:`renderer_cfg`.

    Each deprecated field set to a non-default value emits a
    :class:`DeprecationWarning` and is copied onto ``self.renderer_cfg``
    when that cfg defines the same-named field.
    """
    # Forwarded by name: any same-named field on ``renderer_cfg`` receives the value.
    for name, default_value in _DEPRECATED_RENDERER_FIELD_DEFAULTS.items():
        current = getattr(self, name)
        if current == default_value:
            # Field was left at its default: nothing to forward.
            continue
        warnings.warn(
            f"CameraCfg.{name} is deprecated and will be removed in a future release."
            f" Set this field on CameraCfg.renderer_cfg instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        if hasattr(self.renderer_cfg, name):
            setattr(self.renderer_cfg, name, current)
        # Restore the default so a repeated ``__post_init__`` run (triggered by
        # ``cfg.copy()`` in ``SensorBase.__init__``) does not re-forward and
        # clobber a user-set ``renderer_cfg`` field.
        setattr(self, name, default_value)
3 changes: 3 additions & 0 deletions source/isaaclab/test/renderers/test_renderer_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,9 @@ class MockRenderer(BaseRenderer):
def __init__(self, cfg=None):
    # Test stub: accepts (and ignores) an optional cfg so the renderer
    # factory can construct this mock like a real renderer.
    pass

def create_output_buffers(self, data_types, height, width, num_views, device):
    """Test stub: allocate nothing and report no supported data types."""
    return dict()

def prepare_stage(self, stage, num_envs):
    # Test stub: no stage preparation is needed for the mock.
    pass

Expand Down
Loading
Loading