-
Notifications
You must be signed in to change notification settings - Fork 3.5k
Decouple Camera from RTX/Replicator output-buffer logic #5366
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: develop
Are you sure you want to change the base?
Changes from 6 commits
e8fc9f0
7a678db
c0e40c7
56fee8d
16d631a
404d79f
9cb99db
b9b1138
05e5f08
d106f2b
0ee4df6
e025bba
44ed678
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -12,21 +12,19 @@ | |
| import numpy as np | ||
| import torch | ||
| import warp as wp | ||
| from packaging import version | ||
|
|
||
| from pxr import Sdf, UsdGeom | ||
| from pxr import UsdGeom | ||
|
|
||
| import isaaclab.utils.sensors as sensor_utils | ||
| from isaaclab.app.settings_manager import get_settings_manager | ||
| from isaaclab.renderers import BaseRenderer, Renderer | ||
| from isaaclab.sim.views import FrameView | ||
| from isaaclab.utils import has_kit, to_camel_case | ||
| from isaaclab.utils import to_camel_case | ||
| from isaaclab.utils.math import ( | ||
| convert_camera_frame_orientation_convention, | ||
| create_rotation_matrix_from_view, | ||
| quat_from_matrix, | ||
| ) | ||
| from isaaclab.utils.version import get_isaac_sim_version | ||
|
|
||
| from ..sensor_base import SensorBase | ||
| from .camera_data import CameraData | ||
|
|
@@ -94,12 +92,6 @@ class Camera(SensorBase): | |
| } | ||
| """The set of sensor types that are not supported by the camera class.""" | ||
|
|
||
| SIMPLE_SHADING_TYPES: set[str] = { | ||
| "simple_shading_constant_diffuse", | ||
| "simple_shading_diffuse_mdl", | ||
| "simple_shading_full_mdl", | ||
| } | ||
|
|
||
| def __init__(self, cfg: CameraCfg): | ||
| """Initializes the camera sensor. | ||
|
|
||
|
|
@@ -115,10 +107,15 @@ def __init__(self, cfg: CameraCfg): | |
| # initialize base class | ||
| super().__init__(cfg) | ||
|
|
||
| # toggle rendering of rtx sensors as True | ||
| # this flag is read by SimulationContext to determine if rtx sensors should be rendered | ||
| settings = get_settings_manager() | ||
| settings.set_bool("/isaaclab/render/rtx_sensors", True) | ||
| # TODO: Camera should not branch on a specific renderer_type string. Replace with a | ||
| # generic opt-in flag on RendererCfg (e.g. ``requires_kit_rtx_sensors_flag``) that | ||
| # RTX-family cfgs set to True, so this branch carries no renderer-specific knowledge. | ||
| # The flag must flip at scene-construction time (before sim.reset()) because | ||
| # SimulationContext.is_rendering and several env classes branch on it pre-reset; | ||
| # flipping inside the renderer's __init__ (which only runs at sim.reset()) would | ||
| # silently break that timing. | ||
| if self.cfg.renderer_cfg.renderer_type == "isaac_rtx": | ||
| get_settings_manager().set_bool("/isaaclab/render/rtx_sensors", True) | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
Before this PR, this flag is read in three critical places:
With |
||
|
|
||
| # Compute camera orientation (convention conversion) and spawn | ||
| rot = torch.tensor(self.cfg.offset.rot, dtype=torch.float32, device="cpu").unsqueeze(0) | ||
|
|
@@ -138,22 +135,6 @@ def __init__(self, cfg: CameraCfg): | |
| self._renderer: BaseRenderer | None = None | ||
| self._render_data = None | ||
|
|
||
| if not has_kit(): | ||
| return | ||
| # HACK: We need to disable instancing for semantic_segmentation and instance_segmentation_fast to work | ||
| # checks for Isaac Sim v4.5 as this issue exists there | ||
| if get_isaac_sim_version() == version.parse("4.5"): | ||
| if "semantic_segmentation" in self.cfg.data_types or "instance_segmentation_fast" in self.cfg.data_types: | ||
| logger.warning( | ||
| "Isaac Sim 4.5 introduced a bug in Camera and TiledCamera when outputting instance and semantic" | ||
| " segmentation outputs for instanceable assets. As a workaround, the instanceable flag on assets" | ||
| " will be disabled in the current workflow and may lead to longer load times and increased memory" | ||
| " usage." | ||
| ) | ||
| with Sdf.ChangeBlock(): | ||
| for prim in self.stage.Traverse(): | ||
| prim.SetInstanceable(False) | ||
|
|
||
| def __del__(self): | ||
| """Unsubscribes from callbacks and cleans up renderer resources.""" | ||
| # unsubscribe callbacks | ||
|
|
@@ -168,10 +149,6 @@ def __str__(self) -> str: | |
| return ( | ||
| f"Camera @ '{self.cfg.prim_path}': \n" | ||
| f"\tdata types : {list(self.data.output.keys())} \n" | ||
| f"\tsemantic filter : {self.cfg.semantic_filter}\n" | ||
| f"\tcolorize semantic segm. : {self.cfg.colorize_semantic_segmentation}\n" | ||
| f"\tcolorize instance segm. : {self.cfg.colorize_instance_segmentation}\n" | ||
| f"\tcolorize instance id segm.: {self.cfg.colorize_instance_id_segmentation}\n" | ||
| f"\tupdate period (s): {self.cfg.update_period}\n" | ||
| f"\tshape : {self.image_shape}\n" | ||
| f"\tnumber of sensors : {self._view.count}" | ||
|
|
@@ -385,16 +362,10 @@ def _initialize_impl(self): | |
|
|
||
| Raises: | ||
| RuntimeError: If the number of camera prims in the view does not match the number of environments. | ||
| RuntimeError: If cameras are not enabled (missing ``--enable_cameras`` flag). | ||
| RuntimeError: Propagated from the renderer constructor when the active backend's | ||
| runtime requirements are not satisfied (e.g. the RTX backend requires the | ||
| simulation app to be launched with ``--enable_cameras``). | ||
| """ | ||
| renderer_type = getattr(self.cfg.renderer_cfg, "renderer_type", "default") | ||
| needs_kit_cameras = renderer_type in ("default", "isaac_rtx") | ||
| if needs_kit_cameras and not get_settings_manager().get("/isaaclab/cameras_enabled"): | ||
| raise RuntimeError( | ||
| "A camera was spawned without the --enable_cameras flag. Please use --enable_cameras to enable" | ||
| " rendering." | ||
| ) | ||
|
|
||
| # Initialize parent class | ||
| super()._initialize_impl() | ||
|
|
||
|
|
@@ -485,73 +456,24 @@ def _create_buffers(self): | |
| self._data.quat_w_world = torch.zeros((self._view.count, 4), device=self._device) | ||
| self._update_poses(self._ALL_INDICES) | ||
| self._data.image_shape = self.image_shape | ||
| # -- output data (eagerly pre-allocated so renderer.set_outputs() can hold tensor references) | ||
| data_dict = dict() | ||
| if "rgba" in self.cfg.data_types or "rgb" in self.cfg.data_types: | ||
| data_dict["rgba"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 | ||
| ).contiguous() | ||
| if "rgb" in self.cfg.data_types: | ||
| data_dict["rgb"] = data_dict["rgba"][..., :3] | ||
| if "albedo" in self.cfg.data_types: | ||
| data_dict["albedo"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 | ||
| ).contiguous() | ||
| for data_type in self.SIMPLE_SHADING_TYPES: | ||
| if data_type in self.cfg.data_types: | ||
| data_dict[data_type] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.uint8 | ||
| ).contiguous() | ||
| if "distance_to_image_plane" in self.cfg.data_types: | ||
| data_dict["distance_to_image_plane"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 | ||
| ).contiguous() | ||
| if "depth" in self.cfg.data_types: | ||
| data_dict["depth"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 | ||
| ).contiguous() | ||
| if "distance_to_camera" in self.cfg.data_types: | ||
| data_dict["distance_to_camera"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.float32 | ||
| ).contiguous() | ||
| if "normals" in self.cfg.data_types: | ||
| data_dict["normals"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 3), device=self.device, dtype=torch.float32 | ||
| ).contiguous() | ||
| if "motion_vectors" in self.cfg.data_types: | ||
| data_dict["motion_vectors"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 2), device=self.device, dtype=torch.float32 | ||
| ).contiguous() | ||
| if "semantic_segmentation" in self.cfg.data_types: | ||
| if self.cfg.colorize_semantic_segmentation: | ||
| data_dict["semantic_segmentation"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 | ||
| ).contiguous() | ||
| else: | ||
| data_dict["semantic_segmentation"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 | ||
| ).contiguous() | ||
| if "instance_segmentation_fast" in self.cfg.data_types: | ||
| if self.cfg.colorize_instance_segmentation: | ||
| data_dict["instance_segmentation_fast"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 | ||
| ).contiguous() | ||
| else: | ||
| data_dict["instance_segmentation_fast"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 | ||
| ).contiguous() | ||
| if "instance_id_segmentation_fast" in self.cfg.data_types: | ||
| if self.cfg.colorize_instance_id_segmentation: | ||
| data_dict["instance_id_segmentation_fast"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 4), device=self.device, dtype=torch.uint8 | ||
| ).contiguous() | ||
| else: | ||
| data_dict["instance_id_segmentation_fast"] = torch.zeros( | ||
| (self._view.count, self.cfg.height, self.cfg.width, 1), device=self.device, dtype=torch.int32 | ||
| ).contiguous() | ||
|
|
||
| self._data.output = data_dict | ||
| self._data.info = {name: None for name in self.cfg.data_types} | ||
| # -- output data: ask the renderer to allocate buffers for the requested data types. | ||
| buffers = self._renderer.create_output_buffers( | ||
| self.cfg.data_types, | ||
| self.cfg.height, | ||
| self.cfg.width, | ||
| self._view.count, | ||
| self.device, | ||
| ) | ||
| # Surface any requested data types the active renderer cannot produce. | ||
| unsupported = [name for name in self.cfg.data_types if name not in buffers] | ||
| if unsupported: | ||
|
nvsekkin marked this conversation as resolved.
|
||
| logger.warning( | ||
| "Renderer %s does not support the following requested data types and will not produce them: %s", | ||
| type(self._renderer).__name__, | ||
| unsupported, | ||
| ) | ||
| self._data.output = buffers | ||
| self._data.info = {name: None for name in buffers} | ||
| self._renderer.set_outputs(self._render_data, self._data.output) | ||
|
|
||
| def _update_intrinsic_matrices(self, env_ids: Sequence[int]): | ||
|
|
||
Uh oh!
There was an error while loading. Please reload this page.