168 changes: 41 additions & 127 deletions docker/Dockerfile
@@ -1,146 +1,60 @@
# Base image with Python 3.10 and slim Debian system
FROM python:3.10-slim
FROM stereolabs/zed:5.2-gl-devel-cuda12.8-ubuntu24.04
# FROM stereolabs/zed:5.0-gl-devel-cuda12.1-ubuntu22.04

# System configuration
ENV DEBIAN_FRONTEND=noninteractive \
PIP_DISABLE_PIP_VERSION_CHECK=1 \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONUNBUFFERED=1 \
CMAKE_BUILD_PARALLEL_LEVEL=2 \
SKBUILD_BUILD_OPTIONS="-j2" \
MAKEFLAGS="-j2"
#############################################################################
# Explanation of environment variables:
# DEBIAN_FRONTEND=noninteractive: Disables interactive prompts during apt-get install.
# PYTHONDONTWRITEBYTECODE=1: Prevents .pyc files, keeping the image clean.
# PYTHONUNBUFFERED=1: Ensures logs are written directly (no buffering).
# Parallel build settings for CMake, scikit-build, and make to improve build speed.
#############################################################################
UV_LINK_MODE=copy
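# UV_LINK_MODE=copy makes uv copy packages out of its cache instead of
# hardlinking them, which avoids hardlink warnings/failures across Docker
# layer filesystems.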

# create a user to avoid running as root
RUN useradd -ms /bin/bash devuser
WORKDIR /home/devuser
USER root
WORKDIR /opt/rcs-src

# Install system dependencies (manually inlined from debian_deps.txt)
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
cmake \
curl \
git \
libpoco-dev \
libeigen3-dev \
libxslt-dev \
libcoin-dev \
libccd-dev \
libgl1 \
libglib2.0-0 \
libglfw3-dev \
libboost-all-dev \
liblzma-dev \
libxml2-dev \
libxslt1-dev \
libpoco-dev \
ninja-build \
clang \
clang-format \
clang-tidy \
pkg-config \
curl \
unzip \
wget \
libgl1 \
patchelf \
python3-venv \
libegl-dev \
libegl1-mesa-dev \
libglib2.0-dev \
mesa-utils \
&& rm -rf /var/lib/apt/lists/*

# Remove root password so `su -` works from devuser
RUN passwd -d root


# Switch to non-root user
USER devuser
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
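# The standalone installer places uv under /root/.local/bin, which the PATH
# update below makes available.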

# Set up virtual environment (Python)
RUN python3 -m venv /home/devuser/.venv
# prepend /home/devuser/.venv/bin to the existing PATH
# This ensures that the virtual environment's Python and pip are used by default.
ENV PATH="/home/devuser/.venv/bin:$PATH"
ENV PATH="/opt/venv/bin:/root/.local/bin:${PATH}" \
VIRTUAL_ENV=/opt/venv

# Copy project files into container
COPY --chown=devuser . /home/devuser/project
WORKDIR /home/devuser/project
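# uv-created virtualenvs ship without pip by default; ensurepip adds it so
# tools that shell out to `pip` (such as the ZED installer below) still work.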
RUN uv python install 3.11 \
&& uv venv --python 3.11 /opt/venv \
&& python -m ensurepip --upgrade

# Upgrade pip and install project build tools
RUN pip install --upgrade pip setuptools
# Install the ZED Python bindings into the same virtualenv used by the project.
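# get_python_api.py needs `requests`; it downloads and installs the pyzed
# wheel matching the ZED SDK shipped in the base image.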
RUN uv pip install requests \
&& python /usr/local/zed/get_python_api.py

# Install development dependencies
RUN pip install --group build_deps
COPY . /opt/rcs-src
COPY docker/link-editable-source.sh /usr/local/bin/link-editable-source
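# link-editable-source runs at container start (see docker/compose/dev.yml) so
# Python changes in the bind-mounted checkout take effect without reinstalling.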

# Install the package in editable mode (CMake + pybind11 + scikit-build-core triggered)
RUN pip install -e . --no-cache-dir --verbose

# Default command that runs when you start a container without specifying a command explicitly.
CMD ["python3"]

######################################################################
# Build the Docker image with specified memory limits
# To build the Docker image, run the following command in the terminal:
# docker build --memory=4g --memory-swap=6g . -t rcs-dev
######################################################################
# --memory=4g Limit the build process to 4 GB of RAM
# --memory-swap=6g Limit total memory (RAM + swap) to 6 GB
# . Use current directory as the Docker context
# -t rcs-dev Tag the built image as "rcs-dev"
######################################################################

######################################################################
# Run the Docker container interactively (without GUI)
# docker run -it --rm rcs-dev bash
######################################################################
# -it Interactive mode with TTY
# --rm Automatically remove container after exit
# rcs-dev Name of the Docker image to run
# bash Start an interactive bash shell inside the container
######################################################################

######################################################################
# Optional: Run GUI applications from inside the container
# First, allow X11 connections from Docker containers:
# Run this command on the host machine:
# xhost +local:docker
######################################################################
# xhost A utility to manage X11 display access control
# +local:docker Grant X11 access to Docker containers running locally
######################################################################

######################################################################
# Run container with GUI support (no GPU)
# docker run -it --rm -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix --shm-size=1g rcs-dev bash
######################################################################
# -e DISPLAY=$DISPLAY Pass display info to container
# -v /tmp/.X11-unix:/tmp/.X11-unix Mount X11 socket for GUI apps
# --shm-size=1g Increase shared memory for rendering (useful for tools like MuJoCo)
######################################################################

######################################################################
# Run container with NVIDIA GPU support
# Make sure NVIDIA Container Toolkit is installed and configured
# For more info: https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html
# docker run -it --rm --gpus all --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all -e NVIDIA_DRIVER_CAPABILITIES=all -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix --shm-size=1g rcs-dev bash
######################################################################
# --gpus all Enable all available GPUs
# --runtime=nvidia Use NVIDIA runtime for GPU access
# -e NVIDIA_VISIBLE_DEVICES=all Expose all GPUs inside container
# -e NVIDIA_DRIVER_CAPABILITIES=all Enable all GPU features (e.g., graphics, compute)
# Other flags same as GUI setup above
######################################################################
# Run the container with NVIDIA GPU support and hardware access:
# docker run -it --rm --gpus all --runtime=nvidia -e NVIDIA_VISIBLE_DEVICES=all -e NVIDIA_DRIVER_CAPABILITIES=all -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix --shm-size=2g --network host --privileged --cap-add=SYS_NICE --ulimit rtprio=99 --ulimit rttime=-1 --ulimit memlock=8428281856 -v /dev:/dev rcs-dev bash
# Optional flags for running the container with hardware access:
# --network host \ # Use the host's network stack (needed for low-latency ROS comms)
# --privileged \ # Grant full device and kernel access (required for hardware control)
# --cap-add=SYS_NICE \ # Allow processes to raise their scheduling priority
# --ulimit rtprio=99 \ # Enable real-time priority up to 99
# --ulimit rttime=-1 \ # Disable CPU time limit for real-time threads
# --ulimit memlock=8428281856 \ # Lock ~8GB of RAM to prevent memory swapping
# -v /dev:/dev \ # Mount all host devices for hardware access (e.g., Franka arm)
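# Preinstall the build tools and pinned deps, then install rcs and its
# extensions; rcs_fr3 uses --no-build-isolation so its build runs against
# these exact versions instead of an isolated build environment.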
RUN chmod +x /usr/local/bin/link-editable-source \
&& uv pip install \
build \
wheel \
"setuptools>=45" \
"scikit-build-core>=0.3.3" \
pybind11 \
cmake \
ninja \
"mujoco==3.2.6" \
"pin==3.7.0" \
&& uv pip install /opt/rcs-src \
&& uv pip install --no-build-isolation /opt/rcs-src/extensions/rcs_fr3 \
&& uv pip install /opt/rcs-src/extensions/rcs_realsense \
&& uv pip install /opt/rcs-src/extensions/rcs_robotiq2f85 \
&& uv pip install /opt/rcs-src/extensions/rcs_zed

WORKDIR /workspace/robot-control-stack

CMD ["sh"]
40 changes: 22 additions & 18 deletions docker/README.md
@@ -1,24 +1,28 @@
# Docker (GUI + GPU + HW add-ons)
# Docker

**Prereqs:** Docker + docker-compose, X11 on host, NVIDIA driver + NVIDIA Container Toolkit (legacy `runtime: nvidia`).
**Layout:** `docker/Dockerfile`, overrides in `docker/compose/` (`base.yml`, `gui.yml`, `gpu.yml`, `hw.yml`).
Build the image from the repository root:

## Build the image
`docker-compose -f docker/compose/base.yml build dev`
```sh
docker build -f docker/Dockerfile -t rcs-dev .
```

## (GUI) allow X access (host)
`export XAUTHORITY=${XAUTHORITY:-$HOME/.Xauthority}`
`xhost +local:docker`
Run the development container with Docker Compose:

## Run container with GUI + GPU + HW and open a shell
`docker-compose -f docker/compose/base.yml -f docker/compose/gui.yml -f docker/compose/gpu.yml -f docker/compose/hw.yml run --rm run bash`
*(Use fewer `-f` files for lighter setups, e.g., GUI+GPU without HW.)*
```sh
xhost +si:localuser:root
docker compose -f docker/compose/dev.yml run --rm rcs
```

## Inside the container
`pip install -ve extensions/rcs_fr3`
`cd examples`
`python fr3_env_cartesian_control.py`
Notes:

## Troubleshooting
- **`nvidia-smi` missing in container:** ensure it exists on host at `/usr/bin/nvidia-smi` (GPU override bind-mounts it).
- **GUI can’t open:** re-run the `xhost` command and confirm `$DISPLAY` is set on the host.
- The compose setup bind-mounts the repository into `/workspace/robot-control-stack`.
- The compose service uses the `rcs-dev` image tag, so the manual `docker build` command and the Compose service refer to the same image.
- The Docker image installs the ZED Python API (`pyzed`) during build by running `/usr/local/zed/get_python_api.py` inside the project virtualenv.
- The compose setup requests GPU access using a device reservation, which is more widely supported than the newer service-level `gpus:` key.
- The host should grant local X11 access before starting the container: `xhost +si:localuser:root`.
- `~/zed_models` is mounted into `/usr/local/zed/resources` to match the direct `docker run` setup.
- `/dev/dri` is masked inside the container so host Mesa/AMD render nodes do not override the NVIDIA runtime devices.
- NVIDIA PRIME/GLX environment variables are exported to bias OpenGL/EGL selection toward the NVIDIA stack when using X11 forwarding.
- Python source changes are picked up from the mounted repo, including `extensions/rcs_zed`.
- If you change C++ code in `rcs` or `rcs_fr3`, rebuild the image.
- For non-GPU hosts, comment out the GPU-related lines in `docker/compose/dev.yml`.
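Once the shell is up, a quick sanity check from inside the container (a sketch: `nvidia-smi` assumes the NVIDIA runtime injected the driver tools, and the example path follows the previous README layout):

```sh
nvidia-smi                                     # GPU should be visible via the NVIDIA runtime
echo "$DISPLAY"                                # should match the host display for GUI apps
python examples/fr3_env_cartesian_control.py   # example from the previous README; adjust if moved
```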
38 changes: 0 additions & 38 deletions docker/compose/base.yml

This file was deleted.

38 changes: 38 additions & 0 deletions docker/compose/dev.yml
@@ -0,0 +1,38 @@
services:
  rcs:
    image: rcs-dev
    build:
      context: ../..
      dockerfile: docker/Dockerfile
    working_dir: /workspace/robot-control-stack
    command:
      - /bin/sh
      - -lc
      - /usr/local/bin/link-editable-source && exec /bin/sh
    environment:
      DISPLAY: ${DISPLAY}
      RCS_PREFIX: /workspace/robot-control-stack
      NVIDIA_DISABLE_REQUIRE: 1
      NVIDIA_VISIBLE_DEVICES: all
      NVIDIA_DRIVER_CAPABILITIES: all
      __NV_PRIME_RENDER_OFFLOAD: 1
      __GLX_VENDOR_LIBRARY_NAME: nvidia
      __VK_LAYER_NV_optimus: NVIDIA_only
    volumes:
      - ../..:/workspace/robot-control-stack
      - /tmp/.X11-unix:/tmp/.X11-unix
      - /dev:/dev
      - ${HOME}/zed_models:/usr/local/zed/resources
    tmpfs:
      - /dev/dri
    network_mode: host
    privileged: true
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]
    stdin_open: true
    tty: true
12 changes: 0 additions & 12 deletions docker/compose/gpu.yml

This file was deleted.

11 changes: 0 additions & 11 deletions docker/compose/gui.yml

This file was deleted.

20 changes: 0 additions & 20 deletions docker/compose/hw.yml

This file was deleted.
