diff --git a/cv_bridge/CMakeLists.txt b/cv_bridge/CMakeLists.txt
index 942cb0fac..6dae028d0 100644
--- a/cv_bridge/CMakeLists.txt
+++ b/cv_bridge/CMakeLists.txt
@@ -18,27 +18,6 @@ if(ANDROID)
set(CV_BRIDGE_DISABLE_PYTHON ON)
endif()
-if(CV_BRIDGE_DISABLE_PYTHON)
- find_package(Boost REQUIRED)
- set(boost_python_target "")
-else()
- find_package(Python3 REQUIRED COMPONENTS Development NumPy)
- find_package(Boost QUIET)
- if(Boost_VERSION_STRING VERSION_LESS "1.67")
- # This is a bit of a hack to suppress a warning
- # No header defined for python3; skipping header check
- # Which should only affect Boost versions < 1.67
- # Resolution for newer versions:
- # https://gitlab.kitware.com/cmake/cmake/issues/16391
- set(_Boost_PYTHON3_HEADERS "boost/python.hpp")
- find_package(Boost REQUIRED COMPONENTS python3)
- set(boost_python_target "Boost::python3")
- else()
- find_package(Boost REQUIRED COMPONENTS python${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR})
- set(boost_python_target "Boost::python${Python3_VERSION_MAJOR}${Python3_VERSION_MINOR}")
- endif()
-endif()
-
find_package(rclcpp REQUIRED)
find_package(rcpputils REQUIRED)
find_package(sensor_msgs REQUIRED)
diff --git a/cv_bridge/package.xml b/cv_bridge/package.xml
index fe34f46d8..fea735903 100644
--- a/cv_bridge/package.xml
+++ b/cv_bridge/package.xml
@@ -19,9 +19,6 @@
ament_cmake_ros
python_cmake_module
- libboost-dev
- libboost-python-dev
-
libopencv-dev
python3-numpy
rclcpp
@@ -30,7 +27,6 @@
python3-opencv
ament_index_python
- libboost-python
ament_cmake_gtest
ament_cmake_pytest
diff --git a/cv_bridge/python/cv_bridge/__init__.py b/cv_bridge/python/cv_bridge/__init__.py
index e887f6be9..cf6f85c39 100644
--- a/cv_bridge/python/cv_bridge/__init__.py
+++ b/cv_bridge/python/cv_bridge/__init__.py
@@ -3,6 +3,6 @@
# python bindings
# This try is just to satisfy doc jobs that are built differently.
try:
- from cv_bridge.boost.cv_bridge_boost import cvtColorForDisplay, getCvType
+ from cv_bridge.nanobind.cv_bridge_nanobind import cvtColorForDisplay, getCvType
except ImportError:
pass
diff --git a/cv_bridge/python/cv_bridge/core.py b/cv_bridge/python/cv_bridge/core.py
index 43797b4a0..81c63a7a9 100644
--- a/cv_bridge/python/cv_bridge/core.py
+++ b/cv_bridge/python/cv_bridge/core.py
@@ -89,11 +89,11 @@ def dtype_with_channels_to_cvtype2(self, dtype, n_channels):
return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels)
def cvtype2_to_dtype_with_channels(self, cvtype):
- from cv_bridge.boost.cv_bridge_boost import CV_MAT_CNWrap, CV_MAT_DEPTHWrap
+ from cv_bridge.nanobind.cv_bridge_nanobind import CV_MAT_CNWrap, CV_MAT_DEPTHWrap
return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype)
def encoding_to_cvtype2(self, encoding):
- from cv_bridge.boost.cv_bridge_boost import getCvType
+ from cv_bridge.nanobind.cv_bridge_nanobind import getCvType
try:
return getCvType(encoding)
@@ -135,7 +135,7 @@ def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding='passthrough'
if desired_encoding == 'passthrough':
return im
- from cv_bridge.boost.cv_bridge_boost import cvtColor2
+ from cv_bridge.nanobind.cv_bridge_nanobind import cvtColor2
try:
res = cvtColor2(im, 'bgr8', desired_encoding)
@@ -183,12 +183,12 @@ def imgmsg_to_cv2(self, img_msg, desired_encoding='passthrough'):
# If the byte order is different between the message and the system.
if img_msg.is_bigendian == (sys.byteorder == 'little'):
- im = im.byteswap().newbyteorder()
+ im = im.byteswap().view(im.dtype.newbyteorder())
if desired_encoding == 'passthrough':
return im
- from cv_bridge.boost.cv_bridge_boost import cvtColor2
+ from cv_bridge.nanobind.cv_bridge_nanobind import cvtColor2
try:
res = cvtColor2(im, img_msg.encoding, desired_encoding)
diff --git a/cv_bridge/src/CMakeLists.txt b/cv_bridge/src/CMakeLists.txt
index 76ccfd9f9..4eee39de0 100644
--- a/cv_bridge/src/CMakeLists.txt
+++ b/cv_bridge/src/CMakeLists.txt
@@ -18,7 +18,6 @@ target_link_libraries(${PROJECT_NAME} PUBLIC
rclcpp::rclcpp
)
target_link_libraries(${PROJECT_NAME} PRIVATE
- Boost::headers
rcpputils::rcpputils)
install(TARGETS ${PROJECT_NAME} EXPORT export_${PROJECT_NAME}
@@ -32,20 +31,28 @@ install(FILES
DESTINATION include/${PROJECT_NAME}/${PROJECT_NAME})
if(NOT CV_BRIDGE_DISABLE_PYTHON)
- Python3_add_library(${PROJECT_NAME}_boost MODULE module.cpp module_opencv4.cpp)
- target_link_libraries(${PROJECT_NAME}_boost PRIVATE
+ find_package(Python 3 COMPONENTS Interpreter Development REQUIRED)
+ include(FetchContent)
+ FetchContent_Declare(
+ nanobind
+ GIT_REPOSITORY https://github.com/wjakob/nanobind.git
+ GIT_TAG v2.9.2
+ )
+
+ FetchContent_MakeAvailable(nanobind)
+  nanobind_add_module(${PROJECT_NAME}_nanobind module.cpp)
+ target_link_libraries(${PROJECT_NAME}_nanobind PRIVATE
${PROJECT_NAME}
- ${boost_python_target}
- Python3::NumPy)
- target_compile_definitions(${PROJECT_NAME}_boost PRIVATE PYTHON3)
+ )
+ target_compile_definitions(${PROJECT_NAME}_nanobind PRIVATE PYTHON3)
if(OpenCV_VERSION_MAJOR VERSION_EQUAL 4)
- target_compile_definitions(${PROJECT_NAME}_boost PRIVATE OPENCV_VERSION_4)
+ target_compile_definitions(${PROJECT_NAME}_nanobind PRIVATE OPENCV_VERSION_4)
endif()
- set_target_properties(${PROJECT_NAME}_boost PROPERTIES
- LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/boost/
+ set_target_properties(${PROJECT_NAME}_nanobind PROPERTIES
+ LIBRARY_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/nanobind/
PREFIX ""
)
- install(TARGETS ${PROJECT_NAME}_boost DESTINATION ${PYTHON_INSTALL_DIR}/${PROJECT_NAME}/boost/)
+ install(TARGETS ${PROJECT_NAME}_nanobind DESTINATION ${PYTHON_INSTALL_DIR}/${PROJECT_NAME}/nanobind/)
endif()
diff --git a/cv_bridge/src/cv_bridge.cpp b/cv_bridge/src/cv_bridge.cpp
index e763e5da7..1ccd337c0 100644
--- a/cv_bridge/src/cv_bridge.cpp
+++ b/cv_bridge/src/cv_bridge.cpp
@@ -36,7 +36,6 @@
#include
#include
-#include <boost/endian/conversion.hpp>
#include
#include
#include
diff --git a/cv_bridge/src/module.cpp b/cv_bridge/src/module.cpp
index fe7174c65..2a0edb89f 100644
--- a/cv_bridge/src/module.cpp
+++ b/cv_bridge/src/module.cpp
@@ -1,122 +1,255 @@
/*********************************************************************
-* Software License Agreement (BSD License)
-*
-* Copyright (c) 2012, Willow Garage, Inc.
-* Copyright (c) 2018 Intel Corporation.
-* All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-* * Neither the name of the Willow Garage nor the names of its
-* contributors may be used to endorse or promote products derived
-* from this software without specific prior written permission.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
-* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
-* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
-* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * Software License Agreement (BSD License)
+ *
+ * Copyright (c) 2012, Willow Garage, Inc.
+ * Copyright (c) 2018 Intel Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+ * * Neither the name of the Willow Garage nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
-*********************************************************************/
+ *********************************************************************/
-#include "module.hpp"
-#include <string>
+#include <cv_bridge/cv_bridge.hpp>
+#include <nanobind/nanobind.h>
+#include <nanobind/ndarray.h>
+#include <nanobind/stl/string.h>
-PyObject * mod_opencv;
+namespace nb = nanobind;
-bp::object
-cvtColor2Wrap(bp::object obj_in, const std::string & encoding_in, const std::string & encoding_out)
+// Helper function to convert nanobind ndarray to cv::Mat
+cv::Mat convert_to_CvMat(nb::ndarray<> array)
{
- // Convert the Python input to an image
- cv::Mat mat_in;
- convert_to_CvMat2(obj_in.ptr(), mat_in);
+ // Check if array is valid
+ if (!array.is_valid())
+ {
+ throw std::runtime_error("Invalid numpy array");
+ }
- // Call cv_bridge for color conversion
- cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(
- std_msgs::msg::Header(), encoding_in, mat_in));
+ size_t ndim = array.ndim();
+ if (ndim < 2 || ndim > 3)
+ {
+ throw std::runtime_error("Input must be 2D or 3D array");
+ }
- cv::Mat mat = cv_bridge::cvtColor(cv_image, encoding_out)->image;
+ // Get shape information
+ int rows = array.shape(0);
+ int cols = array.shape(1);
+ int channels = (ndim == 3) ? array.shape(2) : 1;
- return bp::object(boost::python::handle<>(pyopencv_from(mat)));
+ // Determine OpenCV type from numpy dtype
+ int cv_type;
+ auto dtype = array.dtype();
+
+  if (dtype == nb::dtype<uint8_t>())
+  {
+    cv_type = CV_8UC(channels);
+  }
+  else if (dtype == nb::dtype<int8_t>())
+  {
+    cv_type = CV_8SC(channels);
+  }
+  else if (dtype == nb::dtype<uint16_t>())
+  {
+    cv_type = CV_16UC(channels);
+  }
+  else if (dtype == nb::dtype<int16_t>())
+  {
+    cv_type = CV_16SC(channels);
+  }
+  else if (dtype == nb::dtype<int32_t>())
+  {
+    cv_type = CV_32SC(channels);
+  }
+  else if (dtype == nb::dtype<float>())
+  {
+    cv_type = CV_32FC(channels);
+  }
+  else if (dtype == nb::dtype<double>())
+  {
+    cv_type = CV_64FC(channels);
+  }
+  else
+  {
+    throw std::runtime_error("Unsupported numpy dtype");
+  }
+
+  // Create cv::Mat from numpy array data (clone to own the data)
+  return cv::Mat(rows, cols, cv_type, const_cast<void *>(array.data())).clone();
}
-bp::object
-cvtColorForDisplayWrap(
- bp::object obj_in,
- const std::string & encoding_in,
- const std::string & encoding_out,
- bool do_dynamic_scaling = false,
- double min_image_value = 0.0,
- double max_image_value = 0.0,
- int colormap = -1)
+// Helper function to convert cv::Mat to nanobind ndarray
+nb::ndarray<nb::numpy> convert_from_CvMat(const cv::Mat &mat)
{
- // Convert the Python input to an image
- cv::Mat mat_in;
- convert_to_CvMat2(obj_in.ptr(), mat_in);
-
- cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(
- std_msgs::msg::Header(), encoding_in, mat_in));
-
- cv_bridge::CvtColorForDisplayOptions options;
- options.do_dynamic_scaling = do_dynamic_scaling;
- options.min_image_value = min_image_value;
- options.max_image_value = max_image_value;
- options.colormap = colormap;
- cv::Mat mat = cv_bridge::cvtColorForDisplay(/*source=*/ cv_image,
- /*encoding_out=*/ encoding_out,
- /*options=*/ options)->image;
-
- return bp::object(boost::python::handle<>(pyopencv_from(mat)));
+ // Build shape
+ size_t ndim = (mat.channels() == 1) ? 2 : 3;
+ size_t shape[3];
+
+  shape[0] = static_cast<size_t>(mat.rows);
+  shape[1] = static_cast<size_t>(mat.cols);
+  if (ndim == 3)
+  {
+    shape[2] = static_cast<size_t>(mat.channels());
+  }
+
+  // Copy data to ensure it persists
+  size_t size_bytes = static_cast<size_t>(mat.total()) * static_cast<size_t>(mat.elemSize());
+  void *data = std::malloc(size_bytes);
+  if (!data)
+    throw std::bad_alloc();
+  std::memcpy(data, mat.data, size_bytes);
+
+ // Create capsule for memory management
+ auto owner = nb::capsule(data, [](void *p) noexcept
+ { std::free(p); });
+
+ // Create generic ndarray explicitly. Note: data pointer is first argument.
+ switch (mat.depth())
+ {
+ case CV_8U:
+    return nb::ndarray<nb::numpy>(
+      data,                    // value (void*)
+      ndim,                    // number of dimensions
+      shape,                   // shape array (const size_t*)
+      std::move(owner),        // owner/capsule (handle)
+      nullptr,                 // strides (optional)
+      nb::dtype<uint8_t>(),    // dtype
+      nb::device::cpu::value,  // device
+      0                        // readonly flag
+    );
+  case CV_8S:
+    return nb::ndarray<nb::numpy>(data, ndim, shape, std::move(owner), nullptr, nb::dtype<int8_t>(), nb::device::cpu::value, 0);
+  case CV_16U:
+    return nb::ndarray<nb::numpy>(data, ndim, shape, std::move(owner), nullptr, nb::dtype<uint16_t>(), nb::device::cpu::value, 0);
+  case CV_16S:
+    return nb::ndarray<nb::numpy>(data, ndim, shape, std::move(owner), nullptr, nb::dtype<int16_t>(), nb::device::cpu::value, 0);
+  case CV_32S:
+    return nb::ndarray<nb::numpy>(data, ndim, shape, std::move(owner), nullptr, nb::dtype<int32_t>(), nb::device::cpu::value, 0);
+  case CV_32F:
+    return nb::ndarray<nb::numpy>(data, ndim, shape, std::move(owner), nullptr, nb::dtype<float>(), nb::device::cpu::value, 0);
+  case CV_64F:
+    return nb::ndarray<nb::numpy>(data, ndim, shape, std::move(owner), nullptr, nb::dtype<double>(), nb::device::cpu::value, 0);
+ default:
+ std::free(data);
+ throw std::runtime_error("Unsupported cv::Mat type");
+ }
}
-BOOST_PYTHON_FUNCTION_OVERLOADS(cvtColorForDisplayWrap_overloads, cvtColorForDisplayWrap, 3, 7)
+nb::ndarray<nb::numpy> cvtColor2Wrap(nb::ndarray<> obj_in, const std::string &encoding_in, const std::string &encoding_out)
+{
+ // Convert the Python input to an image
+ cv::Mat mat_in = convert_to_CvMat(obj_in);
+
+ // Call cv_bridge for color conversion
+ cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(
+ std_msgs::msg::Header(), encoding_in, mat_in));
+
+ cv::Mat mat = cv_bridge::cvtColor(cv_image, encoding_out)->image;
+
+ return convert_from_CvMat(mat);
+}
+
+nb::ndarray<nb::numpy> cvtColorForDisplayWrap(
+  nb::ndarray<> obj_in,
+ const std::string &encoding_in,
+ const std::string &encoding_out = "",
+ bool do_dynamic_scaling = false,
+ double min_image_value = 0.0,
+ double max_image_value = 0.0,
+ int colormap = -1)
+{
+ // Convert the numpy array input to cv::Mat
+ cv::Mat mat_in = convert_to_CvMat(obj_in);
+
+ // Create CvImage
+ cv_bridge::CvImagePtr cv_image(new cv_bridge::CvImage(
+ std_msgs::msg::Header(), encoding_in, mat_in));
+
+ // Set up conversion options
+ cv_bridge::CvtColorForDisplayOptions options;
+ options.do_dynamic_scaling = do_dynamic_scaling;
+ options.min_image_value = min_image_value;
+ options.max_image_value = max_image_value;
+ options.colormap = colormap;
+
+ // Perform the conversion
+ cv::Mat mat = cv_bridge::cvtColorForDisplay(
+ /*source=*/cv_image,
+ /*encoding_out=*/encoding_out,
+ /*options=*/options)
+ ->image;
+
+ // Convert cv::Mat back to numpy array
+ return convert_from_CvMat(mat);
+}
int CV_MAT_CNWrap(int i)
{
- return CV_MAT_CN(i);
+ return CV_MAT_CN(i);
}
int CV_MAT_DEPTHWrap(int i)
{
- return CV_MAT_DEPTH(i);
+ return CV_MAT_DEPTH(i);
}
-BOOST_PYTHON_MODULE(cv_bridge_boost)
+NB_MODULE(cv_bridge_nanobind, m)
{
- do_numpy_import();
- mod_opencv = PyImport_ImportModule("cv2");
-
- // Wrap the function to get encodings as OpenCV types
- boost::python::def("getCvType", cv_bridge::getCvType);
- boost::python::def("cvtColor2", cvtColor2Wrap);
- boost::python::def("CV_MAT_CNWrap", CV_MAT_CNWrap);
- boost::python::def("CV_MAT_DEPTHWrap", CV_MAT_DEPTHWrap);
- boost::python::def("cvtColorForDisplay", cvtColorForDisplayWrap,
- cvtColorForDisplayWrap_overloads(
- boost::python::args("source", "encoding_in", "encoding_out", "do_dynamic_scaling",
- "min_image_value", "max_image_value", "colormap"),
- "Convert image to display with specified encodings.\n\n"
- "Args:\n"
- " - source (numpy.ndarray): input image\n"
- " - encoding_in (str): input image encoding\n"
- " - encoding_out (str): encoding to which the image conveted\n"
- " - do_dynamic_scaling (bool): flag to do dynamic scaling with min/max value\n"
- " - min_image_value (float): minimum pixel value for dynamic scaling\n"
- " - max_image_value (float): maximum pixel value for dynamic scaling\n"
- " - colormap (int): colormap to use when converting to color image\n"
- ));
-}
+ m.doc() = "cv_bridge_nanobind: Lightweight nanobind bindings for ROS2 cv_bridge.\n\n"
+ "Provides efficient conversion between NumPy arrays and OpenCV cv::Mat,\n"
+ "plus wrappers for cv_bridge color conversions.\n\n"
+ "Functions:\n"
+ " getCvType(encoding): Return OpenCV type for a ROS encoding.\n"
+ " cvtColor2(img, encoding_in, encoding_out): Convert image encoding.\n"
+ " cvtColorForDisplay(img, encoding_in, encoding_out='',\n"
+ " do_dynamic_scaling=False,\n"
+ " min_image_value=0, max_image_value=0,\n"
+ " colormap=-1): Prepare image for visualization.\n"
+ " CV_MAT_CNWrap(type): Get number of channels from CV type.\n"
+ " CV_MAT_DEPTHWrap(type): Get depth (dtype) from CV type.\n";
+ m.def("getCvType", &cv_bridge::getCvType, "Get the OpenCV type for a given encoding");
+ m.def("cvtColor2", cvtColor2Wrap);
+ m.def("CV_MAT_CNWrap", CV_MAT_CNWrap);
+ m.def("CV_MAT_DEPTHWrap", CV_MAT_DEPTHWrap);
+ m.def("cvtColorForDisplay",
+ &cvtColorForDisplayWrap,
+ nb::arg("source"),
+ nb::arg("encoding_in"),
+ nb::arg("encoding_out") = "",
+ nb::arg("do_dynamic_scaling") = false,
+ nb::arg("min_image_value") = 0.0,
+ nb::arg("max_image_value") = 0.0,
+ nb::arg("colormap") = -1,
+ "Convert image to display with specified encodings.\n\n"
+ "Args:\n"
+ " - source (numpy.ndarray): input image\n"
+ " - encoding_in (str): input image encoding\n"
+ " - encoding_out (str): encoding to which the image converted\n"
+ " - do_dynamic_scaling (bool): flag to do dynamic scaling with min/max value\n"
+ " - min_image_value (float): minimum pixel value for dynamic scaling\n"
+ " - max_image_value (float): maximum pixel value for dynamic scaling\n"
+ " - colormap (int): colormap to use when converting to color image\n");
+}
\ No newline at end of file
diff --git a/cv_bridge/src/module.hpp b/cv_bridge/src/module.hpp
deleted file mode 100644
index b9ea35bd0..000000000
--- a/cv_bridge/src/module.hpp
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2014 Open Source Robotics Foundation, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-#ifndef CV_BRIDGE_MODULE_HPP_
-#define CV_BRIDGE_MODULE_HPP_
-
-#include
-
-// Have to define macros to silence warnings about deprecated headers being used by
-// boost/python.hpp in some versions of boost.
-// See: https://github.com/ros-perception/vision_opencv/issues/449
-#include
-#if (BOOST_VERSION / 100 >= 1073 && BOOST_VERSION / 100 <= 1076) // Boost 1.73 - 1.76
- #define BOOST_BIND_GLOBAL_PLACEHOLDERS
-#endif
-#if (BOOST_VERSION / 100 == 1074) // Boost 1.74
- #define BOOST_ALLOW_DEPRECATED_HEADERS
-#endif
-#include
-
-#include
-#include
-
-#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#include
-
-#include
-
-namespace bp = boost::python;
-
-int convert_to_CvMat2(const PyObject * o, cv::Mat & m);
-
-PyObject * pyopencv_from(const cv::Mat & m);
-
-static void * do_numpy_import()
-{
- import_array();
- return nullptr;
-}
-
-#endif // CV_BRIDGE_MODULE_HPP_
diff --git a/cv_bridge/src/module_opencv4.cpp b/cv_bridge/src/module_opencv4.cpp
deleted file mode 100644
index ac93f0897..000000000
--- a/cv_bridge/src/module_opencv4.cpp
+++ /dev/null
@@ -1,372 +0,0 @@
-/* Taken from opencv/modules/python/src2/cv2.cpp */
-
-#include "module.hpp"
-
-#include "opencv2/core/types_c.h"
-
-#include "opencv2/opencv_modules.hpp"
-
-#include "pycompat.hpp"
-
-static PyObject * opencv_error = 0;
-
-static int failmsg(const char * fmt, ...)
-{
- char str[1000];
-
- va_list ap;
- va_start(ap, fmt);
- vsnprintf(str, sizeof(str), fmt, ap);
- va_end(ap);
-
- PyErr_SetString(PyExc_TypeError, str);
- return 0;
-}
-
-struct ArgInfo
-{
- const char * name;
- bool outputarg;
- // more fields may be added if necessary
-
- ArgInfo(const char * name_, bool outputarg_)
- : name(name_),
- outputarg(outputarg_) {}
-
- // to match with older pyopencv_to function signature
- operator const char *() const {return name;}
-};
-
-class PyAllowThreads
-{
-public:
- PyAllowThreads()
- : _state(PyEval_SaveThread()) {}
- ~PyAllowThreads()
- {
- PyEval_RestoreThread(_state);
- }
-
-private:
- PyThreadState * _state;
-};
-
-class PyEnsureGIL
-{
-public:
- PyEnsureGIL()
- : _state(PyGILState_Ensure()) {}
- ~PyEnsureGIL()
- {
- PyGILState_Release(_state);
- }
-
-private:
- PyGILState_STATE _state;
-};
-
-#define ERRWRAP2(expr) \
- try \
- { \
- PyAllowThreads allowThreads; \
- expr; \
- } \
- catch (const cv::Exception & e) \
- { \
- PyErr_SetString(opencv_error, e.what()); \
- return 0; \
- }
-
-using namespace cv;
-
-
-[[gnu::unused]] static PyObject * failmsgp(const char * fmt, ...)
-{
- char str[1000];
-
- va_list ap;
- va_start(ap, fmt);
- vsnprintf(str, sizeof(str), fmt, ap);
- va_end(ap);
-
- PyErr_SetString(PyExc_TypeError, str);
- return 0;
-}
-
-class NumpyAllocator : public MatAllocator
-{
-public:
- NumpyAllocator() {stdAllocator = Mat::getStdAllocator();}
- ~NumpyAllocator() {}
-
-// To compile openCV3 with OpenCV4 APIs.
-#ifndef OPENCV_VERSION_4
-#define AccessFlag int
-#endif
-
- UMatData * allocate(PyObject * o, int dims, const int * sizes, int type, size_t * step) const
- {
- UMatData * u = new UMatData(this);
- u->data = u->origdata =
- reinterpret_cast(PyArray_DATA(reinterpret_cast(o)));
- npy_intp * _strides = PyArray_STRIDES(reinterpret_cast(o));
- for (int i = 0; i < dims - 1; i++) {
- step[i] = (size_t)_strides[i];
- }
- step[dims - 1] = CV_ELEM_SIZE(type);
- u->size = sizes[0] * step[0];
- u->userdata = o;
- return u;
- }
-
- UMatData * allocate(
- int dims0, const int * sizes, int type, void * data, size_t * step, AccessFlag flags,
- UMatUsageFlags usageFlags) const
- {
- if (data != 0) {
- CV_Error(Error::StsAssert, "The data should normally be NULL!");
- // probably this is safe to do in such extreme case
- return stdAllocator->allocate(dims0, sizes, type, data, step, flags, usageFlags);
- }
- PyEnsureGIL gil;
-
- int depth = CV_MAT_DEPTH(type);
- int cn = CV_MAT_CN(type);
- const int f = static_cast(sizeof(size_t) / 8);
- int typenum = depth == CV_8U ? NPY_UBYTE : depth == CV_8S ? NPY_BYTE :
- depth == CV_16U ? NPY_USHORT : depth == CV_16S ? NPY_SHORT :
- depth == CV_32S ? NPY_INT : depth == CV_32F ? NPY_FLOAT :
- depth == CV_64F ? NPY_DOUBLE : f * NPY_ULONGLONG + (f ^ 1) * NPY_UINT;
- int i, dims = dims0;
- cv::AutoBuffer _sizes(dims + 1);
- for (i = 0; i < dims; i++) {
- _sizes[i] = sizes[i];
- }
- if (cn > 1) {
- _sizes[dims++] = cn;
- }
- PyObject * o = PyArray_SimpleNew(dims, _sizes, typenum);
- if (!o) {
- CV_Error_(Error::StsError,
- ("The numpy array of typenum=%d, ndims=%d can not be created", typenum, dims));
- }
- return allocate(o, dims0, sizes, type, step);
- }
-
- bool allocate(UMatData * u, AccessFlag accessFlags, UMatUsageFlags usageFlags) const
- {
- return stdAllocator->allocate(u, accessFlags, usageFlags);
- }
-
- void deallocate(UMatData * u) const
- {
- if (u) {
- PyEnsureGIL gil;
- PyObject * o = reinterpret_cast(u->userdata);
- Py_XDECREF(o);
- delete u;
- }
- }
-
- const MatAllocator * stdAllocator;
-};
-
-NumpyAllocator g_numpyAllocator;
-
-
-template
-static
-bool pyopencv_to(PyObject * obj, T & p, const char * name = "");
-
-template
-static
-PyObject * pyopencv_from(const T & src);
-
-enum { ARG_NONE = 0, ARG_MAT = 1, ARG_SCALAR = 2 };
-
-// special case, when the convertor needs full ArgInfo structure
-static bool pyopencv_to(PyObject * o, Mat & m, const ArgInfo info)
-{
- // to avoid PyArray_Check() to crash even with valid array
- do_numpy_import();
-
-
- bool allowND = true;
- if (!o || o == Py_None) {
- if (!m.data) {
- m.allocator = &g_numpyAllocator;
- }
- return true;
- }
-
- if (PyInt_Check(o) ) {
- double v[] = {static_cast(PyInt_AsLong(reinterpret_cast(o))), 0., 0., 0.};
- m = Mat(4, 1, CV_64F, v).clone();
- return true;
- }
- if (PyFloat_Check(o) ) {
- double v[] = {PyFloat_AsDouble(reinterpret_cast(o)), 0., 0., 0.};
- m = Mat(4, 1, CV_64F, v).clone();
- return true;
- }
- if (PyTuple_Check(o) ) {
- int i, sz = static_cast(PyTuple_Size(reinterpret_cast(o)));
- m = Mat(sz, 1, CV_64F);
- for (i = 0; i < sz; i++) {
- PyObject * oi = PyTuple_GET_ITEM(o, i);
- if (PyInt_Check(oi) ) {
- m.at(i) = static_cast(PyInt_AsLong(oi));
- } else if (PyFloat_Check(oi) ) {
- m.at(i) = static_cast(PyFloat_AsDouble(oi));
- } else {
- failmsg("%s is not a numerical tuple", info.name);
- m.release();
- return false;
- }
- }
- return true;
- }
-
- if (!PyArray_Check(o) ) {
- failmsg("%s is not a numpy array, neither a scalar", info.name);
- return false;
- }
-
- PyArrayObject * oarr = reinterpret_cast(o);
-
- bool needcopy = false, needcast = false;
- int typenum = PyArray_TYPE(oarr), new_typenum = typenum;
- int type = typenum == NPY_UBYTE ? CV_8U :
- typenum == NPY_BYTE ? CV_8S :
- typenum == NPY_USHORT ? CV_16U :
- typenum == NPY_SHORT ? CV_16S :
- typenum == NPY_INT ? CV_32S :
- typenum == NPY_INT32 ? CV_32S :
- typenum == NPY_FLOAT ? CV_32F :
- typenum == NPY_DOUBLE ? CV_64F : -1;
-
- if (type < 0) {
- if (typenum == NPY_INT64 || typenum == NPY_UINT64 || type == NPY_LONG) {
- needcopy = needcast = true;
- new_typenum = NPY_INT;
- type = CV_32S;
- } else {
- failmsg("%s data type = %d is not supported", info.name, typenum);
- return false;
- }
- }
-
-#ifndef CV_MAX_DIM
- const int CV_MAX_DIM = 32;
-#endif
-
- int ndims = PyArray_NDIM(oarr);
- if (ndims >= CV_MAX_DIM) {
- failmsg("%s dimensionality (=%d) is too high", info.name, ndims);
- return false;
- }
-
- int size[CV_MAX_DIM + 1];
- size_t step[CV_MAX_DIM + 1];
- size_t elemsize = CV_ELEM_SIZE1(type);
- const npy_intp * _sizes = PyArray_DIMS(oarr);
- const npy_intp * _strides = PyArray_STRIDES(oarr);
- bool ismultichannel = ndims == 3 && _sizes[2] <= CV_CN_MAX;
-
- for (int i = ndims - 1; i >= 0 && !needcopy; i--) {
- // these checks handle cases of
- // a) multi-dimensional (ndims > 2) arrays, as well as simpler 1- and 2-dimensional cases
- // b) transposed arrays, where _strides[] elements go in non-descending order
- // c) flipped arrays, where some of _strides[] elements are negative
- if ( (i == ndims - 1 && (size_t)_strides[i] != elemsize) ||
- (i < ndims - 1 && _strides[i] < _strides[i + 1]) )
- {
- needcopy = true;
- }
- }
-
- if (ismultichannel && _strides[1] != (npy_intp)elemsize * _sizes[2]) {
- needcopy = true;
- }
-
- if (needcopy) {
- if (info.outputarg) {
- failmsg(
- "Layout of the output array %s is incompatible with \
- cv::Mat (step[ndims-1] != elemsize or step[1] != elemsize*nchannels)",
- info.name);
- return false;
- }
-
- if (needcast) {
- o = PyArray_Cast(oarr, new_typenum);
- oarr = reinterpret_cast(o);
- } else {
- oarr = PyArray_GETCONTIGUOUS(oarr);
- o = reinterpret_cast(oarr);
- }
-
- _strides = PyArray_STRIDES(oarr);
- }
-
- for (int i = 0; i < ndims; i++) {
- size[i] = static_cast(_sizes[i]);
- step[i] = (size_t)_strides[i];
- }
-
- // handle degenerate case
- if (ndims == 0) {
- size[ndims] = 1;
- step[ndims] = elemsize;
- ndims++;
- }
-
- if (ismultichannel) {
- ndims--;
- type |= CV_MAKETYPE(0, size[2]);
- }
-
- if (ndims > 2 && !allowND) {
- failmsg("%s has more than 2 dimensions", info.name);
- return false;
- }
-
- m = Mat(ndims, size, type, PyArray_DATA(oarr), step);
- m.u = g_numpyAllocator.allocate(o, ndims, size, type, step);
- m.addref();
-
- if (!needcopy) {
- Py_INCREF(o);
- }
- m.allocator = &g_numpyAllocator;
-
- return true;
-}
-
-template<>
-bool pyopencv_to(PyObject * o, Mat & m, const char * name)
-{
- return pyopencv_to(o, m, ArgInfo(name, 0));
-}
-
-PyObject * pyopencv_from(const Mat & m)
-{
- if (!m.data) {
- Py_RETURN_NONE;
- }
- Mat temp, * p = const_cast(&m);
- if (!p->u || p->allocator != &g_numpyAllocator) {
- temp.allocator = &g_numpyAllocator;
- ERRWRAP2(m.copyTo(temp));
- p = &temp;
- }
- PyObject * o = reinterpret_cast(p->u->userdata);
- Py_INCREF(o);
- return o;
-}
-
-int convert_to_CvMat2(const PyObject * o, cv::Mat & m)
-{
- pyopencv_to(const_cast(o), m, "unknown");
- return 0;
-}
diff --git a/cv_bridge/src/pycompat.hpp b/cv_bridge/src/pycompat.hpp
deleted file mode 100644
index 576d93008..000000000
--- a/cv_bridge/src/pycompat.hpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*M///////////////////////////////////////////////////////////////////////////////////////
-//
-// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
-//
-// By downloading, copying, installing or using the software you agree to this license.
-// If you do not agree to this license, do not download, install,
-// copy or use the software.
-//
-//
-// License Agreement
-// For Open Source Computer Vision Library
-//
-// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
-// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
-// Copyright (c) 2018 Intel Corporation.
-// Third party copyrights are property of their respective owners.
-//
-// Redistribution and use in source and binary forms, with or without modification,
-// are permitted provided that the following conditions are met:
-//
-// * Redistribution's of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-//
-// * Redistribution's in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-//
-// * The name of the copyright holders may not be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// This software is provided by the copyright holders and contributors "as is" and
-// any express or implied warranties, including, but not limited to, the implied
-// warranties of merchantability and fitness for a particular purpose are disclaimed.
-// In no event shall the Intel Corporation or contributors be liable for any direct,
-// indirect, incidental, special, exemplary, or consequential damages
-// (including, but not limited to, procurement of substitute goods or services;
-// loss of use, data, or profits; or business interruption) however caused
-// and on any theory of liability, whether in contract, strict liability,
-// or tort (including negligence or otherwise) arising in any way out of
-// the use of this software, even if advised of the possibility of such damage.
-//
-//M*/
-
-// Defines for Python 2/3 compatibility.
-#ifndef PYCOMPAT_HPP_
-#define PYCOMPAT_HPP_
-
-#if PY_MAJOR_VERSION >= 3
-// Python3 treats all ints as longs, PyInt_X functions have been removed.
-#define PyInt_Check PyLong_Check
-#define PyInt_CheckExact PyLong_CheckExact
-#define PyInt_AsLong PyLong_AsLong
-#define PyInt_AS_LONG PyLong_AS_LONG
-#define PyInt_FromLong PyLong_FromLong
-#define PyNumber_Int PyNumber_Long
-
-// Python3 strings are unicode, these defines mimic the Python2 functionality.
-#define PyString_Check PyUnicode_Check
-#define PyString_FromString PyUnicode_FromString
-#define PyString_FromStringAndSize PyUnicode_FromStringAndSize
-#define PyString_Size PyUnicode_GET_SIZE
-
-// PyUnicode_AsUTF8 isn't available until Python 3.3
-#if (PY_VERSION_HEX < 0x03030000)
-#define PyString_AsString _PyUnicode_AsString
-#else
-#define PyString_AsString PyUnicode_AsUTF8
-#endif
-#endif
-
-#endif // PYCOMPAT_HPP_
diff --git a/cv_bridge/test/CMakeLists.txt b/cv_bridge/test/CMakeLists.txt
index e8cfee810..13a37dbc0 100644
--- a/cv_bridge/test/CMakeLists.txt
+++ b/cv_bridge/test/CMakeLists.txt
@@ -22,10 +22,10 @@ ament_add_gtest(${PROJECT_NAME}-utest
APPEND_LIBRARY_DIRS "${cv_bridge_lib_dir}")
target_link_libraries(${PROJECT_NAME}-utest
${PROJECT_NAME}
- Boost::headers
opencv_core
opencv_imgcodecs
- ${sensor_msgs_TARGETS})
+ ${sensor_msgs_TARGETS}
+ rcpputils::rcpputils)
# enable cv_bridge python tests
find_package(ament_cmake_pytest REQUIRED)
diff --git a/cv_bridge/test/test_endian.cpp b/cv_bridge/test/test_endian.cpp
index 088091822..783236066 100644
--- a/cv_bridge/test/test_endian.cpp
+++ b/cv_bridge/test/test_endian.cpp
@@ -1,12 +1,44 @@
-#include <boost/endian/conversion.hpp>
+#include "rcpputils/endian.hpp"
#include <cv_bridge/cv_bridge.hpp>
#include <gtest/gtest.h>
#include <sensor_msgs/msg/image.hpp>
-TEST(CvBridgeTest, endianness)
+// Byteswap implementations
+inline uint32_t byteswap(uint32_t value) noexcept
+{
+ return ((value & 0x000000FF) << 24) |
+ ((value & 0x0000FF00) << 8) |
+ ((value & 0x00FF0000) >> 8) |
+ ((value & 0xFF000000) >> 24);
+}
+
+inline int32_t byteswap(int32_t value) noexcept
{
- using namespace boost::endian;
+  return static_cast<int32_t>(byteswap(static_cast<uint32_t>(value)));
+}
+template <typename T>
+inline T native_to_big(T value) noexcept
+{
+ if constexpr (rcpputils::endian::native == rcpputils::endian::little)
+ {
+ return byteswap(value);
+ }
+ return value;
+}
+
+template <typename T>
+inline T native_to_little(T value) noexcept
+{
+ if constexpr (rcpputils::endian::native == rcpputils::endian::little)
+ {
+ return value;
+ }
+ return byteswap(value);
+}
+
+TEST(CvBridgeTest, endianness)
+{
// Create an image of the type opposite to the platform
sensor_msgs::msg::Image msg;
msg.height = 1;
@@ -15,10 +47,10 @@ TEST(CvBridgeTest, endianness)
msg.step = 8;
msg.data.resize(msg.step);
-  int32_t * data = reinterpret_cast<int32_t *>(&msg.data[0]);
+  int32_t *data = reinterpret_cast<int32_t *>(&msg.data[0]);
// Write 1 and 2 in order, but with an endianness opposite to the platform
- if (order::native == order::little) {
+ if (rcpputils::endian::native == rcpputils::endian::little) {
msg.is_bigendian = true;
*(data++) = native_to_big(static_cast(1));
*data = native_to_big(static_cast(2));
@@ -30,7 +62,7 @@ TEST(CvBridgeTest, endianness)
// Make sure the values are still the same
cv_bridge::CvImageConstPtr img =
- cv_bridge::toCvShare(std::make_shared(msg));
+ cv_bridge::toCvShare(std::make_shared(msg));
  EXPECT_EQ(img->image.at<cv::Vec2i>(0, 0)[0], 1);
  EXPECT_EQ(img->image.at<cv::Vec2i>(0, 0)[1], 2);
// Make sure we cannot share data