Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 4 additions & 2 deletions docs/api_extra.rst
Original file line number Diff line number Diff line change
Expand Up @@ -644,8 +644,8 @@ N-dimensional array type
------------------------

The following type can be used to exchange n-dimension arrays with frameworks
like NumPy, PyTorch, Tensorflow, JAX, CuPy, and others. It requires an
additional include directive:
like NumPy, PyTorch, TensorFlow, JAX, CuPy, PaddlePaddle, and others. It
requires an additional include directive:

.. code-block:: cpp

Expand Down Expand Up @@ -1104,6 +1104,8 @@ convert into an equivalent representation in one of the following frameworks:

.. cpp:class:: cupy

.. cpp:class:: paddle

Eigen convenience type aliases
------------------------------

Expand Down
4 changes: 3 additions & 1 deletion docs/ndarray.rst
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@ The ``nb::ndarray<..>`` class
nanobind can exchange n-dimensional arrays (henceforth "**nd-arrays**") with
popular array programming frameworks including `NumPy <https://numpy.org>`__,
`PyTorch <https://pytorch.org>`__, `TensorFlow <https://www.tensorflow.org>`__,
`JAX <https://jax.readthedocs.io>`__, and `CuPy <https://cupy.dev>`_. It
`JAX <https://jax.readthedocs.io>`__, `CuPy <https://cupy.dev>`__, and
`PaddlePaddle <https://www.paddlepaddle.org.cn/en>`__. It
supports *zero-copy* exchange using two protocols:

- The classic `buffer
Expand Down Expand Up @@ -275,6 +276,7 @@ desired Python type.
- :cpp:class:`nb::tensorflow <tensorflow>`: create a ``tensorflow.python.framework.ops.EagerTensor``.
- :cpp:class:`nb::jax <jax>`: create a ``jaxlib.xla_extension.DeviceArray``.
- :cpp:class:`nb::cupy <cupy>`: create a ``cupy.ndarray``.
- :cpp:class:`nb::paddle <paddle>`: create a ``paddle.Tensor``.
- No framework annotation. In this case, nanobind will create a raw Python
``dltensor`` `capsule <https://docs.python.org/3/c-api/capsule.html>`__
representing the `DLPack <https://github.com/dmlc/dlpack>`__ metadata.
Expand Down
4 changes: 2 additions & 2 deletions docs/porting.rst
Original file line number Diff line number Diff line change
Expand Up @@ -348,8 +348,8 @@ Removed features include:
- ○ The NumPy array class (``py::array``) was removed in exchange for a more
powerful alternative (:cpp:class:`nb::ndarray\<..\> <nanobind::ndarray>`)
that additionally supports CPU/GPU tensors produced by various frameworks
(NumPy, PyTorch, TensorFlow, JAX, etc.). Its API is not compatible with
pybind11, however.
(NumPy, PyTorch, TensorFlow, JAX, PaddlePaddle, etc.). Its API is not compatible
with pybind11, however.
- ● Buffer protocol binding (``.def_buffer()``) was removed in favor of
:cpp:class:`nb::ndarray\<..\> <nanobind::ndarray>`.
- ● Support for evaluating Python files was removed.
Expand Down
3 changes: 2 additions & 1 deletion docs/why.rst
Original file line number Diff line number Diff line change
Expand Up @@ -117,7 +117,8 @@ nanobind includes a number of quality-of-life improvements for developers:
`buffer protocol <https://docs.python.org/3/c-api/buffer.html>`__ to achieve
*zero-copy* CPU/GPU array exchange with frameworks like `NumPy
<https://numpy.org>`__, `PyTorch <https://pytorch.org>`__, `TensorFlow
<https://www.tensorflow.org>`__, `JAX <https://jax.readthedocs.io>`__, etc. See
<https://www.tensorflow.org>`__, `JAX <https://jax.readthedocs.io>`__,
`PaddlePaddle <https://www.paddlepaddle.org.cn/en>`__, etc. See
the :ref:`section on n-dimensional arrays <ndarrays>` for details.

- **Stable ABI**: nanobind can target Python's `stable ABI interface
Expand Down
1 change: 1 addition & 0 deletions include/nanobind/ndarray.h
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,7 @@ NB_FRAMEWORK(pytorch, 2, "torch.Tensor");
NB_FRAMEWORK(tensorflow, 3, "tensorflow.python.framework.ops.EagerTensor");
NB_FRAMEWORK(jax, 4, "jaxlib.xla_extension.DeviceArray");
NB_FRAMEWORK(cupy, 5, "cupy.ndarray");
NB_FRAMEWORK(paddle, 6, "paddle.Tensor");

NAMESPACE_BEGIN(device)
NB_DEVICE(none, 0); NB_DEVICE(cpu, 1); NB_DEVICE(cuda, 2);
Expand Down
21 changes: 16 additions & 5 deletions src/nb_ndarray.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,9 @@ bool ndarray_check(PyObject *o) noexcept {
// Tensorflow
strcmp(tp_name, "tensorflow.python.framework.ops.EagerTensor") == 0 ||
// Cupy
strcmp(tp_name, "cupy.ndarray") == 0;
strcmp(tp_name, "cupy.ndarray") == 0 ||
// PaddlePaddle
strcmp(tp_name, "paddle.Tensor") == 0;

Py_DECREF(name);
return result;
Expand Down Expand Up @@ -402,6 +404,8 @@ ndarray_handle *ndarray_import(PyObject *o, const ndarray_config *c,
package = module_::import_("torch.utils.dlpack");
else if (strncmp(module_name, "jaxlib", 6) == 0)
package = module_::import_("jax.dlpack");
else if (strcmp(module_name, "paddle") == 0)
package = module_::import_("paddle.utils.dlpack");

if (package.is_valid())
capsule = package.attr("to_dlpack")(handle(o));
Expand Down Expand Up @@ -538,11 +542,16 @@ ndarray_handle *ndarray_import(PyObject *o, const ndarray_config *c,
try {
if (strcmp(module_name, "numpy") == 0 || strcmp(module_name, "cupy") == 0) {
converted = handle(o).attr("astype")(dtype, order);
} else if (strcmp(module_name, "torch") == 0) {
} else if (strcmp(module_name, "torch") == 0 || strcmp(module_name, "paddle") == 0) {
converted = handle(o).attr("to")(
arg("dtype") = module_::import_("torch").attr(dtype));
if (c->order == 'C')
arg("dtype") = module_::import_(module_name).attr(dtype));
if (c->order == 'C') {
// paddle.Tensor.contiguous will operate on self Tensor
// so to have a similar behavior to pytorch, detach() should be called
if (strcmp(module_name, "paddle") == 0)
converted = converted.attr("detach")();
converted = converted.attr("contiguous")();
}
} else if (strncmp(module_name, "tensorflow.", 11) == 0) {
converted = module_::import_("tensorflow")
.attr("cast")(handle(o), dtype);
Expand Down Expand Up @@ -791,6 +800,7 @@ PyObject *ndarray_export(ndarray_handle *th, int framework,
case tensorflow::value: pkg_name = "tensorflow.experimental.dlpack"; break;
case jax::value: pkg_name = "jax.dlpack"; break;
case cupy::value: pkg_name = "cupy"; break;
case paddle::value: pkg_name = "paddle.utils.dlpack"; break;
default: pkg_name = nullptr;
}

Expand All @@ -807,7 +817,8 @@ PyObject *ndarray_export(ndarray_handle *th, int framework,

if (copy) {
const char* copy_str = "copy";
if (framework == pytorch::value)
if (framework == pytorch::value ||
framework == paddle::value)
copy_str = "clone";

try {
Expand Down
2 changes: 1 addition & 1 deletion src/stubgen.py
Original file line number Diff line number Diff line change
Expand Up @@ -253,7 +253,7 @@ def __init__(

# Precompile RE to extract nanobind nd-arrays
self.ndarray_re = re.compile(
sep_before + r"(numpy\.ndarray|ndarray|torch\.Tensor)\[([^\]]*)\]"
sep_before + r"(numpy\.ndarray|ndarray|torch\.Tensor|paddle\.Tensor)\[([^\]]*)\]"
)

# Types which moved from typing.* to collections.abc in Python 3.9
Expand Down
13 changes: 13 additions & 0 deletions tests/test_ndarray.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -299,6 +299,19 @@ NB_MODULE(test_ndarray_ext, m) {
deleter);
});

// Expose a test binding that returns a fixed 2x4 float nd-array annotated
// with nb::paddle, so the export path produces a paddle.Tensor. The heap
// buffer's lifetime is handed to the capsule, whose deleter bumps
// destruct_count (checked by the Python-side tests) and frees the data.
m.def("ret_paddle", []() {
    size_t dims[2] = { 2, 4 };
    float *values = new float[8] { 1, 2, 3, 4, 5, 6, 7, 8 };

    // Owner capsule: invoked once the last consumer releases the array.
    nb::capsule owner(values, [](void *p) noexcept {
        destruct_count++;
        delete[] (float *) p;
    });

    return nb::ndarray<nb::paddle, float, nb::shape<2, 4>>(
        values, /* ndim = */ 2, dims, owner);
});

m.def("ret_array_scalar", []() {
float* f = new float[1] { 1 };
size_t shape[1] = {};
Expand Down
Loading