From c8d60c1a2c81f4f5b312bf0d0649f9b5a4f7a9c1 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 7 Aug 2023 10:31:04 -0700
Subject: [PATCH 01/17] Add docs to envs

---
 dependencies.yaml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/dependencies.yaml b/dependencies.yaml
index 765a812cb..68020fd0a 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -12,6 +12,7 @@ files:
       - develop
       - docs
       - run
+      - docs
       - test_python
   test_python:
     output: none

From ca7c3875895d332bc22a58868133a501fa2a193f Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 7 Aug 2023 10:32:02 -0700
Subject: [PATCH 02/17] Add breathe and alphabetize

---
 conda/environments/all_cuda-118_arch-x86_64.yaml | 1 +
 conda/environments/all_cuda-120_arch-x86_64.yaml | 1 +
 dependencies.yaml | 2 +-
 3 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index 5c5ecb453..71609ca35 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -4,6 +4,7 @@ channels:
 - rapidsai
 - conda-forge
 dependencies:
+- breathe
 - cmake>=3.26.4
 - cuda-python>=11.7.1,<12.0a0
 - cuda-version=11.8
diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml
index e01de1ca2..ce89249a9 100644
--- a/conda/environments/all_cuda-120_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -4,6 +4,7 @@ channels:
 - rapidsai
 - conda-forge
 dependencies:
+- breathe
 - cmake>=3.26.4
 - cuda-nvcc
 - cuda-python>=12.0,<13.0a0
diff --git a/dependencies.yaml b/dependencies.yaml
index 68020fd0a..1a1be7e4f 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -12,7 +12,6 @@ files:
       - develop
       - docs
       - run
-      - docs
       - test_python
   test_python:
     output: none
@@ -158,6 +157,7 @@ dependencies:
       - output_types: [conda]
         packages:
           - *doxygen
+          - breathe
           - graphviz
           - ipython
           - make

From d58bef25fee6041387b319d85b9f625317042af0 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Tue, 8 Aug 2023 17:36:48 -0700
Subject: [PATCH 03/17] Add C++ docs

---
 doxygen/Doxyfile | 2 +-
 python/docs/conf.py | 12 ++++++++++++
 python/docs/index.rst | 1 +
 python/docs/librmm_api.rst | 4 ++++
 4 files changed, 18 insertions(+), 1 deletion(-)
 create mode 100644 python/docs/librmm_api.rst

diff --git a/doxygen/Doxyfile b/doxygen/Doxyfile
index 2ace4abba..28a789a9f 100644
--- a/doxygen/Doxyfile
+++ b/doxygen/Doxyfile
@@ -2012,7 +2012,7 @@ MAN_LINKS = NO
 # captures the structure of the code including all documentation.
 # The default value is: NO.
 
-GENERATE_XML = NO
+GENERATE_XML = YES
 
 # The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
 # relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
diff --git a/python/docs/conf.py b/python/docs/conf.py
index d8c7460dc..e224f59dd 100644
--- a/python/docs/conf.py
+++ b/python/docs/conf.py
@@ -46,8 +46,12 @@
     "IPython.sphinxext.ipython_directive",
     "nbsphinx",
     "recommonmark",
+    "breathe",
 ]
 
+# Breathe Configuration
+breathe_projects = {"librmm": "../../doxygen/xml"}
+breathe_default_project = "librmm"
 
 copybutton_prompt_text = ">>> "
@@ -197,9 +201,17 @@
 ]
 
 
+def on_missing_reference(app, env, node, contnode):
+    return contnode
+    if node["refdomain"] == "cpp":
+        return contnode
+    return None
+
+
 def setup(app):
     app.add_js_file("copybutton_pydocs.js")
     app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
     app.add_js_file(
         "https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
     )
+    app.connect("missing-reference", on_missing_reference)
diff --git a/python/docs/index.rst b/python/docs/index.rst
index 8dffdeb02..4a675d297 100644
--- a/python/docs/index.rst
+++ b/python/docs/index.rst
@@ -12,6 +12,7 @@ Welcome to rmm's documentation!
 
    basics.md
    api.rst
+   librmm_api.rst
 
 
 Indices and tables
diff --git a/python/docs/librmm_api.rst b/python/docs/librmm_api.rst
new file mode 100644
index 000000000..087eda247
--- /dev/null
+++ b/python/docs/librmm_api.rst
@@ -0,0 +1,4 @@
+API Reference
+==============
+
+.. doxygennamespace:: rmm

From 2a479d5dcc3470768105c03507748d306716f70d Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Wed, 9 Aug 2023 08:13:31 -0700
Subject: [PATCH 04/17] Fix doxygen code blocks so that they also display nicely in Sphinx

---
 include/rmm/device_uvector.hpp | 2 +-
 include/rmm/mr/device/device_memory_resource.hpp | 2 +-
 include/rmm/mr/device/failure_callback_resource_adaptor.hpp | 2 +-
 include/rmm/mr/device/per_device_resource.hpp | 2 +-
 include/rmm/mr/device/polymorphic_allocator.hpp | 2 +-
 5 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/include/rmm/device_uvector.hpp b/include/rmm/device_uvector.hpp
index 47ed1adff..74103e9d4 100644
--- a/include/rmm/device_uvector.hpp
+++ b/include/rmm/device_uvector.hpp
@@ -39,7 +39,7 @@ namespace rmm {
  * `thrust::uninitialized_fill`.
  *
  * Example:
- * @code{c++}
+ * @code{.cpp}
  * rmm::mr::device_memory_resource * mr = new my_custom_resource();
  * rmm::cuda_stream_view s{};
  *
diff --git a/include/rmm/mr/device/device_memory_resource.hpp b/include/rmm/mr/device/device_memory_resource.hpp
index 5f511e393..5149411d5 100644
--- a/include/rmm/mr/device/device_memory_resource.hpp
+++ b/include/rmm/mr/device/device_memory_resource.hpp
@@ -70,7 +70,7 @@ namespace rmm::mr {
  * pool_memory_resource objects for each device and sets them as the per-device resource for that
  * device.
  *
- * @code{c++}
+ * @code{.cpp}
  * std::vector> per_device_pools;
  * for(int i = 0; i < N; ++i) {
  *   cudaSetDevice(i);
diff --git a/include/rmm/mr/device/failure_callback_resource_adaptor.hpp b/include/rmm/mr/device/failure_callback_resource_adaptor.hpp
index 48f0513d8..a6778d1b0 100644
--- a/include/rmm/mr/device/failure_callback_resource_adaptor.hpp
+++ b/include/rmm/mr/device/failure_callback_resource_adaptor.hpp
@@ -55,7 +55,7 @@ using failure_callback_t = std::function;
  * When implementing a callback function for allocation retry, care must be taken to avoid an
  * infinite loop. The following example makes sure to only retry the allocation once:
  *
- * @code{c++}
+ * @code{.cpp}
  * using failure_callback_adaptor =
  *   rmm::mr::failure_callback_resource_adaptor;
  *
diff --git a/include/rmm/mr/device/per_device_resource.hpp b/include/rmm/mr/device/per_device_resource.hpp
index aa7217758..594b48749 100644
--- a/include/rmm/mr/device/per_device_resource.hpp
+++ b/include/rmm/mr/device/per_device_resource.hpp
@@ -61,7 +61,7 @@
  * pool_memory_resource objects for each device and sets them as the per-device resource for that
  * device.
  *
- * @code{c++}
+ * @code{.cpp}
  * std::vector> per_device_pools;
 * for(int i = 0; i < N; ++i) {
 *   cudaSetDevice(i);
diff --git a/include/rmm/mr/device/polymorphic_allocator.hpp b/include/rmm/mr/device/polymorphic_allocator.hpp
index a52ec14d1..c3b8ac6ea 100644
--- a/include/rmm/mr/device/polymorphic_allocator.hpp
+++ b/include/rmm/mr/device/polymorphic_allocator.hpp
@@ -133,7 +133,7 @@ bool operator!=(polymorphic_allocator const& lhs, polymorphic_allocator co
  *`deallocate` functions.
  *
  * Example:
- *\code{c++}
+ *\code{.cpp}
  * my_stream_ordered_allocator a{...};
  * cuda_stream_view s = // create stream;
  *

From 565d6317ea158dd0ffe74c97d8d4abb73383ff53 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Wed, 9 Aug 2023 17:53:34 -0700
Subject: [PATCH 05/17] Temporarily allow building with warnings so that CI can complete and generate previews

---
 python/docs/Makefile | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/docs/Makefile b/python/docs/Makefile
index 72d1dcd85..68e58b474 100644
--- a/python/docs/Makefile
+++ b/python/docs/Makefile
@@ -3,7 +3,7 @@
 
 # You can set these variables from the command line, and also
 # from the environment for the first two.
-SPHINXOPTS = -n -v -W --keep-going
+SPHINXOPTS = -n -v
 SPHINXBUILD ?= sphinx-build
 SOURCEDIR = .
 BUILDDIR = _build

From 0bb577029c13fef6d51181a93b0eab0b2b892a02 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 11 Sep 2023 13:42:00 -0700
Subject: [PATCH 06/17] Also ignore xml

---
 .gitignore | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.gitignore b/.gitignore
index ad6c8ebf7..8d5823ab2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -91,6 +91,7 @@ thirdparty/googletest/
 
 ## Doxygen
 doxygen/html
+doxygen/xml
 
 #Java
 target

From ddbdc56b5edc8d68b227accba99195d1a456c8f6 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Wed, 20 Sep 2023 15:40:33 -0700
Subject: [PATCH 07/17] Fix doxygen issues revealed by building XML

---
 include/rmm/cuda_stream_view.hpp | 2 +-
 .../mr/device/aligned_resource_adaptor.hpp | 2 +-
 .../rmm/mr/device/arena_memory_resource.hpp | 3 +-
 .../rmm/mr/device/binning_memory_resource.hpp | 5 ++--
 .../mr/device/callback_memory_resource.hpp | 23 ++++++++++++++
 .../mr/device/cuda_async_memory_resource.hpp | 24 +++++++++------
 .../cuda_async_view_memory_resource.hpp | 22 +++++++++-----
 .../rmm/mr/device/cuda_memory_resource.hpp | 27 +++++++++--------
 .../rmm/mr/device/device_memory_resource.hpp | 2 +-
 .../mr/device/limiting_resource_adaptor.hpp | 5 ++--
 .../mr/device/logging_resource_adaptor.hpp | 2 ++
 .../rmm/mr/device/managed_memory_resource.hpp | 30 +++++++++++--------
 include/rmm/thrust_rmm_allocator.h | 4 +--
 13 files changed, 99 insertions(+), 52 deletions(-)

diff --git a/include/rmm/cuda_stream_view.hpp b/include/rmm/cuda_stream_view.hpp
index 150fe3957..b6e3cc022 100644
--- a/include/rmm/cuda_stream_view.hpp
+++ b/include/rmm/cuda_stream_view.hpp
@@ -166,7 +166,7 @@ inline bool operator!=(cuda_stream_view lhs, cuda_stream_view rhs) { return not(
  * @brief Output stream operator for printing / logging streams
  *
  * @param os The output ostream
- * @param sv The cuda_stream_view to output
+ * @param stream The cuda_stream_view to output
  * @return std::ostream& The output ostream
  */
 inline std::ostream& operator<<(std::ostream& os, cuda_stream_view stream)
diff --git a/include/rmm/mr/device/aligned_resource_adaptor.hpp b/include/rmm/mr/device/aligned_resource_adaptor.hpp
index 0b933fb56..f79f0c15a 100644
--- a/include/rmm/mr/device/aligned_resource_adaptor.hpp
+++ b/include/rmm/mr/device/aligned_resource_adaptor.hpp
@@ -140,7 +140,7 @@ class aligned_resource_adaptor final : public device_memory_resource {
   *
   * @throws Nothing.
   *
-  * @param p Pointer to be deallocated
+  * @param ptr Pointer to be deallocated
   * @param bytes Size of the allocation
   * @param stream Stream on which to perform the deallocation
   */
diff --git a/include/rmm/mr/device/arena_memory_resource.hpp b/include/rmm/mr/device/arena_memory_resource.hpp
index e16fbaf7c..882cbf694 100644
--- a/include/rmm/mr/device/arena_memory_resource.hpp
+++ b/include/rmm/mr/device/arena_memory_resource.hpp
@@ -293,7 +293,8 @@ class arena_memory_resource final : public device_memory_resource {
   * @param stream to execute on.
   * @return std::pair containing free_size and total_size of memory.
   */
-  std::pair do_get_mem_info(cuda_stream_view) const override
+  std::pair do_get_mem_info(
+    [[maybe_unused]] cuda_stream_view stream) const override
   {
     return std::make_pair(0, 0);
   }
diff --git a/include/rmm/mr/device/binning_memory_resource.hpp b/include/rmm/mr/device/binning_memory_resource.hpp
index 705df63c6..54cccca28 100644
--- a/include/rmm/mr/device/binning_memory_resource.hpp
+++ b/include/rmm/mr/device/binning_memory_resource.hpp
@@ -182,7 +182,7 @@ class binning_memory_resource final : public device_memory_resource {
   *
   * @throws nothing
   *
-  * @param p Pointer to be deallocated
+  * @param ptr Pointer to be deallocated
   * @param bytes The size in bytes of the allocation. This must be equal to the
   * value of `bytes` that was passed to the `allocate` call that returned `p`.
   * @param stream Stream on which to perform deallocation
@@ -201,7 +201,8 @@ class binning_memory_resource final : public device_memory_resource {
   * @param stream the stream being executed on
   * @return std::pair with available and free memory for resource
   */
-  [[nodiscard]] std::pair do_get_mem_info(cuda_stream_view) const override
+  [[nodiscard]] std::pair do_get_mem_info(
+    [[maybe_unused]] cuda_stream_view stream) const override
   {
     return std::make_pair(0, 0);
   }
diff --git a/include/rmm/mr/device/callback_memory_resource.hpp b/include/rmm/mr/device/callback_memory_resource.hpp
index 2d9695be2..8a42df6e2 100644
--- a/include/rmm/mr/device/callback_memory_resource.hpp
+++ b/include/rmm/mr/device/callback_memory_resource.hpp
@@ -100,11 +100,34 @@ class callback_memory_resource final : public device_memory_resource {
     default;  ///< @default_move_assignment{callback_memory_resource}
 
  private:
+  /**
+   * @brief Allocates memory of size at least \p bytes.
+   *
+   * The returned pointer will have at minimum 256 byte alignment.
+   *
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
+   *
+   * @param bytes The size of the allocation
+   * @param stream Stream on which to perform allocation
+   * @return void* Pointer to the newly allocated memory
+   */
   void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
   {
     return allocate_callback_(bytes, stream, allocate_callback_arg_);
   }
 
+  /**
+   * @brief Deallocate memory pointed to by \p p.
+   *
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
+   *
+   * @param ptr Pointer to be deallocated
+   * @param bytes The size in bytes of the allocation. This must be equal to the
+   * value of `bytes` that was passed to the `allocate` call that returned `p`.
+   * @param stream Stream on which to perform deallocation
+   */
   void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) override
   {
     deallocate_callback_(ptr, bytes, stream, deallocate_callback_arg_);
diff --git a/include/rmm/mr/device/cuda_async_memory_resource.hpp b/include/rmm/mr/device/cuda_async_memory_resource.hpp
index 329d8f29a..cd75f5add 100644
--- a/include/rmm/mr/device/cuda_async_memory_resource.hpp
+++ b/include/rmm/mr/device/cuda_async_memory_resource.hpp
@@ -173,13 +173,15 @@ class cuda_async_memory_resource final : public device_memory_resource {
 #endif
 
   /**
-   * @brief Allocates memory of size at least `bytes` using cudaMalloc.
+   * @brief Allocates memory of size at least \p bytes.
    *
-   * The returned pointer has at least 256B alignment.
+   * The returned pointer will have at minimum 256 byte alignment.
    *
-   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
    *
-   * @param bytes The size, in bytes, of the allocation
+   * @param bytes The size of the allocation
+   * @param stream Stream on which to perform allocation
    * @return void* Pointer to the newly allocated memory
    */
   void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override
@@ -197,17 +199,21 @@ class cuda_async_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * @throws Nothing.
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
    *
-   * @param p Pointer to be deallocated
+   * @param ptr Pointer to be deallocated
+   * @param bytes The size in bytes of the allocation. This must be equal to the
+   * value of `bytes` that was passed to the `allocate` call that returned `p`.
+   * @param stream Stream on which to perform deallocation
    */
-  void do_deallocate(void* ptr, std::size_t size, rmm::cuda_stream_view stream) override
+  void do_deallocate(void* ptr, std::size_t bytes, rmm::cuda_stream_view stream) override
   {
 #ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
-    pool_.deallocate(ptr, size, stream);
+    pool_.deallocate(ptr, bytes, stream);
 #else
     (void)ptr;
-    (void)size;
+    (void)bytes;
     (void)stream;
 #endif
   }
diff --git a/include/rmm/mr/device/cuda_async_view_memory_resource.hpp b/include/rmm/mr/device/cuda_async_view_memory_resource.hpp
index c685cd75f..190897b82 100644
--- a/include/rmm/mr/device/cuda_async_view_memory_resource.hpp
+++ b/include/rmm/mr/device/cuda_async_view_memory_resource.hpp
@@ -108,13 +108,12 @@ class cuda_async_view_memory_resource final : public device_memory_resource {
 #endif
 
   /**
-   * @brief Allocates memory of size at least `bytes` using cudaMalloc.
+   * @brief Allocates memory of size at least \p bytes.
    *
-   * The returned pointer has at least 256B alignment.
+   * The returned pointer will have at minimum 256 byte alignment.
    *
-   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
-   *
-   * @param bytes The size, in bytes, of the allocation
+   * @param bytes The size of the allocation
+   * @param stream Stream on which to perform allocation
    * @return void* Pointer to the newly allocated memory
    */
   void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override
@@ -135,11 +134,17 @@ class cuda_async_view_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * @throws Nothing.
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
    *
-   * @param p Pointer to be deallocated
+   * @param ptr Pointer to be deallocated
+   * @param bytes The size in bytes of the allocation. This must be equal to the
+   * value of `bytes` that was passed to the `allocate` call that returned `p`.
+   * @param stream Stream on which to perform deallocation
    */
-  void do_deallocate(void* ptr, std::size_t, rmm::cuda_stream_view stream) override
+  void do_deallocate(void* ptr,
+                     [[maybe_unused]] std::size_t bytes,
+                     rmm::cuda_stream_view stream) override
   {
 #ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
     if (ptr != nullptr) {
     }
 #else
     (void)ptr;
+    (void)bytes;
     (void)stream;
 #endif
   }
diff --git a/include/rmm/mr/device/cuda_memory_resource.hpp b/include/rmm/mr/device/cuda_memory_resource.hpp
index b0bf9ae09..6bd15bd27 100644
--- a/include/rmm/mr/device/cuda_memory_resource.hpp
+++ b/include/rmm/mr/device/cuda_memory_resource.hpp
@@ -55,18 +55,17 @@ class cuda_memory_resource final : public device_memory_resource {
 
  private:
   /**
-   * @brief Allocates memory of size at least `bytes` using cudaMalloc.
+   * @brief Allocates memory of size at least \p bytes.
    *
-   * The returned pointer has at least 256B alignment.
+   * The returned pointer will have at minimum 256 byte alignment.
    *
-   * @note Stream argument is ignored
+   * The stream argument is ignored.
    *
-   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
-   *
-   * @param bytes The size, in bytes, of the allocation
+   * @param bytes The size of the allocation
+   * @param stream This argument is ignored
    * @return void* Pointer to the newly allocated memory
    */
-  void* do_allocate(std::size_t bytes, cuda_stream_view) override
+  void* do_allocate(std::size_t bytes, [[maybe_unused]] cuda_stream_view stream) override
   {
     void* ptr{nullptr};
     RMM_CUDA_TRY_ALLOC(cudaMalloc(&ptr, bytes));
@@ -76,13 +75,17 @@ class cuda_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * @note Stream argument is ignored.
-   *
-   * @throws Nothing.
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
    *
-   * @param p Pointer to be deallocated
+   * @param ptr Pointer to be deallocated
+   * @param bytes The size in bytes of the allocation. This must be equal to the
+   * value of `bytes` that was passed to the `allocate` call that returned `p`.
+   * @param stream Stream on which to perform deallocation
    */
-  void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override
+  void do_deallocate(void* ptr,
+                     [[maybe_unused]] std::size_t bytes,
+                     [[maybe_unused]] cuda_stream_view stream) override
   {
     RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));
   }
diff --git a/include/rmm/mr/device/device_memory_resource.hpp b/include/rmm/mr/device/device_memory_resource.hpp
index 5149411d5..247774145 100644
--- a/include/rmm/mr/device/device_memory_resource.hpp
+++ b/include/rmm/mr/device/device_memory_resource.hpp
@@ -201,7 +201,7 @@ class device_memory_resource {
    * If supported, this operation may optionally be executed on a stream.
    * Otherwise, the stream is ignored and the null stream is used.
    *
-   * @param p Pointer to be deallocated
+   * @param ptr Pointer to be deallocated
    * @param bytes The size in bytes of the allocation. This must be equal to the
    * value of `bytes` that was passed to the `allocate` call that returned `p`.
    * @param stream Stream on which to perform deallocation
diff --git a/include/rmm/mr/device/limiting_resource_adaptor.hpp b/include/rmm/mr/device/limiting_resource_adaptor.hpp
index 895c404b0..ebc67fed1 100644
--- a/include/rmm/mr/device/limiting_resource_adaptor.hpp
+++ b/include/rmm/mr/device/limiting_resource_adaptor.hpp
@@ -185,7 +185,8 @@ class limiting_resource_adaptor final : public device_memory_resource {
   * @param stream Stream on which to get the mem info.
   * @return std::pair contaiing free_size and total_size of memory
   */
-  [[nodiscard]] std::pair do_get_mem_info(cuda_stream_view) const override
+  [[nodiscard]] std::pair do_get_mem_info(
+    [[maybe_unused]] cuda_stream_view stream) const override
   {
     return {allocation_limit_ - allocated_bytes_, allocation_limit_};
   }
@@ -209,7 +210,7 @@ class limiting_resource_adaptor final : public device_memory_resource {
 *
 * @tparam Upstream Type of the upstream `device_memory_resource`.
 * @param upstream Pointer to the upstream resource
- * @param limit Maximum amount of memory to allocate
+ * @param allocation_limit Maximum amount of memory to allocate
 */
 template
 limiting_resource_adaptor make_limiting_adaptor(Upstream* upstream,
diff --git a/include/rmm/mr/device/logging_resource_adaptor.hpp b/include/rmm/mr/device/logging_resource_adaptor.hpp
index 0ff9e950b..263387b44 100644
--- a/include/rmm/mr/device/logging_resource_adaptor.hpp
+++ b/include/rmm/mr/device/logging_resource_adaptor.hpp
@@ -329,6 +329,7 @@ class logging_resource_adaptor final : public device_memory_resource {
 * @param upstream Pointer to the upstream resource
 * @param filename Name of the file to write log info. If not specified,
 * retrieves the log file name from the environment variable "RMM_LOG_FILE".
+ * @param auto_flush If true, flushes the log for every (de)allocation. Warning, this will degrade
 */
 template
 logging_resource_adaptor make_logging_adaptor(
@@ -346,6 +347,7 @@ logging_resource_adaptor make_logging_adaptor(
 * @tparam Upstream Type of the upstream `device_memory_resource`.
 * @param upstream Pointer to the upstream resource
 * @param stream The ostream to write log info.
+ * @param auto_flush If true, flushes the log for every (de)allocation. Warning, this will degrade
 */
 template
 logging_resource_adaptor make_logging_adaptor(Upstream* upstream,
diff --git a/include/rmm/mr/device/managed_memory_resource.hpp b/include/rmm/mr/device/managed_memory_resource.hpp
index 4a0f7701a..d53da4e61 100644
--- a/include/rmm/mr/device/managed_memory_resource.hpp
+++ b/include/rmm/mr/device/managed_memory_resource.hpp
@@ -55,18 +55,17 @@ class managed_memory_resource final : public device_memory_resource {
 
  private:
   /**
-   * @brief Allocates memory of size at least `bytes` using cudaMallocManaged.
+   * @brief Allocates memory of size at least \p bytes.
    *
-   * The returned pointer has at least 256B alignment.
+   * The returned pointer will have at minimum 256 byte alignment.
    *
-   * @note Stream argument is ignored
+   * The stream is ignored.
    *
-   * @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
-   *
-   * @param bytes The size, in bytes, of the allocation
+   * @param bytes The size of the allocation
+   * @param stream This argument is ignored
    * @return void* Pointer to the newly allocated memory
    */
-  void* do_allocate(std::size_t bytes, cuda_stream_view) override
+  void* do_allocate(std::size_t bytes, [[maybe_unused]] cuda_stream_view stream) override
   {
     // FIXME: Unlike cudaMalloc, cudaMallocManaged will throw an error for 0
     // size allocations.
@@ -78,15 +77,19 @@ class managed_memory_resource final : public device_memory_resource {
   }
 
   /**
-   * @brief Deallocate memory pointed to by `ptr`.
-   *
-   * @note Stream argument is ignored.
+   * @brief Deallocate memory pointed to by \p p.
    *
-   * @throws Nothing.
+   * If supported, this operation may optionally be executed on a stream.
+   * Otherwise, the stream is ignored and the null stream is used.
    *
    * @param ptr Pointer to be deallocated
+   * @param bytes The size in bytes of the allocation. This must be equal to the
+   * value of `bytes` that was passed to the `allocate` call that returned `p`.
+   * @param stream Stream on which to perform deallocation
    */
-  void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override
+  void do_deallocate(void* ptr,
+                     [[maybe_unused]] std::size_t bytes,
+                     [[maybe_unused]] cuda_stream_view stream) override
   {
     RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));
   }
@@ -116,7 +119,8 @@ class managed_memory_resource final : public device_memory_resource {
   * @param stream to execute on
   * @return std::pair contaiing free_size and total_size of memory
   */
-  [[nodiscard]] std::pair do_get_mem_info(cuda_stream_view) const override
+  [[nodiscard]] std::pair do_get_mem_info(
+    [[maybe_unused]] cuda_stream_view stream) const override
   {
     std::size_t free_size{};
     std::size_t total_size{};
diff --git a/include/rmm/thrust_rmm_allocator.h b/include/rmm/thrust_rmm_allocator.h
index 3bbcab0a5..0b78d5497 100644
--- a/include/rmm/thrust_rmm_allocator.h
+++ b/include/rmm/thrust_rmm_allocator.h
@@ -33,9 +33,9 @@ using exec_policy_t = std::unique_ptr;
 * @brief Returns a unique_ptr to a Thrust CUDA execution policy that uses RMM
 * for temporary memory allocation.
 *
- * @Param stream The stream that the allocator will use
+ * @param stream The stream that the allocator will use
 *
- * @Returns A Thrust execution policy that will use RMM for temporary memory
+ * @return A Thrust execution policy that will use RMM for temporary memory
 * allocation.
 */
 [[deprecated("Use new exec_policy in rmm/exec_policy.hpp")]] inline exec_policy_t exec_policy(

From 5c7f8c7cb4cf6abe9679872918482168a7027228 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Wed, 20 Sep 2023 15:46:48 -0700
Subject: [PATCH 08/17] Fix some typos in the docs

---
 include/rmm/mr/device/callback_memory_resource.hpp | 8 ++++----
 include/rmm/mr/device/cuda_async_memory_resource.hpp | 6 ------
 include/rmm/mr/device/cuda_async_view_memory_resource.hpp | 3 ---
 include/rmm/mr/device/cuda_memory_resource.hpp | 5 ++---
 include/rmm/mr/device/managed_memory_resource.hpp | 5 ++---
 5 files changed, 8 insertions(+), 19 deletions(-)

diff --git a/include/rmm/mr/device/callback_memory_resource.hpp b/include/rmm/mr/device/callback_memory_resource.hpp
index 8a42df6e2..289ceb9d6 100644
--- a/include/rmm/mr/device/callback_memory_resource.hpp
+++ b/include/rmm/mr/device/callback_memory_resource.hpp
@@ -105,8 +105,8 @@ class callback_memory_resource final : public device_memory_resource {
    *
    * The returned pointer will have at minimum 256 byte alignment.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
+   * If supported by the callback, this operation may optionally be executed on
+   * a stream. Otherwise, the stream is ignored and the null stream is used.
    *
    * @param bytes The size of the allocation
    * @param stream Stream on which to perform allocation
    * @return void* Pointer to the newly allocated memory
@@ -120,8 +120,8 @@ class callback_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
+   * If supported by the callback, this operation may optionally be executed on
+   * a stream. Otherwise, the stream is ignored and the null stream is used.
    *
    * @param ptr Pointer to be deallocated
    * @param bytes The size in bytes of the allocation. This must be equal to the
diff --git a/include/rmm/mr/device/cuda_async_memory_resource.hpp b/include/rmm/mr/device/cuda_async_memory_resource.hpp
index cd75f5add..9bbd9cc09 100644
--- a/include/rmm/mr/device/cuda_async_memory_resource.hpp
+++ b/include/rmm/mr/device/cuda_async_memory_resource.hpp
@@ -177,9 +177,6 @@ class cuda_async_memory_resource final : public device_memory_resource {
    *
    * The returned pointer will have at minimum 256 byte alignment.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
-   *
    * @param bytes The size of the allocation
    * @param stream Stream on which to perform allocation
    * @return void* Pointer to the newly allocated memory
@@ -196,9 +196,6 @@ class cuda_async_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
-   *
    * @param ptr Pointer to be deallocated
    * @param bytes The size in bytes of the allocation. This must be equal to the
    * value of `bytes` that was passed to the `allocate` call that returned `p`.
diff --git a/include/rmm/mr/device/cuda_async_view_memory_resource.hpp b/include/rmm/mr/device/cuda_async_view_memory_resource.hpp
index 190897b82..569f65df7 100644
--- a/include/rmm/mr/device/cuda_async_view_memory_resource.hpp
+++ b/include/rmm/mr/device/cuda_async_view_memory_resource.hpp
@@ -134,9 +134,6 @@ class cuda_async_view_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
-   *
    * @param ptr Pointer to be deallocated
    * @param bytes The size in bytes of the allocation. This must be equal to the
    * value of `bytes` that was passed to the `allocate` call that returned `p`.
diff --git a/include/rmm/mr/device/cuda_memory_resource.hpp b/include/rmm/mr/device/cuda_memory_resource.hpp
index 6bd15bd27..5164bc5c5 100644
--- a/include/rmm/mr/device/cuda_memory_resource.hpp
+++ b/include/rmm/mr/device/cuda_memory_resource.hpp
@@ -75,13 +75,12 @@ class cuda_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
+   * The stream argument is ignored.
    *
    * @param ptr Pointer to be deallocated
    * @param bytes The size in bytes of the allocation. This must be equal to the
    * value of `bytes` that was passed to the `allocate` call that returned `p`.
-   * @param stream Stream on which to perform deallocation
+   * @param stream This argument is ignored.
    */
   void do_deallocate(void* ptr,
                      [[maybe_unused]] std::size_t bytes,
                      [[maybe_unused]] cuda_stream_view stream) override
diff --git a/include/rmm/mr/device/managed_memory_resource.hpp b/include/rmm/mr/device/managed_memory_resource.hpp
index d53da4e61..e987de15d 100644
--- a/include/rmm/mr/device/managed_memory_resource.hpp
+++ b/include/rmm/mr/device/managed_memory_resource.hpp
@@ -79,13 +79,12 @@ class managed_memory_resource final : public device_memory_resource {
   /**
    * @brief Deallocate memory pointed to by \p p.
    *
-   * If supported, this operation may optionally be executed on a stream.
-   * Otherwise, the stream is ignored and the null stream is used.
+   * The stream is ignored.
    *
    * @param ptr Pointer to be deallocated
    * @param bytes The size in bytes of the allocation. This must be equal to the
    * value of `bytes` that was passed to the `allocate` call that returned `p`.
-   * @param stream Stream on which to perform deallocation
+   * @param stream This argument is ignored
    */
   void do_deallocate(void* ptr,
                      [[maybe_unused]] std::size_t bytes,
                      [[maybe_unused]] cuda_stream_view stream) override

From d8a16ad6e8a6598eb709f8938b0cda801ef532c0 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Thu, 21 Sep 2023 10:37:08 -0700
Subject: [PATCH 09/17] Alphabetize deps

---
 dependencies.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dependencies.yaml b/dependencies.yaml
index 1a1be7e4f..dab7daab8 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -156,8 +156,8 @@ dependencies:
     common:
       - output_types: [conda]
         packages:
-          - *doxygen
           - breathe
+          - *doxygen
           - graphviz
           - ipython
           - make

From 6d941e30b0029f67369e68e24e9554a670950be3 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Thu, 21 Sep 2023 10:41:28 -0700
Subject: [PATCH 10/17] Fix typos

---
 include/rmm/mr/device/logging_resource_adaptor.hpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/rmm/mr/device/logging_resource_adaptor.hpp b/include/rmm/mr/device/logging_resource_adaptor.hpp
index 263387b44..c59fdf61a 100644
--- a/include/rmm/mr/device/logging_resource_adaptor.hpp
+++ b/include/rmm/mr/device/logging_resource_adaptor.hpp
@@ -330,6 +330,7 @@ class logging_resource_adaptor final : public device_memory_resource {
 * @param filename Name of the file to write log info. If not specified,
 * retrieves the log file name from the environment variable "RMM_LOG_FILE".
 * @param auto_flush If true, flushes the log for every (de)allocation. Warning, this will degrade
+ * performance.
 */
 template
 logging_resource_adaptor make_logging_adaptor(
@@ -348,6 +349,7 @@ logging_resource_adaptor make_logging_adaptor(
 * @param upstream Pointer to the upstream resource
 * @param stream The ostream to write log info.
 * @param auto_flush If true, flushes the log for every (de)allocation. Warning, this will degrade
+ * performance.
 */
 template
 logging_resource_adaptor make_logging_adaptor(Upstream* upstream,

From 576f87e19c2b9a9bc20b92f443f8b01056fd8181 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Thu, 21 Sep 2023 10:41:36 -0700
Subject: [PATCH 11/17] Remove patch for now

---
 python/docs/conf.py | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/python/docs/conf.py b/python/docs/conf.py
index e224f59dd..021a077c1 100644
--- a/python/docs/conf.py
+++ b/python/docs/conf.py
@@ -201,11 +201,10 @@
 ]
 
 
-def on_missing_reference(app, env, node, contnode):
-    return contnode
-    if node["refdomain"] == "cpp":
-        return contnode
-    return None
+# def on_missing_reference(app, env, node, contnode):
+#     if node["refdomain"] == "cpp":
+#         return contnode
+#     return None
 
 
 def setup(app):
@@ -214,4 +213,4 @@ def setup(app):
     app.add_js_file(
         "https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
     )
-    app.connect("missing-reference", on_missing_reference)
+    # app.connect("missing-reference", on_missing_reference)

From bc79db2dc620f1b4756b18ff1488917f2d715075 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Thu, 21 Sep 2023 10:54:17 -0700
Subject: [PATCH 12/17] First pass of reorg

---
 python/docs/cpp.rst | 8 ++++++++
 python/docs/{librmm_api.rst => cpp_api.rst} | 2 +-
 python/docs/{basics.md => guide.md} | 2 +-
 python/docs/index.rst | 5 ++---
 python/docs/python.rst | 9 +++++++++
 python/docs/{api.rst => python_api.rst} | 0
 6 files changed, 21 insertions(+), 5 deletions(-)
 create mode 100644 python/docs/cpp.rst
 rename python/docs/{librmm_api.rst => cpp_api.rst} (73%)
 rename python/docs/{basics.md => guide.md} (99%)
 create mode 100644 python/docs/python.rst
 rename python/docs/{api.rst => python_api.rst} (100%)

diff --git a/python/docs/cpp.rst b/python/docs/cpp.rst
new file mode 100644
index 000000000..e60f15129
--- /dev/null
+++ b/python/docs/cpp.rst
@@ -0,0 +1,8 @@
+Welcome to the rmm C++ documentation!
+========================================
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   cpp_api.rst
diff --git a/python/docs/librmm_api.rst b/python/docs/cpp_api.rst
similarity index 73%
rename from python/docs/librmm_api.rst
rename to python/docs/cpp_api.rst
index 087eda247..ca0afbb0d 100644
--- a/python/docs/librmm_api.rst
+++ b/python/docs/cpp_api.rst
@@ -1,4 +1,4 @@
 API Reference
-==============
+=============
 
 .. doxygennamespace:: rmm
diff --git a/python/docs/basics.md b/python/docs/guide.md
similarity index 99%
rename from python/docs/basics.md
rename to python/docs/guide.md
index 997745f00..c06135ca8 100644
--- a/python/docs/basics.md
+++ b/python/docs/guide.md
@@ -1,4 +1,4 @@
-# RMM - the RAPIDS Memory Manager
+# User Guide
 
 Achieving optimal performance in GPU-centric workflows frequently requires
 customizing how GPU ("device") memory is allocated.
diff --git a/python/docs/index.rst b/python/docs/index.rst
index 8dffdeb02..524351253 100644
--- a/python/docs/index.rst
+++ b/python/docs/index.rst
@@ -10,9 +10,8 @@ Welcome to rmm's documentation!
    :maxdepth: 2
    :caption: Contents:
 
-   basics.md
-   api.rst
-   librmm_api.rst
+   Python
+   C++
 
 
 Indices and tables
diff --git a/python/docs/python.rst b/python/docs/python.rst
new file mode 100644
index 000000000..bff919627
--- /dev/null
+++ b/python/docs/python.rst
@@ -0,0 +1,9 @@
+Welcome to the rmm Python documentation!
+========================================
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+   guide.md
+   python_api.rst
diff --git a/python/docs/api.rst b/python/docs/python_api.rst
similarity index 100%
rename from python/docs/api.rst
rename to python/docs/python_api.rst

From 1272bc8c750e681318aa6dd6569cb902ffa2fd58 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 25 Sep 2023 09:47:07 -0700
Subject: [PATCH 13/17] Remove breathe

---
 conda/environments/all_cuda-118_arch-x86_64.yaml | 1 -
 conda/environments/all_cuda-120_arch-x86_64.yaml | 1 -
 dependencies.yaml | 1 -
 3 files changed, 3 deletions(-)

diff --git a/conda/environments/all_cuda-118_arch-x86_64.yaml b/conda/environments/all_cuda-118_arch-x86_64.yaml
index 71609ca35..5c5ecb453 100644
--- a/conda/environments/all_cuda-118_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -4,7 +4,6 @@ channels:
 - rapidsai
 - conda-forge
 dependencies:
-- breathe
 - cmake>=3.26.4
 - cuda-python>=11.7.1,<12.0a0
 - cuda-version=11.8
diff --git a/conda/environments/all_cuda-120_arch-x86_64.yaml b/conda/environments/all_cuda-120_arch-x86_64.yaml
index ce89249a9..e01de1ca2 100644
--- a/conda/environments/all_cuda-120_arch-x86_64.yaml
+++ b/conda/environments/all_cuda-120_arch-x86_64.yaml
@@ -4,7 +4,6 @@ channels:
 - rapidsai
 - conda-forge
 dependencies:
-- breathe
 - cmake>=3.26.4
 - cuda-nvcc
 - cuda-python>=12.0,<13.0a0
diff --git a/dependencies.yaml b/dependencies.yaml
index dab7daab8..765a812cb 100644
--- a/dependencies.yaml
+++ b/dependencies.yaml
@@ -156,7 +156,6 @@ dependencies:
     common:
       - output_types: [conda]
        packages:
-          - breathe
          - *doxygen
          - graphviz
          - ipython

From 51bbbd62cdb64d20b5a28aeeb5f75f76c3c23814 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 25 Sep 2023 09:48:11 -0700
Subject: [PATCH 14/17] Revert code block changes

---
 include/rmm/device_uvector.hpp | 2 +-
 include/rmm/mr/device/device_memory_resource.hpp | 2 +-
 include/rmm/mr/device/failure_callback_resource_adaptor.hpp | 2 +-
 include/rmm/mr/device/per_device_resource.hpp | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/include/rmm/device_uvector.hpp b/include/rmm/device_uvector.hpp
index 74103e9d4..47ed1adff 100644
--- a/include/rmm/device_uvector.hpp
+++ b/include/rmm/device_uvector.hpp
@@ -39,7 +39,7 @@ namespace rmm {
  * `thrust::uninitialized_fill`.
  *
  * Example:
- * @code{.cpp}
+ * @code{c++}
  * rmm::mr::device_memory_resource * mr = new my_custom_resource();
  * rmm::cuda_stream_view s{};
  *
diff --git a/include/rmm/mr/device/device_memory_resource.hpp b/include/rmm/mr/device/device_memory_resource.hpp
index 247774145..1813fb726 100644
--- a/include/rmm/mr/device/device_memory_resource.hpp
+++ b/include/rmm/mr/device/device_memory_resource.hpp
@@ -70,7 +70,7 @@ namespace rmm::mr {
  * pool_memory_resource objects for each device and sets them as the per-device resource for that
  * device.
  *
- * @code{.cpp}
+ * @code{c++}
  * std::vector> per_device_pools;
 * for(int i = 0; i < N; ++i) {
 *   cudaSetDevice(i);
diff --git a/include/rmm/mr/device/failure_callback_resource_adaptor.hpp b/include/rmm/mr/device/failure_callback_resource_adaptor.hpp
index a6778d1b0..48f0513d8 100644
--- a/include/rmm/mr/device/failure_callback_resource_adaptor.hpp
+++ b/include/rmm/mr/device/failure_callback_resource_adaptor.hpp
@@ -55,7 +55,7 @@ using failure_callback_t = std::function;
 * When implementing a callback function for allocation retry, care must be taken to avoid an
 * infinite loop. The following example makes sure to only retry the allocation once:
  *
- * @code{.cpp}
+ * @code{c++}
  * using failure_callback_adaptor =
  *   rmm::mr::failure_callback_resource_adaptor;
  *
diff --git a/include/rmm/mr/device/per_device_resource.hpp b/include/rmm/mr/device/per_device_resource.hpp
index 594b48749..aa7217758 100644
--- a/include/rmm/mr/device/per_device_resource.hpp
+++ b/include/rmm/mr/device/per_device_resource.hpp
@@ -61,7 +61,7 @@
  * pool_memory_resource objects for each device and sets them as the per-device resource for that
  * device.
  *
- * @code{.cpp}
+ * @code{c++}
  * std::vector> per_device_pools;
 * for(int i = 0; i < N; ++i) {
 *   cudaSetDevice(i);

From 657b09847697aa33b25d78c3e33c6bd2731d3eaf Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 25 Sep 2023 09:49:17 -0700
Subject: [PATCH 15/17] Revert Sphinx configuration changes

---
 python/docs/Makefile | 2 +-
 python/docs/conf.py | 11 -----------
 2 files changed, 1 insertion(+), 12 deletions(-)

diff --git a/python/docs/Makefile b/python/docs/Makefile
index 68e58b474..72d1dcd85 100644
--- a/python/docs/Makefile
+++ b/python/docs/Makefile
@@ -3,7 +3,7 @@
 
 # You can set these variables from the command line, and also
 # from the environment for the first two.
-SPHINXOPTS = -n -v
+SPHINXOPTS = -n -v -W --keep-going
 SPHINXBUILD ?= sphinx-build
 SOURCEDIR = .
 BUILDDIR = _build
diff --git a/python/docs/conf.py b/python/docs/conf.py
index 021a077c1..d8c7460dc 100644
--- a/python/docs/conf.py
+++ b/python/docs/conf.py
@@ -46,12 +46,8 @@
     "IPython.sphinxext.ipython_directive",
     "nbsphinx",
     "recommonmark",
-    "breathe",
 ]
 
-# Breathe Configuration
-breathe_projects = {"librmm": "../../doxygen/xml"}
-breathe_default_project = "librmm"
 
 copybutton_prompt_text = ">>> "
@@ -201,16 +197,9 @@
 ]
 
 
-# def on_missing_reference(app, env, node, contnode):
-#     if node["refdomain"] == "cpp":
-#         return contnode
-#     return None
-
-
 def setup(app):
     app.add_js_file("copybutton_pydocs.js")
     app.add_css_file("https://docs.rapids.ai/assets/css/custom.css")
     app.add_js_file(
         "https://docs.rapids.ai/assets/js/custom.js", loading_method="defer"
     )
-    # app.connect("missing-reference", on_missing_reference)

From 665c7eb4aa35cc18656029ad31f1b9c25a678bfe Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 25 Sep 2023 09:51:09 -0700
Subject: [PATCH 16/17] Revert Sphinx content changes

---
 python/docs/{python_api.rst => api.rst} | 0
 python/docs/{guide.md => basics.md} | 2 +-
 python/docs/cpp.rst | 8 --------
 python/docs/cpp_api.rst | 4 ----
 python/docs/index.rst | 4 ++--
 python/docs/python.rst | 9 ---------
 6 files changed, 3 insertions(+), 24 deletions(-)
 rename python/docs/{python_api.rst => api.rst} (100%)
 rename python/docs/{guide.md => basics.md} (99%)
 delete mode 100644 python/docs/cpp.rst
 delete mode 100644 python/docs/cpp_api.rst
 delete mode 100644 python/docs/python.rst

diff --git a/python/docs/python_api.rst b/python/docs/api.rst
similarity index 100%
rename from python/docs/python_api.rst
rename to python/docs/api.rst
diff --git a/python/docs/guide.md b/python/docs/basics.md
similarity index 99%
rename from python/docs/guide.md
rename to python/docs/basics.md
index c06135ca8..997745f00 100644
--- a/python/docs/guide.md
+++ b/python/docs/basics.md
@@ -1,4 +1,4 @@
-# User Guide
+# RMM - the RAPIDS Memory Manager
 
 Achieving optimal performance in GPU-centric workflows frequently requires
 customizing how GPU ("device") memory is allocated.
diff --git a/python/docs/cpp.rst b/python/docs/cpp.rst
deleted file mode 100644
index e60f15129..000000000
--- a/python/docs/cpp.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-Welcome to the rmm C++ documentation!
-========================================
-
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-   cpp_api.rst
diff --git a/python/docs/cpp_api.rst b/python/docs/cpp_api.rst
deleted file mode 100644
index ca0afbb0d..000000000
--- a/python/docs/cpp_api.rst
+++ /dev/null
@@ -1,4 +0,0 @@
-API Reference
-=============
-
-.. doxygennamespace:: rmm
diff --git a/python/docs/index.rst b/python/docs/index.rst
index 524351253..8dffdeb02 100644
--- a/python/docs/index.rst
+++ b/python/docs/index.rst
@@ -10,8 +10,8 @@ Welcome to rmm's documentation!
    :maxdepth: 2
    :caption: Contents:
 
-   Python
-   C++
+   basics.md
+   api.rst
 
 
 Indices and tables
diff --git a/python/docs/python.rst b/python/docs/python.rst
deleted file mode 100644
index bff919627..000000000
--- a/python/docs/python.rst
+++ /dev/null
@@ -1,9 +0,0 @@
-Welcome to the rmm Python documentation!
-========================================
-
-.. toctree::
-   :maxdepth: 2
-   :caption: Contents:
-
-   guide.md
-   python_api.rst

From 14cb408f50150de952b42ea323645b3deb44fff5 Mon Sep 17 00:00:00 2001
From: Vyas Ramasubramani
Date: Mon, 25 Sep 2023 09:52:05 -0700
Subject: [PATCH 17/17] One missed reversion

---
 include/rmm/mr/device/polymorphic_allocator.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/rmm/mr/device/polymorphic_allocator.hpp b/include/rmm/mr/device/polymorphic_allocator.hpp
index c3b8ac6ea..a52ec14d1 100644
--- a/include/rmm/mr/device/polymorphic_allocator.hpp
+++ b/include/rmm/mr/device/polymorphic_allocator.hpp
@@ -133,7 +133,7 @@ bool operator!=(polymorphic_allocator const& lhs, polymorphic_allocator co
 *`deallocate` functions.
 *
 * Example:
- *\code{.cpp}
+ *\code{c++}
 * my_stream_ordered_allocator a{...};
 * cuda_stream_view s = // create stream;
 *