Enable doxygen XML and fix issues #1348

Merged (17 commits) on Sep 25, 2023
1 change: 1 addition & 0 deletions .gitignore
@@ -91,6 +91,7 @@ thirdparty/googletest/

## Doxygen
doxygen/html
doxygen/xml

#Java
target
2 changes: 1 addition & 1 deletion doxygen/Doxyfile
@@ -2012,7 +2012,7 @@ MAN_LINKS = NO
# captures the structure of the code including all documentation.
# The default value is: NO.

GENERATE_XML = NO
GENERATE_XML = YES

# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
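With GENERATE_XML enabled, Doxygen also emits a machine-readable XML description of every documented entity, which makes documentation defects such as @param tags that do not match real parameter names much easier to catch; the remaining changes in this PR fix those defects. A minimal sketch of the convention being enforced (the function below is hypothetical, not RMM code):

/**
 * @brief Copies \p bytes bytes from \p src to \p dst.
 *
 * @param dst Destination pointer
 * @param src Source pointer
 * @param bytes Number of bytes to copy
 */
void copy_bytes(void* dst, void const* src, std::size_t bytes);

Each @param name must match a parameter name in the declaration; writing the last tag as, say, @param p would be flagged as a mismatch.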
2 changes: 1 addition & 1 deletion include/rmm/cuda_stream_view.hpp
@@ -166,7 +166,7 @@ inline bool operator!=(cuda_stream_view lhs, cuda_stream_view rhs) { return not(
* @brief Output stream operator for printing / logging streams
*
* @param os The output ostream
* @param sv The cuda_stream_view to output
* @param stream The cuda_stream_view to output
* @return std::ostream& The output ostream
*/
inline std::ostream& operator<<(std::ostream& os, cuda_stream_view stream)
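The corrected @param name now matches the signature below it. As a usage note, a minimal sketch of the operator being documented (illustrative only, not part of this PR):

#include <rmm/cuda_stream_view.hpp>
#include <iostream>

// Log which stream a piece of work runs on, using the operator<< documented above.
void log_stream(rmm::cuda_stream_view stream)
{
  std::cout << "executing on stream " << stream << '\n';
}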
2 changes: 1 addition & 1 deletion include/rmm/mr/device/aligned_resource_adaptor.hpp
@@ -140,7 +140,7 @@ class aligned_resource_adaptor final : public device_memory_resource {
*
* @throws Nothing.
*
* @param p Pointer to be deallocated
* @param ptr Pointer to be deallocated
* @param bytes Size of the allocation
* @param stream Stream on which to perform the deallocation
*/
3 changes: 2 additions & 1 deletion include/rmm/mr/device/arena_memory_resource.hpp
@@ -293,7 +293,8 @@ class arena_memory_resource final : public device_memory_resource {
* @param stream to execute on.
* @return std::pair containing free_size and total_size of memory.
*/
std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}
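This is the pattern applied throughout the PR for parameters that are documented but unused: the parameter gets a name so the @param tag can bind to it, and [[maybe_unused]] suppresses the resulting unused-parameter warning. In sketch form:

// Before: unnamed parameter, so "@param stream" refers to nothing.
std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override;

// After: named for the documentation, marked [[maybe_unused]] because the body ignores it.
std::pair<std::size_t, std::size_t> do_get_mem_info(
  [[maybe_unused]] cuda_stream_view stream) const override;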
5 changes: 3 additions & 2 deletions include/rmm/mr/device/binning_memory_resource.hpp
@@ -182,7 +182,7 @@ class binning_memory_resource final : public device_memory_resource {
*
* @throws nothing
*
* @param p Pointer to be deallocated
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream Stream on which to perform deallocation
@@ -201,7 +201,8 @@
* @param stream the stream being executed on
* @return std::pair with available and free memory for resource
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
return std::make_pair(0, 0);
}
23 changes: 23 additions & 0 deletions include/rmm/mr/device/callback_memory_resource.hpp
@@ -100,11 +100,34 @@ class callback_memory_resource final : public device_memory_resource {
default; ///< @default_move_assignment{callback_memory_resource}

private:
/**
* @brief Allocates memory of size at least \p bytes.
*
* The returned pointer will have at minimum 256 byte alignment.
*
* If supported by the callback, this operation may optionally be executed on
* a stream. Otherwise, the stream is ignored and the null stream is used.
*
* @param bytes The size of the allocation
* @param stream Stream on which to perform allocation
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, cuda_stream_view stream) override
{
return allocate_callback_(bytes, stream, allocate_callback_arg_);
}

/**
* @brief Deallocate memory pointed to by \p p.
*
* If supported by the callback, this operation may optionally be executed on
* a stream. Otherwise, the stream is ignored and the null stream is used.
*
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream Stream on which to perform deallocation
*/
void do_deallocate(void* ptr, std::size_t bytes, cuda_stream_view stream) override
{
deallocate_callback_(ptr, bytes, stream, deallocate_callback_arg_);
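The new comments spell out that each callback receives the size, the stream, and the user-supplied argument. A minimal usage sketch built on that description (the lambdas and the choice of upstream are illustrative, not RMM code; the constructor is assumed to take the two callbacks followed by their user arguments):

#include <rmm/mr/device/callback_memory_resource.hpp>
#include <rmm/mr/device/cuda_memory_resource.hpp>

void example()
{
  rmm::mr::cuda_memory_resource upstream;  // forward everything to plain cudaMalloc/cudaFree

  auto allocate_cb = [](std::size_t bytes, rmm::cuda_stream_view stream, void* arg) -> void* {
    return static_cast<rmm::mr::cuda_memory_resource*>(arg)->allocate(bytes, stream);
  };
  auto deallocate_cb = [](void* ptr, std::size_t bytes, rmm::cuda_stream_view stream, void* arg) {
    static_cast<rmm::mr::cuda_memory_resource*>(arg)->deallocate(ptr, bytes, stream);
  };

  rmm::mr::callback_memory_resource mr{allocate_cb, deallocate_cb, &upstream, &upstream};
  void* ptr = mr.allocate(256);  // invokes allocate_cb with bytes = 256 and the default stream
  mr.deallocate(ptr, 256);       // invokes deallocate_cb with the same size
}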
22 changes: 11 additions & 11 deletions include/rmm/mr/device/cuda_async_memory_resource.hpp
@@ -173,13 +173,12 @@ class cuda_async_memory_resource final : public device_memory_resource {
#endif

/**
* @brief Allocates memory of size at least `bytes` using cudaMalloc.
* @brief Allocates memory of size at least \p bytes.
*
* The returned pointer has at least 256B alignment.
* The returned pointer will have at minimum 256 byte alignment.
*
* @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @param bytes The size of the allocation
* @param stream Stream on which to perform allocation
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override
@@ -197,17 +196,18 @@
/**
* @brief Deallocate memory pointed to by \p p.
*
* @throws Nothing.
*
* @param p Pointer to be deallocated
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream Stream on which to perform deallocation
*/
void do_deallocate(void* ptr, std::size_t size, rmm::cuda_stream_view stream) override
void do_deallocate(void* ptr, std::size_t bytes, rmm::cuda_stream_view stream) override
{
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
pool_.deallocate(ptr, size, stream);
pool_.deallocate(ptr, bytes, stream);
#else
(void)ptr;
(void)size;
(void)bytes;
(void)stream;
#endif
}
21 changes: 12 additions & 9 deletions include/rmm/mr/device/cuda_async_view_memory_resource.hpp
@@ -108,13 +108,12 @@ class cuda_async_view_memory_resource final : public device_memory_resource {
#endif

/**
* @brief Allocates memory of size at least `bytes` using cudaMalloc.
* @brief Allocates memory of size at least \p bytes.
*
* The returned pointer has at least 256B alignment.
* The returned pointer will have at minimum 256 byte alignment.
*
* @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @param bytes The size of the allocation
* @param stream Stream on which to perform allocation
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, rmm::cuda_stream_view stream) override
@@ -135,18 +134,22 @@
/**
* @brief Deallocate memory pointed to by \p p.
*
* @throws Nothing.
*
* @param p Pointer to be deallocated
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream Stream on which to perform deallocation
*/
void do_deallocate(void* ptr, std::size_t, rmm::cuda_stream_view stream) override
void do_deallocate(void* ptr,
[[maybe_unused]] std::size_t bytes,
rmm::cuda_stream_view stream) override
{
#ifdef RMM_CUDA_MALLOC_ASYNC_SUPPORT
if (ptr != nullptr) {
RMM_ASSERT_CUDA_SUCCESS(rmm::detail::async_alloc::cudaFreeAsync(ptr, stream.value()));
}
#else
(void)ptr;
(void)bytes;
(void)stream;
#endif
}
26 changes: 14 additions & 12 deletions include/rmm/mr/device/cuda_memory_resource.hpp
@@ -55,18 +55,17 @@ class cuda_memory_resource final : public device_memory_resource {

private:
/**
* @brief Allocates memory of size at least `bytes` using cudaMalloc.
* @brief Allocates memory of size at least \p bytes.
*
* The returned pointer has at least 256B alignment.
* The returned pointer will have at minimum 256 byte alignment.
*
* @note Stream argument is ignored
* The stream argument is ignored.
*
* @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @param bytes The size of the allocation
* @param stream This argument is ignored
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, cuda_stream_view) override
void* do_allocate(std::size_t bytes, [[maybe_unused]] cuda_stream_view stream) override
{
void* ptr{nullptr};
RMM_CUDA_TRY_ALLOC(cudaMalloc(&ptr, bytes));
@@ -76,13 +75,16 @@
/**
* @brief Deallocate memory pointed to by \p p.
*
* @note Stream argument is ignored.
*
* @throws Nothing.
* The stream argument is ignored.
*
* @param p Pointer to be deallocated
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream This argument is ignored.
*/
void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override
void do_deallocate(void* ptr,
[[maybe_unused]] std::size_t bytes,
[[maybe_unused]] cuda_stream_view stream) override
{
RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));
}
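Since cudaMalloc and cudaFree are synchronous, the stream parameter is now documented as ignored rather than left undocumented. A short usage sketch of the public interface that forwards to these hooks (illustrative only, not part of this PR):

#include <rmm/mr/device/cuda_memory_resource.hpp>

void example()
{
  rmm::mr::cuda_memory_resource mr;
  void* ptr = mr.allocate(1024);  // returns memory with at least 256 byte alignment
  mr.deallocate(ptr, 1024);       // bytes must equal the value passed to the matching allocate
}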
2 changes: 1 addition & 1 deletion include/rmm/mr/device/device_memory_resource.hpp
@@ -201,7 +201,7 @@ class device_memory_resource {
* If supported, this operation may optionally be executed on a stream.
* Otherwise, the stream is ignored and the null stream is used.
*
* @param p Pointer to be deallocated
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream Stream on which to perform deallocation
5 changes: 3 additions & 2 deletions include/rmm/mr/device/limiting_resource_adaptor.hpp
@@ -185,7 +185,8 @@ class limiting_resource_adaptor final : public device_memory_resource {
* @param stream Stream on which to get the mem info.
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
return {allocation_limit_ - allocated_bytes_, allocation_limit_};
}
@@ -209,7 +210,7 @@
*
* @tparam Upstream Type of the upstream `device_memory_resource`.
* @param upstream Pointer to the upstream resource
* @param limit Maximum amount of memory to allocate
* @param allocation_limit Maximum amount of memory to allocate
*/
template <typename Upstream>
limiting_resource_adaptor<Upstream> make_limiting_adaptor(Upstream* upstream,
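The factory's size parameter is now documented under its real name, allocation_limit. A usage sketch (illustrative only; the 1 GiB figure is arbitrary):

#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/limiting_resource_adaptor.hpp>

void example()
{
  rmm::mr::cuda_memory_resource upstream;
  // Cap the total outstanding allocations from `upstream` at 1 GiB.
  auto limited = rmm::mr::make_limiting_adaptor(&upstream, std::size_t{1} << 30);
  void* ptr = limited.allocate(512);  // counted against allocation_limit
  limited.deallocate(ptr, 512);
}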
4 changes: 4 additions & 0 deletions include/rmm/mr/device/logging_resource_adaptor.hpp
@@ -329,6 +329,8 @@ class logging_resource_adaptor final : public device_memory_resource {
* @param upstream Pointer to the upstream resource
* @param filename Name of the file to write log info. If not specified,
* retrieves the log file name from the environment variable "RMM_LOG_FILE".
* @param auto_flush If true, flushes the log for every (de)allocation. Warning, this will degrade
* performance.
*/
template <typename Upstream>
logging_resource_adaptor<Upstream> make_logging_adaptor(
@@ -346,6 +348,8 @@ logging_resource_adaptor<Upstream> make_logging_adaptor(
* @tparam Upstream Type of the upstream `device_memory_resource`.
* @param upstream Pointer to the upstream resource
* @param stream The ostream to write log info.
* @param auto_flush If true, flushes the log for every (de)allocation. Warning, this will degrade
* performance.
*/
template <typename Upstream>
logging_resource_adaptor<Upstream> make_logging_adaptor(Upstream* upstream,
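The auto_flush parameter was previously undocumented in both factory overloads. A usage sketch of the file-based overload with flushing enabled (illustrative only; the filename is hypothetical):

#include <rmm/mr/device/cuda_memory_resource.hpp>
#include <rmm/mr/device/logging_resource_adaptor.hpp>

#include <string>

void example()
{
  rmm::mr::cuda_memory_resource upstream;
  // auto_flush = true writes each (de)allocation record immediately, at a performance cost.
  auto logged = rmm::mr::make_logging_adaptor(&upstream, std::string{"rmm_log.csv"}, true);
  void* ptr = logged.allocate(2048);
  logged.deallocate(ptr, 2048);
}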
29 changes: 16 additions & 13 deletions include/rmm/mr/device/managed_memory_resource.hpp
@@ -55,18 +55,17 @@ class managed_memory_resource final : public device_memory_resource {

private:
/**
* @brief Allocates memory of size at least `bytes` using cudaMallocManaged.
* @brief Allocates memory of size at least \p bytes.
*
* The returned pointer has at least 256B alignment.
* The returned pointer will have at minimum 256 byte alignment.
*
* @note Stream argument is ignored
* The stream is ignored.
*
* @throws `rmm::bad_alloc` if the requested allocation could not be fulfilled
*
* @param bytes The size, in bytes, of the allocation
* @param bytes The size of the allocation
* @param stream This argument is ignored
* @return void* Pointer to the newly allocated memory
*/
void* do_allocate(std::size_t bytes, cuda_stream_view) override
void* do_allocate(std::size_t bytes, [[maybe_unused]] cuda_stream_view stream) override
{
// FIXME: Unlike cudaMalloc, cudaMallocManaged will throw an error for 0
// size allocations.
@@ -78,15 +77,18 @@
}

/**
* @brief Deallocate memory pointed to by `ptr`.
*
* @note Stream argument is ignored.
* @brief Deallocate memory pointed to by \p p.
*
* @throws Nothing.
* The stream is ignored.
*
* @param ptr Pointer to be deallocated
* @param bytes The size in bytes of the allocation. This must be equal to the
* value of `bytes` that was passed to the `allocate` call that returned `p`.
* @param stream This argument is ignored
*/
void do_deallocate(void* ptr, std::size_t, cuda_stream_view) override
void do_deallocate(void* ptr,
[[maybe_unused]] std::size_t bytes,
[[maybe_unused]] cuda_stream_view stream) override
{
RMM_ASSERT_CUDA_SUCCESS(cudaFree(ptr));
}
@@ -116,7 +118,8 @@
* @param stream to execute on
* @return std::pair containing free_size and total_size of memory
*/
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(cuda_stream_view) const override
[[nodiscard]] std::pair<std::size_t, std::size_t> do_get_mem_info(
[[maybe_unused]] cuda_stream_view stream) const override
{
std::size_t free_size{};
std::size_t total_size{};
4 changes: 2 additions & 2 deletions include/rmm/thrust_rmm_allocator.h
@@ -33,9 +33,9 @@ using exec_policy_t = std::unique_ptr<par_t, deleter_t>;
* @brief Returns a unique_ptr to a Thrust CUDA execution policy that uses RMM
* for temporary memory allocation.
*
* @Param stream The stream that the allocator will use
* @param stream The stream that the allocator will use
*
* @Returns A Thrust execution policy that will use RMM for temporary memory
* @return A Thrust execution policy that will use RMM for temporary memory
* allocation.
*/
[[deprecated("Use new exec_policy in rmm/exec_policy.hpp")]] inline exec_policy_t exec_policy(
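The deprecation attribute on this function points at the newer exec_policy in rmm/exec_policy.hpp. A sketch of the replacement usage (illustrative only; assumes the newer exec_policy accepts a cuda_stream_view):

#include <rmm/cuda_stream_view.hpp>
#include <rmm/exec_policy.hpp>

#include <thrust/device_vector.h>
#include <thrust/sort.h>

void example(rmm::cuda_stream_view stream)
{
  thrust::device_vector<int> vec(1000, 42);
  // Temporary storage for the sort is allocated through RMM on `stream`.
  thrust::sort(rmm::exec_policy(stream), vec.begin(), vec.end());
}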