
Commit

Merge branch 'high-lvl-comp-api' of https://github.com/vuule/cudf into high-lvl-comp-api
vuule committed Jan 11, 2025
2 parents c72b66d + b81c7f5 commit 559ca43
Showing 64 changed files with 672 additions and 597 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/build.yaml
@@ -62,7 +62,7 @@ jobs:
arch: "amd64"
branch: ${{ inputs.branch }}
build_type: ${{ inputs.build_type || 'branch' }}
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
date: ${{ inputs.date }}
node_type: "gpu-v100-latest-1"
run_script: "ci/build_docs.sh"
6 changes: 3 additions & 3 deletions .github/workflows/pr.yaml
@@ -186,7 +186,7 @@ jobs:
build_type: pull-request
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
run_script: "ci/test_java.sh"
static-configure:
needs: checks
@@ -207,7 +207,7 @@ jobs:
build_type: pull-request
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
run_script: "ci/test_notebooks.sh"
docs-build:
needs: conda-python-build
@@ -217,7 +217,7 @@ jobs:
build_type: pull-request
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
run_script: "ci/build_docs.sh"
wheel-build-libcudf:
needs: checks
6 changes: 3 additions & 3 deletions .github/workflows/test.yaml
@@ -41,7 +41,7 @@ jobs:
sha: ${{ inputs.sha }}
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
run_script: "ci/test_cpp_memcheck.sh"
static-configure:
secrets: inherit
@@ -94,7 +94,7 @@ jobs:
sha: ${{ inputs.sha }}
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
run_script: "ci/test_java.sh"
conda-notebook-tests:
secrets: inherit
@@ -106,7 +106,7 @@ jobs:
sha: ${{ inputs.sha }}
node_type: "gpu-v100-latest-1"
arch: "amd64"
container_image: "rapidsai/ci-conda:cuda12.5.1-ubuntu22.04-py3.11"
container_image: "rapidsai/ci-conda:latest"
run_script: "ci/test_notebooks.sh"
wheel-tests-cudf:
secrets: inherit
4 changes: 2 additions & 2 deletions conda/environments/all_cuda-118_arch-x86_64.yaml
@@ -55,7 +55,7 @@ dependencies:
- nbsphinx
- ninja
- notebook
-- numba-cuda>=0.0.13,<0.0.18
+- numba-cuda>=0.2.0,<0.3.0
- numpy>=1.23,<3.0a0
- numpydoc
- nvcc_linux-64=11.8
@@ -66,7 +66,7 @@ dependencies:
- pandas
- pandas>=2.0,<2.2.4dev0
- pandoc
-- polars>=1.11,<1.15
+- polars>=1.11,<1.18
- pre-commit
- ptxcompiler
- pyarrow>=14.0.0,<19.0.0a0
4 changes: 2 additions & 2 deletions conda/environments/all_cuda-125_arch-x86_64.yaml
@@ -54,7 +54,7 @@ dependencies:
- nbsphinx
- ninja
- notebook
-- numba-cuda>=0.0.13,<0.0.18
+- numba-cuda>=0.2.0,<0.3.0
- numpy>=1.23,<3.0a0
- numpydoc
- nvcomp==4.1.0.6
@@ -64,7 +64,7 @@ dependencies:
- pandas
- pandas>=2.0,<2.2.4dev0
- pandoc
-- polars>=1.11,<1.15
+- polars>=1.11,<1.18
- pre-commit
- pyarrow>=14.0.0,<19.0.0a0
- pydata-sphinx-theme!=0.14.2
4 changes: 2 additions & 2 deletions conda/recipes/cudf-polars/meta.yaml
@@ -1,4 +1,4 @@
-# Copyright (c) 2024, NVIDIA CORPORATION.
+# Copyright (c) 2024-2025, NVIDIA CORPORATION.

{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
@@ -43,7 +43,7 @@ requirements:
run:
- python
- pylibcudf ={{ version }}
-- polars >=1.11,<1.15
+- polars >=1.11,<1.18
- {{ pin_compatible('cuda-version', max_pin='x', min_pin='x') }}

test:
4 changes: 2 additions & 2 deletions conda/recipes/cudf/meta.yaml
@@ -1,4 +1,4 @@
-# Copyright (c) 2018-2024, NVIDIA CORPORATION.
+# Copyright (c) 2018-2025, NVIDIA CORPORATION.

{% set version = environ['RAPIDS_PACKAGE_VERSION'].lstrip('v') %}
{% set minor_version = version.split('.')[0] + '.' + version.split('.')[1] %}
@@ -80,7 +80,7 @@ requirements:
- typing_extensions >=4.0.0
- pandas >=2.0,<2.2.4dev0
- cupy >=12.0.0
-- numba-cuda >=0.0.13,<0.0.18
+- numba-cuda >=0.2.0,<0.3.0
- numpy >=1.23,<3.0a0
- pyarrow>=14.0.0,<18.0.0a0
- libcudf ={{ version }}
12 changes: 9 additions & 3 deletions cpp/include/cudf/detail/utilities/integer_utils.hpp
@@ -1,7 +1,7 @@
/*
* Copyright 2019 BlazingDB, Inc.
* Copyright 2019 Eyal Rozenberg <[email protected]>
-* Copyright (c) 2020-2024, NVIDIA CORPORATION.
+* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -23,6 +23,8 @@
*/

#include <cudf/fixed_point/temporary.hpp>
+#include <cudf/types.hpp>
+#include <cudf/utilities/error.hpp>

#include <cmath>
#include <cstdlib>
@@ -44,13 +46,17 @@ namespace util {
* `modulus` is positive. The safety is in regard to rollover.
*/
template <typename S>
-constexpr S round_up_safe(S number_to_round, S modulus)
+CUDF_HOST_DEVICE constexpr S round_up_safe(S number_to_round, S modulus)
{
auto remainder = number_to_round % modulus;
if (remainder == 0) { return number_to_round; }
auto rounded_up = number_to_round - remainder + modulus;
if (rounded_up < number_to_round) {
-throw std::invalid_argument("Attempt to round up beyond the type's maximum value");
+#ifndef __CUDA_ARCH__
+CUDF_FAIL("Attempt to round up beyond the type's maximum value", cudf::data_type_error);
+#else
+CUDF_UNREACHABLE("Attempt to round up beyond the type's maximum value");
+#endif
}
return rounded_up;
}
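The hunk above changes only how round_up_safe reports overflow: host builds now raise cudf::data_type_error via CUDF_FAIL, while device code reaches CUDF_UNREACHABLE, since exceptions cannot be thrown from device code. The rounding arithmetic itself is unchanged. Below is a minimal standalone sketch of that arithmetic in plain C++, without the cudf headers or macros; round_up_safe_sketch is an illustrative stand-in, not the library function.

// Rounds number_to_round up to the next multiple of modulus and detects
// rollover past the type's maximum value, mirroring round_up_safe above.
#include <cstdint>
#include <iostream>
#include <stdexcept>

template <typename S>
S round_up_safe_sketch(S number_to_round, S modulus)
{
  auto remainder = number_to_round % modulus;
  if (remainder == 0) { return number_to_round; }
  auto rounded_up = number_to_round - remainder + modulus;
  // Wrap-around yields a value smaller than the input; this is the condition the
  // library code turns into CUDF_FAIL on the host and CUDF_UNREACHABLE on device.
  if (rounded_up < number_to_round) {
    throw std::invalid_argument("Attempt to round up beyond the type's maximum value");
  }
  return rounded_up;
}

int main()
{
  std::cout << round_up_safe_sketch<std::uint32_t>(1000u, 256u) << '\n';  // 1024
  std::cout << round_up_safe_sketch<std::uint32_t>(1024u, 256u) << '\n';  // already a multiple: 1024
  try {
    round_up_safe_sketch<std::uint32_t>(4294967290u, 16u);  // next multiple exceeds UINT32_MAX
  } catch (std::invalid_argument const& e) {
    std::cout << "rollover detected: " << e.what() << '\n';
  }
  return 0;
}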
40 changes: 24 additions & 16 deletions cpp/include/cudf/utilities/span.hpp
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2020-2024, NVIDIA CORPORATION.
+* Copyright (c) 2020-2025, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -197,11 +197,16 @@ struct host_span : public cudf::detail::span_base<T, Extent, host_span<T, Extent

constexpr host_span() noexcept : base() {} // required to compile on centos

-/// Constructor from pointer and size
-/// @param data Pointer to the first element in the span
-/// @param size The number of elements in the span
-/// @param is_device_accessible Whether the data is device accessible (e.g. pinned memory)
-constexpr host_span(T* data, std::size_t size, bool is_device_accessible)
+/**
+* @brief Constructor from pointer and size
+*
+* @note This needs to be host-device, as it's used by a host-device function in base_2dspan
+*
+* @param data Pointer to the first element in the span
+* @param size The number of elements in the span
+* @param is_device_accessible Whether the data is device accessible (e.g. pinned memory)
+*/
+CUDF_HOST_DEVICE constexpr host_span(T* data, std::size_t size, bool is_device_accessible)
: base(data, size), _is_device_accessible{is_device_accessible}
{
}
@@ -311,8 +316,8 @@ struct host_span : public cudf::detail::span_base<T, Extent, host_span<T, Extent
* @param count The number of elements in the subspan
* @return A subspan of the sequence, of requested count and offset
*/
-[[nodiscard]] constexpr host_span subspan(typename base::size_type offset,
-typename base::size_type count) const noexcept
+[[nodiscard]] CUDF_HOST_DEVICE constexpr host_span subspan(
+typename base::size_type offset, typename base::size_type count) const noexcept
{
return host_span{this->data() + offset, count, _is_device_accessible};
}
@@ -434,8 +439,8 @@ struct device_span : public cudf::detail::span_base<T, Extent, device_span<T, Ex
* @param count The number of elements in the subspan
* @return A subspan of the sequence, of requested count and offset
*/
-[[nodiscard]] constexpr device_span subspan(typename base::size_type offset,
-typename base::size_type count) const noexcept
+[[nodiscard]] CUDF_HOST_DEVICE constexpr device_span subspan(
+typename base::size_type offset, typename base::size_type count) const noexcept
{
return device_span{this->data() + offset, count};
}
@@ -475,28 +480,28 @@ class base_2dspan {
*
* @return A pointer to the first element of the span
*/
-[[nodiscard]] constexpr auto data() const noexcept { return _flat.data(); }
+[[nodiscard]] CUDF_HOST_DEVICE constexpr auto data() const noexcept { return _flat.data(); }

/**
* @brief Returns the size in the span as pair.
*
* @return pair representing rows and columns size of the span
*/
-[[nodiscard]] constexpr auto size() const noexcept { return _size; }
+[[nodiscard]] CUDF_HOST_DEVICE constexpr auto size() const noexcept { return _size; }

/**
* @brief Returns the number of elements in the span.
*
* @return Number of elements in the span
*/
-[[nodiscard]] constexpr auto count() const noexcept { return _flat.size(); }
+[[nodiscard]] CUDF_HOST_DEVICE constexpr auto count() const noexcept { return _flat.size(); }

/**
* @brief Checks if the span is empty.
*
* @return True if the span is empty, false otherwise
*/
-[[nodiscard]] constexpr bool is_empty() const noexcept { return count() == 0; }
+[[nodiscard]] CUDF_HOST_DEVICE constexpr bool is_empty() const noexcept { return count() == 0; }

/**
* @brief Returns a reference to the row-th element of the sequence.
Expand All @@ -507,7 +512,7 @@ class base_2dspan {
* @param row the index of the element to access
* @return A reference to the row-th element of the sequence, i.e., `data()[row]`
*/
-constexpr RowType<T, dynamic_extent> operator[](size_t row) const
+CUDF_HOST_DEVICE constexpr RowType<T, dynamic_extent> operator[](size_t row) const
{
return _flat.subspan(row * _size.second, _size.second);
}
@@ -517,7 +522,10 @@
*
* @return A flattened span of the 2D span
*/
-[[nodiscard]] constexpr RowType<T, dynamic_extent> flat_view() const { return _flat; }
+[[nodiscard]] CUDF_HOST_DEVICE constexpr RowType<T, dynamic_extent> flat_view() const
+{
+return _flat;
+}

/**
* @brief Construct a 2D span from another 2D span of convertible type
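The span.hpp changes above add CUDF_HOST_DEVICE to existing accessors; the underlying layout is unchanged: base_2dspan is a 2D view over flattened row-major storage, and operator[] returns row r of an R x C span as the length-C subspan starting at offset r * C. The following standalone sketch illustrates that indexing scheme with std::span (C++20); mini_2dspan and its members are illustrative stand-ins, not cudf's classes.

// 2D view over a flat row-major buffer: row r of an R x C span is the
// subspan of length C starting at r * C, matching base_2dspan::operator[].
#include <cstddef>
#include <iostream>
#include <span>
#include <utility>
#include <vector>

template <typename T>
class mini_2dspan {
 public:
  mini_2dspan(T* data, std::size_t rows, std::size_t cols)
    : _flat{data, rows * cols}, _size{rows, cols}
  {
  }

  // Row view, analogous to base_2dspan::operator[].
  std::span<T> operator[](std::size_t row) const
  {
    return _flat.subspan(row * _size.second, _size.second);
  }

  std::pair<std::size_t, std::size_t> size() const { return _size; }  // {rows, cols}
  std::size_t count() const { return _flat.size(); }                  // rows * cols
  bool is_empty() const { return count() == 0; }
  std::span<T> flat_view() const { return _flat; }

 private:
  std::span<T> _flat;                         // flattened row-major storage
  std::pair<std::size_t, std::size_t> _size;  // {rows, cols}
};

int main()
{
  std::vector<int> data{0, 1, 2, 3, 4, 5};  // 2 rows x 3 columns, row-major
  mini_2dspan<int> view(data.data(), 2, 3);

  std::cout << view[1][0] << '\n';    // row 1, column 0 -> 3
  std::cout << view.count() << '\n';  // 6
  return 0;
}

Compile with -std=c++20 or newer; unlike std::span, the real cudf spans are host/device-aware types, which is what the CUDF_HOST_DEVICE annotations in this diff enable.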
(Diffs for the remaining changed files in this commit are not shown here.)
