Commit

fix: kernels test building errors
chenzhuofu committed Dec 16, 2024
1 parent d416c27 commit a8ce685
Showing 3 changed files with 45 additions and 18 deletions.
lib/kernels/test/src/test_concat_kernel.cc (14 additions, 16 deletions)
@@ -20,17 +20,16 @@ TEST_SUITE(FF_TEST_SUITE) {
     Allocator allocator = create_local_cuda_memory_allocator();
 
     SUBCASE("forward_kernel") {
-      std::vector<GenericTensorAccessorR> input_accessors(num_inputs);
-      generate_n(input_accessors.begin(), num_inputs, [&]() {
-        return read_only_accessor_from_write_accessor(
-            create_random_filled_accessor_w(input_shape, allocator));
-      });
+      std::vector<GenericTensorAccessorR> input_accessors =
+          repeat<GenericTensorAccessorR>(num_inputs, [&]() {
+            return read_only_accessor_from_write_accessor(
+                create_random_filled_accessor_w(input_shape, allocator));
+          });
       GenericTensorAccessorW output_accessor =
           allocator.allocate_tensor(output_shape);
 
       Kernels::Concat::forward_kernel(managed_stream.raw_stream(),
-                                      output_accessor,
-                                      input_accessors,
+                                      output_accessor, input_accessors,
                                       concat_axis);
 
       std::vector<float> host_output_data =
@@ -44,14 +43,13 @@
       GenericTensorAccessorR output_grad_accessor =
          read_only_accessor_from_write_accessor(
              create_random_filled_accessor_w(output_shape, allocator));
-      std::vector<GenericTensorAccessorW> input_grad_accessors(num_inputs);
-      generate_n(input_grad_accessors.begin(), num_inputs, [&]() {
-        return allocator.allocate_tensor(input_shape);
-
-      Kernels::Concat::backward_kernel(managed_stream.raw_stream(),
-                                       output_grad_accessor,
-                                       input_grad_accessors,
-                                       concat_axis);
-    }
+      std::vector<GenericTensorAccessorW> input_grad_accessors =
+          repeat<GenericTensorAccessorW>(num_inputs, [&]() {
+            return allocator.allocate_tensor(input_shape);
+          });
+      Kernels::Concat::backward_kernel(managed_stream.raw_stream(),
+                                       output_grad_accessor,
+                                       input_grad_accessors, concat_axis);
+    }
   }
 }
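
For context, the switch from the `std::vector(n)` + `generate_n` pattern to the `repeat` helper (added in test_utils.h below) matters when the element type is not default-constructible, which is a plausible source of the build errors this commit fixes. The sketch below is a standalone illustration under that assumption; the `Widget` type and its values are hypothetical, not from the repository.

```cpp
// Standalone sketch (hypothetical Widget type): why vector(n) + std::generate_n
// can fail to compile, and how a push_back-based repeat helper avoids it.
#include <cstddef>
#include <iostream>
#include <vector>

struct Widget {
  explicit Widget(int v) : value(v) {}  // no default constructor
  int value;
};

template <typename T, typename Func>
std::vector<T> repeat(std::size_t n, Func &&func) {
  std::vector<T> result;
  for (std::size_t i = 0; i < n; ++i) {
    result.push_back(func());  // only needs move/copy construction of T
  }
  return result;
}

int main() {
  // std::vector<Widget> widgets(3);            // error: Widget has no default ctor,
  // std::generate_n(widgets.begin(), 3, ...);  // so this pattern cannot even start

  std::vector<Widget> widgets =
      repeat<Widget>(3, [] { return Widget{42}; });  // builds each element on demand
  std::cout << widgets.size() << "\n";               // prints 3
  return 0;
}
```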
lib/kernels/test/src/test_dropout.cc (2 additions, 2 deletions)
@@ -24,8 +24,8 @@ TEST_SUITE(FF_TEST_SUITE) {
     DropoutPerDeviceState state = Kernels::Dropout::init_kernel(
         managed_handle.raw_handle(), dropout_rate, seed, shape, allocator);
 
-    auto get_zero_count = [](std::vector<float> const &data) {
-      return count(data.begin(), data.end(), [](float x) { return x == 0.0f; });
+    auto get_zero_count = [](const std::vector<float>& data) {
+      return std::count_if(data.begin(), data.end(), [](float x) { return x == 0.0f; });
     };
 
     SUBCASE("forward_kernel") {
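
The dropout fix swaps an unqualified `count` call for `std::count_if`: `std::count` compares each element against a value with `==`, so handing it a lambda does not compile, while `std::count_if` takes a predicate. A standalone sketch of the distinction (the sample data is made up):

```cpp
// Standalone sketch: std::count matches elements against a value, while
// std::count_if applies a predicate, which is what the lambda requires.
#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  std::vector<float> data = {0.0f, 1.5f, 0.0f, 2.0f};

  // auto n = std::count(data.begin(), data.end(),
  //                     [](float x) { return x == 0.0f; });  // ill-formed: no float == lambda

  auto zeros_by_value = std::count(data.begin(), data.end(), 0.0f);  // compares with ==
  auto zeros_by_pred = std::count_if(data.begin(), data.end(),
                                     [](float x) { return x == 0.0f; });

  std::cout << zeros_by_value << " " << zeros_by_pred << "\n";  // prints: 2 2
  return 0;
}
```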
lib/kernels/test/src/test_utils.h (29 additions, 0 deletions)
@@ -5,6 +5,10 @@
 #include "kernels/local_cuda_allocator.h"
 #include "kernels/managed_ff_stream.h"
 #include "kernels/managed_per_device_ff_handle.h"
+#include <doctest/doctest.h>
+#include <vector>
+#include <string>
+#include <sstream>
 #include <random>
 
 using namespace FlexFlow;
@@ -48,4 +52,29 @@ bool contains_non_zero(std::vector<T> &data) {
       data.begin(), data.end(), [](T const &val) { return val == 0; });
 }
 
+template <typename T, typename Func>
+std::vector<T> repeat(std::size_t n, Func&& func) {
+  std::vector<T> result;
+  // result.reserve(n); // Sometimes we don't have default constructor for T
+  for (std::size_t i = 0; i < n; ++i) {
+    result.push_back(func());
+  }
+  return result;
+}
+
+// Specialize doctest's StringMaker for std::vector<float>
+template <>
+struct doctest::StringMaker<std::vector<float>> {
+  static doctest::String convert(const std::vector<float>& vec) {
+    std::ostringstream oss;
+    for (size_t i = 0; i < vec.size(); ++i) {
+      oss << vec[i];
+      if (i != vec.size() - 1) {
+        oss << ", ";
+      }
+    }
+    return doctest::String(("[" + oss.str() + "]").c_str());
+  }
+};
+
 #endif
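
The `StringMaker` specialization only affects how doctest prints `std::vector<float>` in assertion messages; by default an unknown type is rendered as "{?}". Below is a minimal standalone sketch of the effect, reusing the specialization from the commit in a throwaway test case (the test name and data are illustrative only):

```cpp
// Standalone sketch: with StringMaker<std::vector<float>> specialized, a failing
// comparison prints the vectors as "[1, 2, 3]" instead of the default "{?}".
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <sstream>
#include <vector>

template <>
struct doctest::StringMaker<std::vector<float>> {
  static doctest::String convert(const std::vector<float> &vec) {
    std::ostringstream oss;
    for (size_t i = 0; i < vec.size(); ++i) {
      oss << vec[i];
      if (i != vec.size() - 1) {
        oss << ", ";
      }
    }
    return doctest::String(("[" + oss.str() + "]").c_str());
  }
};

TEST_CASE("vector_float_reporting") {
  std::vector<float> expected = {1.0f, 2.0f, 3.0f};
  std::vector<float> actual = {1.0f, 2.0f, 4.0f};
  // On failure doctest now reports e.g. "[1, 2, 4]" vs "[1, 2, 3]".
  CHECK(actual == expected);
}
```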
