diff --git a/lib/kernels/test/src/test_concat_kernel.cc b/lib/kernels/test/src/test_concat_kernel.cc
index bf2a521b4e..eb6791fc14 100644
--- a/lib/kernels/test/src/test_concat_kernel.cc
+++ b/lib/kernels/test/src/test_concat_kernel.cc
@@ -20,11 +20,11 @@ TEST_SUITE(FF_TEST_SUITE) {
     Allocator allocator = create_local_cuda_memory_allocator();
 
     SUBCASE("forward_kernel") {
-      std::vector<GenericTensorAccessorR> input_accessors =
-          repeat(num_inputs, [&]() {
-            return read_only_accessor_from_write_accessor(
-                create_random_filled_accessor_w(input_shape, allocator));
-          });
+      std::vector<GenericTensorAccessorR> input_accessors(num_inputs);
+      std::generate_n(input_accessors.begin(), num_inputs, [&]() {
+        return read_only_accessor_from_write_accessor(
+            create_random_filled_accessor_w(input_shape, allocator));
+      });
       GenericTensorAccessorW output_accessor =
           allocator.allocate_tensor(output_shape);
 
@@ -44,13 +44,14 @@ TEST_SUITE(FF_TEST_SUITE) {
       GenericTensorAccessorR output_grad_accessor =
           read_only_accessor_from_write_accessor(
              create_random_filled_accessor_w(output_shape, allocator));
-      std::vector<GenericTensorAccessorW> input_grad_accessors = repeat(
-          num_inputs, [&]() { return allocator.allocate_tensor(input_shape); });
-
-      Kernels::Concat::backward_kernel(managed_stream.raw_stream(),
-                                       output_grad_accessor,
-                                       input_grad_accessors,
-                                       concat_axis);
+      std::vector<GenericTensorAccessorW> input_grad_accessors(num_inputs);
+      std::generate_n(input_grad_accessors.begin(), num_inputs, [&]() {
+        return allocator.allocate_tensor(input_shape);
+      });
+      Kernels::Concat::backward_kernel(managed_stream.raw_stream(),
+                                       output_grad_accessor,
+                                       input_grad_accessors,
+                                       concat_axis);
     }
   }
 }
diff --git a/lib/kernels/test/src/test_dropout.cc b/lib/kernels/test/src/test_dropout.cc
index 81f3c7183a..8a7c21c0fb 100644
--- a/lib/kernels/test/src/test_dropout.cc
+++ b/lib/kernels/test/src/test_dropout.cc
@@ -25,7 +25,7 @@ TEST_SUITE(FF_TEST_SUITE) {
         managed_handle.raw_handle(), dropout_rate, seed, shape, allocator);
 
     auto get_zero_count = [](std::vector<float> const &data) {
-      return count(data, [](float x) { return x == 0.0f; });
+      return std::count_if(data.begin(), data.end(), [](float x) { return x == 0.0f; });
     };
 
     SUBCASE("forward_kernel") {
diff --git a/lib/kernels/test/src/test_split_kernel.cc b/lib/kernels/test/src/test_split_kernel.cc
index 7cc2b28c9e..f2346c9244 100644
--- a/lib/kernels/test/src/test_split_kernel.cc
+++ b/lib/kernels/test/src/test_split_kernel.cc
@@ -23,7 +23,8 @@ TEST_SUITE(FF_TEST_SUITE) {
     GenericTensorAccessorW input_accessor =
         create_random_filled_accessor_w(input_shape, allocator);
 
-    std::vector<float *> output_ptrs = repeat(num_outputs, [&]() {
+    std::vector<float *> output_ptrs(num_outputs);
+    std::generate_n(output_ptrs.begin(), num_outputs, [&]() {
       GenericTensorAccessorW output_accessor =
           allocator.allocate_tensor(output_shape);
       return output_accessor.get_float_ptr();
diff --git a/lib/kernels/test/src/test_utils.h b/lib/kernels/test/src/test_utils.h
index abce3fd444..264d66c08a 100644
--- a/lib/kernels/test/src/test_utils.h
+++ b/lib/kernels/test/src/test_utils.h
@@ -7,6 +7,8 @@
 #include "kernels/managed_per_device_ff_handle.h"
 #include <vector>
+using namespace FlexFlow;
+
 GenericTensorAccessorW
     create_random_filled_accessor_w(TensorShape const &shape,
                                     Allocator &allocator,
                                     bool cpu_fill = false);
@@ -42,7 +44,8 @@ std::vector<T> load_data_to_host_from_device(GenericTensorAccessorR accessor) {
 
 template <typename T>
 bool contains_non_zero(std::vector<T> &data) {
-  return !all_of(data, [](T const &val) { return val == 0; });
+  return !std::all_of(
+      data.begin(), data.end(), [](T const &val) { return val == 0; });
 }
 
 #endif
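
Note on the generate_n pattern (reviewer sketch, not part of the patch): filling a pre-sized vector through std::generate_n over begin() requires the element type to be default-constructible, which GenericTensorAccessorR/W may not be. If that turns out to be the case, reserving capacity and writing through std::back_inserter achieves the same result without default construction. The sketch below uses a hypothetical Accessor stand-in type and only the standard library:

    #include <algorithm>
    #include <iterator>
    #include <vector>

    // Hypothetical stand-in for an accessor type with no default constructor.
    struct Accessor {
      explicit Accessor(int id) : id(id) {}
      int id;
    };

    std::vector<Accessor> make_accessors(int num_inputs) {
      std::vector<Accessor> accessors;
      accessors.reserve(num_inputs); // one allocation up front
      std::generate_n(std::back_inserter(accessors),
                      num_inputs,
                      [i = 0]() mutable { return Accessor(i++); });
      return accessors;
    }

Either form leaves the kernel call sites in the tests unchanged; only how the vector is populated differs.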