
Commit

Changed ff_dim_t to use nonnegative_int, added relative_ff_dim_t that uses int
Victor Li committed Nov 27, 2024
1 parent 670fb62 commit 3cae680
Showing 67 changed files with 719 additions and 320 deletions.
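
For context, a minimal sketch of the types this commit revolves around, inferred from how the diff uses them (ff_dim_t{nonnegative_int{...}}, ff_dim.value.get_value()); the real definitions live elsewhere in the repository, and relative_ff_dim_t is sketched only from the commit message, so treat all three as assumptions rather than the actual declarations:

#include <cassert>

// Wrapper that enforces a value >= 0 at construction (assumed behavior).
struct nonnegative_int {
  explicit nonnegative_int(int v) : value_(v) {
    assert(v >= 0);
  }
  int get_value() const {
    return value_;
  }

private:
  int value_;
};

// FlexFlow dimension index; this commit changes .value from int to nonnegative_int.
struct ff_dim_t {
  nonnegative_int value;
};

// Newly added per the commit message: a dimension index that may be negative
// (e.g. -1 for the last dimension), hence a plain int (assumption).
struct relative_ff_dim_t {
  int value;
};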
2 changes: 1 addition & 1 deletion lib/kernels/src/legion_dim.cc
@@ -7,7 +7,7 @@ legion_dim_t add_to_legion_dim(legion_dim_t legion_dim, int value) {
}

legion_dim_t legion_dim_from_ff_dim(ff_dim_t ff_dim, int num_dimensions) {
- return legion_dim_t(num_dimensions - ff_dim.value - 1);
+ return legion_dim_t(num_dimensions - ff_dim.value.get_value() - 1);

}

} // namespace FlexFlow
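
As a quick sanity check of the reversal above (a worked example, not part of the commit): the formula num_dimensions - ff_dim.value.get_value() - 1 indexes the same dimension from the opposite end, so for a 4-dimensional tensor:

legion_dim_t d = legion_dim_from_ff_dim(ff_dim_t{nonnegative_int{1}}, 4);
// d == legion_dim_t(2); likewise ff_dim 0 -> legion_dim 3 and ff_dim 3 -> legion_dim 0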
2 changes: 1 addition & 1 deletion lib/kernels/test/src/test_concat_kernel.cc
@@ -7,7 +7,7 @@ TEST_SUITE(FF_TEST_SUITE) {
TEST_CASE("Test concat kernel forward and backward") {
size_t num_inputs = 3;
size_t size_per_input = 100;
- ff_dim_t concat_axis = ff_dim_t(0);
+ ff_dim_t concat_axis = ff_dim_t{nonnegative_int{0}};

ManagedPerDeviceFFHandle managed_handle{};
ManagedFFStream managed_stream{};
3 changes: 2 additions & 1 deletion lib/kernels/test/src/test_transpose_kernel.cc
@@ -7,7 +7,8 @@ TEST_SUITE(FF_TEST_SUITE) {
TEST_CASE("Test Transpose Kernel Operations") {
std::size_t num_dims = 2;

- std::vector<ff_dim_t> perm = {ff_dim_t(0), ff_dim_t(1)};
+ std::vector<ff_dim_t> perm = {ff_dim_t{nonnegative_int{0}},
+                               ff_dim_t{nonnegative_int{1}}};

ManagedPerDeviceFFHandle managed_handle{};
ManagedFFStream managed_stream{};
5 changes: 3 additions & 2 deletions lib/local-execution/src/legion_tensor_shape.cc
@@ -1,14 +1,15 @@
#include "local-execution/legion_tensor_shape.h"
#include "kernels/legion_dim.h"
#include "op-attrs/tensor_shape.h"

namespace FlexFlow {

legion_dim_t legion_dim_from_ff_dim(ff_dim_t ff_dim, size_t num_dims) {
- return legion_dim_t(num_dims - ff_dim.value - 1);
+ return legion_dim_t(num_dims - ff_dim.value.get_value() - 1);

}

legion_dim_t legion_dim_from_ff_dim(ff_dim_t ff_dim, TensorShape const &shape) {
- return legion_dim_t(num_dims(shape) - ff_dim.value - 1);
+ return legion_dim_from_ff_dim(ff_dim, num_dims(shape));

}

} // namespace FlexFlow
12 changes: 6 additions & 6 deletions lib/local-execution/src/ops/linear.cc
@@ -66,8 +66,8 @@ static DeviceSpecificDeviceStates
auto input = acc.get_tensor<Permissions::RO>(INPUT);
auto weight = acc.get_tensor<Permissions::RO>(WEIGHT);
auto output = acc.get_tensor<Permissions::WO>(OUTPUT);
- int out_dim = output.shape.at(ff_dim_t{0});
- int batch_size = output.shape.at(ff_dim_t{1});
+ int out_dim = output.shape.at(ff_dim_t{nonnegative_int{0}});
+ int batch_size = output.shape.at(ff_dim_t{nonnegative_int{1}});


float *one_ptr;

@@ -96,8 +96,8 @@ static std::optional<float> forward_task_impl(TaskArgumentAccessor const &acc) {
ProfilingSettings profiling = acc.get_argument<ProfilingSettings>(PROFILING);
auto attrs = acc.get_argument<LinearAttrs>(ATTRS);

- int in_dim = input.shape.at(ff_dim_t{0}) + 1;
- int out_dim = output.shape.at(ff_dim_t{0}) + 1;
+ int in_dim = input.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int out_dim = output.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;

int batch_size = output.shape.get_volume() / out_dim;

float const *bias_ptr = NULL;
@@ -140,8 +140,8 @@ static std::optional<float>
bias_ptr = bias.get_float_ptr();
}

- int in_dim = input.shape.at(ff_dim_t{0}) + 1;
- int out_dim = output.shape.at(ff_dim_t{0}) + 1;
+ int in_dim = input.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int out_dim = output.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;

int batch_size = output.shape.get_volume() / out_dim;

return profile(backward_kernel,
16 changes: 8 additions & 8 deletions lib/local-execution/src/ops/pool_2d.cc
@@ -30,14 +30,14 @@ static DeviceSpecificDeviceStates
auto input = acc.get_tensor<Permissions::RO>(INPUT);
auto output = acc.get_tensor<Permissions::WO>(OUTPUT);

- int input_w = input.shape.at(ff_dim_t(0)) + 1;
- int input_h = input.shape.at(ff_dim_t(1)) + 1;
- int input_c = input.shape.at(ff_dim_t(2)) + 1;
- int input_n = input.shape.at(ff_dim_t(3)) + 1;
- int output_w = output.shape.at(ff_dim_t(0)) + 1;
- int output_h = output.shape.at(ff_dim_t(1)) + 1;
- int output_c = output.shape.at(ff_dim_t(2)) + 1;
- int output_n = output.shape.at(ff_dim_t(3)) + 1;
+ int input_w = input.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int input_h = input.shape.at(ff_dim_t{nonnegative_int{1}}) + 1;
+ int input_c = input.shape.at(ff_dim_t{nonnegative_int{2}}) + 1;
+ int input_n = input.shape.at(ff_dim_t{nonnegative_int{3}}) + 1;
+ int output_w = output.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int output_h = output.shape.at(ff_dim_t{nonnegative_int{1}}) + 1;
+ int output_c = output.shape.at(ff_dim_t{nonnegative_int{2}}) + 1;
+ int output_n = output.shape.at(ff_dim_t{nonnegative_int{3}}) + 1;


printf("init pool (input): n(%d) c(%d) h(%d) "
"w(%d)\n",
14 changes: 7 additions & 7 deletions lib/local-execution/src/ops/reverse.cc
@@ -53,11 +53,11 @@ static std::optional<float> forward_task_impl(TaskArgumentAccessor const &acc) {
coord_t in_blk_size = 1, reverse_dim_size = 1, num_out_blks = 1;
for (int i = 0; i < output.shape.get_dim(); i++) {
if (i < axis.value) {
- in_blk_size *= output.shape.at(ff_dim_t(i));
+ in_blk_size *= output.shape.at(ff_dim_t{nonnegative_int{i}});

} else if (i == axis.value) {
- reverse_dim_size = output.shape.at(ff_dim_t(i));
+ reverse_dim_size = output.shape.at(ff_dim_t{nonnegative_int{i}});

} else {
- num_out_blks *= output.shape.at(ff_dim_t(i));
+ num_out_blks *= output.shape.at(ff_dim_t{nonnegative_int{i}});

}
}

@@ -79,15 +79,15 @@ static std::optional<float>
auto output_grad = acc.get_tensor_grad<Permissions::RO>(OUTPUT);
auto attrs = acc.get_argument<ReverseAttrs>(ATTRS);

- int axis = input_grad.shape.get_dim() - attrs.axis.value - 1;
+ int axis = input_grad.shape.get_dim() - attrs.axis.value.get_value() - 1;

coord_t in_blk_size = 1, reverse_dim_size = 1, num_out_blks = 1;
for (int i = 0; i < input_grad.shape.get_dim(); i++) {
if (i < axis) {
- in_blk_size *= input_grad.shape.at(ff_dim_t(i));
+ in_blk_size *= input_grad.shape.at(ff_dim_t{nonnegative_int{i}});

} else if (i == axis) {
- reverse_dim_size = input_grad.shape.at(ff_dim_t(i));
+ reverse_dim_size = input_grad.shape.at(ff_dim_t{nonnegative_int{i}});

} else {
- num_out_blks *= input_grad.shape.at(ff_dim_t(i));
+ num_out_blks *= input_grad.shape.at(ff_dim_t{nonnegative_int{i}});

}
}

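For reference, a sketch of the three-way block decomposition both loops above compute, with illustrative values (not from the source):

// Illustrative: a 3-D tensor with dims (n0, n1, n2) = (2, 3, 4) in ff ordering,
// reversed axis = 1. The loop partitions the dimensions around the axis:
//   in_blk_size      = n0 = 2   (product of dims before the axis)
//   reverse_dim_size = n1 = 3   (the axis being reversed)
//   num_out_blks     = n2 = 4   (product of dims after the axis)
// The kernel presumably reverses reverse_dim_size blocks of in_blk_size
// elements within each of the num_out_blks outer blocks.
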
9 changes: 7 additions & 2 deletions lib/local-execution/src/ops/softmax.cc
@@ -64,8 +64,8 @@ static DeviceSpecificDeviceStates
int output_c = output.shape.at(legion_dim_t(2));
int output_n = output.shape.at(legion_dim_t(3));

- SoftmaxPerDeviceState per_device_state = init_kernel(
-     handle, attrs.dim.value, output_n, output_c, output_h, output_w);
+ SoftmaxPerDeviceState per_device_state =
+     init_kernel(handle,
+                 attrs.dim.value.get_value(),
+                 output_n,
+                 output_c,
+                 output_h,
+                 output_w);

return DeviceSpecificDeviceStates{
DeviceSpecific<SoftmaxPerDeviceState>::create(per_device_state)};
13 changes: 6 additions & 7 deletions lib/local-execution/src/ops/split.cc
@@ -47,11 +47,11 @@ OpTaskInvocation backward(SplitAttrs const &attrs) {
void calc_block_size(coord_t &num_blocks,
coord_t &block_size,
ArrayShape const &array_shape,
- int axis) {
+ ff_dim_t axis) {
num_blocks = 1;
block_size = 1;
for (int d = 0; d < array_shape.num_elements(); d++) {
- if (d <= axis) {
+ if (d <= axis.value.get_value()) {
block_size *= array_shape.at(legion_dim_t(d));
} else {
num_blocks *= array_shape.at(legion_dim_t(d));
@@ -66,12 +66,12 @@ static std::optional<float> forward_task_impl(TaskArgumentAccessor const &acc) {
auto attrs = acc.get_argument<SplitAttrs>(ATTRS);

coord_t num_blocks, in_block_size, out_block_size[MAX_NUM_OUTPUTS];
- calc_block_size(num_blocks, in_block_size, input.shape, attrs.axis.value);
+ calc_block_size(num_blocks, in_block_size, input.shape, attrs.axis);

for (int i = 0; i < attrs.splits.size(); i++) {
coord_t out_num_blocks;
calc_block_size(
out_num_blocks, out_block_size[i], output.shape, attrs.axis.value);
out_num_blocks, out_block_size[i], output.shape, attrs.axis);

}
float *output_float_ptr = output.get_float_ptr();
return profile(forward_kernel,
@@ -94,12 +94,11 @@ static std::optional<float>
auto attrs = acc.get_argument<SplitAttrs>(ATTRS);

coord_t num_blocks, in_block_size, out_block_size[MAX_NUM_OUTPUTS];
- calc_block_size(
-     num_blocks, in_block_size, input_grad.shape, attrs.axis.value);
+ calc_block_size(num_blocks, in_block_size, input_grad.shape, attrs.axis);

for (int i = 0; i < attrs.splits.size(); i++) {
coord_t out_num_blocks;
calc_block_size(
out_num_blocks, out_block_size[i], output_grad.shape, attrs.axis.value);
out_num_blocks, out_block_size[i], output_grad.shape, attrs.axis);

}
float const *output_grad_ptr = output_grad.get_float_ptr();
return profile(backward_kernel,
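With the new signature, call sites hand calc_block_size the wrapped ff_dim_t directly, so the .value.get_value() unwrapping happens in exactly one place. A hedged usage sketch (variable names illustrative, not from the source):

coord_t num_blocks, block_size;
ff_dim_t axis = ff_dim_t{nonnegative_int{1}};
calc_block_size(num_blocks, block_size, input.shape, axis); // unwrapped internally via axis.value.get_value()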
