
Commit

Changed ff_dim_t to use nonnegative_int, added relative_ff_dim_t that uses int
Victor Li committed Nov 15, 2024
1 parent 670fb62 commit 35bd5d9
Showing 58 changed files with 587 additions and 230 deletions.
2 changes: 1 addition & 1 deletion lib/kernels/src/legion_dim.cc
@@ -7,7 +7,7 @@ legion_dim_t add_to_legion_dim(legion_dim_t legion_dim, int value) {
}

legion_dim_t legion_dim_from_ff_dim(ff_dim_t ff_dim, int num_dimensions) {
- return legion_dim_t(num_dimensions - ff_dim.value - 1);
+ return legion_dim_t(num_dimensions - ff_dim.value.get_value() - 1);

}

} // namespace FlexFlow
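The arithmetic above is unchanged; the edit only unwraps the nonnegative_int via get_value() before subtracting. As a refresher on what the formula computes, here is a standalone sketch with the FlexFlow wrapper types replaced by plain ints (hypothetical stand-ins, not the library's API): an ff_dim index and the corresponding legion_dim index mirror each other within the dimension count.

```cpp
#include <cassert>

// Plain-int sketch of the index reversal that legion_dim_from_ff_dim encodes.
// The real code wraps these ints in ff_dim_t / legion_dim_t.
int legion_dim_from_ff_dim(int ff_dim, int num_dimensions) {
  return num_dimensions - ff_dim - 1;
}

int main() {
  // For a 4-D tensor, index 0 on one side maps to index 3 on the other.
  assert(legion_dim_from_ff_dim(0, 4) == 3);
  assert(legion_dim_from_ff_dim(3, 4) == 0);
  return 0;
}
```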
2 changes: 1 addition & 1 deletion lib/kernels/test/src/test_concat_kernel.cc
@@ -7,7 +7,7 @@ TEST_SUITE(FF_TEST_SUITE) {
TEST_CASE("Test concat kernel forward and backward") {
size_t num_inputs = 3;
size_t size_per_input = 100;
- ff_dim_t concat_axis = ff_dim_t(0);
+ ff_dim_t concat_axis = ff_dim_t{nonnegative_int{0}};

ManagedPerDeviceFFHandle managed_handle{};
ManagedFFStream managed_stream{};
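The pattern above repeats throughout the commit: every raw index such as ff_dim_t(0) becomes ff_dim_t{nonnegative_int{0}}. nonnegative_int itself is defined in the utils library and is not part of this diff; a minimal sketch of the behavior the call sites appear to rely on (rejecting negative values at construction and exposing the raw int through get_value()) might look like the following hypothetical stand-in:

```cpp
#include <stdexcept>

// Hypothetical stand-in for nonnegative_int, shown only to illustrate the
// construction/get_value() pattern used at the call sites in this commit.
class nonnegative_int {
public:
  explicit nonnegative_int(int value) : value_(value) {
    if (value < 0) {
      throw std::invalid_argument("nonnegative_int requires value >= 0");
    }
  }

  int get_value() const {
    return value_;
  }

private:
  int value_;
};

int main() {
  nonnegative_int zero{0};
  return zero.get_value(); // 0
}
```

With a wrapper along these lines, a negative dimension index fails loudly at construction instead of silently indexing out of range later.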
3 changes: 2 additions & 1 deletion lib/kernels/test/src/test_transpose_kernel.cc
@@ -7,7 +7,8 @@ TEST_SUITE(FF_TEST_SUITE) {
TEST_CASE("Test Transpose Kernel Operations") {
std::size_t num_dims = 2;

- std::vector<ff_dim_t> perm = {ff_dim_t(0), ff_dim_t(1)};
+ std::vector<ff_dim_t> perm = {ff_dim_t{nonnegative_int{0}},
+                               ff_dim_t{nonnegative_int{0}}};

ManagedPerDeviceFFHandle managed_handle{};
ManagedFFStream managed_stream{};
4 changes: 2 additions & 2 deletions lib/local-execution/src/legion_tensor_shape.cc
@@ -4,11 +4,11 @@
namespace FlexFlow {

legion_dim_t legion_dim_from_ff_dim(ff_dim_t ff_dim, size_t num_dims) {
- return legion_dim_t(num_dims - ff_dim.value - 1);
+ return legion_dim_t(num_dims - ff_dim.value.get_value() - 1);

}

legion_dim_t legion_dim_from_ff_dim(ff_dim_t ff_dim, TensorShape const &shape) {
- return legion_dim_t(num_dims(shape) - ff_dim.value - 1);
+ return legion_dim_t(num_dims(shape) - ff_dim.value.get_value() - 1);

}

} // namespace FlexFlow
12 changes: 6 additions & 6 deletions lib/local-execution/src/ops/linear.cc
@@ -66,8 +66,8 @@ static DeviceSpecificDeviceStates
auto input = acc.get_tensor<Permissions::RO>(INPUT);
auto weight = acc.get_tensor<Permissions::RO>(WEIGHT);
auto output = acc.get_tensor<Permissions::WO>(OUTPUT);
- int out_dim = output.shape.at(ff_dim_t{0});
- int batch_size = output.shape.at(ff_dim_t{1});
+ int out_dim = output.shape.at(ff_dim_t{nonnegative_int{0}});
+ int batch_size = output.shape.at(ff_dim_t{nonnegative_int{1}});


float *one_ptr;

@@ -96,8 +96,8 @@ static std::optional<float> forward_task_impl(TaskArgumentAccessor const &acc) {
ProfilingSettings profiling = acc.get_argument<ProfilingSettings>(PROFILING);
auto attrs = acc.get_argument<LinearAttrs>(ATTRS);

- int in_dim = input.shape.at(ff_dim_t{0}) + 1;
- int out_dim = output.shape.at(ff_dim_t{0}) + 1;
+ int in_dim = input.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int out_dim = output.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;

int batch_size = output.shape.get_volume() / out_dim;

float const *bias_ptr = NULL;
@@ -140,8 +140,8 @@ static std::optional<float>
bias_ptr = bias.get_float_ptr();
}

- int in_dim = input.shape.at(ff_dim_t{0}) + 1;
- int out_dim = output.shape.at(ff_dim_t{0}) + 1;
+ int in_dim = input.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int out_dim = output.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;

int batch_size = output.shape.get_volume() / out_dim;

return profile(backward_kernel,
16 changes: 8 additions & 8 deletions lib/local-execution/src/ops/pool_2d.cc
@@ -30,14 +30,14 @@ static DeviceSpecificDeviceStates
auto input = acc.get_tensor<Permissions::RO>(INPUT);
auto output = acc.get_tensor<Permissions::WO>(OUTPUT);

- int input_w = input.shape.at(ff_dim_t(0)) + 1;
- int input_h = input.shape.at(ff_dim_t(1)) + 1;
- int input_c = input.shape.at(ff_dim_t(2)) + 1;
- int input_n = input.shape.at(ff_dim_t(3)) + 1;
- int output_w = output.shape.at(ff_dim_t(0)) + 1;
- int output_h = output.shape.at(ff_dim_t(1)) + 1;
- int output_c = output.shape.at(ff_dim_t(2)) + 1;
- int output_n = output.shape.at(ff_dim_t(3)) + 1;
+ int input_w = input.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int input_h = input.shape.at(ff_dim_t{nonnegative_int{1}}) + 1;
+ int input_c = input.shape.at(ff_dim_t{nonnegative_int{2}}) + 1;
+ int input_n = input.shape.at(ff_dim_t{nonnegative_int{3}}) + 1;
+ int output_w = output.shape.at(ff_dim_t{nonnegative_int{0}}) + 1;
+ int output_h = output.shape.at(ff_dim_t{nonnegative_int{1}}) + 1;
+ int output_c = output.shape.at(ff_dim_t{nonnegative_int{2}}) + 1;
+ int output_n = output.shape.at(ff_dim_t{nonnegative_int{3}}) + 1;


printf("init pool (input): n(%d) c(%d) h(%d) "
"w(%d)\n",
14 changes: 7 additions & 7 deletions lib/local-execution/src/ops/reverse.cc
@@ -53,11 +53,11 @@ static std::optional<float> forward_task_impl(TaskArgumentAccessor const &acc) {
coord_t in_blk_size = 1, reverse_dim_size = 1, num_out_blks = 1;
for (int i = 0; i < output.shape.get_dim(); i++) {
if (i < axis.value) {
- in_blk_size *= output.shape.at(ff_dim_t(i));
+ in_blk_size *= output.shape.at(ff_dim_t{nonnegative_int{i}});

} else if (i == axis.value) {
- reverse_dim_size = output.shape.at(ff_dim_t(i));
+ reverse_dim_size = output.shape.at(ff_dim_t{nonnegative_int{i}});

} else {
- num_out_blks *= output.shape.at(ff_dim_t(i));
+ num_out_blks *= output.shape.at(ff_dim_t{nonnegative_int{i}});

}
}

@@ -79,15 +79,15 @@ static std::optional<float>
auto output_grad = acc.get_tensor_grad<Permissions::RO>(OUTPUT);
auto attrs = acc.get_argument<ReverseAttrs>(ATTRS);

- int axis = input_grad.shape.get_dim() - attrs.axis.value - 1;
+ int axis = input_grad.shape.get_dim() - attrs.axis.value.get_value() - 1;

coord_t in_blk_size = 1, reverse_dim_size = 1, num_out_blks = 1;
for (int i = 0; i < input_grad.shape.get_dim(); i++) {
if (i < axis) {
- in_blk_size *= input_grad.shape.at(ff_dim_t(i));
+ in_blk_size *= input_grad.shape.at(ff_dim_t{nonnegative_int{i}});

} else if (i == axis) {
- reverse_dim_size = input_grad.shape.at(ff_dim_t(i));
+ reverse_dim_size = input_grad.shape.at(ff_dim_t{nonnegative_int{i}});

} else {
- num_out_blks *= input_grad.shape.at(ff_dim_t(i));
+ num_out_blks *= input_grad.shape.at(ff_dim_t{nonnegative_int{i}});

}
}

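Apart from the ff_dim_t construction and the get_value() unwrap, the reverse kernel's setup loop is untouched: it partitions the shape into blocks around the reversed axis. A plain-int sketch of that decomposition, assuming a hypothetical 4-D shape {2, 3, 4, 5} reversed along axis 2:

```cpp
#include <cassert>
#include <vector>

// Standalone illustration of the block-size computation in reverse.cc:
// dimensions before the reversed axis multiply into in_blk_size, the axis
// itself becomes reverse_dim_size, and the remaining dimensions multiply
// into num_out_blks.
int main() {
  std::vector<long> dims = {2, 3, 4, 5}; // hypothetical shape
  int axis = 2;

  long in_blk_size = 1, reverse_dim_size = 1, num_out_blks = 1;
  for (int i = 0; i < static_cast<int>(dims.size()); i++) {
    if (i < axis) {
      in_blk_size *= dims[i];
    } else if (i == axis) {
      reverse_dim_size = dims[i];
    } else {
      num_out_blks *= dims[i];
    }
  }

  assert(in_blk_size == 6);      // 2 * 3
  assert(reverse_dim_size == 4); // the reversed axis
  assert(num_out_blks == 5);     // trailing dimensions
  return 0;
}
```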
9 changes: 7 additions & 2 deletions lib/local-execution/src/ops/softmax.cc
@@ -64,8 +64,13 @@ static DeviceSpecificDeviceStates
int output_c = output.shape.at(legion_dim_t(2));
int output_n = output.shape.at(legion_dim_t(3));

- SoftmaxPerDeviceState per_device_state = init_kernel(
- handle, attrs.dim.value, output_n, output_c, output_h, output_w);
+ SoftmaxPerDeviceState per_device_state =
+ init_kernel(handle,
+ attrs.dim.value.get_value(),

+ output_n,
+ output_c,
+ output_h,
+ output_w);

return DeviceSpecificDeviceStates{
DeviceSpecific<SoftmaxPerDeviceState>::create(per_device_state)};
21 changes: 14 additions & 7 deletions lib/local-execution/src/ops/split.cc
@@ -66,12 +66,15 @@ static std::optional<float> forward_task_impl(TaskArgumentAccessor const &acc) {
auto attrs = acc.get_argument<SplitAttrs>(ATTRS);

coord_t num_blocks, in_block_size, out_block_size[MAX_NUM_OUTPUTS];
- calc_block_size(num_blocks, in_block_size, input.shape, attrs.axis.value);
+ calc_block_size(
+ num_blocks, in_block_size, input.shape, attrs.axis.value.get_value());

for (int i = 0; i < attrs.splits.size(); i++) {
coord_t out_num_blocks;
- calc_block_size(
- out_num_blocks, out_block_size[i], output.shape, attrs.axis.value);
+ calc_block_size(out_num_blocks,
+ out_block_size[i],

+ output.shape,
+ attrs.axis.value.get_value());
}
float *output_float_ptr = output.get_float_ptr();
return profile(forward_kernel,
@@ -94,12 +94,16 @@ static std::optional<float>
auto attrs = acc.get_argument<SplitAttrs>(ATTRS);

coord_t num_blocks, in_block_size, out_block_size[MAX_NUM_OUTPUTS];
- calc_block_size(
- num_blocks, in_block_size, input_grad.shape, attrs.axis.value);
+ calc_block_size(num_blocks,

+ in_block_size,
+ input_grad.shape,
+ attrs.axis.value.get_value());
for (int i = 0; i < attrs.splits.size(); i++) {
coord_t out_num_blocks;
- calc_block_size(
- out_num_blocks, out_block_size[i], output_grad.shape, attrs.axis.value);
+ calc_block_size(out_num_blocks,
+ out_block_size[i],

+ output_grad.shape,
+ attrs.axis.value.get_value());
}
float const *output_grad_ptr = output_grad.get_float_ptr();
return profile(backward_kernel,
166 changes: 165 additions & 1 deletion lib/op-attrs/include/op-attrs/dim_ordered/dim_ordered.h
@@ -2,6 +2,7 @@
#define _FLEXFLOW_OPATTRS_INCLUDE_OPATTRS_FF_STACK_VECTOR_H

#include "op-attrs/ff_dim_t.dtg.h"
#include "op-attrs/relative_ff_dim_t.dtg.h"
#include "utils/fmt/vector.h"
#include "utils/stack_vector.h"
#include <nlohmann/json.hpp>
@@ -152,6 +153,169 @@ struct DimOrdered {
stack_vector<T, MAX_TENSOR_DIM> contents;
};

template <typename T>
struct DimOrdered<ff_dim_t, T> {
DimOrdered() {}

DimOrdered(std::initializer_list<T> const &l)
: contents(l.begin(), l.end()) {}

DimOrdered(std::vector<T> const &contents)
: contents(contents.begin(), contents.end()) {}

template <typename It>
DimOrdered(It begin, It end) : contents(begin, end) {}

template <size_t MAXSIZE>
DimOrdered(stack_vector<T, MAXSIZE> const &contents)
: contents(contents.begin(), contents.end()) {}

T const &at(ff_dim_t idx) const {
int raw = idx.value.get_value();
return this->contents.at(raw);
}

T const &at(relative_ff_dim_t idx) const {
int raw = idx.value;
if (raw < 0) {
raw = this->contents.size() + raw;
}
return this->contents.at(raw);
}

T &at(ff_dim_t idx) {
int raw = idx.value.get_value();
return this->contents.at(raw);
}

T &at(relative_ff_dim_t idx) {
int raw = idx.value;
if (raw < 0) {
raw = this->contents.size() + raw;
}
return this->contents.at(raw);
}

T const &operator[](ff_dim_t idx) const {
return this->at(idx);
}

T const &operator[](relative_ff_dim_t idx) const {
return this->at(idx);
}

T &operator[](ff_dim_t idx) {
return this->at(idx);
}

T &operator[](relative_ff_dim_t idx) {
return this->at(idx);
}

bool idx_is_valid(ff_dim_t const &idx) const {
int raw = idx.value.get_value();
return raw < this->contents.size();
}

bool idx_is_valid(relative_ff_dim_t const &idx) const {
int raw = idx.value;
if (raw < 0) {
raw = this->contents.size() + raw;

}
return (raw >= 0 && raw < this->contents.size());
}

bool operator==(DimOrdered const &other) const {
return this->contents == other.contents;
}

bool operator!=(DimOrdered const &other) const {
return this->contents != other.contents;
}

bool operator<(DimOrdered const &other) const {
return this->contents < other.contents;

}

using iterator = typename stack_vector<T, MAX_TENSOR_DIM>::iterator;
using const_iterator =
typename stack_vector<T, MAX_TENSOR_DIM>::const_iterator;
using reverse_iterator =
typename stack_vector<T, MAX_TENSOR_DIM>::reverse_iterator;
using const_reverse_iterator =
typename stack_vector<T, MAX_TENSOR_DIM>::const_reverse_iterator;
using value_type = T;
using pointer = value_type *;
using const_pointer = value_type const *;
using reference = value_type &;
using const_reference = value_type const &;

iterator begin() {
return this->contents.begin();
}

const_iterator begin() const {
return this->cbegin();
}

const_iterator cbegin() const {
return this->contents.cbegin();
}

iterator end() {
return this->contents.end();
}

const_iterator end() const {
return this->cend();
}

const_iterator cend() const {
return this->contents.cend();
}

reverse_iterator rbegin() {
return this->contents.rbegin();
}

const_reverse_iterator rbegin() const {
return this->crbegin();
}

const_reverse_iterator crbegin() const {
return this->contents.crbegin();
}

reverse_iterator rend() {
return this->contents.crend();
}

const_reverse_iterator rend() const {
return this->crend();
}

const_reverse_iterator crend() const {
return this->contents.crend();
}

size_t size() const {
return this->contents.size();
}

size_t empty() const {
return this->contents.empty();
}

size_t num_dims() const {
return this->size();
}

friend struct ::std::hash<DimOrdered>;

private:
stack_vector<T, MAX_TENSOR_DIM> contents;
};

template <typename T>
using FFOrdered = DimOrdered<ff_dim_t, T>;

@@ -176,7 +340,7 @@ template <typename T>
std::vector<ff_dim_t> inner_to_outer_idxs(FFOrdered<T> const &ff_ordered) {
std::vector<ff_dim_t> idxs;
for (size_t i = 0; i < ff_ordered.size(); i++) {
- idxs.push_back(ff_dim_t(ff_ordered.size() - i - 1));
+ idxs.push_back(ff_dim_t{nonnegative_int{ff_ordered.size() - i - 1}});

}
return idxs;
}
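The specialization above is what lets FFOrdered containers accept both index types: ff_dim_t (backed by nonnegative_int, so never negative) and the new relative_ff_dim_t, whose plain int value may be negative and is wrapped from the end of the container. A standalone sketch of that wrapping rule with simplified stand-in types:

```cpp
#include <cassert>
#include <vector>

// Simplified illustration of DimOrdered<ff_dim_t, T>::at(relative_ff_dim_t):
// a negative index has the container size added to it, so -1 names the last
// dimension. The vector<int> stands in for the stack_vector of dimensions.
int at_relative(std::vector<int> const &contents, int relative_idx) {
  int raw = relative_idx;
  if (raw < 0) {
    raw = static_cast<int>(contents.size()) + raw;
  }
  return contents.at(raw);
}

int main() {
  std::vector<int> dims = {8, 16, 32};
  assert(at_relative(dims, 0) == 8);   // ordinary nonnegative index
  assert(at_relative(dims, -1) == 32); // wraps to index 2
  assert(at_relative(dims, -3) == 8);  // wraps to index 0
  return 0;
}
```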
2 changes: 1 addition & 1 deletion lib/op-attrs/include/op-attrs/dim_ordered/enumerate.h
@@ -19,7 +19,7 @@ template <typename T>
std::map<ff_dim_t, T> enumerate(FFOrdered<T> const &ff_ordered) {
std::map<ff_dim_t, T> result;
for (int raw_ff_dim : count(ff_ordered.size())) {
- ff_dim_t ff_dim = ff_dim_t{raw_ff_dim};
+ ff_dim_t ff_dim = ff_dim_t{nonnegative_int{raw_ff_dim}};
result.insert({ff_dim, ff_ordered.at(ff_dim)});
}
return result;
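enumerate is functionally unchanged here; only the key construction now goes through nonnegative_int. For reference, a simplified stand-in (plain int keys in place of ff_dim_t) showing the shape of the map it builds:

```cpp
#include <cassert>
#include <map>
#include <vector>

// Simplified sketch of enumerate(FFOrdered<T> const &): each dimension index
// is paired with the value stored at that dimension.
template <typename T>
std::map<int, T> enumerate(std::vector<T> const &ordered) {
  std::map<int, T> result;
  for (int i = 0; i < static_cast<int>(ordered.size()); i++) {
    result.insert({i, ordered.at(i)});
  }
  return result;
}

int main() {
  std::map<int, char> m = enumerate(std::vector<char>{'a', 'b', 'c'});
  assert(m.at(0) == 'a');
  assert(m.at(2) == 'c');
  return 0;
}
```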