Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Eliminate "static inputs" for translations #682

Draft
wants to merge 11 commits into
base: master
Choose a base branch
from
6 changes: 5 additions & 1 deletion ngraph_bridge/executable.cc
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,11 @@ Executable::Executable(shared_ptr<Function> func, string device)
bool trivial_fn = true;
for (auto result : func->get_results()) {
auto parent = result->input_value(0).get_node_shared_ptr();
auto& shape = result->get_shape();
ngraph::Shape shape = {1};
if (result->get_output_partial_shape(0).is_static()) {
shape = result->get_shape();
}

trivial_fn &= ngraph::is_type<opset::Parameter>(parent) ||
ngraph::is_type<opset::Constant>(parent) ||
count(shape.begin(), shape.end(), 0);
Expand Down
463 changes: 82 additions & 381 deletions ngraph_bridge/ngraph_builder.cc

Large diffs are not rendered by default.

9 changes: 1 addition & 8 deletions ngraph_bridge/ngraph_encapsulate_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,6 @@ namespace ngraph_bridge {

int NGraphEncapsulateOp::s_instance_id = 0;

//---------------------------------------------------------------------------
// NGraphEncapsulateOp::ctor
//---------------------------------------------------------------------------
NGraphEncapsulateOp::NGraphEncapsulateOp(OpKernelConstruction* ctx)
: OpKernel(ctx) {
NGRAPH_VLOG(1) << "Create Executor " << name();
Expand Down Expand Up @@ -115,9 +112,8 @@ NGraphEncapsulateOp::NGraphEncapsulateOp(OpKernelConstruction* ctx)
std::vector<const Node*> arg_nodes;

for (auto node : ng_encap_impl_.m_graph.nodes()) {
if (node->type_string() == "_Arg") {
if (node->IsArg()) {
arg_nodes.push_back(node);

int32 index;
OP_REQUIRES_OK(ctx, GetNodeAttr(node->attrs(), "index", &index));
if (index > max_arg_index) max_arg_index = index;
Expand Down Expand Up @@ -162,9 +158,6 @@ NGraphEncapsulateOp::NGraphEncapsulateOp(OpKernelConstruction* ctx)
node_def.attr(), &additional_attribute_map));
}

//---------------------------------------------------------------------------
// ~NGraphEncapsulateOp()
//---------------------------------------------------------------------------
NGraphEncapsulateOp::~NGraphEncapsulateOp() {
std::ostringstream oss;
oss << "Destroy Encapsulate_" << ng_encap_impl_.GetInstanceId() << ": "
Expand Down
22 changes: 1 addition & 21 deletions ngraph_bridge/ngraph_mark_for_clustering.cc
Original file line number Diff line number Diff line change
Expand Up @@ -195,34 +195,14 @@ const std::map<std::string, SetAttributesFunction>& GetAttributeSetters() {

if (!initialized) {
// Set Additional Attributes (if any)
set_attributes_map["Any"] = SetStaticInputs({1});
set_attributes_map["All"] = SetStaticInputs({1});
set_attributes_map["ArgMax"] = SetStaticInputs({1});
set_attributes_map["ArgMin"] = SetStaticInputs({1});
set_attributes_map["ConcatV2"] = SetStaticInputs({-1});
set_attributes_map["Conv2DBackpropInput"] = SetStaticInputs({0});
set_attributes_map["ExpandDims"] = SetStaticInputs({1});
set_attributes_map["Fill"] = SetStaticInputs({0});
set_attributes_map["GatherV2"] = SetStaticInputs({2});
set_attributes_map["Max"] = SetStaticInputs({1});
set_attributes_map["Mean"] = SetStaticInputs({1});
set_attributes_map["Min"] = SetStaticInputs({1});
set_attributes_map["MirrorPad"] = SetStaticInputs({1});
set_attributes_map["NonMaxSuppressionV4"] = SetStaticInputs({2, 3, 4});
set_attributes_map["OneHot"] = SetStaticInputs({1});
set_attributes_map["Pad"] = SetStaticInputs({1});
set_attributes_map["PadV2"] = SetStaticInputs({1, 2});
set_attributes_map["Prod"] = SetStaticInputs({1});
set_attributes_map["Reshape"] = SetStaticInputs({1});
set_attributes_map["Shape"] = SetStaticInputs({0});
set_attributes_map["Slice"] = SetStaticInputs({1, 2});
set_attributes_map["Split"] = SetStaticInputs({0});
set_attributes_map["SplitV"] = SetStaticInputs({1, 2});
set_attributes_map["StridedSlice"] = SetStaticInputs({1, 2, 3});
set_attributes_map["Sum"] = SetStaticInputs({1});
set_attributes_map["TopKV2"] = SetStaticInputs({1});
set_attributes_map["Tile"] = SetStaticInputs({1});
set_attributes_map["Transpose"] = SetStaticInputs({1});
initialized = true;
}
return set_attributes_map;
Expand Down Expand Up @@ -537,7 +517,7 @@ const TypeConstraintMap& GetTypeConstraintMap() {
type_constraint_map["SquaredDifference"]["T"] = NGraphDTypes();
type_constraint_map["Squeeze"]["T"] = NGraphDTypes();
type_constraint_map["StridedSlice"]["T"] = NGraphDTypes();
type_constraint_map["StridedSlice"]["Index"] = NGraphIndexDTypes();
type_constraint_map["StridedSlice"]["Index"] = {DT_INT64};
type_constraint_map["Sub"]["T"] = NGraphNumericDTypes();
type_constraint_map["Sum"]["T"] = NGraphNumericDTypes();
type_constraint_map["Sum"]["Tidx"] = NGraphIndexDTypes();
Expand Down
57 changes: 26 additions & 31 deletions test/python/tensorflow/tests_common.txt
Original file line number Diff line number Diff line change
Expand Up @@ -57,11 +57,10 @@ array_ops_test.ShapeSizeRankTest.testSizeDtype
array_ops_test.ShapeSizeRankTest.testSparseShape

bias_op_test.BiasAddTest.testEmpty
#This test was commented out when upgrading from tf 1.13 to tf 1.14rc0
#bias_op_test.BiasAddTest.testEmptyGradient
bias_op_test.BiasAddTest.testEmptyGradient
bias_op_test.BiasAddTest.testFloatTypes
#bias_op_test.BiasAddTest.testGradientTensor4D
#bias_op_test.BiasAddTest.testIntTypes
bias_op_test.BiasAddTest.testGradientTensor4D
bias_op_test.BiasAddTest.testIntTypes

#bitwise_ops_test.BitwiseOpTest.testBinaryOps

Expand All @@ -71,7 +70,7 @@ cast_op_test.CastOpTest.testCastToTypeOfVariable
cast_op_test.CastOpTest.testGradients
cast_op_test.CastOpTest.testInfNan
cast_op_test.CastOpTest.testIntToFloatBoundary
#cast_op_test.CastOpTest.testNotImplemented
cast_op_test.CastOpTest.testNotImplemented
cast_op_test.CastOpTest.testRandom
cast_op_test.CastOpTest.testSmallValues

Expand All @@ -97,7 +96,7 @@ concat_op_test.ConcatOpTest.testTensorConcatDim0Grad
concat_op_test.ConcatOpTest.testTensorConcatDim1Grad
concat_op_test.ConcatOpTest.testVStack
concat_op_test.ConcatOpTest.testZeroSize
#concat_op_test.ConcatOpTest.testRandom
concat_op_test.ConcatOpTest.testRandom

conv_ops_test.Conv2DTest.testConv2D1x1Filter
conv_ops_test.Conv2DTest.testConv2D1x2Filter
Expand All @@ -111,39 +110,39 @@ conv_ops_test.Conv2DTest.testConv2D2x2FilterDilation
conv_ops_test.Conv2DTest.testConv2D2x2FilterStride1x2
conv_ops_test.Conv2DTest.testConv2D2x2FilterStride2
conv_ops_test.Conv2DTest.testConv2D2x2FilterStride2Same
#conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSize # this is running the next line which is an error, so excluding for now.
#conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeBackpropFilter
conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSize
conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeBackpropFilter
conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeBackpropInput
conv_ops_test.Conv2DTest.testConv2DKernelSizeMatchesInputSizeDilation
#conv_ops_test.Conv2DTest.testConv2DKernelSmallerThanStrideSame
conv_ops_test.Conv2DTest.testConv2DKernelSmallerThanStrideSame
conv_ops_test.Conv2DTest.testConv2DKernelSmallerThanStrideValid
#conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropInput
conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropInput
conv_ops_test.Conv2DTest.testInputGradientKernelSizeMatchesInputSize
conv_ops_test.Conv2DTest.testInputGradientSamePaddingStrideOne
conv_ops_test.Conv2DTest.testInputGradientSamePaddingStrideThree
conv_ops_test.Conv2DTest.testInputGradientSamePaddingStrideTwo
conv_ops_test.Conv2DTest.testInputGradientValidPaddingStrideOne
conv_ops_test.Conv2DTest.testInputGradientValidPaddingStrideThree
conv_ops_test.Conv2DTest.testInputGradientValidPaddingStrideTwo
#conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
#conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
#conv_ops_test.Conv2DTest.testConv2D2x2Depth3ValidBackpropFilterStride1x2
#conv_ops_test.Conv2DTest.testConv2DBackpropFilterWithEmptyInput
conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
conv_ops_test.Conv2DTest.testConv2D2x2Depth1ValidBackpropFilter
conv_ops_test.Conv2DTest.testConv2D2x2Depth3ValidBackpropFilterStride1x2
conv_ops_test.Conv2DTest.testConv2DBackpropFilterWithEmptyInput
#conv_ops_test.Conv2DTest.testConv2DEmpty
#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilter
conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilter
#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropInput
#conv_ops_test.Conv2DTest.testConv2DEmptyDilation
#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilterDilation1x2
#conv_ops_test.Conv2DTest.testConv2DEmptyBackpropInputDilation1x2
#conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropFilter
#conv_ops_test.Conv2DTest.testFilterGradientKernelSizeMatchesInputSize
#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStride2x1
#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideOne
#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideThree
#conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideTwo
#conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideOne
#conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideThree
#conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideTwo
conv_ops_test.Conv2DTest.testConv2DEmptyBackpropFilterDilation1x2
conv_ops_test.Conv2DTest.testConv2DEmptyBackpropInputDilation1x2
conv_ops_test.Conv2DTest.testConv2DStrideTwoFilterOneSameBackpropFilter
conv_ops_test.Conv2DTest.testFilterGradientKernelSizeMatchesInputSize
conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStride2x1
conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideOne
conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideThree
conv_ops_test.Conv2DTest.testFilterGradientSamePaddingStrideTwo
conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideOne
conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideThree
conv_ops_test.Conv2DTest.testFilterGradientValidPaddingStrideTwo
#conv_ops_test.Conv2DTest.testOpEdgeCases

conv_ops_3d_test.Conv3DTest.testConv3D1x1x1Filter
Expand Down Expand Up @@ -226,10 +225,6 @@ gather_nd_op_test.GatherNdTest.testParamsRankLargerThanIndexSlices
gather_nd_op_test.GatherNdTest.testSimpleDtype
gather_nd_op_test.GatherNdTest.testUnknownIndices
gather_nd_op_test.GatherNdTest.test_session

# Note that all these tests will pass on CPU (except testUInt32AndUInt64), because on CPU gather falls back to TF
# In case of NNPI all except the last pass. But CI won't be tracking it because CI runs on CPU backend only
gather_op_test.GatherTest.testBadAxis
#gather_op_test.GatherTest.testBadIndicesCPU ...failing with ngraph error The start corner is out of bounds at axis 0
#gather_op_test.GatherTest.testEmptySlices ...failure in backend due to int64
#gather_op_test.GatherTest.testScalar1D ...failure in backend due to int64
Expand Down Expand Up @@ -527,7 +522,7 @@ split_op_test.SplitOpTest.testIdentity
split_op_test.SplitOpTest.testListOfScalarTensors
split_op_test.SplitOpTest.testNonexistentDimTensor
split_op_test.SplitOpTest.testRandom
split_op_test.SplitOpTest.testShapeInference
#split_op_test.SplitOpTest.testShapeInference
#split_op_test.SplitOpTest.testSpecialCasesVariable
split_op_test.SplitOpTest.testSplitCols
split_op_test.SplitOpTest.testSplitDim0
Expand Down
2 changes: 2 additions & 0 deletions test/python/tensorflow/tests_linux_cpu.txt
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ reduction_ops_test.SumReductionTest.testDegenerate
reduction_ops_test.MeanReductionTest.testDegenerate
reduction_ops_test.ProdReductionTest.testDegenerate
cwise_ops_unary_test.UnaryOpTest.testComplexAbsGradGrad
slice_op_test.SliceTest.testRandom

#Failed to set Blob with precision not corresponding to user output precision.
argmax_op_test.ArgMaxTest.testFloat
Expand Down Expand Up @@ -91,6 +92,7 @@ concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim0Grad
concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim1Grad
concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim1Grad_UnknownInputDim
concat_op_test.ConcatOpTest.testIndexedSlicesConcatDim2Grad
concat_op_test.ConcatOpTest.testRandom
identity_bijector_test.IdentityBijectorTest.testBijector
math_ops_test.LogSumExpTest.testInfinity
math_ops_test.LogSumExpTest.testKeepDims
Expand Down
6 changes: 3 additions & 3 deletions test/test_utilities.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -304,9 +304,9 @@ void Compare(const Tensor& T1, const Tensor& T2, float rtol, float atol) {
"expected output datatype."
<< dtype;
}
auto T_size = T1.flat<T>().size();
auto T1_data = T1.flat<T>().data();
auto T2_data = T2.flat<T>().data();
auto T_size = T1.unaligned_flat<T>().size();
auto T1_data = T1.unaligned_flat<T>().data();
auto T2_data = T2.unaligned_flat<T>().data();
bool is_comparable = false;

for (int k = 0; k < T_size; k++) {
Expand Down
6 changes: 0 additions & 6 deletions test/tests_linux_cpu.txt
Original file line number Diff line number Diff line change
Expand Up @@ -48,7 +48,6 @@ MathOps.Pow0D1D
# Const layer Squeeze/Constant_3544 has incorrect dimensions in the output data 0
MathOps.SqueezeNoAttributes


# Const/Const/Constant_1260 has zero dimension which is not allowed
NNOps.L2Loss

Expand All @@ -59,8 +58,3 @@ ArrayOps.Shape3D
# zero dimension
ArrayOps.SplitVZeroSizeSplit
ArrayOps.SplitVZeroSizeNegSplit

MathOps.FloorModNegFloat #FloorMod supports only I32 input precision