Skip to content

Commit

Permalink
Extend the existing IO binding subgraph tests
Browse files Browse the repository at this point in the history
  • Loading branch information
maxnick committed Nov 9, 2023
1 parent c851d64 commit 7c4cb2e
Show file tree
Hide file tree
Showing 3 changed files with 77 additions and 6 deletions.
2 changes: 1 addition & 1 deletion src/inference/src/dev/make_tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ class ViewTensor : public ITensor {
m_strides{},
m_strides_once{},
m_ptr{ptr} {
OPENVINO_ASSERT(m_ptr != nullptr);
OPENVINO_ASSERT(shape_size(shape) == 0 || m_ptr != nullptr);
OPENVINO_ASSERT(m_element_type != element::undefined && m_element_type.is_static());
}

Expand Down
17 changes: 12 additions & 5 deletions src/plugins/intel_cpu/src/infer_request.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -568,7 +568,9 @@ void SyncInferRequest::init_tensor(const std::string& name) {
const bool isDynamic = shape.is_dynamic();
ov::Shape tensor_shape;
if (isDynamic) {
tensor_shape = ov::Shape(shape.rank().get_length(), 0);
for (auto&& item : shape) {
tensor_shape.push_back(item.is_static() ? item.get_length() : 0);
}
} else {
tensor_shape = shape.to_shape();
}
Expand Down Expand Up @@ -697,12 +699,17 @@ SyncInferRequest::OutputControlBlock::OutputControlBlock(const InferenceEngine::
m_buffers[m_buffIndx] = std::make_shared<MemoryMngrWithReuse>();
m_proxyMemMngr = std::make_shared<ProxyMemoryMngr>(m_buffers[m_buffIndx]);

Shape memShape = shape.isDynamic() ?
Shape{VectorDims(shape.getRank(), 0)} : // this is a WA since the ITensor doesn't allow dyn shapes
Shape{shape};
VectorDims memDims;
if (shape.isDynamic()) { // this is a WA since the ITensor doesn't allow dyn shapes
for (auto&& item : shape.getDims()) {
memDims.push_back(item != Shape::UNDEFINED_DIM ? item : 0);
}
} else {
memDims = shape.getStaticDims();
}

CpuBlockedMemoryDescPtr desc =
std::make_shared<CpuBlockedMemoryDesc>(precision, memShape);
std::make_shared<CpuBlockedMemoryDesc>(precision, Shape{memDims});

auto memory = std::make_shared<Memory>(eng, desc, m_proxyMemMngr);
m_tensor = std::make_shared<Tensor>(memory);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -83,4 +83,68 @@ TEST_F(InputOutputTensorReuse, smoke_Input_Output_Binding) {
}
}

TEST_F(InputOutputTensorReuse, smoke_Input_Output_Bind_Once) {
    // Bind the model's output tensor to the "Param_1" input a single time,
    // then run several inference iterations without re-calling set_tensor,
    // verifying the produced results on every iteration.
    compile_model();
    std::vector<ov::Shape> shapes = {{1, 32, 5, 16}, {1, 32, 1, 16}};
    generate_inputs(shapes);
    validate();

    // Locates the ("Param_1", tensor) entry among the request inputs.
    auto locate_param1 = [this]() {
        return std::find_if(inputs.begin(),
                            inputs.end(),
                            [](const std::pair<std::shared_ptr<ov::Node>, ov::Tensor>& entry) {
                                return entry.first->get_friendly_name() == "Param_1";
                            });
    };

    auto outputTensor = inferRequest.get_output_tensor(0);
    shapes.back() = outputTensor.get_shape();
    auto paramIt = locate_param1();
    ASSERT_NE(paramIt, inputs.end());
    paramIt->second = outputTensor;

    // One-time binding of every input tensor to the request.
    for (const auto& inputEntry : inputs) {
        inferRequest.set_tensor(inputEntry.first, inputEntry.second);
    }

    constexpr size_t num_iter = 10;
    for (size_t iter = 0; iter < num_iter; ++iter) {
        const auto& expectedOutputs = calculate_refs();

        inferRequest.infer();
        compare(expectedOutputs, {outputTensor});
        // Feed the reference result back as the next iteration's input so the
        // reference computation tracks the bound-tensor data flow.
        auto refIt = locate_param1();
        ASSERT_NE(refIt, inputs.end());
        refIt->second = expectedOutputs.front();
    }
}

TEST_F(InputOutputTensorReuse, smoke_Input_Output_Bind_Once_Empty_Tensor) {
    // Same one-time IO binding scenario as smoke_Input_Output_Bind_Once, but
    // the output tensor is taken from a freshly created infer request (no
    // prior inference), so it may still be empty when it gets bound as input.
    compile_model();
    std::vector<ov::Shape> shapes = {{1, 32, 5, 16}, {1, 32, 1, 16}};
    generate_inputs(shapes);
    inferRequest = compiledModel.create_infer_request();

    // Locates the ("Param_1", tensor) entry among the request inputs.
    auto locate_param1 = [this]() {
        return std::find_if(inputs.begin(),
                            inputs.end(),
                            [](const std::pair<std::shared_ptr<ov::Node>, ov::Tensor>& entry) {
                                return entry.first->get_friendly_name() == "Param_1";
                            });
    };

    auto outputTensor = inferRequest.get_output_tensor(0);
    shapes.back() = outputTensor.get_shape();
    auto paramIt = locate_param1();
    ASSERT_NE(paramIt, inputs.end());
    paramIt->second = outputTensor;

    // One-time binding of every input tensor to the request.
    for (const auto& inputEntry : inputs) {
        inferRequest.set_tensor(inputEntry.first, inputEntry.second);
    }

    constexpr size_t num_iter = 10;
    for (size_t iter = 0; iter < num_iter; ++iter) {
        const auto& expectedOutputs = calculate_refs();

        inferRequest.infer();
        compare(expectedOutputs, {outputTensor});
        // Feed the reference result back as the next iteration's input so the
        // reference computation tracks the bound-tensor data flow.
        auto refIt = locate_param1();
        ASSERT_NE(refIt, inputs.end());
        refIt->second = expectedOutputs.front();
    }
}

} // namespace SubgraphTestsDefinitions

0 comments on commit 7c4cb2e

Please sign in to comment.