@@ -66,37 +66,6 @@ inline bool has_decompression_converts(const std::shared_ptr<const ov::Model>& f
    return false;
}

OPENVINO_DEPRECATED("Plugins should use ov::ISyncInferRequest::find_port")
inline std::string create_ie_output_name(const Output<const Node>& output) {
    const auto& prev_layer = output.get_node_shared_ptr();
    auto out_name = prev_layer->get_friendly_name();
    if (prev_layer->get_output_size() != 1) {
        out_name += "." + std::to_string(output.get_index());
    }
    return out_name;
}

OPENVINO_DEPRECATED("Plugins should use ov::ISyncInferRequest::find_port")
inline std::string create_ie_output_name(const Output<Node>& output) {
    OPENVINO_SUPPRESS_DEPRECATED_START
    return create_ie_output_name(ov::Output<const Node>(output.get_node(), output.get_index()));
    OPENVINO_SUPPRESS_DEPRECATED_END
}

OPENVINO_DEPRECATED("Plugins should use ov::ISyncInferRequest::find_port")
inline std::string get_ie_output_name(const Output<const Node>& output) {
    OPENVINO_SUPPRESS_DEPRECATED_START
    return create_ie_output_name(output);
    OPENVINO_SUPPRESS_DEPRECATED_END
}

OPENVINO_DEPRECATED("Plugins should use ov::ISyncInferRequest::find_port")
inline std::string get_ie_output_name(const Output<Node>& output) {
    OPENVINO_SUPPRESS_DEPRECATED_START
    return get_ie_output_name(ov::Output<const Node>(output.get_node(), output.get_index()));
    OPENVINO_SUPPRESS_DEPRECATED_END
}

/**
* \brief Convert epsilon value from double to float type.
*
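For reference, the rule these removed helpers implemented (the producer's friendly name, plus "." and the port index when the producer has more than one output) is what the PR inlines at each remaining call site below. A minimal, self-contained sketch of that rule; the Split node and its friendly name are illustrative and not taken from the PR:

#include <iostream>
#include <memory>
#include <string>

#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/split.hpp"

int main() {
    // A two-output node, so the ".<port>" suffix is actually appended.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
    auto split = std::make_shared<ov::op::v1::Split>(data, axis, 2);
    split->set_friendly_name("split");

    for (const auto& output : split->outputs()) {
        // Same rule as the removed create_ie_output_name helper.
        std::string name = split->get_friendly_name();
        if (split->get_output_size() != 1) {
            name += "." + std::to_string(output.get_index());
        }
        std::cout << name << "\n";  // prints "split.0", then "split.1"
    }
    return 0;
}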
@@ -7,6 +7,7 @@
#include "itt.hpp"
#include "openvino/core/descriptor/tensor.hpp"
#include "openvino/core/graph_util.hpp"
#include "openvino/core/rt_info.hpp"
Contributor:

Do we need to add this include? It looks like we only remove code from this file, no?

Contributor Author:

Yes, the include is needed. We removed #include "transformations/utils/utils.hpp", which transitively included openvino/core/rt_info.hpp. This file uses ov::copy_runtime_info (lines 54, 154, 166), so we need to add the direct include.
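
For context, a minimal sketch of the dependency being discussed; the nodes are illustrative stand-ins rather than the ones this pass creates:

#include <memory>

#include "openvino/core/rt_info.hpp"  // declares ov::copy_runtime_info
#include "openvino/op/parameter.hpp"
#include "openvino/op/relu.hpp"

int main() {
    auto input = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{1, 3});
    auto old_node = std::make_shared<ov::op::v0::Relu>(input);
    auto new_node = std::make_shared<ov::op::v0::Relu>(input);

    // Propagate runtime attributes from the node being replaced to its replacement,
    // which is what the pass does after constructing maxpool_v1_node.
    ov::copy_runtime_info(old_node, new_node);
    return 0;
}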

#include "openvino/op/add.hpp"
#include "openvino/op/avg_pool.hpp"
#include "openvino/op/broadcast.hpp"
@@ -24,7 +25,6 @@
#include "openvino/pass/manager.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "openvino/pass/visualize_tree.hpp"
#include "transformations/utils/utils.hpp"

ov::pass::ConvertMaxPool8ToMaxPool1::ConvertMaxPool8ToMaxPool1() {
    MATCHER_SCOPE(ConvertMaxPool8ToMaxPool1);
@@ -49,10 +49,6 @@ ov::pass::ConvertMaxPool8ToMaxPool1::ConvertMaxPool8ToMaxPool1() {
                                                                     maxpool_v8_node->get_rounding_type(),
                                                                     maxpool_v8_node->get_auto_pad());

        OPENVINO_SUPPRESS_DEPRECATED_START
        auto out_name = ov::op::util::create_ie_output_name(maxpool_v8_node->output(0));
        OPENVINO_SUPPRESS_DEPRECATED_END

        maxpool_v1_node->set_friendly_name(maxpool_v8_node->get_friendly_name());
        maxpool_v8_node->output(0).replace(maxpool_v1_node->output(0));
        ov::copy_runtime_info(maxpool_v8_node, maxpool_v1_node);
@@ -16,7 +16,6 @@
#include "openvino/op/reshape.hpp"
#include "openvino/pass/pattern/op/wrap_type.hpp"
#include "ov_ops/nms_ie_internal.hpp"
#include "transformations/utils/utils.hpp"

ov::pass::ConvertNMS9ToNMSIEInternal::ConvertNMS9ToNMSIEInternal() {
    MATCHER_SCOPE(ConvertNMS9ToNMSIEInternal);
@@ -110,18 +109,22 @@ ov::pass::ConvertNMS9ToNMSIEInternal::ConvertNMS9ToNMSIEInternal() {
        Output<Node> output_0 = nms_legacy->output(0);
        if (nms_9->output(0).get_element_type() != output_0.get_element_type()) {
            output_0 = std::make_shared<ov::op::v0::Convert>(output_0, nms_9->output(0).get_element_type());
            OPENVINO_SUPPRESS_DEPRECATED_START
            output_0.get_node_shared_ptr()->set_friendly_name(op::util::create_ie_output_name(nms_9->output(0)));
            OPENVINO_SUPPRESS_DEPRECATED_END
            const auto& prev_layer_0 = nms_9->output(0).get_node_shared_ptr();
            const std::string friendly_name_0 =
                prev_layer_0->get_friendly_name() +
                (prev_layer_0->get_output_size() != 1 ? "." + std::to_string(nms_9->output(0).get_index()) : "");
            output_0.get_node_shared_ptr()->set_friendly_name(friendly_name_0);
            new_ops.emplace_back(output_0.get_node_shared_ptr());
        }

        Output<Node> output_2 = nms_legacy->output(2);
        if (nms_9->output(2).get_element_type() != output_2.get_element_type()) {
            output_2 = std::make_shared<ov::op::v0::Convert>(output_2, nms_9->output(2).get_element_type());
            OPENVINO_SUPPRESS_DEPRECATED_START
            output_2.get_node_shared_ptr()->set_friendly_name(op::util::create_ie_output_name(nms_9->output(2)));
            OPENVINO_SUPPRESS_DEPRECATED_END
            const auto& prev_layer_2 = nms_9->output(2).get_node_shared_ptr();
            const std::string friendly_name_2 =
                prev_layer_2->get_friendly_name() +
                (prev_layer_2->get_output_size() != 1 ? "." + std::to_string(nms_9->output(2).get_index()) : "");
            output_2.get_node_shared_ptr()->set_friendly_name(friendly_name_2);
            new_ops.emplace_back(output_2.get_node_shared_ptr());
        }

8 changes: 4 additions & 4 deletions src/inference/src/dev/icompiled_model.cpp
@@ -8,7 +8,6 @@
#include "openvino/runtime/iasync_infer_request.hpp"
#include "openvino/runtime/iplugin.hpp"
#include "openvino/runtime/properties.hpp"
#include "transformations/utils/utils.hpp"

#if defined(OPENVINO_GNU_LIBC) && !defined(__ANDROID__)
# include <malloc.h>
@@ -84,9 +83,10 @@ ov::ICompiledModel::ICompiledModel(const std::shared_ptr<const ov::Model>& model
        for (const auto& result : model->get_results()) {
            auto fake_param = std::make_shared<ov::op::v0::Parameter>(result->get_output_element_type(0),
                                                                      result->get_output_partial_shape(0));
            OPENVINO_SUPPRESS_DEPRECATED_START
            const std::string res_name = ov::op::util::create_ie_output_name(result->input_value(0));
            OPENVINO_SUPPRESS_DEPRECATED_END
            const auto& prev_layer = result->input_value(0).get_node_shared_ptr();
            const std::string res_name =
                prev_layer->get_friendly_name() +
                (prev_layer->get_output_size() != 1 ? "." + std::to_string(result->input_value(0).get_index()) : "");
            fake_param->set_friendly_name(res_name);
Contributor (commenting on lines +86 to 90):

Suggested change (replace the added name-construction block with):

    fake_param->set_friendly_name(make_default_tensor_name(result->input_value(0)));

Can this helper be used instead? It will use a different port separator (: instead of .); can that be checked?

There are also util functions such as ov::util::set_tensors_names which allow setting tensor names for a model; could they be useful here?

@CuriousPanCake
Is it required to set this name at all?

Contributor Author:

Yes, the format matters. The code uses the . separator (e.g. "layer_name.0") specifically for IR v10 backward compatibility; see lines 40-41: add_operation_names = ir_version == 10.

The make_default_tensor_name function uses the : separator (defined in descriptor_tensor.hpp as port_separator). Changing to the : format would break backward compatibility for users with existing IR v10 models who rely on the dot-separated names for model.input("name.0") or model.output("name.0") lookups.

The same . format is used in model_reader.cpp:80-83 for the same reason (see the comments on lines 70-76 explaining the IR v10 use cases).

Contributor Author:

The . separator is intentional for IR v10 backward compatibility. Using make_default_tensor_name would change it to : (via descriptor::port_separator), breaking existing user code that accesses outputs by name like model.output("layer.0"). The same pattern is used in model_reader.cpp for the same reason.

            fake_param->set_element_type(result->get_element_type());
            fake_param->validate_and_infer_types();
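To make the compatibility point from the thread above concrete, here is a minimal sketch of the lookup that depends on the dot-separated names; the model, the Split node, and the names are hypothetical and not taken from the PR:

#include <memory>

#include "openvino/core/model.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/parameter.hpp"
#include "openvino/op/split.hpp"

int main() {
    // A multi-output producer feeding the model outputs, as in an IR v10 model.
    auto data = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{4});
    auto axis = ov::op::v0::Constant::create(ov::element::i64, ov::Shape{}, {0});
    auto split = std::make_shared<ov::op::v1::Split>(data, axis, 2);
    split->set_friendly_name("split");

    // Mimic what update_v10_model achieves: register the legacy
    // "<friendly_name>.<port>" strings as tensor names.
    split->output(0).get_tensor().add_names({"split.0"});
    split->output(1).get_tensor().add_names({"split.1"});

    auto model = std::make_shared<ov::Model>(split->outputs(), ov::ParameterVector{data});

    // Existing user code looks outputs up by the dot-separated name;
    // a ":"-separated default name would not be found here.
    auto port = model->output("split.0");
    (void)port;
    return 0;
}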
11 changes: 5 additions & 6 deletions src/inference/src/model_reader.cpp
@@ -12,7 +12,6 @@
#include "openvino/runtime/shared_buffer.hpp"
#include "openvino/util/common_util.hpp"
#include "openvino/util/file_util.hpp"
#include "transformations/utils/utils.hpp"

namespace {
ov::element::Type to_legacy_type(const ov::element::Type& legacy_type, bool input) {
@@ -77,11 +76,11 @@ void update_v10_model(std::shared_ptr<ov::Model>& model, bool frontendMode = fal
    // we need to add operation names as tensor names for inputs and outputs
    {
        for (const auto& result : model->get_results()) {
            OPENVINO_SUPPRESS_DEPRECATED_START
            // Note, upon removal of 'create_ie_output_name', just move it to this file as a local function
            // we still need to add operation names as tensor names for outputs for IR v10
            auto res_name = ov::op::util::create_ie_output_name(result->input_value(0));
            OPENVINO_SUPPRESS_DEPRECATED_END
            const auto& prev_layer = result->input_value(0).get_node_shared_ptr();
            auto res_name =
                prev_layer->get_friendly_name() + (prev_layer->get_output_size() != 1
                                                       ? "." + std::to_string(result->input_value(0).get_index())
                                                       : "");
            OPENVINO_ASSERT(leaf_names.find(res_name) == leaf_names.end() ||
                                result->output(0).get_names().find(res_name) != result->output(0).get_names().end(),
                            "Model operation names have collisions with tensor names.",