Skip to content

Commit 334c971

Browse files
bukejiyu, ceciliapeng2011, yuxu42
authored
[PDPD]Added and fixed Paddle convert. (openvinotoolkit#28347)
### Details: 1. *added proto::AttrType::SCALARS* 2. support 0D inputs and fixes: - argmax:[argmax](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/argmax_en.html#argmax) - assign_value - divide:[divide](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/divide_en.html#divide) - fill_any_like:[full_like](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/full_like_en.html#full-like) - fill_constant:[full](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/full_en.html#full) - set_value - slice:[slice](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/slice_en.html#slice) - tile:[tile](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/tile_en.html#tile) 3. Added converts: - abs:[abs](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/abs_en.html#abs) - argmin:[argmin](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/argmin_en.html#argmin) - scatter:[scatter](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/scatter_en.html#scatter) - scatter_nd_add:[scatter_nd_add](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/scatter_nd_add_en.html#scatter-nd-add) - take_along_axis:[take_along_axis](https://www.paddlepaddle.org.cn/documentation/docs/en/api/paddle/take_along_axis_en.html#take-along-axis) ### Tickets: --------- Co-authored-by: cecilia peng <cecilia.peng@intel.com> Co-authored-by: Yu Xu <yu.xu@intel.com>
1 parent 6929f7e commit 334c971

26 files changed

+808
-68
lines changed

src/frontends/paddle/src/decoder_proto.cpp

+31
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,37 @@ ov::Any DecoderProto::get_attribute(const std::string& name) const {
7070
return attrs[0].block_idx();
7171
case proto::AttrType::BLOCKS:
7272
return std::vector<std::int32_t>(attrs[0].blocks_idx().begin(), attrs[0].blocks_idx().end());
73+
case proto::AttrType::SCALARS: {
74+
auto scalars_size = attrs[0].scalars_size();
75+
if (scalars_size >= 1) {
76+
if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "LONG") {
77+
std::vector<int64_t> res;
78+
res.reserve(scalars_size);
79+
for (int i = 0; i < scalars_size; ++i) {
80+
res.push_back(attrs[0].scalars(i).i());
81+
}
82+
return res;
83+
} else if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "FLOAT64") {
84+
std::vector<double> res;
85+
res.reserve(scalars_size);
86+
for (int i = 0; i < scalars_size; ++i) {
87+
res.push_back(attrs[0].scalars(i).r());
88+
}
89+
return res;
90+
} else if (Scalar_Type_Name(attrs[0].scalars(0).type()) == "BOOLEAN") {
91+
std::vector<bool> res;
92+
res.reserve(scalars_size);
93+
for (int i = 0; i < scalars_size; ++i) {
94+
res.push_back(attrs[0].scalars(i).b());
95+
}
96+
return res;
97+
}
98+
} else {
99+
FRONT_END_GENERAL_CHECK(false,
100+
"Conversion from PaddlePaddle to OpenVINO is not supported 0 dims in SCALARS.");
101+
break;
102+
}
103+
}
73104
default:
74105
FRONT_END_GENERAL_CHECK(false, "Conversion from PaddlePaddle to OpenVINO data type is not supported.");
75106
}

src/frontends/paddle/src/op/abs.cpp

+20
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
// Copyright (C) 2018-2024 Intel Corporation
2+
// SPDX-License-Identifier: Apache-2.0
3+
//
4+
5+
#include "openvino/frontend/paddle/node_context.hpp"
6+
#include "openvino/opsets/opset6.hpp"
7+
8+
namespace ov {
9+
namespace frontend {
10+
namespace paddle {
11+
namespace op {
12+
NamedOutputs abs(const NodeContext& node) {
13+
auto data = node.get_input("X");
14+
return node.default_single_output_mapping({std::make_shared<ov::opset6::Abs>(data)}, {"Out"});
15+
}
16+
17+
} // namespace op
18+
} // namespace paddle
19+
} // namespace frontend
20+
} // namespace ov

src/frontends/paddle/src/op/argmax.cpp

+8-10
Original file line numberDiff line numberDiff line change
@@ -12,31 +12,29 @@ namespace op {
1212
// Converts Paddle `arg_max` to OpenVINO TopK(k=1, mode="max") plus a Squeeze
// of the selected axis; `dtype` chooses the index element type of the result.
// When `flatten` is set the input is first reshaped to 1-D and the argmax is
// taken over the whole tensor.
NamedOutputs argmax(const NodeContext& node) {
    auto data = node.get_input("X");
    bool flatten = node.get_attribute<bool>("flatten");
    auto dtype = node.get_attribute<ov::element::Type>("dtype");
    const Output<ov::Node> k = ov::opset6::Constant::create(dtype, {}, {1});

    if (!flatten) {
        auto axis = node.get_attribute<int64_t>("axis");
        // Use a signed element type here: Paddle permits a negative `axis`,
        // which would wrap to a huge value if stored in an unsigned constant.
        const auto axis_to_remove = ov::opset6::Constant::create(element::i64, Shape{}, {axis});
        auto node_topk = std::make_shared<ov::opset6::TopK>(data, k, axis, "max", "index", dtype);
        const auto reshaped_indices = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1), axis_to_remove);
        return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(reshaped_indices, dtype)},
                                                  {"Out"});
    } else {
        int64_t axis = 0;
        const Output<ov::Node> reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1});
        auto node_reshape = std::make_shared<ov::opset6::Reshape>(data, reshape_flatten, true);
        auto node_topk = std::make_shared<ov::opset6::TopK>(node_reshape, k, axis, "max", "index", dtype);
        const auto output_info = node.get_output_port_infos("Out");
        size_t output_size = output_info[0].second.size();
        if (output_size == 0) {
            // 0-D output expected: squeeze away the remaining unit dimension.
            auto out = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1));
            return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(out, dtype)}, {"Out"});
        } else {
            return node.default_single_output_mapping(
                {std::make_shared<ov::opset6::Convert>(node_topk->output(1), dtype)},
                {"Out"});
        }
    }
}
+46
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,46 @@
1+
// Copyright (C) 2018-2024 Intel Corporation
2+
// SPDX-License-Identifier: Apache-2.0
3+
//
4+
5+
#include "openvino/frontend/paddle/node_context.hpp"
6+
#include "openvino/opsets/opset6.hpp"
7+
8+
namespace ov {
9+
namespace frontend {
10+
namespace paddle {
11+
namespace op {
12+
// Converts Paddle `arg_min` to OpenVINO TopK(k=1, mode="min") plus a Squeeze
// of the selected axis; `dtype` chooses the index element type of the result.
// When `flatten` is set the input is first reshaped to 1-D and the argmin is
// taken over the whole tensor.
NamedOutputs argmin(const NodeContext& node) {
    auto data = node.get_input("X");
    bool flatten = node.get_attribute<bool>("flatten");
    auto dtype = node.get_attribute<ov::element::Type>("dtype");
    const Output<ov::Node> k = ov::opset6::Constant::create(ov::element::i64, {}, {1});

    if (!flatten) {
        auto axis = node.get_attribute<int64_t>("axis");
        // Use a signed element type here: Paddle permits a negative `axis`,
        // which would wrap to a huge value if stored in an unsigned constant.
        const auto axis_to_remove = ov::opset6::Constant::create(element::i64, Shape{}, {axis});
        auto node_topk = std::make_shared<ov::opset6::TopK>(data, k, axis, "min", "index", dtype);
        const auto reshaped_indices = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1), axis_to_remove);
        return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(reshaped_indices, dtype)},
                                                  {"Out"});
    } else {
        int64_t axis = 0;
        const Output<ov::Node> reshape_flatten = ov::opset6::Constant::create(ov::element::i64, {1}, {-1});
        auto node_reshape = std::make_shared<ov::opset6::Reshape>(data, reshape_flatten, true);
        auto node_topk = std::make_shared<ov::opset6::TopK>(node_reshape, k, axis, "min", "index", dtype);
        const auto output_info = node.get_output_port_infos("Out");
        size_t output_size = output_info[0].second.size();
        if (output_size == 0) {
            // 0-D output expected: squeeze away the remaining unit dimension.
            auto out = std::make_shared<ov::opset6::Squeeze>(node_topk->output(1));
            return node.default_single_output_mapping({std::make_shared<ov::opset6::Convert>(out, dtype)}, {"Out"});
        } else {
            return node.default_single_output_mapping(
                {std::make_shared<ov::opset6::Convert>(node_topk->output(1), dtype)},
                {"Out"});
        }
    }
}
42+
43+
} // namespace op
44+
} // namespace paddle
45+
} // namespace frontend
46+
} // namespace ov

src/frontends/paddle/src/op/assign_value.cpp

+32-5
Original file line numberDiff line numberDiff line change
@@ -15,12 +15,35 @@ NamedOutputs assign_value(const NodeContext& node) {
1515

1616
switch (dtype) {
1717
case element::i32: {
18-
auto values = node.get_attribute<std::vector<int32_t>>("int32_values");
19-
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
18+
if (node.has_attribute("int32_values")) {
19+
auto values = node.get_attribute<std::vector<int32_t>>("int32_values");
20+
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
21+
} else {
22+
auto values = node.get_attribute<std::vector<int64_t>>("values");
23+
std::vector<int32_t> int32_values(values.size());
24+
std::transform(values.begin(), values.end(), int32_values.begin(), [](int64_t v) {
25+
return static_cast<int32_t>(v);
26+
});
27+
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, int32_values)};
28+
}
2029
break;
2130
}
2231
case element::f32: {
23-
std::vector<float> values = node.get_attribute<std::vector<float>>("fp32_values");
32+
if (node.has_attribute("fp32_values")) {
33+
std::vector<float> values = node.get_attribute<std::vector<float>>("fp32_values");
34+
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
35+
} else {
36+
auto values = node.get_attribute<std::vector<double>>("values");
37+
std::vector<float> values_f32(values.size());
38+
std::transform(values.begin(), values.end(), values_f32.begin(), [](double v) {
39+
return static_cast<float>(v);
40+
});
41+
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values_f32)};
42+
}
43+
break;
44+
}
45+
case element::f64: {
46+
auto values = node.get_attribute<std::vector<double>>("values");
2447
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
2548
break;
2649
}
@@ -30,12 +53,16 @@ NamedOutputs assign_value(const NodeContext& node) {
3053
break;
3154
}
3255
case element::i64: {
33-
auto values = node.get_attribute<std::vector<int64_t>>("int64_values");
56+
auto values = node.has_attribute("int64_values") ? node.get_attribute<std::vector<int64_t>>("int64_values")
57+
: node.get_attribute<std::vector<int64_t>>("values");
3458
const_node = {opset6::Constant::create(dtype, Shape{shape.begin(), shape.end()}, values)};
3559
break;
3660
}
3761
default: {
38-
PADDLE_OP_CHECK(node, false, "assign_value only supports int32, int64, float32, bool");
62+
std::ostringstream oss;
63+
oss << "assign_value only supports int32, int64, float32, float64, bool, but receive dtype["
64+
<< dtype.get_type_name() << "]";
65+
PADDLE_OP_CHECK(node, false, oss.str());
3966
break;
4067
}
4168
}

src/frontends/paddle/src/op/atan2.cpp

+85
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,85 @@
1+
// Copyright (C) 2018-2024 Intel Corporation
2+
// SPDX-License-Identifier: Apache-2.0
3+
//
4+
#include "openvino/frontend/paddle/node_context.hpp"
5+
#include "openvino/op/add.hpp"
6+
#include "openvino/op/atan.hpp"
7+
#include "openvino/op/constant.hpp"
8+
#include "openvino/op/convert_like.hpp"
9+
#include "openvino/op/divide.hpp"
10+
#include "openvino/op/equal.hpp"
11+
#include "openvino/op/greater.hpp"
12+
#include "openvino/op/greater_eq.hpp"
13+
#include "openvino/op/less.hpp"
14+
#include "openvino/op/logical_and.hpp"
15+
#include "openvino/op/multiply.hpp"
16+
#include "openvino/op/select.hpp"
17+
#include "openvino/op/subtract.hpp"
18+
#include "openvino/opsets/opset6.hpp"
19+
using namespace std;
20+
using namespace ov::op;
21+
22+
namespace ov {
23+
namespace frontend {
24+
namespace paddle {
25+
26+
template <typename T>
27+
ov::Output<ov::Node> create_same_type_const_scalar(const ov::Output<ov::Node>& same_type_output, const T& value) {
28+
if (same_type_output.get_element_type().is_static()) {
29+
return std::make_shared<ov::op::v0::Constant>(same_type_output.get_element_type(), ov::Shape{}, value);
30+
} else {
31+
ov::Output<ov::Node> const_res =
32+
std::make_shared<ov::op::v0::Constant>(ov::element::from<T>(), ov::Shape{}, value);
33+
const_res = std::make_shared<ov::op::v1::ConvertLike>(const_res, same_type_output);
34+
return const_res;
35+
}
36+
}
37+
38+
namespace op {
39+
NamedOutputs atan2(const NodeContext& node) {
40+
// default_op_checks(node, 2, {"Atan2"});
41+
auto y = node.get_input("X1");
42+
auto x = node.get_input("X2");
43+
44+
// handle the first condition : x>0
45+
auto div_y_x = make_shared<v1::Divide>(y, x);
46+
auto atan = make_shared<v0::Atan>(div_y_x);
47+
auto const_zero = create_same_type_const_scalar<int32_t>(x, 0);
48+
auto result = atan->output(0);
49+
50+
// handle the second condition : x<0 && y>=0
51+
auto const_pi = create_same_type_const_scalar<double>(x, std::atan(1.0) * 4);
52+
auto is_x_negative = make_shared<v1::Less>(x, const_zero);
53+
auto y_non_negative = make_shared<v1::GreaterEqual>(y, const_zero);
54+
auto cond1 = make_shared<v1::LogicalAnd>(is_x_negative, y_non_negative);
55+
auto atan_y_x_plus_pi = make_shared<v1::Add>(atan, const_pi);
56+
result = make_shared<v1::Select>(cond1, atan_y_x_plus_pi, result);
57+
58+
// handle the third condition : x<0 && y<0
59+
auto is_y_negative = make_shared<v1::Less>(y, const_zero);
60+
auto cond2 = make_shared<v1::LogicalAnd>(is_x_negative, is_y_negative);
61+
auto atan_y_x_minus_pi = make_shared<v1::Subtract>(atan, const_pi);
62+
result = make_shared<v1::Select>(cond2, atan_y_x_minus_pi, result);
63+
64+
// handle the fourth condition : x=0 && y>0
65+
auto is_x_zero = make_shared<v1::Equal>(x, const_zero);
66+
auto is_y_positive = make_shared<v1::Greater>(y, const_zero);
67+
auto cond3 = make_shared<v1::LogicalAnd>(is_x_zero, is_y_positive);
68+
auto const_two = create_same_type_const_scalar<int32_t>(x, 2);
69+
auto pi_div_two = make_shared<v1::Divide>(const_pi, const_two);
70+
result = make_shared<v1::Select>(cond3, pi_div_two, result);
71+
72+
// handle the fifth condition : x=0 && y<0
73+
auto cond4 = make_shared<v1::LogicalAnd>(is_x_zero, is_y_negative);
74+
auto const_minus_two = create_same_type_const_scalar<int32_t>(x, -2);
75+
auto pi_div_minus_two = make_shared<v1::Divide>(const_pi, const_minus_two);
76+
result = make_shared<v1::Select>(cond4, pi_div_two, result);
77+
NamedOutputs named_outputs;
78+
named_outputs["Out"] = {result};
79+
return named_outputs;
80+
}
81+
82+
} // namespace op
83+
} // namespace paddle
84+
} // namespace frontend
85+
} // namespace ov

src/frontends/paddle/src/op/elementwise_ops.cpp

+4
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,8 @@
44

55
#include "elementwise_ops.hpp"
66

7+
#include "op_utils.hpp"
8+
79
namespace ov {
810
namespace frontend {
911
namespace paddle {
@@ -72,6 +74,8 @@ NamedOutputs elementwise_floordiv(const NodeContext& node_context) {
7274
if (pd_version >= 2005000 || pd_version == 0) {
7375
python_div = true;
7476
}
77+
x = get_tensor_safe(x);
78+
y = get_tensor_safe(y);
7579
return node_context.default_single_output_mapping(
7680
{std::make_shared<default_opset::Divide>(x,
7781
y,

src/frontends/paddle/src/op/fill_any_like.cpp

+3-2
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,15 @@
33
//
44

55
#include "default_opset.hpp"
6+
#include "op_utils.hpp"
67
#include "openvino/frontend/paddle/node_context.hpp"
78

89
namespace ov {
910
namespace frontend {
1011
namespace paddle {
1112
namespace op {
1213
NamedOutputs fill_any_like(const NodeContext& node) {
13-
const auto x = node.get_input("X");
14+
auto x = node.get_input("X");
1415
auto dtype = node.get_attribute<ov::element::Type>("dtype", element::undefined);
1516
const auto value = node.get_attribute<float>("value");
1617
if (dtype == element::undefined) {
@@ -25,8 +26,8 @@ NamedOutputs fill_any_like(const NodeContext& node) {
2526
});
2627
PADDLE_OP_CHECK(node, valid_type, "Invalid dtype! Fill_any_like supports boolean, i16, i32, i64, f16, f32, f64");
2728
const auto value_node = default_opset::Constant::create(dtype, {1}, {value});
29+
x = get_tensor_safe(x);
2830
const auto shape_node = std::make_shared<default_opset::ShapeOf>(x);
29-
3031
return node.default_single_output_mapping({std::make_shared<default_opset::Broadcast>(value_node, shape_node)},
3132
{"Out"});
3233
}

src/frontends/paddle/src/op/fill_constant.cpp

+21-9
Original file line numberDiff line numberDiff line change
@@ -16,25 +16,31 @@ NamedOutputs fill_constant(const NodeContext& node) {
1616
Output<Node> shape_node;
1717
if (node.has_input("ValueTensor")) {
1818
value_node = node.get_input("ValueTensor");
19+
} else if (dtype == element::boolean) {
20+
bool value = static_cast<bool>(node.get_attribute<float>("value"));
21+
value_node = opset6::Constant::create(dtype, {}, {value});
1922
} else if (dtype == element::i32) {
2023
int32_t value = static_cast<int32_t>(node.get_attribute<float>("value"));
21-
value_node = opset6::Constant::create(dtype, {1}, {value});
24+
value_node = opset6::Constant::create(dtype, {}, {value});
25+
} else if (dtype == element::f16) {
26+
float value = static_cast<ov::float16>(node.get_attribute<float>("value"));
27+
value_node = opset6::Constant::create(dtype, {}, {value});
2228
} else if (dtype == element::f32) {
2329
float value = node.get_attribute<float>("value");
24-
value_node = opset6::Constant::create(dtype, {1}, {value});
30+
value_node = opset6::Constant::create(dtype, {}, {value});
31+
} else if (dtype == element::f64) {
32+
float f32_value = node.get_attribute<float>("value");
33+
double value = static_cast<double>(f32_value);
34+
value_node = opset6::Constant::create(dtype, {}, {value});
2535
} else if (dtype == element::i64) {
2636
int64_t value = static_cast<int64_t>(node.get_attribute<float>("value"));
27-
value_node = opset6::Constant::create(dtype, {1}, {value});
37+
value_node = opset6::Constant::create(dtype, {}, {value});
2838
} else {
2939
PADDLE_OP_CHECK(node, false, "fill_constant only supports i32, f32, i64");
3040
}
3141

32-
if (shape.empty()) {
33-
shape.emplace_back(1);
34-
}
35-
3642
PADDLE_OP_CHECK(node,
37-
shape.size() > 0 || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"),
43+
node.has_attribute("shape") || node.has_input("ShapeTensor") || node.has_input("ShapeTensorList"),
3844
"fill_constant shape not set");
3945

4046
if (node.has_input("ShapeTensor")) {
@@ -50,7 +56,13 @@ NamedOutputs fill_constant(const NodeContext& node) {
5056
}
5157
shape_node = Output<Node>{std::make_shared<opset6::Concat>(shape_tensor_list, 0)};
5258
} else {
53-
shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape);
59+
if (shape.empty()) {
60+
NamedOutputs named_outputs;
61+
named_outputs["Out"] = {value_node};
62+
return named_outputs;
63+
} else {
64+
shape_node = opset6::Constant::create(element::i64, {shape.size()}, shape);
65+
}
5466
}
5567

5668
return node.default_single_output_mapping({std::make_shared<ov::opset6::Broadcast>(value_node, shape_node)},

0 commit comments

Comments
 (0)