Skip to content

Commit 02e75ff

Browse files
authored Feb 8, 2024
Remove legacy test utils (#871)
1 parent def3bdd commit 02e75ff

File tree

2 files changed

+46
-42
lines changed

2 files changed

+46
-42
lines changed
 

‎modules/nvidia_plugin/tests/unit/CMakeLists.txt

+4
Original file line numberDiff line numberDiff line change
@@ -37,6 +37,10 @@ ov_add_test_target(
3737
openvino::gmock
3838
openvino::ov_models
3939
openvino::commonTestUtils
40+
openvino::funcSharedTests
41+
INCLUDES
42+
PRIVATE
43+
"${OpenVINO_SOURCE_DIR}/src/plugins/template/include"
4044
ADD_CPPLINT
4145
ADD_CLANG_FORMAT
4246
LABELS

‎modules/nvidia_plugin/tests/unit/cuda_multi_graph_ti_test.cpp

+42-42
Original file line numberDiff line numberDiff line change
@@ -4,12 +4,21 @@
44

55
#include <gtest/gtest.h>
66

7+
#include "common_test_utils/node_builders/eltwise.hpp"
8+
#include "common_test_utils/node_builders/gru_cell.hpp"
9+
#include "common_test_utils/test_constants.hpp"
710
#include "cuda_graph_topology_runner.hpp"
811
#include "cuda_simple_execution_delegator.hpp"
9-
#include "ops/parameter.hpp"
10-
#include "ops/result.hpp"
11-
#include "ov_models/builders.hpp"
12+
#include "functional_test_utils/ov_plugin_cache.hpp"
13+
#include "openvino/op/concat.hpp"
14+
#include "openvino/op/constant.hpp"
15+
#include "openvino/op/parameter.hpp"
16+
#include "openvino/op/result.hpp"
17+
#include "openvino/op/split.hpp"
18+
#include "openvino/op/squeeze.hpp"
19+
#include "openvino/op/unsqueeze.hpp"
1220
#include "ov_models/utils/data_utils.hpp"
21+
#include "template/properties.hpp"
1322

1423
using namespace ov::nvidia_gpu;
1524
using namespace testing;
@@ -41,49 +50,38 @@ void generateInput(ov::Tensor& tensor, int to = TO, int from = FROM, int seed =
4150
std::generate(ptr, ptr + tensor.get_size(), [&dist, &engine]() { return CalcType{dist(engine)}; });
4251
}
4352

44-
std::vector<std::vector<CalcType>> calcRefs(std::shared_ptr<ov::Model> model,
45-
const std::vector<std::shared_ptr<ov::Tensor>>& inputs) {
53+
ov::TensorVector calcRefs(std::shared_ptr<ov::Model> model, const std::vector<std::shared_ptr<ov::Tensor>>& inputs) {
4654
auto refModel = model->clone();
4755

48-
auto referenceInputs = std::vector<std::vector<uint8_t>>(inputs.size());
49-
auto refInputsTypes = std::vector<ov::element::Type>(inputs.size());
50-
for (std::size_t i = 0; i < inputs.size(); ++i) {
51-
const auto& input = inputs[i];
52-
const auto inputSize = input->get_byte_size();
56+
std::shared_ptr<ov::Core> core = ov::test::utils::PluginCache::get().core();
5357

54-
auto& referenceInput = referenceInputs[i];
55-
referenceInput.resize(inputSize);
56-
57-
const auto* buffer = static_cast<const uint8_t*>(input->data());
58-
std::copy(buffer, buffer + inputSize, referenceInput.data());
59-
60-
refInputsTypes[i] = CALC_ELEMENT_TYPE;
58+
auto compiled_model_ref = core->compile_model(
59+
refModel, ov::test::utils::DEVICE_TEMPLATE, {{ov::template_plugin::disable_transformations(true)}});
60+
auto infer_request_ref = compiled_model_ref.create_infer_request();
61+
auto params = refModel->get_parameters();
62+
OPENVINO_ASSERT(params.size() == inputs.size());
63+
for (int i = 0; i < params.size(); i++) {
64+
infer_request_ref.set_tensor(params[i]->get_default_output(), *inputs[i]);
6165
}
66+
infer_request_ref.infer();
6267

63-
const auto expectedOutputs = ngraph::helpers::interpreterFunction(refModel, referenceInputs, refInputsTypes);
64-
65-
std::vector<std::vector<CalcType>> res(expectedOutputs.size());
66-
for (std::size_t i = 0; i < expectedOutputs.size(); ++i) {
67-
EXPECT_EQ(expectedOutputs[i].first, CALC_ELEMENT_TYPE);
68-
const auto& expOut = expectedOutputs[i].second;
69-
auto& resOut = res[i];
70-
const auto resOutSize = expOut.size() / sizeof(CalcType);
71-
resOut.resize(resOutSize);
72-
73-
const auto* buffer = static_cast<const CalcType*>(static_cast<const void*>(expOut.data()));
74-
std::copy(buffer, buffer + resOutSize, resOut.data());
68+
ov::TensorVector outputs;
69+
for (const auto& output : refModel->outputs()) {
70+
outputs.push_back(infer_request_ref.get_tensor(output));
7571
}
76-
return res;
72+
73+
return outputs;
7774
}
7875

79-
void validateOutput(const ov::Tensor& tensor, const std::vector<CalcType>& refVector, float threshold) {
76+
void validateOutput(const ov::Tensor& tensor, const ov::Tensor& ref_tensor, float threshold) {
8077
EXPECT_EQ(tensor.get_element_type(), CALC_ELEMENT_TYPE);
78+
EXPECT_EQ(ref_tensor.get_element_type(), CALC_ELEMENT_TYPE);
8179
const auto size = tensor.get_size();
82-
EXPECT_EQ(size, refVector.size());
80+
EXPECT_EQ(size, ref_tensor.get_size());
8381
const auto* ptr = getConstPtr(tensor);
84-
bool areEqual = std::equal(ptr, ptr + size, refVector.cbegin(), [threshold](auto val1, auto val2) {
85-
return std::abs(val1 - val2) < threshold;
86-
});
82+
const auto* ref_ptr = getConstPtr(ref_tensor);
83+
bool areEqual = std::equal(
84+
ptr, ptr + size, ref_ptr, [threshold](auto val1, auto val2) { return std::abs(val1 - val2) < threshold; });
8785
EXPECT_TRUE(areEqual);
8886
}
8987

@@ -121,7 +119,7 @@ class GRUTI {
121119
auto squeeze = std::make_shared<ov::op::v0::Squeeze>(bodyParams[0], axis);
122120
ov::OutputVector out_vector = {squeeze, bodyParams[1]};
123121
auto gru_cell =
124-
ngraph::builder::makeGRU(out_vector, WRB, hidden_size, {"sigmoid", "tanh"}, {}, {}, clip, false);
122+
ov::test::utils::make_gru(out_vector, WRB, hidden_size, {"sigmoid", "tanh"}, {}, {}, clip, false);
125123
auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(gru_cell->output(0), axis);
126124
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(gru_cell->output(0)),
127125
std::make_shared<ov::op::v0::Result>(unsqueeze)};
@@ -202,10 +200,12 @@ class SplitConcatAddTI {
202200
}
203201

204202
auto squeeze = std::make_shared<ov::op::v0::Squeeze>(bodyParams[0], axisConstant);
205-
const auto split = ngraph::builder::makeSplit(squeeze, CALC_ELEMENT_TYPE, 2, 1);
203+
const auto split_axis_op =
204+
std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, std::vector<int64_t>{1});
205+
const auto split = std::make_shared<ov::op::v1::Split>(squeeze, split_axis_op, 2);
206206
const auto concat =
207207
std::make_shared<ov::op::v0::Concat>(ov::OutputVector{split->output(0), split->output(1)}, 1);
208-
const auto add0 = ngraph::builder::makeEltwise(concat->output(0), bodyParams[1], EltwiseTypes::ADD);
208+
const auto add0 = ov::test::utils::make_eltwise(concat->output(0), bodyParams[1], EltwiseTypes::ADD);
209209

210210
auto unsqueeze = std::make_shared<ov::op::v0::Unsqueeze>(add0->output(0), axisConstant);
211211
ov::ResultVector results{std::make_shared<ov::op::v0::Result>(add0->output(0)),
@@ -299,13 +299,13 @@ class CudaMultiGraphTest : public Test {
299299

300300
void run() { runner_.Run(*inferRequestContext_, deviceMemBlock_); }
301301

302-
void calcRefs() { refOutputs_ = ::calcRefs(model_, inputTensors_); }
302+
void calcRefs() { refOutputTensors_ = ::calcRefs(model_, inputTensors_); }
303303

304304
void validate(float threshold = THRESHOLD) {
305305
const auto size = outputTensors_.size();
306-
EXPECT_EQ(size, refOutputs_.size());
306+
EXPECT_EQ(size, refOutputTensors_.size());
307307
for (std::size_t i = 0; i < size; ++i) {
308-
validateOutput(*outputTensors_[i], refOutputs_[i], THRESHOLD);
308+
validateOutput(*outputTensors_[i], refOutputTensors_[i], threshold);
309309
}
310310
}
311311

@@ -349,6 +349,7 @@ class CudaMultiGraphTest : public Test {
349349
SimpleExecutionDelegator simpleExecutionDelegator_{};
350350
std::vector<std::shared_ptr<ov::Tensor>> inputTensors_{populateTensors(model_->inputs())};
351351
std::vector<std::shared_ptr<ov::Tensor>> outputTensors_{populateTensors(model_->outputs())};
352+
ov::TensorVector refOutputTensors_;
352353
std::map<std::string, std::size_t> inputIndices_{populateInputIndices(model_)};
353354
std::map<std::string, std::size_t> outputIndices_{populateOutputIndices(model_)};
354355
std::unique_ptr<InferenceRequestContext> inferRequestContext_ =
@@ -363,7 +364,6 @@ class CudaMultiGraphTest : public Test {
363364
false);
364365
DeviceMemBlock deviceMemBlock_{runner_.GetSubGraph().memoryManager()->mutableTensorsMemoryModel()};
365366

366-
std::vector<std::vector<CalcType>> refOutputs_;
367367
int currentSeed_ = SEED;
368368
};
369369

0 commit comments

Comments
 (0)