|
| 1 | +// Copyright (C) 2018-2023 Intel Corporation |
| 2 | +// SPDX-License-Identifier: Apache-2.0 |
| 3 | +// |
| 4 | + |
| 5 | +#pragma once |
| 6 | + |
#include <algorithm>
#include <memory>
#include <string>
#include <utility>

#include "ie_ngraph_utils.hpp"
#include "ie_remote_blob.hpp"
#include "ie_remote_context.hpp"
#include "openvino/runtime/iremote_context.hpp"
| 11 | + |
| 12 | +namespace ov { |
| 13 | +namespace legacy_convert { |
| 14 | + |
/// @brief Wraps a legacy InferenceEngine::RemoteContext into the 2.0 API ov::IRemoteContext interface.
/// @param context Legacy remote context to convert; implementation is defined elsewhere in the library.
/// @return Converted context together with the shared object that keeps its plugin library loaded.
INFERENCE_ENGINE_API_CPP(ov::SoPtr<ov::IRemoteContext>)
convert_remote_context(const std::shared_ptr<InferenceEngine::RemoteContext>& context);
/// @brief Retrieves the underlying hardware blob from a (possibly wrapping) blob.
/// Non-owning in/out raw pointers; ownership stays with the caller. Defined elsewhere in the library.
INFERENCE_ENGINE_API_CPP(ie::Blob*) get_hardware_blob(ie::Blob* blob);
| 18 | + |
| 19 | +class INFERENCE_ENGINE_API_CLASS(TensorHolder) { |
| 20 | +public: |
| 21 | + TensorHolder(ov::SoPtr<ov::ITensor> tensor) : _tensor(tensor) {} |
| 22 | + |
| 23 | + const ov::SoPtr<ov::ITensor>& get_tensor() const { |
| 24 | + return _tensor; |
| 25 | + } |
| 26 | + |
| 27 | +private: |
| 28 | + ov::SoPtr<ov::ITensor> _tensor; |
| 29 | +}; |
| 30 | + |
| 31 | +} // namespace legacy_convert |
| 32 | + |
| 33 | +/** |
| 34 | + * @brief Tensor what contains InferenceEngine::RemoteBlob inside |
| 35 | + * Blob owns the memory |
| 36 | + */ |
| 37 | +class INFERENCE_ENGINE_API_CLASS(RemoteBlobTensor) : public IRemoteTensor { |
| 38 | + mutable element::Type m_type; |
| 39 | + mutable Shape m_shape; |
| 40 | + mutable Strides m_strides; |
| 41 | + mutable ov::AnyMap m_properties; |
| 42 | + mutable std::string m_dev_name; |
| 43 | + |
| 44 | +public: |
| 45 | + std::shared_ptr<ie::RemoteBlob> blob; |
| 46 | + |
| 47 | + RemoteBlobTensor(const InferenceEngine::RemoteBlob::Ptr& blob) : blob{blob} { |
| 48 | + OPENVINO_ASSERT(blob); |
| 49 | + m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); |
| 50 | + } |
| 51 | + |
| 52 | + const element::Type& get_element_type() const override { |
| 53 | + m_type = InferenceEngine::details::convertPrecision(blob->getTensorDesc().getPrecision()); |
| 54 | + return m_type; |
| 55 | + } |
| 56 | + |
| 57 | + void set_shape(ov::Shape shape) override { |
| 58 | + blob->setShape({shape.begin(), shape.end()}); |
| 59 | + } |
| 60 | + |
| 61 | + const Shape& get_shape() const override { |
| 62 | + m_shape = blob->getTensorDesc().getBlockingDesc().getBlockDims(); |
| 63 | + return m_shape; |
| 64 | + } |
| 65 | + |
| 66 | + const Strides& get_strides() const override { |
| 67 | + OPENVINO_ASSERT(get_element_type().bitwidth() >= 8, |
| 68 | + "Could not get strides for types with bitwidths less then 8 bit. Tensor type: ", |
| 69 | + get_element_type()); |
| 70 | + const auto& element_strides = blob->getTensorDesc().getBlockingDesc().getStrides(); |
| 71 | + const size_t elem_size = get_element_type().size(); |
| 72 | + m_strides.clear(); |
| 73 | + m_strides.resize(element_strides.size()); |
| 74 | + std::transform(element_strides.begin(), element_strides.end(), m_strides.begin(), [&elem_size](size_t stride) { |
| 75 | + return stride * elem_size; |
| 76 | + }); |
| 77 | + return m_strides; |
| 78 | + } |
| 79 | + |
| 80 | + size_t get_size() const override { |
| 81 | + return blob->size(); |
| 82 | + } |
| 83 | + |
| 84 | + size_t get_byte_size() const override { |
| 85 | + return blob->byteSize(); |
| 86 | + } |
| 87 | + |
| 88 | + const AnyMap& get_properties() const override { |
| 89 | + m_properties = blob->getParams(); |
| 90 | + return m_properties; |
| 91 | + } |
| 92 | + |
| 93 | + const std::string& get_device_name() const override { |
| 94 | + m_dev_name = blob->getDeviceName(); |
| 95 | + return m_dev_name; |
| 96 | + } |
| 97 | +}; |
| 98 | + |
| 99 | +/** |
| 100 | + * @brief Create InferenceEngine::RemoteBlob from the Tensor |
| 101 | + */ |
| 102 | +class INFERENCE_ENGINE_API_CLASS(TensorRemoteBlob) : public ie::RemoteBlob, public ov::legacy_convert::TensorHolder { |
| 103 | +public: |
| 104 | + TensorRemoteBlob(const ov::SoPtr<ITensor>& tensor, ie::TensorDesc desc) |
| 105 | + : ie::RemoteBlob{desc}, |
| 106 | + ov::legacy_convert::TensorHolder(tensor) { |
| 107 | + OPENVINO_ASSERT(this->get_tensor()); |
| 108 | + } |
| 109 | + std::shared_ptr<ov::IRemoteTensor> cast_tensor() const { |
| 110 | + auto remote = std::dynamic_pointer_cast<ov::IRemoteTensor>(get_tensor()._ptr); |
| 111 | + OPENVINO_ASSERT(remote); |
| 112 | + return remote; |
| 113 | + } |
| 114 | + AnyMap getParams() const override { |
| 115 | + return cast_tensor()->get_properties(); |
| 116 | + } |
| 117 | + std::string getDeviceName() const noexcept override { |
| 118 | + try { |
| 119 | + return cast_tensor()->get_device_name(); |
| 120 | + } catch (...) { |
| 121 | + return {}; |
| 122 | + } |
| 123 | + } |
| 124 | + std::shared_ptr<ie::RemoteContext> getContext() const noexcept override { |
| 125 | + return {}; |
| 126 | + } |
| 127 | + |
| 128 | + void allocate() noexcept override {} |
| 129 | + bool deallocate() noexcept override { |
| 130 | + return true; |
| 131 | + } |
| 132 | + ie::LockedMemory<void> buffer() noexcept override { |
| 133 | + return {nullptr, nullptr, 0}; |
| 134 | + } |
| 135 | + ie::LockedMemory<const void> cbuffer() const noexcept override { |
| 136 | + return {nullptr, nullptr, 0}; |
| 137 | + } |
| 138 | + ie::LockedMemory<void> rwmap() noexcept override { |
| 139 | + return {nullptr, nullptr, 0}; |
| 140 | + } |
| 141 | + ie::LockedMemory<const void> rmap() const noexcept override { |
| 142 | + return {nullptr, nullptr, 0}; |
| 143 | + } |
| 144 | + ie::LockedMemory<void> wmap() noexcept override { |
| 145 | + return {nullptr, nullptr, 0}; |
| 146 | + } |
| 147 | + const std::shared_ptr<ie::IAllocator>& getAllocator() const noexcept override { |
| 148 | + return m_allocator; |
| 149 | + } |
| 150 | + void* getHandle() const noexcept override { |
| 151 | + return nullptr; |
| 152 | + } |
| 153 | + |
| 154 | + using TensorHolder::get_tensor; |
| 155 | + |
| 156 | +private: |
| 157 | + std::shared_ptr<ie::IAllocator> m_allocator; |
| 158 | +}; |
| 159 | + |
| 160 | +} // namespace ov |
| 161 | + |
| 162 | +namespace InferenceEngine { |
| 163 | + |
/**
 * @brief Adapts a legacy InferenceEngine::RemoteContext to the 2.0 API
 * ov::IRemoteContext interface. Method bodies are defined elsewhere.
 */
class INFERENCE_ENGINE_API_CLASS(IRemoteContextWrapper) : public ov::IRemoteContext {
private:
    std::shared_ptr<InferenceEngine::RemoteContext> m_context;
    // Caches for the const getters that return by reference, hence mutable.
    mutable std::string m_name;
    mutable ov::AnyMap m_params;

public:
    /// @param context Legacy remote context to wrap; stored as-is (shared ownership).
    IRemoteContextWrapper(const std::shared_ptr<InferenceEngine::RemoteContext>& context) : m_context(context) {}
    virtual ~IRemoteContextWrapper() = default;
    /// @return The wrapped legacy context.
    const std::shared_ptr<InferenceEngine::RemoteContext>& get_context();
    /// @return Device name reported by the wrapped context.
    const std::string& get_device_name() const override;

    /// @return Context parameters of the wrapped context.
    const ov::AnyMap& get_property() const override;

    /// @brief Creates a remote tensor on the wrapped context's device.
    ov::SoPtr<ov::IRemoteTensor> create_tensor(const ov::element::Type& type,
                                               const ov::Shape& shape,
                                               const ov::AnyMap& params = {}) override;
    /// @brief Creates a host-accessible tensor associated with this context.
    ov::SoPtr<ov::ITensor> create_host_tensor(const ov::element::Type type, const ov::Shape& shape) override;
};
| 183 | + |
| 184 | +} // namespace InferenceEngine |