
Commit 4482fb1

Update oneDNN submodule and remove cache cleanup WA
1 parent 24424a5 commit 4482fb1

3 files changed (+1, −17 lines changed)

src/plugins/intel_gpu/include/intel_gpu/plugin/plugin.hpp (−1)

@@ -45,7 +45,6 @@ class Plugin : public ov::IPlugin {
 
 public:
     Plugin();
-    ~Plugin();
 
     std::shared_ptr<ov::ICompiledModel> compile_model(const std::shared_ptr<const ov::Model>& model,
                                                       const ov::AnyMap& properties) const override;

src/plugins/intel_gpu/src/plugin/plugin.cpp (−15)

@@ -158,21 +158,6 @@ Plugin::Plugin() {
     m_compiled_model_runtime_properties["OV_VERSION"] = ov_version.buildNumber;
 }
 
-Plugin::~Plugin() {
-#ifdef ENABLE_ONEDNN_FOR_GPU
-    // To prevent hanging during oneDNN's primitive cache destruction,
-    // trigger earlier cache cleanup by setting its capacity to 0.
-    // Related ticket: 106154.
-    dnnl::set_primitive_cache_capacity(0);
-
-    // In case of multiple ov::Core instances (and multiple GPU plugins) we need to restore the original
-    // cache capacity to prevent working with a zero-capacity cache in other GPU Plugin instances, since
-    // the cache is shared between all GPU Plugin instances and cache cleanup affects all of them.
-    const int default_cache_capacity = 1024;
-    dnnl::set_primitive_cache_capacity(default_cache_capacity);
-#endif
-}
-
 std::shared_ptr<ov::ICompiledModel> Plugin::compile_model(const std::shared_ptr<const ov::Model>& model, const ov::AnyMap& orig_config) const {
     OV_ITT_SCOPED_TASK(itt::domains::intel_gpu_plugin, "Plugin::compile_model");
     std::string device_id = get_device_id(orig_config);
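For context, the removed destructor relied on oneDNN's process-global primitive cache capacity setting: shrinking the capacity to 0 evicts all cached primitives, and restoring the capacity re-enables caching for any other GPU plugin instances that share the same cache. The standalone C++ sketch below illustrates that pattern using the public oneDNN C++ API; it is only an illustration, not code from this commit. Note one assumption: the removed code restored a hard-coded default of 1024, while this sketch queries the current capacity with dnnl::get_primitive_cache_capacity() instead.

#include <dnnl.hpp>  // oneDNN C++ API

// Sketch of the workaround pattern removed by this commit: flush the shared
// primitive cache early by setting its capacity to zero, then restore the
// previous capacity so other users of the process-wide cache still get caching.
void flush_onednn_primitive_cache() {
    // Remember the current capacity (a process-global setting).
    const int previous_capacity = dnnl::get_primitive_cache_capacity();

    // Capacity 0 evicts all cached primitives immediately.
    dnnl::set_primitive_cache_capacity(0);

    // Restore the previous capacity so later primitive creation is cached again.
    dnnl::set_primitive_cache_capacity(previous_capacity);
}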
