
Commit 6086e88

[CPU] Fixed BF16 Matmul inference precision
1 parent: 6dbabb9

1 file changed: +1 −1 lines changed

src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp

+1 −1
@@ -46,7 +46,7 @@ static const TypeMapping dnnlFCTypeMapping {
     {{_f32 | _bf16 | _f16, _any, _any, _i8 | _u8}, pt(bypass(), bypass(), use<0>(), use<0>())},
     // compresses float weights which do not match input data precision
     {{_f32, _half_float, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())},
-    {{_bf16, _f16, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())},
+    {{_bf16, _f16 | _f32, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())},
     {{_f16, _bf16, _any, _any | _any}, pt(bypass(), bypass(), use<0>(), use<0>())},
     // quantization configuration (@todo more strict requrements for output precision?)
     {{_u8 | _i8, _i8, _any, _any}, pt(bypass(), bypass(), bypass(), use<3>())},

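The changed row widens the accepted weight-precision mask so that BF16 activations now match FP32 compressed weights as well as FP16 ones, instead of falling through to a different (lower-precision) mapping. Below is a rough, hypothetical sketch of how such a mask-based precision-mapping table can be matched; the names Prec, MappingEntry, and matches are invented for illustration and are not the actual intel_cpu API.

// Hypothetical, simplified illustration of a precision-mapping table lookup.
// Not the actual OpenVINO intel_cpu implementation.
#include <cstdint>
#include <iostream>
#include <vector>

enum Prec : uint32_t {
    F32  = 1u << 0,
    BF16 = 1u << 1,
    F16  = 1u << 2,
    I8   = 1u << 3,
    U8   = 1u << 4,
    ANY  = 0xFFFFFFFFu,
};

struct MappingEntry {
    uint32_t src, wei, bia, dst;  // accepted precision masks per argument
};

// Before the fix: BF16 activations only matched F16 compressed weights.
// After the fix:  BF16 activations match F16 or F32 compressed weights,
// so FP32 weights no longer push the matmul onto a different execution path.
static const std::vector<MappingEntry> table = {
    {F32,  BF16 | F16, ANY, ANY},
    {BF16, F16 | F32,  ANY, ANY},  // row corresponding to the one changed by this commit
    {F16,  BF16,       ANY, ANY},
};

static bool matches(const MappingEntry& e, uint32_t src, uint32_t wei) {
    return (e.src & src) && (e.wei & wei);
}

int main() {
    uint32_t src = BF16, wei = F32;  // BF16 activations, FP32 compressed weights
    for (const auto& e : table) {
        if (matches(e, src, wei)) {
            std::cout << "matched entry: src mask=" << e.src
                      << ", wei mask=" << e.wei << "\n";
            break;
        }
    }
    return 0;
}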