@@ -85,7 +85,7 @@ void merge_sets(
 }
 
 // no uses of tensors in container types
-void assertNonTensorTypeDoesNotContainTensors(TypePtr type) {
+void assertNonTensorTypeDoesNotContainTensors(const TypePtr& type) {
   if (type->cast<TensorType>()) {
     return;
   }
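
`TypePtr` here is a shared-pointer-style handle (historically an alias for `std::shared_ptr<c10::Type>`), so the by-value signature above copies the pointer and bumps its atomic reference count on every call; the `const TypePtr&` form avoids both. A minimal stand-alone sketch of the difference, using a plain `std::shared_ptr<int>` as a stand-in for the real `TypePtr`:

#include <cstdio>
#include <memory>

// By value: the call copies the shared_ptr, incrementing (and later
// decrementing) its atomic reference count.
void inspect_by_value(std::shared_ptr<int> p) {
  std::printf("use_count in by-value call: %ld\n", p.use_count()); // prints 2
}

// By const reference: no copy and no refcount traffic, and the callee
// still cannot reseat the caller's pointer.
void inspect_by_ref(const std::shared_ptr<int>& p) {
  std::printf("use_count in by-ref call: %ld\n", p.use_count()); // prints 1
}

int main() {
  auto p = std::make_shared<int>(42);
  inspect_by_value(p);
  inspect_by_ref(p);
}
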
@@ -94,7 +94,7 @@ void assertNonTensorTypeDoesNotContainTensors(TypePtr type) {
   }
 }
 
-void InplaceMKLDNNSubgraph(std::shared_ptr<Graph> graph) {
+void InplaceMKLDNNSubgraph(const std::shared_ptr<Graph>& graph) {
   // This function first calculates aliasing sets,
   // then calculates the last node each aliasing set is alive for.
   // Then we go through each node, if it's a node which has an equivalent
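
The comment lines above summarize the pass: partition values into aliasing sets, compute the last node at which each set is alive, then walk the nodes and swap in in-place equivalents once their inputs are dead. As a toy illustration of the liveness step only, under simplified assumptions (alias sets as ints, nodes as the lists of sets they use, topological order given; none of the real Graph/AliasDb machinery):

#include <cstddef>
#include <unordered_map>
#include <vector>

using AliasSetId = int;
using ToyNode = std::vector<AliasSetId>; // the alias sets a node uses

// For each aliasing set, find the index of the last node that uses it.
// Past that node the set's buffer is dead, so a later op may overwrite it.
std::unordered_map<AliasSetId, std::size_t> lastLiveNode(
    const std::vector<ToyNode>& nodes) {
  std::unordered_map<AliasSetId, std::size_t> last;
  for (std::size_t i = 0; i < nodes.size(); ++i) {
    for (AliasSetId set : nodes[i]) {
      last[set] = i; // later uses overwrite earlier ones
    }
  }
  return last;
}
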
@@ -234,7 +234,7 @@ void InplaceMKLDNNSubgraph(std::shared_ptr<Graph> graph) {
 // innermost dimension is padded with 0s. The precondition, `aten_op(0) == 0`
 // allows us to avoid any special casing of padded elements.
 Operation createUnaryOp(
-    std::function<void(at::Tensor output, at::Tensor input)> aten_op,
+    const std::function<void(at::Tensor output, at::Tensor input)>& aten_op,
     bool inplace = false) {
   return [aten_op, inplace](Stack& stack) {
     auto a = pop(stack).toTensor();
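
`createUnaryOp` adapts an out-variant kernel into a JIT Operation: pop the input tensor, pick a destination (the input buffer itself when `inplace`, a fresh buffer otherwise), run `aten_op`, and push the result. A simplified, self-contained sketch of that shape, with `double` standing in for `at::Tensor` and a std::vector for the interpreter stack (not the real runtime types):

#include <functional>
#include <vector>

using Value = double;                     // stand-in for at::Tensor
using Stack = std::vector<Value>;         // stand-in for torch::jit::Stack
using Operation = std::function<void(Stack&)>;

Operation createUnaryOp(
    const std::function<void(Value& output, const Value& input)>& op,
    bool inplace = false) {
  return [op, inplace](Stack& stack) {
    Value a = stack.back();
    stack.pop_back();
    if (inplace) {
      op(a, a); // result written over the input "buffer"
      stack.push_back(a);
    } else {
      Value out{}; // fresh output "buffer"
      op(out, a);
      stack.push_back(out);
    }
  };
}
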
@@ -395,7 +395,7 @@ static std::function<void(at::Tensor output, at::Tensor input)> hardtanh_helper(
     const Node* n) {
   auto min_val = n->f(attr::min_val);
   auto max_val = n->f(attr::max_val);
-  return [min_val, max_val](at::Tensor output, at::Tensor input) {
+  return [min_val, max_val](at::Tensor output, const at::Tensor& input) {
     at::cpu::hardtanh_out(output, input, min_val, max_val);
   };
 }
@@ -404,7 +404,7 @@ static std::function<void(at::Tensor output, at::Tensor input)> clamp_helper(
     const Node* n) {
   auto min_val = n->f(attr::min_val);
   auto max_val = n->f(attr::max_val);
-  return [min_val, max_val](at::Tensor output, at::Tensor input) {
+  return [min_val, max_val](at::Tensor output, const at::Tensor& input) {
     at::cpu::clamp_out(output, input, min_val, max_val);
   };
 }
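
Both helpers follow the same recipe: read the node's scalar attributes once, capture them by value in the closure, and take the tensor input by const reference to match `createUnaryOp`'s updated signature. The same capture pattern in miniature (plain doubles instead of tensors and node attributes; `makeClamp` is a hypothetical name, not part of the patch):

#include <algorithm>
#include <functional>

// min_val/max_val are captured by value, so the closure stays valid after
// the node they were read from is gone; input is taken by const reference.
std::function<void(double& output, const double& input)> makeClamp(
    double min_val, double max_val) {
  return [min_val, max_val](double& output, const double& input) {
    output = std::clamp(input, min_val, max_val);
  };
}
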
@@ -415,15 +415,15 @@ const RegisterOperators MKLDNNHardSwishOpReg({
     torch::jit::Operator(
         "prim::MKLDNNHardSwish_(Tensor(a!) self) -> Tensor(a!)",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
               at::cpu::hardswish_out(output, input);
             },
             true),
         AliasAnalysisKind::FROM_SCHEMA),
     torch::jit::Operator(
         "prim::MKLDNNHardSigmoid_(Tensor(a!) self) -> Tensor(a!)",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
               at::cpu::hardsigmoid_out(output, input);
             },
             true),
@@ -443,15 +443,15 @@ const RegisterOperators MKLDNNHardSwishOpReg({
     torch::jit::Operator(
         "prim::MKLDNNHardSwish(Tensor a) -> Tensor",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
               at::cpu::hardswish_out(output, input);
             },
             false),
         AliasAnalysisKind::FROM_SCHEMA),
     torch::jit::Operator(
         "prim::MKLDNNHardSigmoid(Tensor a) -> Tensor",
         createUnaryOp(
-            [](at::Tensor output, at::Tensor input) {
+            [](at::Tensor output, const at::Tensor& input) {
               at::cpu::hardsigmoid_out(output, input);
             },
             false),
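
The two registration blocks differ only in the schema string and the `inplace` flag: `Tensor(a!) self` declares an op that mutates and returns its input (so `createUnaryOp(..., true)` may reuse the input buffer), while the plain `Tensor a` variants allocate fresh output. Reusing the toy `createUnaryOp`/`Value` stand-ins sketched earlier, with the hardswish formula x * clamp(x + 3, 0, 6) / 6 in place of the real at::cpu kernel:

#include <algorithm> // std::clamp; assumes the stand-ins defined above

// In-place flavor, analogous to "prim::MKLDNNHardSwish_(Tensor(a!) self)".
Operation hardswish_inplace = createUnaryOp(
    [](Value& output, const Value& input) {
      output = input * std::clamp(input + 3.0, 0.0, 6.0) / 6.0;
    },
    /*inplace=*/true);

// Functional flavor, analogous to "prim::MKLDNNHardSwish(Tensor a)".
Operation hardswish = createUnaryOp(
    [](Value& output, const Value& input) {
      output = input * std::clamp(input + 3.0, 0.0, 6.0) / 6.0;
    },
    /*inplace=*/false);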