Skip to content

Commit 1a73255

Browse files
cyyever authored and pytorchmergebot committed
Concat namespaces in jit code (pytorch#138976)
Fixes #ISSUE_NUMBER Pull Request resolved: pytorch#138976 Approved by: https://github.com/Skylion007
1 parent 4de93d1 commit 1a73255

File tree

106 files changed

+216
-614
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

106 files changed

+216
-614
lines changed

torch/csrc/jit/backends/backend.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@
55
#include <torch/csrc/jit/backends/backend_interface.h>
66
#include <torch/custom_class.h>
77

8-
namespace torch {
9-
namespace jit {
8+
namespace torch::jit {
109
namespace {
1110
// NOLINTNEXTLINE(clang-diagnostic-unneeded-internal-declaration)
1211
inline c10::FunctionSchema getIsAvailableSchema() {
@@ -115,5 +114,4 @@ class backend {
115114
}
116115
};
117116

118-
} // namespace jit
119-
} // namespace torch
117+
} // namespace torch::jit

torch/csrc/jit/backends/backend_debug_handler.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <stack>
44

5-
namespace torch {
6-
namespace jit {
5+
namespace torch::jit {
76

87
std::atomic<DebugHandleType> BackendDebugInfoRecorder::unique_debug_handle_{0};
98

@@ -33,5 +32,4 @@ BackendDebugInfoMapType BackendDebugInfoRecorder::stopRecording() {
3332
return handles_to_inlined_callstack_ptrs_;
3433
}
3534

36-
} // namespace jit
37-
} // namespace torch
35+
} // namespace torch::jit

torch/csrc/jit/backends/backend_debug_handler.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,7 @@
77

88
#include <atomic>
99

10-
namespace torch {
11-
namespace jit {
10+
namespace torch::jit {
1211

1312
/*
1413
* BackendDebugHandleManager is responsible for issuing debug handles to
@@ -136,5 +135,4 @@ class TORCH_API BackendDebugInfoRecorder {
136135
BackendDebugInfoMapType handles_to_inlined_callstack_ptrs_;
137136
};
138137

139-
} // namespace jit
140-
} // namespace torch
138+
} // namespace torch::jit

torch/csrc/jit/backends/backend_debug_info.cpp

+2-6
Original file line numberDiff line numberDiff line change
@@ -1,9 +1,7 @@
11
#include <c10/macros/Macros.h>
22
#include <torch/csrc/jit/backends/backend_debug_info.h>
33

4-
namespace torch {
5-
namespace jit {
6-
namespace backend {
4+
namespace torch::jit::backend {
75
namespace {
86
#ifdef BUILD_LITE_INTERPRETER
97
static auto cls = torch::class_<PyTorchBackendDebugInfoDummy>(
@@ -18,6 +16,4 @@ static auto cls = torch::class_<PyTorchBackendDebugInfo>(
1816
#endif
1917

2018
} // namespace
21-
} // namespace backend
22-
} // namespace jit
23-
} // namespace torch
19+
} // namespace torch::jit::backend

torch/csrc/jit/backends/backend_debug_info.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -5,8 +5,7 @@
55
#endif
66
#include <torch/custom_class.h>
77

8-
namespace torch {
9-
namespace jit {
8+
namespace torch::jit {
109

1110
constexpr static auto kBackendUtilsNamespace = "backendutils";
1211
constexpr static auto kBackendDebugInfoClass = "BackendDebugInfo";
@@ -61,5 +60,4 @@ class PyTorchBackendDebugInfoDummy : public torch::CustomClassHolder {
6160
PyTorchBackendDebugInfoDummy() = default;
6261
};
6362
#endif
64-
} // namespace jit
65-
} // namespace torch
63+
} // namespace torch::jit

torch/csrc/jit/backends/backend_detail.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66

77
#include <functional>
88

9-
namespace torch {
10-
namespace jit {
9+
namespace torch::jit {
1110

1211
using DebugHandleType = int64_t;
1312

@@ -37,5 +36,4 @@ TORCH_API Module codegen_backend_module(
3736
const c10::Dict<IValue, IValue>& method_compile_spec,
3837
const c10::DictTypePtr& any_dict_ty);
3938
} // namespace detail
40-
} // namespace jit
41-
} // namespace torch
39+
} // namespace torch::jit

torch/csrc/jit/backends/backend_init.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -7,8 +7,7 @@
77
#include <torch/csrc/jit/python/pybind_utils.h>
88
#include <torch/csrc/utils/pybind.h>
99

10-
namespace torch {
11-
namespace jit {
10+
namespace torch::jit {
1211

1312
// Get all types that are shared in the module hierarchy rooted at \p mod.
1413
std::unordered_set<TypePtr> getSharedModuleTypes(Module& mod) {
@@ -189,5 +188,4 @@ void initJitBackendBindings(PyObject* module) {
189188
"Object ", py::str(orig_module), " is not a ScriptModule"));
190189
});
191190
}
192-
} // namespace jit
193-
} // namespace torch
191+
} // namespace torch::jit

torch/csrc/jit/backends/backend_init.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,7 @@
33
#include <torch/csrc/jit/python/pybind.h>
44
#include <torch/csrc/utils/pybind.h>
55

6-
namespace torch {
7-
namespace jit {
6+
namespace torch::jit {
87
// Initialize Python bindings for JIT to_<backend> functions.
98
void initJitBackendBindings(PyObject* module);
10-
} // namespace jit
11-
} // namespace torch
9+
} // namespace torch::jit
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,8 @@
11
#include <torch/csrc/jit/backends/backend_interface.h>
22

3-
namespace torch {
4-
namespace jit {
3+
namespace torch::jit {
54

65
PyTorchBackendInterface::PyTorchBackendInterface() noexcept = default;
76
PyTorchBackendInterface::~PyTorchBackendInterface() = default;
87

9-
} // namespace jit
10-
} // namespace torch
8+
} // namespace torch::jit

torch/csrc/jit/backends/backend_interface.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <torch/custom_class.h>
44

5-
namespace torch {
6-
namespace jit {
5+
namespace torch::jit {
76

87
// Interface for a JIT backend.
98
class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
@@ -30,5 +29,4 @@ class TORCH_API PyTorchBackendInterface : public torch::CustomClassHolder {
3029
c10::IValue handle,
3130
c10::impl::GenericList inputs) = 0;
3231
};
33-
} // namespace jit
34-
} // namespace torch
32+
} // namespace torch::jit
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
11
#pragma once
22

33
#include <torch/csrc/jit/backends/backend_detail.h>
4-
namespace torch {
5-
namespace jit {
4+
namespace torch::jit {
65
class backend_preprocess_register {
76
std::string backend_name_;
87

@@ -14,5 +13,4 @@ class backend_preprocess_register {
1413
detail::registerBackendPreprocessFunction(name, preprocess);
1514
}
1615
};
17-
} // namespace jit
18-
} // namespace torch
16+
} // namespace torch::jit

torch/csrc/jit/backends/backend_resolver.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22
#include <torch/csrc/jit/frontend/sugared_value.h>
33
#include <torch/custom_class.h>
44

5-
namespace torch {
6-
namespace jit {
5+
namespace torch::jit {
76
namespace {
87
// Essentially ClassNamespaceValue from import_source.cpp without the
98
// SourceImporterImpl reference. This helps resolve the
@@ -67,5 +66,4 @@ std::shared_ptr<Resolver> loweredModuleResolver() {
6766
return resolver;
6867
}
6968

70-
} // namespace jit
71-
} // namespace torch
69+
} // namespace torch::jit

torch/csrc/jit/backends/backend_resolver.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,9 +2,7 @@
22

33
#include <torch/csrc/jit/frontend/resolver.h>
44

5-
namespace torch {
6-
namespace jit {
5+
namespace torch::jit {
76
// Create a Resolver for use in generating LoweredModules for specific backends.
87
TORCH_API std::shared_ptr<Resolver> loweredModuleResolver();
9-
} // namespace jit
10-
} // namespace torch
8+
} // namespace torch::jit

torch/csrc/jit/backends/coreml/objc/PTMCoreMLTensorSpec.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,7 @@
33

44
#include <string>
55

6-
namespace torch {
7-
namespace jit {
8-
namespace mobile {
9-
namespace coreml {
6+
namespace torch::jit::mobile::coreml {
107

118
struct TensorSpec {
129
std::string name = "";
@@ -26,7 +23,4 @@ static inline c10::ScalarType scalar_type(const std::string& type_string) {
2623
return c10::ScalarType::Undefined;
2724
}
2825

29-
} // namespace coreml
30-
} // namespace mobile
31-
} // namespace jit
32-
} // namespace torch
26+
} // namespace torch::jit::mobile::coreml

torch/csrc/jit/backends/xnnpack/executor/xnn_executor.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -8,10 +8,7 @@
88
#include <memory>
99
#include <vector>
1010

11-
namespace torch {
12-
namespace jit {
13-
namespace xnnpack {
14-
namespace delegate {
11+
namespace torch::jit::xnnpack::delegate {
1512

1613
class XNNExecutor {
1714
private:
@@ -68,7 +65,4 @@ class XNNExecutor {
6865
friend class XNNCompiler;
6966
};
7067

71-
} // namespace delegate
72-
} // namespace xnnpack
73-
} // namespace jit
74-
} // namespace torch
68+
} // namespace torch::jit::xnnpack::delegate

torch/csrc/jit/codegen/cuda/interface.cpp

+2-8
Original file line numberDiff line numberDiff line change
@@ -9,10 +9,7 @@
99
#include <torch/csrc/jit/runtime/custom_operator.h>
1010
#include <torch/csrc/jit/runtime/register_ops_utils.h>
1111

12-
namespace torch {
13-
namespace jit {
14-
namespace fuser {
15-
namespace cuda {
12+
namespace torch::jit::fuser::cuda {
1613

1714
static std::atomic<bool> cuda_fusion_guard_mode{true};
1815

@@ -131,7 +128,4 @@ bool skipNode(const std::string& symbol_str, bool flip) {
131128
getFuserInterface()->fn_skip_n(symbol_str, flip);
132129
}
133130

134-
} // namespace cuda
135-
} // namespace fuser
136-
} // namespace jit
137-
} // namespace torch
131+
} // namespace torch::jit::fuser::cuda

torch/csrc/jit/codegen/cuda/interface.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,7 @@
1313
* Registration is done in torch/csrc/jit/codegen/cuda/register_interface.cpp
1414
*/
1515

16-
namespace torch {
17-
namespace jit {
18-
namespace fuser {
19-
namespace cuda {
16+
namespace torch::jit::fuser::cuda {
2017

2118
TORCH_API std::atomic<bool>& getCudaFusionGuardMode();
2219

@@ -52,7 +49,4 @@ TORCH_API bool isEnabled();
5249
TORCH_API bool setEnabled(bool is_enabled);
5350
TORCH_API bool canBeEnabled();
5451

55-
} // namespace cuda
56-
} // namespace fuser
57-
} // namespace jit
58-
} // namespace torch
52+
} // namespace torch::jit::fuser::cuda

torch/csrc/jit/codegen/fuser/cpu/fused_kernel.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -13,10 +13,7 @@ namespace at {
1313
struct DynamicLibrary;
1414
}
1515

16-
namespace torch {
17-
namespace jit {
18-
namespace fuser {
19-
namespace cpu {
16+
namespace torch::jit::fuser::cpu {
2017

2118
// Represents a compiled CPU kernel and the metadata necessary to run it
2219
struct TORCH_API FusedKernelCPU : public FusedKernel {
@@ -43,7 +40,4 @@ struct TORCH_API FusedKernelCPU : public FusedKernel {
4340
void (*kernel)(uint32_t, void**) = nullptr;
4441
};
4542

46-
} // namespace cpu
47-
} // namespace fuser
48-
} // namespace jit
49-
} // namespace torch
43+
} // namespace torch::jit::fuser::cpu

torch/csrc/jit/codegen/fuser/cpu/resource_strings.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,7 @@
22

33
#include <ATen/code_template.h>
44

5-
namespace torch {
6-
namespace jit {
7-
namespace fuser {
8-
namespace cpu {
5+
namespace torch::jit::fuser::cpu {
96

107
/*with type_as not checking type of its input, a fusion group can have non-fp32
118
tensor as input. Correct code for this case is generated, however, nvrtc does
@@ -101,7 +98,4 @@ JIT_API void ${kernelName}(IndexType totalElements, void ** args) {
10198
}
10299
)");
103100

104-
} // namespace cpu
105-
} // namespace fuser
106-
} // namespace jit
107-
} // namespace torch
101+
} // namespace torch::jit::fuser::cpu

torch/csrc/jit/codegen/fuser/cpu/temp_file.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,7 @@
2222
#include <string>
2323
#include <vector>
2424

25-
namespace torch {
26-
namespace jit {
27-
namespace fuser {
28-
namespace cpu {
25+
namespace torch::jit::fuser::cpu {
2926

3027
#ifdef _MSC_VER
3128
int wmkstemps(wchar_t* tmpl, int suffix_len) {
@@ -135,7 +132,4 @@ struct TempFile {
135132
std::string name_;
136133
};
137134

138-
} // namespace cpu
139-
} // namespace fuser
140-
} // namespace jit
141-
} // namespace torch
135+
} // namespace torch::jit::fuser::cpu

torch/csrc/jit/codegen/onednn/LlgaTensorImpl.cpp

+2-8
Original file line numberDiff line numberDiff line change
@@ -4,10 +4,7 @@
44
#include <c10/core/CPUAllocator.h>
55
#include <torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h>
66

7-
namespace torch {
8-
namespace jit {
9-
namespace fuser {
10-
namespace onednn {
7+
namespace torch::jit::fuser::onednn {
118

129
// Non-default dnnl::graph::allocator needs an allocator.
1310
// We would let it use c10::GetCPUAllocator's allocator,
@@ -152,9 +149,6 @@ at::ScalarType LlgaTensorDesc::aten_scalar_type() const {
152149
}
153150
}
154151

155-
} // namespace onednn
156-
} // namespace fuser
157-
} // namespace jit
158-
} // namespace torch
152+
} // namespace torch::jit::fuser::onednn
159153

160154
#endif // AT_MKLDNN_ENABLED()

torch/csrc/jit/codegen/onednn/LlgaTensorImpl.h

+2-8
Original file line numberDiff line numberDiff line change
@@ -6,10 +6,7 @@
66
#include <oneapi/dnnl/dnnl_graph.hpp>
77
#include <torch/csrc/jit/ir/ir.h>
88

9-
namespace torch {
10-
namespace jit {
11-
namespace fuser {
12-
namespace onednn {
9+
namespace torch::jit::fuser::onednn {
1310

1411
// Engine represents a device and its context. From the device kind, the engine
1512
// knows how to generate code for the target device and what kind of device
@@ -270,7 +267,4 @@ at::Tensor empty_llga(
270267

271268
dnnl::graph::tensor llga_from_aten_tensor(const at::Tensor& tensor);
272269

273-
} // namespace onednn
274-
} // namespace fuser
275-
} // namespace jit
276-
} // namespace torch
270+
} // namespace torch::jit::fuser::onednn

0 commit comments

Comments
 (0)