
Commit 71ca600

janeyx99 authored and facebook-github-bot committed
Renaming CAFFE2_API to TORCH_API (pytorch#49496)
Summary: Since caffe2 and torch have been consolidated, CAFFE2_API should be merged with TORCH_API. Addresses a TODO.

Manually edited some references of the removed `CAFFE2_API`:
* `CONTRIBUTING.md`
* `caffe2/proto/CMakeLists.txt`
* `cmake/ProtoBuf.cmake`
* `c10/macros/Export.h`
* `torch/csrc/WindowsTorchApiMacro.h`

Pull Request resolved: pytorch#49496
Reviewed By: malfet, samestep
Differential Revision: D25600726
Pulled By: janeyx99
fbshipit-source-id: 7e068d959e397ac183c097d7e9a9afeca5ddd782
1 parent c9e0521 commit 71ca600

File tree: 197 files changed (+849, −854 lines)

Note: large commits hide some file contents by default, so only a subset of the 197 changed files appears below.

CONTRIBUTING.md (+1, −1)

@@ -754,7 +754,7 @@ than Linux, which are worth keeping in mind when fixing these problems.
 1. Symbols are NOT exported by default on Windows; instead, you have to explicitly
    mark a symbol as exported/imported in a header file with `__declspec(dllexport)` /
    `__declspec(dllimport)`. We have codified this pattern into a set of macros
-   which follow the convention `*_API`, e.g., `CAFFE2_API` inside Caffe2 and ATen.
+   which follow the convention `*_API`, e.g., `TORCH_API` inside Caffe2, ATen and Torch.
    (Every separate shared library needs a unique macro name, because symbol visibility
    is on a per shared library basis. See c10/macros/Macros.h for more details.)
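For readers unfamiliar with the idiom, a `*_API` macro like `TORCH_API` is typically built from a pair of export/import macros. The sketch below is illustrative only: the guard name `BUILDING_TORCH_LIB` is a placeholder, and the real definitions live in `c10/macros/Export.h` and depend on generated per-library build flags.

```cpp
// Simplified sketch of the *_API export-macro idiom, not PyTorch's
// verbatim source. BUILDING_TORCH_LIB is a hypothetical guard.
#if defined(_WIN32)
  #define EXAMPLE_EXPORT __declspec(dllexport)
  #define EXAMPLE_IMPORT __declspec(dllimport)
#else
  // On ELF/Mach-O platforms, default visibility serves as "export".
  #define EXAMPLE_EXPORT __attribute__((visibility("default")))
  #define EXAMPLE_IMPORT
#endif

#ifdef BUILDING_TORCH_LIB        // defined only while compiling the library
  #define TORCH_API EXAMPLE_EXPORT
#else                            // consumers importing the symbols
  #define TORCH_API EXAMPLE_IMPORT
#endif
```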

aten/src/ATen/CPUGeneratorImpl.h (+3, −3)

@@ -7,7 +7,7 @@
 
 namespace at {
 
-struct CAFFE2_API CPUGeneratorImpl : public c10::GeneratorImpl {
+struct TORCH_API CPUGeneratorImpl : public c10::GeneratorImpl {
   // Constructors
   CPUGeneratorImpl(uint64_t seed_in = default_rng_seed_val);
   ~CPUGeneratorImpl() = default;
@@ -36,8 +36,8 @@ struct CAFFE2_API CPUGeneratorImpl : public c10::GeneratorImpl {
 
 namespace detail {
 
-CAFFE2_API const Generator& getDefaultCPUGenerator();
-CAFFE2_API Generator createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
+TORCH_API const Generator& getDefaultCPUGenerator();
+TORCH_API Generator createCPUGenerator(uint64_t seed_val = default_rng_seed_val);
 
 } // namespace detail
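Call sites are unaffected by the rename; only the export annotation changed. A minimal usage sketch of the declarations above, assuming a libtorch build:

```cpp
#include <ATen/CPUGeneratorImpl.h>

#include <cstdint>

// Query the process-wide default CPU generator and create an
// independent, reproducibly seeded one.
uint64_t generator_demo() {
  const at::Generator& def = at::detail::getDefaultCPUGenerator();
  at::Generator private_gen = at::detail::createCPUGenerator(/*seed_val=*/42);
  return def.current_seed() + private_gen.current_seed();
}
```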

aten/src/ATen/Context.h (+3, −3)

@@ -21,7 +21,7 @@ namespace at {
 
 class Tensor;
 
-class CAFFE2_API Context {
+class TORCH_API Context {
  public:
   Context();
 
@@ -225,13 +225,13 @@ class CAFFE2_API Context {
   std::unique_ptr<THHState, void(*)(THHState*)> thh_state;
 };
 
-CAFFE2_API Context& globalContext();
+TORCH_API Context& globalContext();
 
 static inline void init() {
   globalContext();
 }
 
-CAFFE2_API Allocator* getCPUAllocator();
+TORCH_API Allocator* getCPUAllocator();
 
 static inline DeprecatedTypeProperties& getDeprecatedTypeProperties(Backend p, ScalarType s) {
   return globalDeprecatedTypePropertiesRegistry().getDeprecatedTypeProperties(
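`globalContext()` is the usual entry point for querying runtime state from code that links against libtorch. A short sketch using the exported accessors above:

```cpp
#include <ATen/Context.h>

// Touch the exported singleton and the CPU allocator, as downstream
// consumers of these TORCH_API symbols would.
bool runtime_has_cuda() {
  at::Context& ctx = at::globalContext();
  c10::Allocator* alloc = at::getCPUAllocator();
  return alloc != nullptr && ctx.hasCUDA();
}
```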

aten/src/ATen/DLConvertor.h (+5, −5)

@@ -10,10 +10,10 @@
 
 namespace at {
 
-CAFFE2_API ScalarType toScalarType(const DLDataType& dtype);
-CAFFE2_API DLManagedTensor* toDLPack(const Tensor& src);
-CAFFE2_API Tensor fromDLPack(const DLManagedTensor* src);
-CAFFE2_API DLDataType getDLDataType(const Tensor& t);
-CAFFE2_API DLContext getDLContext(const Tensor& tensor, const int64_t& device_id);
+TORCH_API ScalarType toScalarType(const DLDataType& dtype);
+TORCH_API DLManagedTensor* toDLPack(const Tensor& src);
+TORCH_API Tensor fromDLPack(const DLManagedTensor* src);
+TORCH_API DLDataType getDLDataType(const Tensor& t);
+TORCH_API DLContext getDLContext(const Tensor& tensor, const int64_t& device_id);
 
 } //namespace at
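These are ATen's DLPack interop hooks; a sketch of the round trip they enable (the re-imported tensor shares storage with the source):

```cpp
#include <ATen/ATen.h>
#include <ATen/DLConvertor.h>

// Export a tensor through the DLPack ABI and immediately re-import it,
// as one would when handing data to another framework. fromDLPack takes
// ownership of the managed-tensor capsule.
at::Tensor dlpack_roundtrip(const at::Tensor& src) {
  DLManagedTensor* managed = at::toDLPack(src);
  return at::fromDLPack(managed);  // aliases src's storage
}
```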

aten/src/ATen/DynamicLibrary.h (+3, −3)

@@ -8,11 +8,11 @@ namespace at {
 struct DynamicLibrary {
   AT_DISALLOW_COPY_AND_ASSIGN(DynamicLibrary);
 
-  CAFFE2_API DynamicLibrary(const char* name);
+  TORCH_API DynamicLibrary(const char* name);
 
-  CAFFE2_API void* sym(const char* name);
+  TORCH_API void* sym(const char* name);
 
-  CAFFE2_API ~DynamicLibrary();
+  TORCH_API ~DynamicLibrary();
 
  private:
   void* handle = nullptr;
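A sketch of how this RAII wrapper is used; the library path and symbol name below are placeholders, not real PyTorch artifacts:

```cpp
#include <ATen/DynamicLibrary.h>

// Open a shared object, resolve an entry point, and call it while the
// handle is still alive; the destructor closes the library on scope exit.
void call_plugin() {
  at::DynamicLibrary lib("libexample_plugin.so");  // errors if it cannot be opened
  using entry_fn = void (*)();
  auto fn = reinterpret_cast<entry_fn>(lib.sym("example_entry_point"));
  fn();  // must run before `lib` is destroyed
}
```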

aten/src/ATen/ExpandUtils.h (+3, −3)

@@ -9,14 +9,14 @@
 
 namespace at {
 
-CAFFE2_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
-CAFFE2_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
+TORCH_API std::vector<int64_t> infer_size(IntArrayRef a, IntArrayRef b);
+TORCH_API std::tuple<std::vector<int64_t>, std::vector<int64_t>>
 inferExpandGeometry(
     IntArrayRef tensor_sizes,
     IntArrayRef tensor_strides,
     IntArrayRef sizes);
 
-CAFFE2_API std::vector<int64_t> infer_dense_strides(
+TORCH_API std::vector<int64_t> infer_dense_strides(
     IntArrayRef tensor_sizes,
     IntArrayRef tensor_strides);
 
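`infer_size` implements the standard broadcasting rule for a pair of shapes; a small sketch:

```cpp
#include <ATen/ExpandUtils.h>

#include <vector>

// Broadcast two shapes the way binary ops do: (3, 1) x (1, 4) -> (3, 4).
std::vector<int64_t> broadcast_shape() {
  std::vector<int64_t> a{3, 1};
  std::vector<int64_t> b{1, 4};
  return at::infer_size(a, b);  // {3, 4}
}
```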

aten/src/ATen/MemoryOverlap.h (+9, −9)

@@ -15,19 +15,19 @@ enum class MemOverlap { NO, YES, TOO_HARD };
 
 enum class MemOverlapStatus { FULL, PARTIAL, NO, TOO_HARD };
 
-CAFFE2_API MemOverlap has_internal_overlap(const Tensor& t);
-CAFFE2_API MemOverlap has_internal_overlap(TensorImpl* t);
+TORCH_API MemOverlap has_internal_overlap(const Tensor& t);
+TORCH_API MemOverlap has_internal_overlap(TensorImpl* t);
 
-CAFFE2_API void assert_no_internal_overlap(const Tensor& t);
-CAFFE2_API void assert_no_internal_overlap(TensorImpl* t);
+TORCH_API void assert_no_internal_overlap(const Tensor& t);
+TORCH_API void assert_no_internal_overlap(TensorImpl* t);
 
-CAFFE2_API MemOverlapStatus get_overlap_status(const Tensor& a, const Tensor& b);
-CAFFE2_API MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b);
+TORCH_API MemOverlapStatus get_overlap_status(const Tensor& a, const Tensor& b);
+TORCH_API MemOverlapStatus get_overlap_status(TensorImpl* a, TensorImpl* b);
 
-CAFFE2_API void assert_no_partial_overlap(const Tensor& a, const Tensor& b);
+TORCH_API void assert_no_partial_overlap(const Tensor& a, const Tensor& b);
 void assert_no_partial_overlap(TensorImpl* a, TensorImpl* b);
 
-CAFFE2_API void assert_no_overlap(const Tensor& a, const Tensor& b);
-CAFFE2_API void assert_no_overlap(TensorImpl* a, TensorImpl* b);
+TORCH_API void assert_no_overlap(const Tensor& a, const Tensor& b);
+TORCH_API void assert_no_overlap(TensorImpl* a, TensorImpl* b);
 
 }
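These checks guard in-place kernels against aliased memory; a sketch of typical use around an in-place op:

```cpp
#include <ATen/ATen.h>
#include <ATen/MemoryOverlap.h>

// Reject aliasing that would make an in-place write ill-defined,
// mirroring the checks ATen kernels run before writing to `out`.
void checked_add_(at::Tensor& out, const at::Tensor& other) {
  at::assert_no_internal_overlap(out);        // out must not overlap itself
  at::assert_no_partial_overlap(out, other);  // full or no overlap only
  out.add_(other);
}
```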

aten/src/ATen/NamedTensorUtils.h (+24, −24)

@@ -17,8 +17,8 @@ inline bool has_names(TensorList tensors) {
 
 // Converts dim to an positional index. Errors if `dim` cannot be used to
 // refer to any dimension of tensor.
-CAFFE2_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
-CAFFE2_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, DimnameList dims);
+TORCH_API int64_t dimname_to_position(const Tensor& tensor, Dimname dim);
+TORCH_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, DimnameList dims);
 
 // Unifies two DimnameList to produce a third. This is useful for implementing
 // the named inference rule for binary broadcasting operations like add.
@@ -28,7 +28,7 @@ CAFFE2_API std::vector<int64_t> dimnames_to_positions(const Tensor& tensor, Dimn
 // 2) Check misaligned: If a name `n` is in `names`, then it must appear at
 //    the same index from the right in other.
 // 3) The output names are obtained by unifying the names individually from the right.
-CAFFE2_API std::vector<Dimname>
+TORCH_API std::vector<Dimname>
 unify_from_right(DimnameList names, DimnameList other, const char* action = "broadcast");
 
 [[noreturn]] inline void reportNYIDimnameOverload(const char* op_name) {
@@ -75,50 +75,50 @@ namespace namedinference {
 // `names` can be empty; see [NOTE] Writing name inference rules
 // If `names` is not empty, `names.size()` should equal `result.dim()`.
 // When in doubt, use this overload instead of the others.
-CAFFE2_API Tensor& propagate_names_if_nonempty(
+TORCH_API Tensor& propagate_names_if_nonempty(
     Tensor& result,
     DimnameList maybe_names,
     bool validate_names = false);
 
 // Propagates `names` to `result`. Only use this if we are certain that there are
 // names to propagate (that names is not empty).
-CAFFE2_API Tensor& propagate_names(
+TORCH_API Tensor& propagate_names(
     Tensor& result,
     DimnameList names,
     bool validate_names = false);
 
 // Propagates all names from src to result.
-CAFFE2_API void propagate_names(Tensor& result, const Tensor& src);
+TORCH_API void propagate_names(Tensor& result, const Tensor& src);
 
 // Propagates all names except for those at the excluded_idxs.
-CAFFE2_API void propagate_names_except(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs);
+TORCH_API void propagate_names_except(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs);
 
 // Used for reduction ops that have a `keepdim` arg.
-CAFFE2_API void propagate_names_for_reduction(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs, bool keepdim);
+TORCH_API void propagate_names_for_reduction(Tensor& result, const Tensor& src, IntArrayRef excluded_idxs, bool keepdim);
 
-CAFFE2_API void propagate_names_for_expand(Tensor& result, const Tensor& self);
+TORCH_API void propagate_names_for_expand(Tensor& result, const Tensor& self);
 
-CAFFE2_API std::vector<Dimname> compute_cat_outnames(TensorList tensors);
+TORCH_API std::vector<Dimname> compute_cat_outnames(TensorList tensors);
 
-CAFFE2_API std::vector<Dimname> compute_broadcast_outnames(
+TORCH_API std::vector<Dimname> compute_broadcast_outnames(
     const Tensor& self,
     const Tensor& other);
 
-CAFFE2_API std::vector<Dimname> broadcast_to_outnames(
+TORCH_API std::vector<Dimname> broadcast_to_outnames(
     const Tensor& tensor,
    const Tensor& reference_tensor,
    const char* op_name);
 
-CAFFE2_API std::vector<Dimname> compute_matmul_outnames(const Tensor& self, const Tensor& other);
+TORCH_API std::vector<Dimname> compute_matmul_outnames(const Tensor& self, const Tensor& other);
 
-CAFFE2_API std::vector<Dimname> compute_cdist_outnames(const Tensor& self, const Tensor& other);
+TORCH_API std::vector<Dimname> compute_cdist_outnames(const Tensor& self, const Tensor& other);
 
-CAFFE2_API std::vector<Dimname> compute_bmm_outnames(
+TORCH_API std::vector<Dimname> compute_bmm_outnames(
     Tensor& result,
     const Tensor& self,
     const Tensor& other);
 
-CAFFE2_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
+TORCH_API std::vector<Dimname> compute_squeeze_outnames(const Tensor& tensor);
 
 std::vector<Dimname> compute_diagonal_outnames(
     const Tensor& tensor,
@@ -127,40 +127,40 @@ std::vector<Dimname> compute_diagonal_outnames(
 
 // TensorImpl* overloads for Legacy TH/THC code. Use these sparingly.
 
-CAFFE2_API TensorImpl* propagate_names_if_nonempty(
+TORCH_API TensorImpl* propagate_names_if_nonempty(
     TensorImpl* result,
     DimnameList maybe_names,
     bool validate_names = false);
 
-CAFFE2_API TensorImpl* propagate_names(
+TORCH_API TensorImpl* propagate_names(
    TensorImpl* result,
    DimnameList names,
    bool validate_names = false);
 
-CAFFE2_API void propagate_names(TensorImpl* result, /*const */TensorImpl* src);
+TORCH_API void propagate_names(TensorImpl* result, /*const */TensorImpl* src);
 
 // result = m1 @ m2 + bias
-CAFFE2_API void propagate_names_for_addmm(
+TORCH_API void propagate_names_for_addmm(
    Tensor& result,
    const Tensor& m1,
    const Tensor& m2,
    const Tensor& bias);
 
-CAFFE2_API void propagate_names_for_addmv(
+TORCH_API void propagate_names_for_addmv(
    Tensor& result,
    const Tensor& mat,
    const Tensor& vec,
    const Tensor& bias);
 
-CAFFE2_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
+TORCH_API void check_names_for_dot(TensorImpl* vec1, TensorImpl* vec2);
 
-CAFFE2_API std::vector<Dimname> compute_baddbmm_outnames(
+TORCH_API std::vector<Dimname> compute_baddbmm_outnames(
    Tensor& result,
    const Tensor& self,
    const Tensor& other,
    const Tensor& bias);
 
-CAFFE2_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
+TORCH_API bool are_names_equal(TensorImpl* self, TensorImpl* other);
 
 } // namespace namedinference

aten/src/ATen/OpaqueTensorImpl.h (+1, −1)

@@ -17,7 +17,7 @@ namespace at {
 // "shallow copy" in order to add support.
 
 template <typename OpaqueHandle>
-struct CAFFE2_API OpaqueTensorImpl : public TensorImpl {
+struct TORCH_API OpaqueTensorImpl : public TensorImpl {
   // public constructor for now...
   OpaqueTensorImpl(
       at::DispatchKeySet key_set,

aten/src/ATen/PTThreadPool.h (+1, −1)

@@ -5,7 +5,7 @@
 
 namespace at {
 
-class CAFFE2_API PTThreadPool : public c10::ThreadPool {
+class TORCH_API PTThreadPool : public c10::ThreadPool {
  public:
   explicit PTThreadPool(
       int pool_size,

aten/src/ATen/Parallel.h (+13, −13)

@@ -10,25 +10,25 @@ inline int64_t divup(int64_t x, int64_t y) {
 }
 
 // Called during new thread initialization
-CAFFE2_API void init_num_threads();
+TORCH_API void init_num_threads();
 
 // Sets the number of threads to be used in parallel region
-CAFFE2_API void set_num_threads(int);
+TORCH_API void set_num_threads(int);
 
 // Returns the maximum number of threads that may be used in a parallel region
-CAFFE2_API int get_num_threads();
+TORCH_API int get_num_threads();
 
 // Returns the current thread number (starting from 0)
 // in the current parallel region, or 0 in the sequential region
-CAFFE2_API int get_thread_num();
+TORCH_API int get_thread_num();
 
 // Checks whether the code runs in parallel region
-CAFFE2_API bool in_parallel_region();
+TORCH_API bool in_parallel_region();
 
 namespace internal {
 
 // Initialise num_threads lazily at first parallel call
-inline CAFFE2_API void lazy_init_num_threads() {
+inline TORCH_API void lazy_init_num_threads() {
   thread_local bool init = false;
   if (C10_UNLIKELY(!init)) {
     at::init_num_threads();
@@ -110,29 +110,29 @@ inline scalar_t parallel_reduce(
     const SF& sf);
 
 // Returns a detailed string describing parallelization settings
-CAFFE2_API std::string get_parallel_info();
+TORCH_API std::string get_parallel_info();
 
 // Sets number of threads used for inter-op parallelism
-CAFFE2_API void set_num_interop_threads(int);
+TORCH_API void set_num_interop_threads(int);
 
 // Returns the number of threads used for inter-op parallelism
-CAFFE2_API int get_num_interop_threads();
+TORCH_API int get_num_interop_threads();
 
 // Launches inter-op parallel task
-CAFFE2_API void launch(std::function<void()> func);
+TORCH_API void launch(std::function<void()> func);
 namespace internal {
 void launch_no_thread_state(std::function<void()> fn);
 } // namespace internal
 
 // Launches intra-op parallel task
-CAFFE2_API void intraop_launch(std::function<void()> func);
+TORCH_API void intraop_launch(std::function<void()> func);
 
 // Launches intra-op parallel task, returns a future
-CAFFE2_API std::shared_ptr<c10::ivalue::Future> intraop_launch_future(
+TORCH_API std::shared_ptr<c10::ivalue::Future> intraop_launch_future(
     std::function<void()> func);
 
 // Returns number of intra-op threads used by default
-CAFFE2_API int intraop_default_num_threads();
+TORCH_API int intraop_default_num_threads();
 
 } // namespace at
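A sketch exercising the threading entry points above, assuming a libtorch build; the thread count of 4 is an arbitrary example:

```cpp
#include <ATen/Parallel.h>

#include <iostream>

// Initialize per-thread state, size the intra-op pool, and print the
// resulting parallel configuration.
void configure_threads() {
  at::init_num_threads();
  at::set_num_threads(4);  // must be set before the first parallel region
  std::cout << at::get_parallel_info() << std::endl;
}
```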

aten/src/ATen/ParallelNative.h (+1, −1)

@@ -22,7 +22,7 @@ inline std::tuple<size_t, size_t> calc_num_tasks_and_chunk_size(
   return std::make_tuple(num_tasks, chunk_size);
 }
 
-CAFFE2_API void _parallel_run(
+TORCH_API void _parallel_run(
     const int64_t begin,
     const int64_t end,
     const int64_t grain_size,

aten/src/ATen/SparseTensorImpl.h (+1, −1)

@@ -5,7 +5,7 @@
 #include <c10/util/Exception.h>
 
 namespace at {
-struct CAFFE2_API SparseTensorImpl : public TensorImpl {
+struct TORCH_API SparseTensorImpl : public TensorImpl {
   // Stored in COO format, indices + values.
 
   // INVARIANTS:

aten/src/ATen/TensorGeometry.h (+1, −1)

@@ -5,7 +5,7 @@
 
 namespace at {
 
-struct CAFFE2_API TensorGeometry {
+struct TORCH_API TensorGeometry {
   TensorGeometry() : storage_offset_(0) {}
 
   explicit TensorGeometry(IntArrayRef sizes)
