Commit af0bc75

FFFrog authored and pytorchmergebot committed
Remove deprecated alias macro(1/3) (pytorch#137556)
**Detailed Descriptions:**

- Remove AT_ERROR macro

Pull Request resolved: pytorch#137556
Approved by: https://github.com/ezyang
1 parent 16caa8c commit af0bc75

File tree

153 files changed: +521 -418 lines

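Every hunk in this commit applies the same mechanical rewrite: AT_ERROR(msg...), a deprecated alias that unconditionally throws a c10::Error, becomes TORCH_CHECK(false, msg...), which raises the same error through the non-deprecated check macro. A minimal sketch of the pattern, using a hypothetical checked_dim helper that is not part of this commit:

    #include <cstdint>
    #include <c10/util/Exception.h> // defines TORCH_CHECK (AT_ERROR was an alias here)

    // Hypothetical helper, for illustration only -- not from this commit.
    int64_t checked_dim(int64_t dim, int64_t ndim) {
      if (dim < 0 || dim >= ndim) {
        // Before: AT_ERROR("dimension ", dim, " out of range for ", ndim, "-d tensor");
        // After: the hard-coded false condition makes TORCH_CHECK throw
        // unconditionally, building the c10::Error message from the
        // remaining arguments.
        TORCH_CHECK(false, "dimension ", dim, " out of range for ", ndim, "-d tensor");
      }
      return dim;
    }

Where a genuine condition is available, the idiomatic form folds it into the macro, e.g. TORCH_CHECK(dim >= 0 && dim < ndim, ...); the hunks below keep the unconditional TORCH_CHECK(false, ...) spelling so each change stays a pure drop-in replacement.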

aten/src/ATen/CPUApplyUtils.h (+1 -1)

@@ -136,7 +136,7 @@ inline bool _apply_preamble(ArrayRef<Tensor> tensors) {
   checkDeviceType("CPU_tensor_apply", tensors, kCPU);
   checkLayout("CPU_tensor_apply", tensors, kStrided);
   if (!_all_equal_numel(tensors))
-    AT_ERROR(_all_equal_numel_error(tensors));
+    TORCH_CHECK(false, _all_equal_numel_error(tensors));
   // An empty tensor has no elements
   for (auto& t : tensors)
     if (t.numel() == 0)

aten/src/ATen/CPUFixedAllocator.h (+2 -2)

@@ -12,11 +12,11 @@
 namespace at {

 static cpu_fixed_malloc(void*, ptrdiff_t) {
-  AT_ERROR("attempting to resize a tensor view of an external blob");
+  TORCH_CHECK(false, "attempting to resize a tensor view of an external blob");
 }

 static cpu_fixed_realloc(void*, void*, ptrdiff_t) {
-  AT_ERROR("attempting to resize a tensor view of an external blob");
+  TORCH_CHECK(false, "attempting to resize a tensor view of an external blob");
 }

 static cpu_fixed_free(void* state, void* allocation) {

aten/src/ATen/CPUGeneratorImpl.cpp (+1 -1)

@@ -189,7 +189,7 @@ void CPUGeneratorImpl::set_state(const c10::TensorImpl& new_state) {
       double_normal_sample = std::optional<double>(legacy_pod->normal_y);
     }
   } else {
-    AT_ERROR("Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
+    TORCH_CHECK(false, "Expected either a CPUGeneratorImplStateLegacy of size ", size_legacy,
             " or a CPUGeneratorImplState of size ", size_current,
             " but found the input RNG state size to be ", new_state_size);
   }

aten/src/ATen/Context.h (+4 -2)

@@ -67,8 +67,10 @@ class TORCH_API Context {
     } else if (device_type == at::kHIP) {
       return at::detail::getHIPHooks();
     } else {
-      AT_ERROR(
-          c10::DeviceTypeName(device_type), " device type not an accelerator.");
+      TORCH_CHECK(
+          false,
+          c10::DeviceTypeName(device_type),
+          " device type not an accelerator.");
     }
   }

aten/src/ATen/Dispatch.h (+4 -2)

@@ -55,7 +55,8 @@ TORCH_API void record_kernel_function_dtype(std::string name);
   do { \
     if constexpr (!at::should_include_kernel_dtype( \
                       at_dispatch_name, enum_type)) { \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
           "dtype '", \
           toString(enum_type), \
           "' not selected for kernel tag ", \

@@ -220,7 +221,8 @@ inline void deprecated_AT_DISPATCH_ALL_TYPES_AND_HALF_AND_COMPLEX() {}
     switch (_st) { \
       __VA_ARGS__ \
       default: \
-        AT_ERROR( \
+        TORCH_CHECK( \
+            false, \
            '"', \
            at_dispatch_name, \
            "\" not implemented for '", \

aten/src/ATen/ExpandUtils.h (+1 -1)

@@ -78,7 +78,7 @@ inline void check_defined(
     const char* api_name) {
   for (auto& t : tensors) {
     if (!t.get().defined()) {
-      AT_ERROR(api_name, "(...) called with an undefined Tensor");
+      TORCH_CHECK(false, api_name, "(...) called with an undefined Tensor");
     }
   }
 }

aten/src/ATen/InferSize.h (+1 -1)

@@ -33,7 +33,7 @@ inline void infer_size_impl(
     } else if (shape[dim] >= 0) {
       newsize *= shape[dim];
     } else {
-      AT_ERROR("invalid shape dimension ", shape[dim]);
+      TORCH_CHECK(false, "invalid shape dimension ", shape[dim]);
     }
   }

aten/src/ATen/OpaqueTensorImpl.h (+3 -3)

@@ -45,15 +45,15 @@ struct TORCH_API OpaqueTensorImpl : public TensorImpl {
   }

   void set_size(int64_t dim, int64_t new_size) override {
-    AT_ERROR("opaque tensors do not have set_size");
+    TORCH_CHECK(false, "opaque tensors do not have set_size");
   }

   void set_stride(int64_t dim, int64_t new_stride) override {
-    AT_ERROR("opaque tensors do not have set_stride");
+    TORCH_CHECK(false, "opaque tensors do not have set_stride");
   }

   void set_storage_offset(int64_t storage_offset) override {
-    AT_ERROR("opaque tensors do not have set_storage_offset");
+    TORCH_CHECK(false, "opaque tensors do not have set_storage_offset");
   }

 #ifdef DEBUG

aten/src/ATen/SparseCsrTensorUtils.h (+14 -7)

@@ -23,7 +23,8 @@
     case kSparseBsc: \
       return __VA_ARGS__(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse compressed tensor layout but got ", \
          the_layout); \

@@ -42,7 +43,8 @@
     case kSparseBsc: \
       return (COLUMN_DIM_ACTION)(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse compressed tensor layout but got ", \
          the_layout); \

@@ -61,7 +63,8 @@
     case kSparseBsc: \
       return (BLOCK_ACTION)(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse compressed tensor layout but got ", \
          the_layout); \

@@ -77,7 +80,8 @@
     case kSparseBsr: \
       return (ROW_DIM_ACTION)(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse row compressed tensor layout but got ", \
          the_layout); \

@@ -93,7 +97,8 @@
     case kSparseBsc: \
       return (COL_DIM_ACTION)(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse column compressed tensor layout but got ", \
          the_layout); \

@@ -108,7 +113,8 @@
     case kSparseCsc: \
       return (ACTION)(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse compressed (non-block) tensor layout but got ", \
          the_layout); \

@@ -123,7 +129,8 @@
     case kSparseBsc: \
       return (ACTION)(); \
     default: \
-      AT_ERROR( \
+      TORCH_CHECK( \
+          false, \
          NAME, \
          " expected sparse compressed block tensor layout but got ", \
          the_layout); \

aten/src/ATen/SparseTensorImpl.cpp (+3 -3)

@@ -57,13 +57,13 @@ void SparseTensorImpl::release_resources() {
 }

 void SparseTensorImpl::set_size(int64_t dim, int64_t new_size) {
-  AT_ERROR("sparse tensors do not have set_size");
+  TORCH_CHECK(false, "sparse tensors do not have set_size");
 }
 void SparseTensorImpl::set_stride(int64_t dim, int64_t new_stride) {
-  AT_ERROR("sparse tensors do not have set_stride");
+  TORCH_CHECK(false, "sparse tensors do not have set_stride");
 }
 void SparseTensorImpl::set_storage_offset(int64_t storage_offset) {
-  AT_ERROR("sparse tensors do not have set_storage_offset");
+  TORCH_CHECK(false, "sparse tensors do not have set_storage_offset");
 }
 #ifdef DEBUG
 bool SparseTensorImpl::has_storage() const {

aten/src/ATen/TensorUtils.cpp (+2 -2)

@@ -155,7 +155,7 @@ void checkSameGPU(CheckedFrom c, const TensorArg& t1, const TensorArg& t2) {
   }
   oss << "but expected " << ((!t1->is_cpu() && !t2->is_cpu()) ? "them" : "it")
       << " to be on GPU (while checking arguments for " << c << ")";
-  AT_ERROR(oss.str());
+  TORCH_CHECK(false, oss.str());
 }
 TORCH_CHECK(
     t1->get_device() == t2->get_device(),

@@ -200,7 +200,7 @@ void checkScalarTypes(CheckedFrom c, const TensorArg& t,
   }
   oss << "; but got " << t->toString()
       << " instead (while checking arguments for " << c << ")";
-  AT_ERROR(oss.str());
+  TORCH_CHECK(false, oss.str());
 }
 }

aten/src/ATen/Utils.h (+8 -4)

@@ -36,7 +36,8 @@ inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
   for (const auto i : c10::irange(tensors.size())) {
     const auto& expr = tensors[i];
     if (expr.layout() != Layout::Strided) {
-      AT_ERROR(
+      TORCH_CHECK(
+          false,
           "Expected dense tensor but got ",
           expr.layout(),
           " for sequence element ",

@@ -48,7 +49,8 @@ inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
           "'");
     }
     if (expr.device().type() != device_type) {
-      AT_ERROR(
+      TORCH_CHECK(
+          false,
           "Expected object of device type ",
           device_type,
           " but got device type ",

@@ -62,7 +64,8 @@ inline std::vector<TensorImpl*> checked_dense_tensor_list_unwrap(
           "'");
     }
     if (expr.scalar_type() != scalar_type) {
-      AT_ERROR(
+      TORCH_CHECK(
+          false,
           "Expected object of scalar type ",
           scalar_type,
           " but got scalar type ",

@@ -96,7 +99,8 @@ std::array<int64_t, N> check_intlist(
     return res;
   }
   if (list.size() != N) {
-    AT_ERROR(
+    TORCH_CHECK(
+        false,
         "Expected a list of ",
         N,
         " ints but got ",

aten/src/ATen/autocast_mode.cpp (+1 -1)

@@ -149,7 +149,7 @@ Banned functions
 *******************************/

 static Tensor binary_cross_entropy_banned(const Tensor &, const Tensor &, const std::optional<Tensor>&, int64_t) {
-  AT_ERROR("torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
+  TORCH_CHECK(false, "torch.nn.functional.binary_cross_entropy and torch.nn.BCELoss are unsafe to autocast.\n"
            "Many models use a sigmoid layer right before the binary cross entropy layer.\n"
            "In this case, combine the two layers using torch.nn.functional.binary_cross_entropy_with_logits\n"
            "or torch.nn.BCEWithLogitsLoss. binary_cross_entropy_with_logits and BCEWithLogits are\n"

aten/src/ATen/autocast_mode.h (+3 -2)

@@ -211,7 +211,7 @@ inline at::ScalarType prioritize(
     const Tensor& nextArg,
     c10::DeviceType device_type = c10::DeviceType::CUDA) {
   if (current == at::kDouble) {
-    AT_ERROR("promote type is double in at::autocast::prioritize");
+    TORCH_CHECK(false, "promote type is double in at::autocast::prioritize");
     return current;
   }
   at::ScalarType lower_precision_fp =

@@ -225,7 +225,8 @@ inline at::ScalarType prioritize(
     } else if (current == lower_precision_fp && next == lower_precision_fp) {
       return lower_precision_fp;
     } else {
-      AT_ERROR("Unexpected floating ScalarType in at::autocast::prioritize");
+      TORCH_CHECK(
+          false, "Unexpected floating ScalarType in at::autocast::prioritize");
       return current;
     }
   } else {

aten/src/ATen/core/enum_type.h (+1 -1)

@@ -28,7 +28,7 @@ struct TORCH_API EnumType : public NamedType {
           std::move(enum_names_values),
           std::move(cu)));
     default:
-      AT_ERROR(
+      TORCH_CHECK(false,
           "Cannot create Enum with value type '",
           value->str(),
           "', only int, float and string are supported");

aten/src/ATen/core/function_schema_inl.h (+1 -1)

@@ -55,7 +55,7 @@ inline void FunctionSchema::checkAndNormalizeInputs(
       inputs.push_back(*argument.default_value());
       continue;
     }
-    AT_ERROR(
+    TORCH_CHECK(false,
         name(),
         "() is missing value for argument '",
         argument.name(),

aten/src/ATen/core/ivalue.cpp (+4 -4)

@@ -756,7 +756,7 @@ IValueComparator getLessThanComparator(const IValue& v) {
     torch::jit::Function* lt_func =
         checkObjectSortSchema(v.type()->expect<ClassType>(), why_not);
     if (!lt_func) {
-      AT_ERROR(why_not.str());
+      TORCH_CHECK(false, why_not.str());
     }

     return [lt_func](const IValue& a, const IValue& b) {

@@ -772,7 +772,7 @@ IValueComparator getLessThanComparator(const IValue& v) {
     };
   }

-  AT_ERROR("IValues of type: ", v.tagKind(), " are not comparable");
+  TORCH_CHECK(false, "IValues of type: ", v.tagKind(), " are not comparable");
 }

 IValueComparator getGreaterThanComparator(const IValue& v) {

@@ -967,7 +967,7 @@ IValue IValue::deepcopy(
       copy = *this;
     } break;
     default: {
-      AT_ERROR("Can't deepcopy IValue with tag: ", tagKind());
+      TORCH_CHECK(false, "Can't deepcopy IValue with tag: ", tagKind());
     }
   }
   // NB: this doesn't work if an object contains itself, and it may

@@ -1050,7 +1050,7 @@ c10::intrusive_ptr<ivalue::Object> ivalue::Object::deepcopy(
       }
       err << ". Please define serialization methods via def_pickle() for "
             "this class.";
-      AT_ERROR(err.str());
+      TORCH_CHECK(false, err.str());
     }
     object->setSlot(i, slots_[i].deepcopy(memo, device));
   }

aten/src/ATen/core/jit_type.h (+1 -1)

@@ -938,7 +938,7 @@ struct TORCH_API DictType : public SharedType {
       case TypeKind::DeviceObjType:
         return DictTypePtr(new DictType(std::move(key), std::move(value)));
       default:
-        AT_ERROR(
+        TORCH_CHECK(false,
             "Cannot create dict for key type '",
             key->str(),
             "', only int, float, complex, Tensor, device and string keys are supported");

aten/src/ATen/core/jit_type_base.h (+1 -1)

@@ -585,7 +585,7 @@ struct TORCH_API Type {
   virtual TypePtr createWithContained(
       // NOLINTNEXTLINE(performance-unnecessary-value-param)
       std::vector<TypePtr> /*contained_types*/) const {
-    AT_ERROR(
+    TORCH_CHECK(false,
         "type with contained types did not overload createWithContained: ",
         str());
   }

aten/src/ATen/core/type.cpp (+1 -1)

@@ -629,7 +629,7 @@ MatchTypeReturn matchTypeVariables(
     }
   }

-  AT_ERROR("Unhandled free variable container: ", formal->repr_str());
+  TORCH_CHECK(false, "Unhandled free variable container: ", formal->repr_str());
 }

 // change return types like List[List[t]] into List[List[int]]

aten/src/ATen/cuda/CUDABlas.cpp (+3 -3)

@@ -34,7 +34,7 @@ static rocblas_operation hipOperationToRocOperation(hipblasOperation_t op)
     case HIPBLAS_OP_C:
       return rocblas_operation_conjugate_transpose;
   }
-  AT_ERROR("HIPBLAS_STATUS_INVALID_ENUM");
+  TORCH_CHECK(false, "HIPBLAS_STATUS_INVALID_ENUM");
 }
 static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)
 {

@@ -57,7 +57,7 @@ static hipblasStatus_t rocBLASStatusToHIPStatus(rocblas_status error)
     case rocblas_status_internal_error:
       return HIPBLAS_STATUS_INTERNAL_ERROR;
   }
-  AT_ERROR("HIPBLAS_STATUS_INVALID_ENUM");
+  TORCH_CHECK(false, "HIPBLAS_STATUS_INVALID_ENUM");
 }
 // hipblas does not have hipblasSetMathMode
 #define hipblasSetMathMode(handle, flags) HIPBLAS_STATUS_SUCCESS

@@ -116,7 +116,7 @@ static cublasOperation_t _cublasOpFromChar(char op) {
     case 'C':
       return CUBLAS_OP_C;
   }
-  AT_ERROR(
+  TORCH_CHECK(false,
       "_cublasOpFromChar input should be 't', 'n' or 'c' but got `", op, "`");
 }
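From a caller's perspective the behavior is unchanged: both the old and new spellings throw c10::Error, which surfaces in Python as a RuntimeError. A small standalone sketch, assuming a program linked against libtorch/c10:

    #include <iostream>
    #include <c10/util/Exception.h>

    int main() {
      try {
        // Same observable effect as the removed AT_ERROR("boom");
        TORCH_CHECK(false, "boom");
      } catch (const c10::Error& e) {
        // c10::Error derives from std::exception; what() includes the message.
        std::cout << "caught c10::Error: " << e.what() << "\n";
      }
      return 0;
    }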
