Skip to content

Commit 81d765e

Browse files
malfet authored and pytorchmergebot committed
Fix sign-compare violations in cpp tests
Prerequisite change for enabling `-Werror=sign-compare` across the PyTorch repo.

Pull Request resolved: pytorch#75080
Approved by: https://github.com/atalman
1 parent ef56497 commit 81d765e

14 files changed

+24
-29
lines changed

aten/src/ATen/test/vulkan_api_test.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,7 @@ void showRtol(const at::Tensor& a, const at::Tensor& b) {
6565
}
6666

6767

68-
static void gen_allpermutations(std::vector<std::vector<int64_t>>& out, std::vector<int64_t> in, int i) {
68+
static void gen_allpermutations(std::vector<std::vector<int64_t>>& out, std::vector<int64_t> in, unsigned i) {
6969
// generate all permutations of a given dims
7070
if (i == in.size()) {
7171
out.push_back(in);

test/cpp/api/dataloader.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -1982,7 +1982,7 @@ TEST(DataLoaderTest, ChunkDatasetSave) {
19821982

19831983
for (const auto epoch_index : c10::irange(epoch_count)) {
19841984
(void)epoch_index; // Suppress unused variable warning
1985-
int iteration_count = 0;
1985+
unsigned iteration_count = 0;
19861986
for (auto iterator = data_loader->begin(); iterator != data_loader->end();
19871987
++iterator, ++iteration_count) {
19881988
if ((iteration_count + 1) % save_interval == 0) {
@@ -2316,7 +2316,7 @@ TEST(DataLoaderTest, CustomPreprocessPolicy) {
23162316
++iterator) {
23172317
auto batch_result = *iterator;
23182318
if (batch_result.size() > chunk_size * cross_chunk_shuffle_count) {
2319-
for (int i = 0; i < batch_result.size(); i += chunk_size) {
2319+
for (unsigned i = 0; i < batch_result.size(); i += chunk_size) {
23202320
ASSERT_TRUE(std::is_sorted(
23212321
batch_result.begin() + i,
23222322
batch_result.begin() + i + chunk_size));

test/cpp/api/init.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ void check_exact_values(
1919
auto layerParameters = parameters[i];
2020
auto expectedLayerParameters = expected_parameters[i];
2121

22-
if (layerParameters.size(0) != expectedLayerParameters.size()) {
22+
if (static_cast<size_t>(layerParameters.size(0)) != expectedLayerParameters.size()) {
2323
std::cout << "layer #" << i
2424
<< " layerParameters size: " << layerParameters.size(0)
2525
<< " != "

test/cpp/api/nn_utils.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -615,7 +615,7 @@ TEST_F(NNUtilsTest, PackPaddedSequence) {
615615
}
616616
int64_t offset = 0;
617617
std::vector<torch::Tensor> tensors_to_be_cat;
618-
for (int64_t i = 1; i < sorted_lengths.size() + 1; i++) {
618+
for (int64_t i = 1; i < static_cast<int64_t>(sorted_lengths.size() + 1); i++) {
619619
int64_t l = sorted_lengths.at(i-1);
620620
tensors_to_be_cat.emplace_back(pad(i * 100 + torch::arange(1., 5 * l + 1).view({l, 1, 5}), max_length));
621621
}

test/cpp/api/parameterdict.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -105,7 +105,7 @@ TEST_F(ParameterDictTest, Values) {
105105
auto dict = torch::nn::ParameterDict(params);
106106
std::vector<torch::Tensor> values = dict->values();
107107
std::vector<torch::Tensor> true_values{ta, tb, tc};
108-
for (auto i = 0; i < values.size(); i += 1) {
108+
for (auto i = 0U; i < values.size(); i += 1) {
109109
ASSERT_TRUE(torch::all(torch::eq(values[i], true_values[i])).item<bool>());
110110
}
111111
}

test/cpp/api/serialize.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ void test_serialize_optimizer(DerivedOptimizerOptions options, bool only_has_glo
129129
// optim3_2 and optim1 should have param_groups and state of size 1 and state_size respectively
130130
ASSERT_TRUE(optim3_2_param_groups.size() == 1);
131131
// state_size = 2 for all optimizers except LBFGS as LBFGS only maintains one global state
132-
int state_size = only_has_global_state ? 1 : 2;
132+
unsigned state_size = only_has_global_state ? 1 : 2;
133133
ASSERT_TRUE(optim3_2_state.size() == state_size);
134134

135135
// optim3_2 and optim1 should have param_groups and state of same size

test/cpp/jit/test_graph_iterator.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -62,7 +62,7 @@ void assert_ordering(
6262
ASSERT_EQ(expected.size(), actual.size())
6363
<< "Got " << actual.size() << " elements (" << actual << ")"
6464
<< " expected " << expected.size() << " elements (" << expected << ")";
65-
for (int i = 0; i < expected.size(); i++) {
65+
for (unsigned i = 0; i < expected.size(); i++) {
6666
ASSERT_EQ(expected[i], actual[i])
6767
<< "Difference at index " << i << " in " << actual << " (expected "
6868
<< actual << ")";

test/cpp/jit/test_lite_interpreter.cpp

+5-4
Original file line numberDiff line numberDiff line change
@@ -599,7 +599,7 @@ void runAndCheckTorchScriptModel(
599599
std::stringstream& input_model_stream,
600600
const std::vector<IValue>& input_data,
601601
const std::vector<IValue>& expect_result_list,
602-
const int64_t expect_version) {
602+
const uint64_t expect_version) {
603603
auto actual_version = _get_model_bytecode_version(input_model_stream);
604604
AT_ASSERT(actual_version == expect_version);
605605

@@ -616,7 +616,7 @@ void runAndCheckBytecodeModel(
616616
std::stringstream& input_model_stream,
617617
const std::vector<IValue>& input_data,
618618
const std::vector<IValue>& expect_result_list,
619-
const int64_t expect_version) {
619+
const uint64_t expect_version) {
620620
auto actual_version = _get_model_bytecode_version(input_model_stream);
621621
AT_ASSERT(actual_version == expect_version);
622622

@@ -634,13 +634,14 @@ void backportAllVersionCheck(
634634
std::stringstream& test_model_file_stream,
635635
std::vector<IValue>& input_data,
636636
std::vector<IValue>& expect_result_list,
637-
const int64_t expect_from_version) {
637+
const uint64_t expect_from_version) {
638638
auto from_version = _get_model_bytecode_version(test_model_file_stream);
639639
AT_ASSERT(from_version == expect_from_version);
640+
AT_ASSERT(from_version > 0);
640641

641642
// Backport script_module_v5.ptl to an older version
642643
constexpr int64_t minimum_to_version = 4;
643-
int64_t current_to_version = from_version - 1;
644+
auto current_to_version = from_version - 1;
644645

645646
// Verify all candidate to_version work as expected. All backport to version
646647
// larger than minimum_to_version should success.

test/cpp/jit/test_utils.h

+5-11
Original file line numberDiff line numberDiff line change
@@ -17,21 +17,15 @@ static inline void trim(std::string& s) {
1717
[](unsigned char ch) { return !std::isspace(ch); })
1818
.base(),
1919
s.end());
20-
for (int64_t i = 0; i < s.size(); ++i) {
21-
if (s[i] == '\n') {
20+
for (size_t i = 0; i < s.size(); ++i) {
21+
while (i < s.size() && s[i] == '\n') {
2222
s.erase(i, 1);
23-
i--;
2423
}
2524
}
26-
for (int64_t i = 0; i < s.size(); ++i) {
25+
for (size_t i = 0; i < s.size(); ++i) {
2726
if (s[i] == ' ') {
28-
for (int64_t j = i + 1; j < s.size(); j++) {
29-
if (s[j] == ' ') {
30-
s.erase(j, 1);
31-
j--;
32-
} else {
33-
break;
34-
}
27+
while (i + 1 < s.size() && s[i + 1] == ' ') {
28+
s.erase(i + 1, 1);
3529
}
3630
}
3731
}

test/cpp/lazy/test_lazy_ops.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -4132,7 +4132,7 @@ TEST_F(LazyOpsTest, TestDropoutInPlace) {
41324132
}
41334133

41344134
TEST_F(LazyOpsTest, TestRandperm) {
4135-
int n = 5;
4135+
unsigned n = 5;
41364136
torch::Tensor shuffle = torch::randperm(
41374137
n, torch::TensorOptions(torch::kLong).device(torch::kLazy));
41384138
torch::Tensor shuffle_cpu = CopyToDevice(shuffle, torch::kCPU);

test/cpp/lazy/test_lazy_ops_util.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -165,7 +165,7 @@ void TestBackward(
165165
// Check grad of sum(outs) w.r.t inputs_w_grad.
166166
torch::Tensor sum = torch::zeros_like(outs[0]).sum();
167167
torch::Tensor xsum = torch::zeros_like(xouts[0]).sum();
168-
for (int i = 0; i < outs.size(); ++i) {
168+
for (size_t i = 0; i < outs.size(); ++i) {
169169
if (outs[i].requires_grad()) {
170170
sum += outs[i].sum();
171171
xsum += xouts[i].sum();

test/cpp/tensorexpr/test_base.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -78,7 +78,7 @@ static void assertAllEqual(const std::vector<T>& vec, const T& val) {
7878
template <typename T>
7979
static void assertAllEqual(const std::vector<T>& v1, const std::vector<T>& v2) {
8080
ASSERT_EQ(v1.size(), v2.size());
81-
for (int i = 0; i < v1.size(); i++) {
81+
for (size_t i = 0; i < v1.size(); ++i) {
8282
ASSERT_EQ(v1[i], v2[i]);
8383
}
8484
}

test/cpp/tensorexpr/test_memdependency.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -274,7 +274,7 @@ TEST(MemDependency, BoundSubtractMultiDim) {
274274
if (x.size() != y.size()) {
275275
return false;
276276
}
277-
for (auto i = 0; i < x.size(); ++i) {
277+
for (auto i = 0U; i < x.size(); ++i) {
278278
if (!indexBoundsEquals(x[i], y[i])) {
279279
return false;
280280
}
@@ -338,7 +338,7 @@ TEST(MemDependency, BoundSubtractMultiDimSymbolic) {
338338
if (x.size() != y.size()) {
339339
return false;
340340
}
341-
for (auto i = 0; i < x.size(); ++i) {
341+
for (auto i = 0U; i < x.size(); ++i) {
342342
if (!indexBoundsEquals(x[i], y[i])) {
343343
return false;
344344
}

test/cpp/tensorexpr/test_ops.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,7 @@ TEST(Ops, Sum) {
2424
constexpr int N = 16;
2525
std::vector<IntList> testDims = {{0}, {1}, {0, 1}};
2626
std::vector<std::vector<ExprHandle>> outputShapes = {{N}, {M}, {}};
27-
for (int idx = 0; idx < testDims.size(); idx++) {
27+
for (unsigned idx = 0; idx < testDims.size(); idx++) {
2828
const auto& dims = testDims[idx];
2929
const auto& outShape = outputShapes[idx];
3030

0 commit comments

Comments (0)