@@ -136,7 +136,7 @@ void cpu_padding(
   auto input = input_.contiguous();
   auto output = output_.contiguous();

-  auto input_data = input.data_ptr<scalar_t>();
+  auto input_data = input.const_data_ptr<scalar_t>();
   auto output_data = output.data_ptr<scalar_t>();

   // fold nbatch and channels into single dimension for channels first.
@@ -158,7 +158,7 @@ void cpu_padding(

   // do vectorized copy whe output is overlapped with input on W,
   // only applies to positive padding
-  auto loop = [=](scalar_t* out, scalar_t* in, bool positive_padding) {
+  auto loop = [=](scalar_t* out, const scalar_t* in, bool positive_padding) {
     if (positive_padding) {
       for (const auto ow : c10::irange(pad_w)) {
         int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);
@@ -198,7 +198,7 @@ void cpu_padding(
     for (const auto i : c10::irange(begin, end)) {
       int64_t ih = PaddingType::index(oh, input_height, pad_h, offset_h);
       scalar_t* output_ptr = output_data + i * output_width;
-      scalar_t* input_ptr = input_data + c * input_height * input_width + ih * input_width;
+      const scalar_t* input_ptr = input_data + c * input_height * input_width + ih * input_width;

       loop(output_ptr, input_ptr, p.is_padding_positive_width);
       data_index_step(c, channels, oh, output_height);
@@ -214,7 +214,7 @@ void cpu_padding(
       int64_t id = PaddingType::index(od, input_depth, pad_d, offset_d);
       int64_t ih = PaddingType::index(oh, input_height, pad_h, offset_h);
       scalar_t* output_ptr = output_data + i * output_width;
-      scalar_t* input_ptr = input_data + c * input_depth * input_height * input_width +
+      const scalar_t* input_ptr = input_data + c * input_depth * input_height * input_width +
           id * input_height * input_width + ih * input_width;

       loop(output_ptr, input_ptr, p.is_padding_positive_width);
@@ -243,7 +243,7 @@ void cpu_padding_channels_last(
   auto input = input_.contiguous(memory_format);
   auto output = output_.contiguous(memory_format);

-  auto input_data = input.data_ptr<scalar_t>();
+  auto input_data = input.const_data_ptr<scalar_t>();
   auto output_data = output.data_ptr<scalar_t>();

   int64_t nbatch = p.nbatch;
@@ -274,7 +274,7 @@ void cpu_padding_channels_last(
       int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);

       scalar_t* output_ptr = output_data + i * channels;
-      scalar_t* input_ptr = input_data + (n * input_height * input_width + ih * input_width + iw) * channels;
+      const scalar_t* input_ptr = input_data + (n * input_height * input_width + ih * input_width + iw) * channels;
       copy_stub(output_ptr, input_ptr, channels);

       data_index_step(n, nbatch, oh, output_height, ow, output_width);
@@ -292,7 +292,7 @@ void cpu_padding_channels_last(
       int64_t iw = PaddingType::index(ow, input_width, pad_w, offset_w);

       scalar_t* output_ptr = output_data + i * channels;
-      scalar_t* input_ptr = input_data + (n * input_depth * input_height * input_width +
+      const scalar_t* input_ptr = input_data + (n * input_depth * input_height * input_width +
           id * input_height * input_width + ih * input_width + iw) * channels;
       copy_stub(output_ptr, input_ptr, channels);

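
The pattern in every hunk is the same: once the read-only source switches from `Tensor::data_ptr<T>()` to `Tensor::const_data_ptr<T>()`, the returned pointer is `const T*`, so every pointer it flows into, including lambda parameters like `loop`'s `in`, must become const-qualified or the code will not compile. Below is a minimal standalone sketch of that propagation, not the PyTorch code itself; the names (`pad_row`, `loop`) are illustrative, and `std::vector::data()` on a `const` access stands in for `const_data_ptr`.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Read-only input, mutable output: mirrors the const_data_ptr pattern.
template <typename scalar_t>
void pad_row(scalar_t* out, const scalar_t* in, int64_t n) {
  for (int64_t i = 0; i < n; ++i) {
    out[i] = in[i];
  }
}

int main() {
  std::vector<float> input{1.f, 2.f, 3.f};
  std::vector<float> output(3, 0.f);

  const float* input_data = input.data();  // analogous to const_data_ptr<float>()
  float* output_data = output.data();      // analogous to data_ptr<float>()

  // If this parameter were `float* in`, passing input_data would be a
  // compile error; const-qualifying it is exactly the change in the patch.
  auto loop = [=](float* out, const float* in, int64_t n) {
    pad_row(out, in, n);
  };
  loop(output_data, input_data, 3);

  std::cout << output[0] << " " << output[1] << " " << output[2] << "\n";
  return 0;
}
```

The payoff of the const qualifier is that accidental writes through the input pointer become compile-time errors rather than silent data corruption, at no runtime cost.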