|
3 | 3 | use ::primitives::*;
|
4 | 4 | use ::slice::*;
|
5 | 5 |
|
// `u32::max()` / `u16::max()` pre-widened to `u64` once, in `const` context, so
// the overflow checks in the impls below can compare a widened result against
// them without re-converting the max value on every call.
const MAX_U32_U64: u64 = __transmute::<u32, u64>(u32::max());
const MAX_U16_U64: u64 = __transmute::<u16, u64>(u16::max());
6 | 9 | /// Trait for the addition of two values.
|
7 | 10 | pub trait Add {
|
8 | 11 | /// Add two values of the same type.
|
@@ -56,69 +59,62 @@ impl Add for u64 {
|
// Emulate overflowing arithmetic for non-64-bit integer types
impl Add for u32 {
    /// Adds two `u32` values with explicit overflow handling.
    ///
    /// Both operands are widened to `u64` first, so the native 64-bit addition
    /// always fits (max sum is `2 * u32::max() < u64::max()`); overflow past
    /// `u32::max()` is then detected manually: revert if panic-on-overflow is
    /// enabled, otherwise wrap modulo `u32::max() + 1` (i.e. 2^32).
    fn add(self, other: Self) -> Self {
        // Widen to u64 so the addition itself cannot overflow.
        let res_u64 = __add(
            __transmute::<Self, u64>(self),
            __transmute::<Self, u64>(other),
        );

        if __gt(res_u64, MAX_U32_U64) {
            // Result does not fit in u32: integer overflow.
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // overflow enabled
                // res % (Self::max() + 1)
                __transmute::<u64, Self>(__mod(res_u64, __add(MAX_U32_U64, 1)))
            }
        } else {
            // No overflow: narrow back to u32.
            __transmute::<u64, Self>(res_u64)
        }
    }
}
|
77 | 80 |
|
impl Add for u16 {
    /// Adds two `u16` values, emulating `u16` overflow semantics on top of the
    /// native 64-bit arithmetic: revert on overflow when panic-on-overflow is
    /// enabled, otherwise wrap modulo `u16::max() + 1`.
    fn add(self, other: Self) -> Self {
        // Widen both operands so the addition itself always fits in u64.
        let lhs = __transmute::<Self, u64>(self);
        let rhs = __transmute::<Self, u64>(other);
        let sum = __add(lhs, rhs);

        if __gt(sum, MAX_U16_U64) {
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // Wrapping mode: reduce modulo u16::max() + 1, i.e. 2^16.
                let modulus = __add(MAX_U16_U64, 1);
                __transmute::<u64, Self>(__mod(sum, modulus))
            }
        } else {
            // In range: narrow back to u16.
            __transmute::<u64, Self>(sum)
        }
    }
}
|
94 | 101 |
|
impl Add for u8 {
    /// Adds two `u8` values with explicit overflow handling.
    ///
    /// Operands are widened to `u64` via an `asm` reinterpretation (unlike the
    /// wider types, `u8` uses the `u8_as_u64` helper rather than
    /// `__transmute` — presumably because const/transmute support differs for
    /// `u8`; confirm against the compiler version). Overflow past `u8::max()`
    /// reverts when panic-on-overflow is enabled, otherwise wraps modulo
    /// `u8::max() + 1`.
    fn add(self, other: Self) -> Self {
        // Widen to u64 so the addition itself cannot overflow.
        let res_u64 = __add(u8_as_u64(self), u8_as_u64(other));

        // u8::max() widened per call (no pre-widened const exists for u8).
        let max_u8_u64 = u8_as_u64(Self::max());

        if __gt(res_u64, max_u8_u64) {
            // Result does not fit in u8: integer overflow.
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // overflow enabled
                // res % (Self::max() + 1)
                u64_as_u8(__mod(res_u64, __add(max_u8_u64, 1)))
            }
        } else {
            // No overflow: narrow back to u8.
            u64_as_u8(res_u64)
        }
    }
}
|
@@ -173,23 +169,65 @@ impl Subtract for u64 {
|
173 | 169 | }
|
174 | 170 | }
|
175 | 171 |
|
impl Subtract for u32 {
    /// Subtracts `other` from `self`, emulating `u32` underflow semantics.
    ///
    /// Operands are widened to `u64` before subtracting. When `other > self`
    /// the 64-bit result lands outside the `u32` range, so the same range
    /// check used for overflow detects the underflow: revert if
    /// panic-on-overflow is enabled, otherwise wrap modulo `u32::max() + 1`.
    // NOTE(review): the wrap branch assumes the underlying u64 `__sub` wraps
    // (rather than trapping) when overflow panics are disabled — confirm
    // against the VM's arithmetic-flag semantics.
    fn subtract(self, other: Self) -> Self {
        let res_u64 = __sub(
            __transmute::<Self, u64>(self),
            __transmute::<Self, u64>(other),
        );

        if __gt(res_u64, MAX_U32_U64) {
            // Out of u32 range: underflow occurred.
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // Wrapping mode: res % (Self::max() + 1).
                __transmute::<u64, Self>(__mod(res_u64, __add(MAX_U32_U64, 1)))
            }
        } else {
            // In range: narrow back to u32.
            __transmute::<u64, Self>(res_u64)
        }
    }
}
|
183 | 192 |
|
impl Subtract for u16 {
    /// Subtracts `other` from `self`, emulating `u16` underflow semantics on
    /// top of the native 64-bit arithmetic: revert when panic-on-overflow is
    /// enabled, otherwise wrap modulo `u16::max() + 1`.
    fn subtract(self, other: Self) -> Self {
        // Widen both operands before subtracting.
        let lhs = __transmute::<Self, u64>(self);
        let rhs = __transmute::<Self, u64>(other);
        let diff = __sub(lhs, rhs);

        if __gt(diff, MAX_U16_U64) {
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // Wrapping mode: reduce modulo u16::max() + 1.
                let modulus = __add(MAX_U16_U64, 1);
                __transmute::<u64, Self>(__mod(diff, modulus))
            }
        } else {
            // In range: narrow back to u16.
            __transmute::<u64, Self>(diff)
        }
    }
}
|
189 | 213 |
|
impl Subtract for u8 {
    /// Subtracts `other` from `self`, emulating `u8` underflow semantics.
    ///
    /// Operands are widened to `u64` via `u8_as_u64`; a result outside the
    /// `u8` range indicates underflow: revert if panic-on-overflow is
    /// enabled, otherwise wrap modulo `u8::max() + 1`.
    fn subtract(self, other: Self) -> Self {
        let res_u64 = __sub(u8_as_u64(self), u8_as_u64(other));

        // u8::max() widened per call (no pre-widened const exists for u8).
        let max_u8_u64 = u8_as_u64(Self::max());

        if __gt(res_u64, max_u8_u64) {
            // Out of u8 range: underflow occurred.
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // Wrapping mode: res % (Self::max() + 1).
                u64_as_u8(__mod(res_u64, __add(max_u8_u64, 1)))
            }
        } else {
            // In range: narrow back to u8.
            u64_as_u8(res_u64)
        }
    }
}
|
195 | 233 |
|
@@ -246,67 +284,62 @@ impl Multiply for u64 {
|
// Emulate overflowing arithmetic for non-64-bit integer types
impl Multiply for u32 {
    /// Multiplies two `u32` values with explicit overflow handling.
    ///
    /// Both operands are widened to `u64` first; the widened product always
    /// fits, since `(2^32 - 1)^2 < 2^64`. Overflow past `u32::max()` reverts
    /// when panic-on-overflow is enabled, otherwise wraps modulo
    /// `u32::max() + 1`.
    fn multiply(self, other: Self) -> Self {
        // Widen to u64 so the multiplication itself cannot overflow.
        let res_u64 = __mul(
            __transmute::<Self, u64>(self),
            __transmute::<Self, u64>(other),
        );

        if __gt(res_u64, MAX_U32_U64) {
            // Result does not fit in u32: integer overflow.
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // overflow enabled
                // res % (Self::max() + 1)
                __transmute::<u64, Self>(__mod(res_u64, __add(MAX_U32_U64, 1)))
            }
        } else {
            // No overflow: narrow back to u32.
            __transmute::<u64, Self>(res_u64)
        }
    }
}
|
267 | 305 |
|
impl Multiply for u16 {
    /// Multiplies two `u16` values, emulating `u16` overflow semantics on top
    /// of the native 64-bit arithmetic: revert on overflow when
    /// panic-on-overflow is enabled, otherwise wrap modulo `u16::max() + 1`.
    fn multiply(self, other: Self) -> Self {
        // Widen both operands so the product always fits in u64.
        let lhs = __transmute::<Self, u64>(self);
        let rhs = __transmute::<Self, u64>(other);
        let product = __mul(lhs, rhs);

        if __gt(product, MAX_U16_U64) {
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // Wrapping mode: reduce modulo u16::max() + 1.
                let modulus = __add(MAX_U16_U64, 1);
                __transmute::<u64, Self>(__mod(product, modulus))
            }
        } else {
            // In range: narrow back to u16.
            __transmute::<u64, Self>(product)
        }
    }
}
|
282 | 326 |
|
impl Multiply for u8 {
    /// Multiplies two `u8` values with explicit overflow handling.
    ///
    /// Operands are widened to `u64` via `u8_as_u64`; the widened product
    /// always fits, since `(2^8 - 1)^2 < 2^64`. Overflow past `u8::max()`
    /// reverts when panic-on-overflow is enabled, otherwise wraps modulo
    /// `u8::max() + 1`.
    fn multiply(self, other: Self) -> Self {
        // Widen to u64 so the multiplication itself cannot overflow.
        let res_u64 = __mul(u8_as_u64(self), u8_as_u64(other));

        // u8::max() widened per call (no pre-widened const exists for u8).
        let max_u8_u64 = u8_as_u64(Self::max());

        if __gt(res_u64, max_u8_u64) {
            // Result does not fit in u8: integer overflow.
            if panic_on_overflow_is_enabled() {
                __revert(0)
            } else {
                // overflow enabled
                // res % (Self::max() + 1)
                u64_as_u8(__mod(res_u64, __add(max_u8_u64, 1)))
            }
        } else {
            // No overflow: narrow back to u8.
            u64_as_u8(res_u64)
        }
    }
}
|
@@ -1741,3 +1774,15 @@ fn panic_on_overflow_is_enabled() -> bool {
|
1741 | 1774 | 0,
|
1742 | 1775 | )
|
1743 | 1776 | }
|
| 1777 | + |
// Widens a `u8` to `u64` via an identity `asm` cast. Per the note elsewhere in
// this file, non-64-bit values are compiled to a u64 value under-the-hood, so
// this is a pure reinterpretation of the same word, not a conversion.
fn u8_as_u64(val: u8) -> u64 {
    asm(input: val) {
        input: u64
    }
}
| 1783 | + |
// Narrows a `u64` to `u8` via an identity `asm` cast. The cast performs no
// masking or range check, so callers must guarantee `val` already fits in u8
// (the call sites in this file either range-check the value or reduce it
// modulo u8::max() + 1 first).
fn u64_as_u8(val: u64) -> u8 {
    asm(input: val) {
        input: u8
    }
}
0 commit comments