// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-interpreter.h"

#include "src/utils.h"
#include "src/wasm/ast-decoder.h"
#include "src/wasm/decoder.h"
#include "src/wasm/wasm-external-refs.h"
#include "src/wasm/wasm-module.h"

#include "src/zone/accounting-allocator.h"
#include "src/zone/zone-containers.h"

namespace v8 {
namespace internal {
namespace wasm {

#if DEBUG
#define TRACE(...)                                        \
  do {                                                    \
    if (FLAG_trace_wasm_interpreter) PrintF(__VA_ARGS__); \
  } while (false)
#else
#define TRACE(...)
#endif

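// Internal opcodes are squeezed into encodings unused by real wasm opcodes.
// kInternalBreakpoint (0xFF) overwrites the bytecode at a breakpoint
// position; the original byte is recovered from {orig_start} when executing
// (see Execute below).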
#define FOREACH_INTERNAL_OPCODE(V) V(Breakpoint, 0xFF)

#define FOREACH_SIMPLE_BINOP(V) \
  V(I32Add, uint32_t, +)        \
  V(I32Sub, uint32_t, -)        \
  V(I32Mul, uint32_t, *)        \
  V(I32And, uint32_t, &)        \
  V(I32Ior, uint32_t, |)        \
  V(I32Xor, uint32_t, ^)        \
  V(I32Eq, uint32_t, ==)        \
  V(I32Ne, uint32_t, !=)        \
  V(I32LtU, uint32_t, <)        \
  V(I32LeU, uint32_t, <=)       \
  V(I32GtU, uint32_t, >)        \
  V(I32GeU, uint32_t, >=)       \
  V(I32LtS, int32_t, <)         \
  V(I32LeS, int32_t, <=)        \
  V(I32GtS, int32_t, >)         \
  V(I32GeS, int32_t, >=)        \
  V(I64Add, uint64_t, +)        \
  V(I64Sub, uint64_t, -)        \
  V(I64Mul, uint64_t, *)        \
  V(I64And, uint64_t, &)        \
  V(I64Ior, uint64_t, |)        \
  V(I64Xor, uint64_t, ^)        \
  V(I64Eq, uint64_t, ==)        \
  V(I64Ne, uint64_t, !=)        \
  V(I64LtU, uint64_t, <)        \
  V(I64LeU, uint64_t, <=)       \
  V(I64GtU, uint64_t, >)        \
  V(I64GeU, uint64_t, >=)       \
  V(I64LtS, int64_t, <)         \
  V(I64LeS, int64_t, <=)        \
  V(I64GtS, int64_t, >)         \
  V(I64GeS, int64_t, >=)        \
  V(F32Add, float, +)           \
  V(F32Eq, float, ==)           \
  V(F32Ne, float, !=)           \
  V(F32Lt, float, <)            \
  V(F32Le, float, <=)           \
  V(F32Gt, float, >)            \
  V(F32Ge, float, >=)           \
  V(F64Add, double, +)          \
  V(F64Eq, double, ==)          \
  V(F64Ne, double, !=)          \
  V(F64Lt, double, <)           \
  V(F64Le, double, <=)          \
  V(F64Gt, double, >)           \
  V(F64Ge, double, >=)

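// Operations in the *_NAN lists below can produce a NaN result, and are
// handled separately so the interpreter can record possible nondeterminism
// (wasm does not fully specify NaN bit patterns); see the
// EXECUTE_SIMPLE_BINOP_NAN and EXECUTE_OTHER_UNOP_NAN macros in Execute().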
#define FOREACH_SIMPLE_BINOP_NAN(V) \
  V(F32Mul, float, *)               \
  V(F64Mul, double, *)              \
  V(F32Div, float, /)               \
  V(F64Div, double, /)

#define FOREACH_OTHER_BINOP(V) \
  V(I32DivS, int32_t)          \
  V(I32DivU, uint32_t)         \
  V(I32RemS, int32_t)          \
  V(I32RemU, uint32_t)         \
  V(I32Shl, uint32_t)          \
  V(I32ShrU, uint32_t)         \
  V(I32ShrS, int32_t)          \
  V(I64DivS, int64_t)          \
  V(I64DivU, uint64_t)         \
  V(I64RemS, int64_t)          \
  V(I64RemU, uint64_t)         \
  V(I64Shl, uint64_t)          \
  V(I64ShrU, uint64_t)         \
  V(I64ShrS, int64_t)          \
  V(I32Ror, int32_t)           \
  V(I32Rol, int32_t)           \
  V(I64Ror, int64_t)           \
  V(I64Rol, int64_t)           \
  V(F32Sub, float)             \
  V(F32Min, float)             \
  V(F32Max, float)             \
  V(F32CopySign, float)        \
  V(F64Min, double)            \
  V(F64Max, double)            \
  V(F64Sub, double)            \
  V(F64CopySign, double)       \
  V(I32AsmjsDivS, int32_t)     \
  V(I32AsmjsDivU, uint32_t)    \
  V(I32AsmjsRemS, int32_t)     \
  V(I32AsmjsRemU, uint32_t)

#define FOREACH_OTHER_UNOP(V)    \
  V(I32Clz, uint32_t)            \
  V(I32Ctz, uint32_t)            \
  V(I32Popcnt, uint32_t)         \
  V(I32Eqz, uint32_t)            \
  V(I64Clz, uint64_t)            \
  V(I64Ctz, uint64_t)            \
  V(I64Popcnt, uint64_t)         \
  V(I64Eqz, uint64_t)            \
  V(F32Abs, float)               \
  V(F32Neg, float)               \
  V(F32Ceil, float)              \
  V(F32Floor, float)             \
  V(F32Trunc, float)             \
  V(F32NearestInt, float)        \
  V(F64Abs, double)              \
  V(F64Neg, double)              \
  V(F64Ceil, double)             \
  V(F64Floor, double)            \
  V(F64Trunc, double)            \
  V(F64NearestInt, double)       \
  V(I32SConvertF32, float)       \
  V(I32SConvertF64, double)      \
  V(I32UConvertF32, float)       \
  V(I32UConvertF64, double)      \
  V(I32ConvertI64, int64_t)      \
  V(I64SConvertF32, float)       \
  V(I64SConvertF64, double)      \
  V(I64UConvertF32, float)       \
  V(I64UConvertF64, double)      \
  V(I64SConvertI32, int32_t)     \
  V(I64UConvertI32, uint32_t)    \
  V(F32SConvertI32, int32_t)     \
  V(F32UConvertI32, uint32_t)    \
  V(F32SConvertI64, int64_t)     \
  V(F32UConvertI64, uint64_t)    \
  V(F32ConvertF64, double)       \
  V(F32ReinterpretI32, int32_t)  \
  V(F64SConvertI32, int32_t)     \
  V(F64UConvertI32, uint32_t)    \
  V(F64SConvertI64, int64_t)     \
  V(F64UConvertI64, uint64_t)    \
  V(F64ConvertF32, float)        \
  V(F64ReinterpretI64, int64_t)  \
  V(I32ReinterpretF32, float)    \
  V(I64ReinterpretF64, double)   \
  V(I32AsmjsSConvertF32, float)  \
  V(I32AsmjsUConvertF32, float)  \
  V(I32AsmjsSConvertF64, double) \
  V(I32AsmjsUConvertF64, double)

#define FOREACH_OTHER_UNOP_NAN(V) \
  V(F32Sqrt, float)               \
  V(F64Sqrt, double)

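// Helpers implementing the individual wasm operators. Each helper takes the
// operand value(s) plus a {TrapReason*} out-parameter, which is set when the
// operation traps; in that case the returned value is a dummy and is ignored
// by the caller.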
static inline int32_t ExecuteI32DivS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

static inline uint32_t ExecuteI32DivU(uint32_t a, uint32_t b,
                                      TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

static inline int32_t ExecuteI32RemS(int32_t a, int32_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

static inline uint32_t ExecuteI32RemU(uint32_t a, uint32_t b,
                                      TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}

static inline uint32_t ExecuteI32Shl(uint32_t a, uint32_t b, TrapReason* trap) {
  return a << (b & 0x1f);
}

static inline uint32_t ExecuteI32ShrU(uint32_t a, uint32_t b,
                                      TrapReason* trap) {
  return a >> (b & 0x1f);
}

static inline int32_t ExecuteI32ShrS(int32_t a, int32_t b, TrapReason* trap) {
  return a >> (b & 0x1f);
}

static inline int64_t ExecuteI64DivS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  if (b == -1 && a == std::numeric_limits<int64_t>::min()) {
    *trap = kTrapDivUnrepresentable;
    return 0;
  }
  return a / b;
}

static inline uint64_t ExecuteI64DivU(uint64_t a, uint64_t b,
                                      TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapDivByZero;
    return 0;
  }
  return a / b;
}

static inline int64_t ExecuteI64RemS(int64_t a, int64_t b, TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  if (b == -1) return 0;
  return a % b;
}

static inline uint64_t ExecuteI64RemU(uint64_t a, uint64_t b,
                                      TrapReason* trap) {
  if (b == 0) {
    *trap = kTrapRemByZero;
    return 0;
  }
  return a % b;
}

static inline uint64_t ExecuteI64Shl(uint64_t a, uint64_t b, TrapReason* trap) {
  return a << (b & 0x3f);
}

static inline uint64_t ExecuteI64ShrU(uint64_t a, uint64_t b,
                                      TrapReason* trap) {
  return a >> (b & 0x3f);
}

static inline int64_t ExecuteI64ShrS(int64_t a, int64_t b, TrapReason* trap) {
  return a >> (b & 0x3f);
}

static inline uint32_t ExecuteI32Ror(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1f);
  // Mask the left shift count as well, so that a zero {shift} does not
  // produce an undefined shift by 32.
  return (a >> shift) | (a << ((32 - shift) & 0x1f));
}

static inline uint32_t ExecuteI32Rol(uint32_t a, uint32_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x1f);
  return (a << shift) | (a >> ((32 - shift) & 0x1f));
}

static inline uint64_t ExecuteI64Ror(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3f);
  return (a >> shift) | (a << ((64 - shift) & 0x3f));
}

static inline uint64_t ExecuteI64Rol(uint64_t a, uint64_t b, TrapReason* trap) {
  uint32_t shift = (b & 0x3f);
  return (a << shift) | (a >> ((64 - shift) & 0x3f));
}

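// Turn a (possibly signaling) NaN into a quiet NaN, preserving as much of the
// payload as possible. Which value of the "signaling" bit denotes a quiet NaN
// is architecture-dependent, so probe std::numeric_limits at runtime.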
static float quiet(float a) {
  static const uint32_t kSignalingBit = 1 << 22;
  uint32_t q = bit_cast<uint32_t>(std::numeric_limits<float>::quiet_NaN());
  if ((q & kSignalingBit) != 0) {
    // On some machines, the signaling bit set indicates it's a quiet NaN.
    return bit_cast<float>(bit_cast<uint32_t>(a) | kSignalingBit);
  } else {
    // On others, the signaling bit set indicates it's a signaling NaN.
    return bit_cast<float>(bit_cast<uint32_t>(a) & ~kSignalingBit);
  }
}

static double quiet(double a) {
  static const uint64_t kSignalingBit = 1ULL << 51;
  uint64_t q = bit_cast<uint64_t>(std::numeric_limits<double>::quiet_NaN());
  if ((q & kSignalingBit) != 0) {
    // On some machines, the signaling bit set indicates it's a quiet NaN.
    return bit_cast<double>(bit_cast<uint64_t>(a) | kSignalingBit);
  } else {
    // On others, the signaling bit set indicates it's a signaling NaN.
    return bit_cast<double>(bit_cast<uint64_t>(a) & ~kSignalingBit);
  }
}

static inline float ExecuteF32Sub(float a, float b, TrapReason* trap) {
  float result = a - b;
  // Some architectures (e.g. MIPS) need extra checking to preserve the payload
  // of a NaN operand.
  if (result - result != 0) {
    if (std::isnan(a)) return quiet(a);
    if (std::isnan(b)) return quiet(b);
  }
  return result;
}

static inline float ExecuteF32Min(float a, float b, TrapReason* trap) {
  return JSMin(a, b);
}

static inline float ExecuteF32Max(float a, float b, TrapReason* trap) {
  return JSMax(a, b);
}

static inline float ExecuteF32CopySign(float a, float b, TrapReason* trap) {
  return copysignf(a, b);
}

static inline double ExecuteF64Sub(double a, double b, TrapReason* trap) {
  double result = a - b;
  // Some architectures (e.g. MIPS) need extra checking to preserve the payload
  // of a NaN operand.
  if (result - result != 0) {
    if (std::isnan(a)) return quiet(a);
    if (std::isnan(b)) return quiet(b);
  }
  return result;
}

static inline double ExecuteF64Min(double a, double b, TrapReason* trap) {
  return JSMin(a, b);
}

static inline double ExecuteF64Max(double a, double b, TrapReason* trap) {
  return JSMax(a, b);
}

static inline double ExecuteF64CopySign(double a, double b, TrapReason* trap) {
  return copysign(a, b);
}

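// The asm.js variants below never trap: division and remainder by zero yield
// 0, INT32_MIN / -1 wraps to INT32_MIN, and out-of-range float-to-int
// conversions are truncated with JavaScript ToInt32/ToUint32 semantics via
// DoubleToInt32/DoubleToUint32 (NaN becomes 0) instead of trapping.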
static inline int32_t ExecuteI32AsmjsDivS(int32_t a, int32_t b,
                                          TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1 && a == std::numeric_limits<int32_t>::min()) {
    return std::numeric_limits<int32_t>::min();
  }
  return a / b;
}

static inline uint32_t ExecuteI32AsmjsDivU(uint32_t a, uint32_t b,
                                           TrapReason* trap) {
  if (b == 0) return 0;
  return a / b;
}

static inline int32_t ExecuteI32AsmjsRemS(int32_t a, int32_t b,
                                          TrapReason* trap) {
  if (b == 0) return 0;
  if (b == -1) return 0;
  return a % b;
}

static inline uint32_t ExecuteI32AsmjsRemU(uint32_t a, uint32_t b,
                                           TrapReason* trap) {
  if (b == 0) return 0;
  return a % b;
}

static inline int32_t ExecuteI32AsmjsSConvertF32(float a, TrapReason* trap) {
  return DoubleToInt32(a);
}

static inline uint32_t ExecuteI32AsmjsUConvertF32(float a, TrapReason* trap) {
  return DoubleToUint32(a);
}

static inline int32_t ExecuteI32AsmjsSConvertF64(double a, TrapReason* trap) {
  return DoubleToInt32(a);
}

static inline uint32_t ExecuteI32AsmjsUConvertF64(double a, TrapReason* trap) {
  return DoubleToUint32(a);
}

static int32_t ExecuteI32Clz(uint32_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros32(val);
}

static uint32_t ExecuteI32Ctz(uint32_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros32(val);
}

static uint32_t ExecuteI32Popcnt(uint32_t val, TrapReason* trap) {
  return word32_popcnt_wrapper(&val);
}

static inline uint32_t ExecuteI32Eqz(uint32_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

static int64_t ExecuteI64Clz(uint64_t val, TrapReason* trap) {
  return base::bits::CountLeadingZeros64(val);
}

static inline uint64_t ExecuteI64Ctz(uint64_t val, TrapReason* trap) {
  return base::bits::CountTrailingZeros64(val);
}

static inline int64_t ExecuteI64Popcnt(uint64_t val, TrapReason* trap) {
  return word64_popcnt_wrapper(&val);
}

static inline int32_t ExecuteI64Eqz(uint64_t val, TrapReason* trap) {
  return val == 0 ? 1 : 0;
}

static inline float ExecuteF32Abs(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) & 0x7fffffff);
}

static inline float ExecuteF32Neg(float a, TrapReason* trap) {
  return bit_cast<float>(bit_cast<uint32_t>(a) ^ 0x80000000);
}

static inline float ExecuteF32Ceil(float a, TrapReason* trap) {
  return ceilf(a);
}

static inline float ExecuteF32Floor(float a, TrapReason* trap) {
  return floorf(a);
}

static inline float ExecuteF32Trunc(float a, TrapReason* trap) {
  return truncf(a);
}

static inline float ExecuteF32NearestInt(float a, TrapReason* trap) {
  return nearbyintf(a);
}

static inline float ExecuteF32Sqrt(float a, TrapReason* trap) {
  float result = sqrtf(a);
  return result;
}

static inline double ExecuteF64Abs(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) & 0x7fffffffffffffff);
}

static inline double ExecuteF64Neg(double a, TrapReason* trap) {
  return bit_cast<double>(bit_cast<uint64_t>(a) ^ 0x8000000000000000);
}

static inline double ExecuteF64Ceil(double a, TrapReason* trap) {
  return ceil(a);
}

static inline double ExecuteF64Floor(double a, TrapReason* trap) {
  return floor(a);
}

static inline double ExecuteF64Trunc(double a, TrapReason* trap) {
  return trunc(a);
}

static inline double ExecuteF64NearestInt(double a, TrapReason* trap) {
  return nearbyint(a);
}

static inline double ExecuteF64Sqrt(double a, TrapReason* trap) {
  return sqrt(a);
}

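// Checked float-to-integer truncations. Wasm requires a trap when the
// truncated value is not representable in the target integer type, so each
// conversion below guards the representable range before casting.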
static int32_t ExecuteI32SConvertF32(float a, TrapReason* trap) {
  // The upper bound is (INT32_MAX + 1), which is the lowest float-representable
  // number above INT32_MAX which cannot be represented as int32.
  float upper_bound = 2147483648.0f;
  // We use INT32_MIN as a lower bound because (INT32_MIN - 1) is not
  // representable as float, and no number between (INT32_MIN - 1) and INT32_MIN
  // is.
  float lower_bound = static_cast<float>(INT32_MIN);
  if (a < upper_bound && a >= lower_bound) {
    return static_cast<int32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}

static int32_t ExecuteI32SConvertF64(double a, TrapReason* trap) {
  // The upper bound is (INT32_MAX + 1), which is the lowest double-
  // representable number above INT32_MAX which cannot be represented as int32.
  double upper_bound = 2147483648.0;
  // The lower bound is (INT32_MIN - 1), which is the greatest double-
  // representable number below INT32_MIN which cannot be represented as int32.
  double lower_bound = -2147483649.0;
  if (a < upper_bound && a > lower_bound) {
    return static_cast<int32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}

static uint32_t ExecuteI32UConvertF32(float a, TrapReason* trap) {
  // The upper bound is (UINT32_MAX + 1), which is the lowest
  // float-representable number above UINT32_MAX which cannot be represented as
  // uint32.
  double upper_bound = 4294967296.0;
  double lower_bound = -1.0;
  if (a < upper_bound && a > lower_bound) {
    return static_cast<uint32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}

static uint32_t ExecuteI32UConvertF64(double a, TrapReason* trap) {
  // The upper bound is (UINT32_MAX + 1), which is the lowest
  // double-representable number above UINT32_MAX which cannot be represented as
  // uint32.
  double upper_bound = 4294967296.0;
  double lower_bound = -1.0;
  if (a < upper_bound && a > lower_bound) {
    return static_cast<uint32_t>(a);
  }
  *trap = kTrapFloatUnrepresentable;
  return 0;
}

static inline uint32_t ExecuteI32ConvertI64(int64_t a, TrapReason* trap) {
  return static_cast<uint32_t>(a & 0xFFFFFFFF);
}

static int64_t ExecuteI64SConvertF32(float a, TrapReason* trap) {
  int64_t output;
  if (!float32_to_int64_wrapper(&a, &output)) {
    *trap = kTrapFloatUnrepresentable;
  }
  return output;
}

static int64_t ExecuteI64SConvertF64(double a, TrapReason* trap) {
  int64_t output;
  if (!float64_to_int64_wrapper(&a, &output)) {
    *trap = kTrapFloatUnrepresentable;
  }
  return output;
}

static uint64_t ExecuteI64UConvertF32(float a, TrapReason* trap) {
  uint64_t output;
  if (!float32_to_uint64_wrapper(&a, &output)) {
    *trap = kTrapFloatUnrepresentable;
  }
  return output;
}

static uint64_t ExecuteI64UConvertF64(double a, TrapReason* trap) {
  uint64_t output;
  if (!float64_to_uint64_wrapper(&a, &output)) {
    *trap = kTrapFloatUnrepresentable;
  }
  return output;
}

static inline int64_t ExecuteI64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<int64_t>(a);
}

static inline int64_t ExecuteI64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<uint64_t>(a);
}

static inline float ExecuteF32SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

static inline float ExecuteF32UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<float>(a);
}

static inline float ExecuteF32SConvertI64(int64_t a, TrapReason* trap) {
  float output;
  int64_to_float32_wrapper(&a, &output);
  return output;
}

static inline float ExecuteF32UConvertI64(uint64_t a, TrapReason* trap) {
  float output;
  uint64_to_float32_wrapper(&a, &output);
  return output;
}

static inline float ExecuteF32ConvertF64(double a, TrapReason* trap) {
  return static_cast<float>(a);
}

static inline float ExecuteF32ReinterpretI32(int32_t a, TrapReason* trap) {
  return bit_cast<float>(a);
}

static inline double ExecuteF64SConvertI32(int32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64UConvertI32(uint32_t a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64SConvertI64(int64_t a, TrapReason* trap) {
  double output;
  int64_to_float64_wrapper(&a, &output);
  return output;
}

static inline double ExecuteF64UConvertI64(uint64_t a, TrapReason* trap) {
  double output;
  uint64_to_float64_wrapper(&a, &output);
  return output;
}

static inline double ExecuteF64ConvertF32(float a, TrapReason* trap) {
  return static_cast<double>(a);
}

static inline double ExecuteF64ReinterpretI64(int64_t a, TrapReason* trap) {
  return bit_cast<double>(a);
}

static inline int32_t ExecuteI32ReinterpretF32(float a, TrapReason* trap) {
  return bit_cast<int32_t>(a);
}

static inline int64_t ExecuteI64ReinterpretF64(double a, TrapReason* trap) {
  return bit_cast<int64_t>(a);
}

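// Implements the {grow_memory} operator: grows the instance memory by
// {delta_pages} wasm pages and returns the previous memory size in pages, or
// -1 if the memory cannot be grown.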
static inline int32_t ExecuteGrowMemory(uint32_t delta_pages,
                                        WasmInstance* instance) {
  // TODO(ahaas): Move memory allocation to wasm-module.cc for better
  // encapsulation.
  if (delta_pages > wasm::WasmModule::kV8MaxPages) {
    return -1;
  }
  uint32_t old_size = instance->mem_size;
  uint32_t new_size;
  byte* new_mem_start;
  if (instance->mem_size == 0) {
    // TODO(gdeepti): Fix bounds check to take into account size of memtype.
    new_size = delta_pages * wasm::WasmModule::kPageSize;
    new_mem_start = static_cast<byte*>(calloc(new_size, sizeof(byte)));
    if (!new_mem_start) {
      return -1;
    }
  } else {
    DCHECK_NOT_NULL(instance->mem_start);
    new_size = old_size + delta_pages * wasm::WasmModule::kPageSize;
    if (new_size >
        wasm::WasmModule::kV8MaxPages * wasm::WasmModule::kPageSize) {
      return -1;
    }
    new_mem_start = static_cast<byte*>(realloc(instance->mem_start, new_size));
    if (!new_mem_start) {
      return -1;
    }
    // Zero-initialize the memory newly allocated by realloc.
    memset(new_mem_start + old_size, 0, new_size - old_size);
  }
  instance->mem_start = new_mem_start;
  instance->mem_size = new_size;
  return static_cast<int32_t>(old_size / WasmModule::kPageSize);
}

enum InternalOpcode {
#define DECL_INTERNAL_ENUM(name, value) kInternal##name = value,
  FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_ENUM)
#undef DECL_INTERNAL_ENUM
};

static const char* OpcodeName(uint32_t val) {
  switch (val) {
#define DECL_INTERNAL_CASE(name, value) \
  case kInternal##name:                 \
    return "Internal" #name;
    FOREACH_INTERNAL_OPCODE(DECL_INTERNAL_CASE)
#undef DECL_INTERNAL_CASE
  }
  return WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(val));
}

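// The number of bytecodes interpreted in one batch by Run() before Execute()
// pauses and returns control to the caller.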
static const int kRunSteps = 1000;

// A helper class to compute the control transfers for each bytecode offset.
// Control transfers allow Br, BrIf, BrTable, If, Else, and End bytecodes to
// be directly executed without the need to dynamically track blocks.
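// For example (a sketch, not actual encodings):
//   block ... br 0 ... end   ; the br's offset maps to a positive pcdiff
//                            ; that skips forward past the matching end.
//   loop ... br 0 ... end    ; the br's offset maps to a negative pcdiff
//                            ; that jumps back to the loop header.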
class ControlTransfers : public ZoneObject {
 public:
  ControlTransferMap map_;

  ControlTransfers(Zone* zone, ModuleEnv* env, AstLocalDecls* locals,
                   const byte* start, const byte* end)
      : map_(zone) {
    // Represents a control flow label.
    struct CLabel : public ZoneObject {
      const byte* target;
      ZoneVector<const byte*> refs;

      explicit CLabel(Zone* zone) : target(nullptr), refs(zone) {}

      // Bind this label to the given PC.
      void Bind(ControlTransferMap* map, const byte* start, const byte* pc) {
        DCHECK_NULL(target);
        target = pc;
        for (auto from_pc : refs) {
          auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
          size_t offset = static_cast<size_t>(from_pc - start);
          (*map)[offset] = pcdiff;
        }
      }

      // Reference this label from the given location.
      void Ref(ControlTransferMap* map, const byte* start,
               const byte* from_pc) {
        if (target) {
          // Target being bound before a reference means this is a loop.
          DCHECK_EQ(kExprLoop, *target);
          auto pcdiff = static_cast<pcdiff_t>(target - from_pc);
          size_t offset = static_cast<size_t>(from_pc - start);
          (*map)[offset] = pcdiff;
        } else {
          refs.push_back(from_pc);
        }
      }
    };

    // An entry in the control stack.
    struct Control {
      const byte* pc;
      CLabel* end_label;
      CLabel* else_label;

      void Ref(ControlTransferMap* map, const byte* start,
               const byte* from_pc) {
        end_label->Ref(map, start, from_pc);
      }
    };

    // Compute the ControlTransfer map.
    // This algorithm maintains a stack of control constructs similar to the
    // AST decoder. The {control_stack} allows matching {br,br_if,br_table}
    // bytecodes with their target, as well as determining whether the current
    // bytecodes are within the true or false block of an else.
    std::vector<Control> control_stack;
    CLabel* func_label = new (zone) CLabel(zone);
    control_stack.push_back({start, func_label, nullptr});
    for (BytecodeIterator i(start, end, locals); i.has_next(); i.next()) {
      WasmOpcode opcode = i.current();
      TRACE("@%u: control %s\n", i.pc_offset(),
            WasmOpcodes::OpcodeName(opcode));
      switch (opcode) {
        case kExprBlock: {
          TRACE("control @%u: Block\n", i.pc_offset());
          CLabel* label = new (zone) CLabel(zone);
          control_stack.push_back({i.pc(), label, nullptr});
          break;
        }
        case kExprLoop: {
          TRACE("control @%u: Loop\n", i.pc_offset());
          CLabel* label = new (zone) CLabel(zone);
          control_stack.push_back({i.pc(), label, nullptr});
          label->Bind(&map_, start, i.pc());
          break;
        }
        case kExprIf: {
          TRACE("control @%u: If\n", i.pc_offset());
          CLabel* end_label = new (zone) CLabel(zone);
          CLabel* else_label = new (zone) CLabel(zone);
          control_stack.push_back({i.pc(), end_label, else_label});
          else_label->Ref(&map_, start, i.pc());
          break;
        }
        case kExprElse: {
          Control* c = &control_stack.back();
          TRACE("control @%u: Else\n", i.pc_offset());
          c->end_label->Ref(&map_, start, i.pc());
          DCHECK_NOT_NULL(c->else_label);
          c->else_label->Bind(&map_, start, i.pc() + 1);
          c->else_label = nullptr;
          break;
        }
        case kExprEnd: {
          Control* c = &control_stack.back();
          TRACE("control @%u: End\n", i.pc_offset());
          if (c->end_label->target) {
            // Only loops have bound labels.
            DCHECK_EQ(kExprLoop, *c->pc);
          } else {
            if (c->else_label) c->else_label->Bind(&map_, start, i.pc());
            c->end_label->Bind(&map_, start, i.pc() + 1);
          }
          control_stack.pop_back();
          break;
        }
        case kExprBr: {
          BreakDepthOperand operand(&i, i.pc());
          TRACE("control @%u: Br[depth=%u]\n", i.pc_offset(), operand.depth);
          Control* c = &control_stack[control_stack.size() - operand.depth - 1];
          c->Ref(&map_, start, i.pc());
          break;
        }
        case kExprBrIf: {
          BreakDepthOperand operand(&i, i.pc());
          TRACE("control @%u: BrIf[depth=%u]\n", i.pc_offset(), operand.depth);
          Control* c = &control_stack[control_stack.size() - operand.depth - 1];
          c->Ref(&map_, start, i.pc());
          break;
        }
        case kExprBrTable: {
          BranchTableOperand operand(&i, i.pc());
          BranchTableIterator iterator(&i, operand);
          TRACE("control @%u: BrTable[count=%u]\n", i.pc_offset(),
                operand.table_count);
          while (iterator.has_next()) {
            uint32_t j = iterator.cur_index();
            uint32_t target = iterator.next();
            Control* c = &control_stack[control_stack.size() - target - 1];
            c->Ref(&map_, start, i.pc() + j);
          }
          break;
        }
        default: {
          break;
        }
      }
    }
    if (!func_label->target) func_label->Bind(&map_, start, end);
  }

  pcdiff_t Lookup(pc_t from) {
    auto result = map_.find(from);
    if (result == map_.end()) {
      V8_Fatal(__FILE__, __LINE__, "no control target for pc %zu", from);
    }
    return result->second;
  }
};

// Code and metadata needed to execute a function.
struct InterpreterCode {
  const WasmFunction* function;  // wasm function
  AstLocalDecls locals;          // local declarations
  const byte* orig_start;        // start of original code
  const byte* orig_end;          // end of original code
  byte* start;                   // start of (maybe altered) code
  byte* end;                     // end of (maybe altered) code
  ControlTransfers* targets;     // helper for control flow.

  const byte* at(pc_t pc) { return start + pc; }
};

// The main storage for interpreter code. It maps {WasmFunction} to the
// metadata needed to execute each function.
class CodeMap {
 public:
  Zone* zone_;
  const WasmModule* module_;
  ZoneVector<InterpreterCode> interpreter_code_;

  CodeMap(const WasmModule* module, Zone* zone)
      : zone_(zone), module_(module), interpreter_code_(zone) {
    if (module == nullptr) return;
    for (size_t i = 0; i < module->functions.size(); ++i) {
      const WasmFunction* function = &module->functions[i];
      const byte* code_start =
          module->module_start + function->code_start_offset;
      const byte* code_end = module->module_start + function->code_end_offset;
      AddFunction(function, code_start, code_end);
    }
  }

  InterpreterCode* FindCode(const WasmFunction* function) {
    if (function->func_index < interpreter_code_.size()) {
      InterpreterCode* code = &interpreter_code_[function->func_index];
      DCHECK_EQ(function, code->function);
      return Preprocess(code);
    }
    return nullptr;
  }

  InterpreterCode* GetCode(uint32_t function_index) {
    CHECK_LT(function_index, interpreter_code_.size());
    return Preprocess(&interpreter_code_[function_index]);
  }

  InterpreterCode* GetIndirectCode(uint32_t table_index, uint32_t entry_index) {
    if (table_index >= module_->function_tables.size()) return nullptr;
    const WasmIndirectFunctionTable* table =
        &module_->function_tables[table_index];
    if (entry_index >= table->values.size()) return nullptr;
    uint32_t index = table->values[entry_index];
    if (index >= interpreter_code_.size()) return nullptr;
    return GetCode(index);
  }

  InterpreterCode* Preprocess(InterpreterCode* code) {
    if (code->targets == nullptr && code->start) {
      // Compute the control targets map and the local declarations.
      CHECK(DecodeLocalDecls(code->locals, code->start, code->end));
      ModuleEnv env = {module_, nullptr, kWasmOrigin};
      code->targets = new (zone_) ControlTransfers(
          zone_, &env, &code->locals, code->orig_start, code->orig_end);
    }
    return code;
  }

  int AddFunction(const WasmFunction* function, const byte* code_start,
                  const byte* code_end) {
    InterpreterCode code = {
        function, AstLocalDecls(zone_),          code_start,
        code_end, const_cast<byte*>(code_start), const_cast<byte*>(code_end),
        nullptr};

    DCHECK_EQ(interpreter_code_.size(), function->func_index);
    interpreter_code_.push_back(code);
    return static_cast<int>(interpreter_code_.size()) - 1;
  }

  bool SetFunctionCode(const WasmFunction* function, const byte* start,
                       const byte* end) {
    InterpreterCode* code = FindCode(function);
    if (code == nullptr) return false;
    code->targets = nullptr;
    code->orig_start = start;
    code->orig_end = end;
    code->start = const_cast<byte*>(start);
    code->end = const_cast<byte*>(end);
    Preprocess(code);
    return true;
  }
};

// Responsible for executing code directly.
class ThreadImpl : public WasmInterpreter::Thread {
 public:
  ThreadImpl(Zone* zone, CodeMap* codemap, WasmInstance* instance)
      : codemap_(codemap),
        instance_(instance),
        stack_(zone),
        frames_(zone),
        blocks_(zone),
        state_(WasmInterpreter::STOPPED),
        break_pc_(kInvalidPc),
        trap_reason_(kTrapCount),
        possible_nondeterminism_(false) {}

  virtual ~ThreadImpl() {}

  //==========================================================================
  // Implementation of public interface for WasmInterpreter::Thread.
  //==========================================================================

  virtual WasmInterpreter::State state() { return state_; }

  virtual void PushFrame(const WasmFunction* function, WasmVal* args) {
    InterpreterCode* code = codemap()->FindCode(function);
    CHECK_NOT_NULL(code);
    frames_.push_back({code, 0, 0, stack_.size()});
    for (size_t i = 0; i < function->sig->parameter_count(); ++i) {
      stack_.push_back(args[i]);
    }
    frames_.back().ret_pc = InitLocals(code);
    blocks_.push_back(
        {0, stack_.size(), frames_.size(),
         static_cast<uint32_t>(code->function->sig->return_count())});
    TRACE("  => PushFrame(#%u @%zu)\n", code->function->func_index,
          frames_.back().ret_pc);
  }

  virtual WasmInterpreter::State Run() {
    do {
      TRACE("  => Run()\n");
      if (state_ == WasmInterpreter::STOPPED ||
          state_ == WasmInterpreter::PAUSED) {
        state_ = WasmInterpreter::RUNNING;
        Execute(frames_.back().code, frames_.back().ret_pc, kRunSteps);
      }
    } while (state_ == WasmInterpreter::STOPPED);
    return state_;
  }

  virtual WasmInterpreter::State Step() {
    TRACE("  => Step()\n");
    if (state_ == WasmInterpreter::STOPPED ||
        state_ == WasmInterpreter::PAUSED) {
      state_ = WasmInterpreter::RUNNING;
      Execute(frames_.back().code, frames_.back().ret_pc, 1);
    }
    return state_;
  }

  virtual void Pause() { UNIMPLEMENTED(); }

  virtual void Reset() {
    TRACE("----- RESET -----\n");
    stack_.clear();
    frames_.clear();
    state_ = WasmInterpreter::STOPPED;
    trap_reason_ = kTrapCount;
    possible_nondeterminism_ = false;
  }

  virtual int GetFrameCount() { return static_cast<int>(frames_.size()); }

  virtual const WasmFrame* GetFrame(int index) {
    UNIMPLEMENTED();
    return nullptr;
  }

  virtual WasmFrame* GetMutableFrame(int index) {
    UNIMPLEMENTED();
    return nullptr;
  }

  virtual WasmVal GetReturnValue(int index) {
    if (state_ == WasmInterpreter::TRAPPED) return WasmVal(0xdeadbeef);
    CHECK_EQ(WasmInterpreter::FINISHED, state_);
    CHECK_LT(static_cast<size_t>(index), stack_.size());
    return stack_[index];
  }

  virtual pc_t GetBreakpointPc() { return break_pc_; }

  virtual bool PossibleNondeterminism() { return possible_nondeterminism_; }

  bool Terminated() {
    return state_ == WasmInterpreter::TRAPPED ||
           state_ == WasmInterpreter::FINISHED;
  }

 private:
  // Entries on the stack of functions being evaluated.
  struct Frame {
    InterpreterCode* code;
    pc_t call_pc;
    pc_t ret_pc;
    sp_t sp;

    // Limit of parameters.
    sp_t plimit() { return sp + code->function->sig->parameter_count(); }
    // Limit of locals.
    sp_t llimit() { return plimit() + code->locals.total_local_count; }
  };

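  // An entry on the control stack, recording the value stack height {sp},
  // the frame count {fp}, and the result {arity} at block entry, so that
  // branches can unwind to it (see DoBreak).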
  struct Block {
    pc_t pc;
    sp_t sp;
    size_t fp;
    unsigned arity;
  };

  CodeMap* codemap_;
  WasmInstance* instance_;
  ZoneVector<WasmVal> stack_;
  ZoneVector<Frame> frames_;
  ZoneVector<Block> blocks_;
  WasmInterpreter::State state_;
  pc_t break_pc_;
  TrapReason trap_reason_;
  bool possible_nondeterminism_;

  CodeMap* codemap() { return codemap_; }
  WasmInstance* instance() { return instance_; }
  const WasmModule* module() { return instance_->module; }

  void DoTrap(TrapReason trap, pc_t pc) {
    state_ = WasmInterpreter::TRAPPED;
    trap_reason_ = trap;
    CommitPc(pc);
  }

  // Push a frame with arguments already on the stack.
  void PushFrame(InterpreterCode* code, pc_t call_pc, pc_t ret_pc) {
    CHECK_NOT_NULL(code);
    DCHECK(!frames_.empty());
    frames_.back().call_pc = call_pc;
    frames_.back().ret_pc = ret_pc;
    size_t arity = code->function->sig->parameter_count();
    DCHECK_GE(stack_.size(), arity);
    // The parameters will overlap the arguments already on the stack.
    frames_.push_back({code, 0, 0, stack_.size() - arity});
    blocks_.push_back(
        {0, stack_.size(), frames_.size(),
         static_cast<uint32_t>(code->function->sig->return_count())});
    frames_.back().ret_pc = InitLocals(code);
    TRACE("  => push func#%u @%zu\n", code->function->func_index,
          frames_.back().ret_pc);
  }

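  // Push zero-initialized values for all declared locals of {code} and return
  // the pc just past the local declarations, i.e. the first real bytecode.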
  pc_t InitLocals(InterpreterCode* code) {
    for (auto p : code->locals.local_types) {
      WasmVal val;
      switch (p.first) {
        case kAstI32:
          val = WasmVal(static_cast<int32_t>(0));
          break;
        case kAstI64:
          val = WasmVal(static_cast<int64_t>(0));
          break;
        case kAstF32:
          val = WasmVal(static_cast<float>(0));
          break;
        case kAstF64:
          val = WasmVal(static_cast<double>(0));
          break;
        default:
          UNREACHABLE();
          break;
      }
      stack_.insert(stack_.end(), p.second, val);
    }
    return code->locals.decls_encoded_size;
  }

  void CommitPc(pc_t pc) {
    if (!frames_.empty()) {
      frames_.back().ret_pc = pc;
    }
  }

  bool SkipBreakpoint(InterpreterCode* code, pc_t pc) {
    if (pc == break_pc_) {
      // Skip the previously hit breakpoint when resuming.
      break_pc_ = kInvalidPc;
      return true;
    }
    return false;
  }

  int LookupTarget(InterpreterCode* code, pc_t pc) {
    return static_cast<int>(code->targets->Lookup(pc));
  }

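  // Unwind the block stack to the block {depth} levels up, transferring that
  // block's {arity} result values, and return the pc delta to the branch
  // target.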
  int DoBreak(InterpreterCode* code, pc_t pc, size_t depth) {
    size_t bp = blocks_.size() - depth - 1;
    Block* target = &blocks_[bp];
    DoStackTransfer(target->sp, target->arity);
    blocks_.resize(bp);
    return LookupTarget(code, pc);
  }

  bool DoReturn(InterpreterCode** code, pc_t* pc, pc_t* limit, size_t arity) {
    DCHECK_GT(frames_.size(), 0u);
    // Pop all blocks for this frame.
    while (!blocks_.empty() && blocks_.back().fp == frames_.size()) {
      blocks_.pop_back();
    }

    sp_t dest = frames_.back().sp;
    frames_.pop_back();
    if (frames_.size() == 0) {
      // A return from the last frame terminates the execution.
      state_ = WasmInterpreter::FINISHED;
      DoStackTransfer(0, arity);
      TRACE("  => finish\n");
      return false;
    } else {
      // Return to caller frame.
      Frame* top = &frames_.back();
      *code = top->code;
      *pc = top->ret_pc;
      *limit = top->code->end - top->code->start;
      TRACE("  => pop func#%u @%zu\n", (*code)->function->func_index, *pc);
      DoStackTransfer(dest, arity);
      return true;
    }
  }

  void DoCall(InterpreterCode* target, pc_t* pc, pc_t ret_pc, pc_t* limit) {
    PushFrame(target, *pc, ret_pc);
    *pc = frames_.back().ret_pc;
    *limit = target->end - target->start;
  }

  // Copies {arity} values on the top of the stack down the stack to {dest},
  // dropping the values in-between.
  void DoStackTransfer(sp_t dest, size_t arity) {
    // before: |---------------| pop_count | arity |
    //         ^ 0             ^ dest              ^ stack_.size()
    //
    // after:  |---------------| arity |
    //         ^ 0                     ^ stack_.size()
    DCHECK_LE(dest, stack_.size());
    DCHECK_LE(dest + arity, stack_.size());
    size_t pop_count = stack_.size() - dest - arity;
    for (size_t i = 0; i < arity; i++) {
      stack_[dest + i] = stack_[dest + pop_count + i];
    }
    stack_.resize(stack_.size() - pop_count);
  }

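  // The main interpreter loop: execute at most {max} bytecodes of {code},
  // starting at {pc}. Calls and returns switch {code} in place; traps,
  // breakpoints, and the step limit commit the pc and return to the caller.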
  void Execute(InterpreterCode* code, pc_t pc, int max) {
    Decoder decoder(code->start, code->end);
    pc_t limit = code->end - code->start;
    while (true) {
      if (max-- <= 0) {
        // Maximum number of instructions reached.
        state_ = WasmInterpreter::PAUSED;
        return CommitPc(pc);
      }

      if (pc >= limit) {
        // Fell off end of code; do an implicit return.
        TRACE("@%-3zu: ImplicitReturn\n", pc);
        if (!DoReturn(&code, &pc, &limit, code->function->sig->return_count()))
          return;
        decoder.Reset(code->start, code->end);
        continue;
      }

      const char* skip = "        ";
      int len = 1;
      byte opcode = code->start[pc];
      byte orig = opcode;
      if (opcode == kInternalBreakpoint) {
        orig = code->orig_start[pc];
        if (SkipBreakpoint(code, pc)) {
          // skip breakpoint by switching on original code.
          skip = "[skip]  ";
        } else {
          state_ = WasmInterpreter::PAUSED;
          TRACE("@%-3zu: [break] %-24s:", pc,
                WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
          TraceValueStack();
          TRACE("\n");
          break_pc_ = pc;
          return CommitPc(pc);
        }
      }

      USE(skip);
      TRACE("@%-3zu: %s%-24s:", pc, skip,
            WasmOpcodes::OpcodeName(static_cast<WasmOpcode>(orig)));
      TraceValueStack();
      TRACE("\n");

      switch (orig) {
        case kExprNop:
          break;
        case kExprBlock: {
          BlockTypeOperand operand(&decoder, code->at(pc));
          blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
          len = 1 + operand.length;
          break;
        }
        case kExprLoop: {
          BlockTypeOperand operand(&decoder, code->at(pc));
          blocks_.push_back({pc, stack_.size(), frames_.size(), 0});
          len = 1 + operand.length;
          break;
        }
        case kExprIf: {
          BlockTypeOperand operand(&decoder, code->at(pc));
          WasmVal cond = Pop();
          bool is_true = cond.to<uint32_t>() != 0;
          blocks_.push_back({pc, stack_.size(), frames_.size(), operand.arity});
          if (is_true) {
            // fall through to the true block.
            len = 1 + operand.length;
            TRACE("  true => fallthrough\n");
          } else {
            len = LookupTarget(code, pc);
            TRACE("  false => @%zu\n", pc + len);
          }
          break;
        }
        case kExprElse: {
          blocks_.pop_back();
          len = LookupTarget(code, pc);
          TRACE("  end => @%zu\n", pc + len);
          break;
        }
        case kExprSelect: {
          WasmVal cond = Pop();
          WasmVal fval = Pop();
          WasmVal tval = Pop();
          Push(pc, cond.to<int32_t>() != 0 ? tval : fval);
          break;
        }
        case kExprBr: {
          BreakDepthOperand operand(&decoder, code->at(pc));
          len = DoBreak(code, pc, operand.depth);
          TRACE("  br => @%zu\n", pc + len);
          break;
        }
        case kExprBrIf: {
          BreakDepthOperand operand(&decoder, code->at(pc));
          WasmVal cond = Pop();
          bool is_true = cond.to<uint32_t>() != 0;
          if (is_true) {
            len = DoBreak(code, pc, operand.depth);
            TRACE("  br_if => @%zu\n", pc + len);
          } else {
            TRACE("  false => fallthrough\n");
            len = 1 + operand.length;
          }
          break;
        }
        case kExprBrTable: {
          BranchTableOperand operand(&decoder, code->at(pc));
          BranchTableIterator iterator(&decoder, operand);
          uint32_t key = Pop().to<uint32_t>();
          uint32_t depth = 0;
          if (key >= operand.table_count) key = operand.table_count;
          for (uint32_t i = 0; i <= key; i++) {
            DCHECK(iterator.has_next());
            depth = iterator.next();
          }
          len = key + DoBreak(code, pc + key, static_cast<size_t>(depth));
          TRACE("  br[%u] => @%zu\n", key, pc + key + len);
          break;
        }
        case kExprReturn: {
          size_t arity = code->function->sig->return_count();
          if (!DoReturn(&code, &pc, &limit, arity)) return;
          decoder.Reset(code->start, code->end);
          continue;
        }
        case kExprUnreachable: {
          DoTrap(kTrapUnreachable, pc);
          return CommitPc(pc);
        }
        case kExprEnd: {
          blocks_.pop_back();
          break;
        }
        case kExprI8Const: {
          ImmI8Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprI32Const: {
          ImmI32Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprI64Const: {
          ImmI64Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprF32Const: {
          ImmF32Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprF64Const: {
          ImmF64Operand operand(&decoder, code->at(pc));
          Push(pc, WasmVal(operand.value));
          len = 1 + operand.length;
          break;
        }
        case kExprGetLocal: {
          LocalIndexOperand operand(&decoder, code->at(pc));
          Push(pc, stack_[frames_.back().sp + operand.index]);
          len = 1 + operand.length;
          break;
        }
        case kExprSetLocal: {
          LocalIndexOperand operand(&decoder, code->at(pc));
          WasmVal val = Pop();
          stack_[frames_.back().sp + operand.index] = val;
          len = 1 + operand.length;
          break;
        }
        case kExprTeeLocal: {
          LocalIndexOperand operand(&decoder, code->at(pc));
          WasmVal val = Pop();
          stack_[frames_.back().sp + operand.index] = val;
          Push(pc, val);
          len = 1 + operand.length;
          break;
        }
        case kExprDrop: {
          Pop();
          break;
        }
        case kExprCallFunction: {
          CallFunctionOperand operand(&decoder, code->at(pc));
          InterpreterCode* target = codemap()->GetCode(operand.index);
          DoCall(target, &pc, pc + 1 + operand.length, &limit);
          code = target;
          decoder.Reset(code->start, code->end);
          continue;
        }
        case kExprCallIndirect: {
          CallIndirectOperand operand(&decoder, code->at(pc));
          uint32_t entry_index = Pop().to<uint32_t>();
          // Assume only one table for now.
          DCHECK_LE(module()->function_tables.size(), 1u);
          InterpreterCode* target = codemap()->GetIndirectCode(0, entry_index);
          if (target == nullptr) {
            return DoTrap(kTrapFuncInvalid, pc);
          } else if (target->function->sig_index != operand.index) {
            // If not an exact match, we have to do a canonical check.
            // TODO(titzer): make this faster with some kind of caching?
            const WasmIndirectFunctionTable* table =
                &module()->function_tables[0];
            int function_key = table->map.Find(target->function->sig);
            if (function_key < 0 ||
                (function_key !=
                 table->map.Find(module()->signatures[operand.index]))) {
              return DoTrap(kTrapFuncSigMismatch, pc);
            }
          }

          DoCall(target, &pc, pc + 1 + operand.length, &limit);
          code = target;
          decoder.Reset(code->start, code->end);
          continue;
        }
        case kExprGetGlobal: {
          GlobalIndexOperand operand(&decoder, code->at(pc));
          const WasmGlobal* global = &module()->globals[operand.index];
          byte* ptr = instance()->globals_start + global->offset;
          LocalType type = global->type;
          WasmVal val;
          if (type == kAstI32) {
            val = WasmVal(*reinterpret_cast<int32_t*>(ptr));
          } else if (type == kAstI64) {
            val = WasmVal(*reinterpret_cast<int64_t*>(ptr));
          } else if (type == kAstF32) {
            val = WasmVal(*reinterpret_cast<float*>(ptr));
          } else if (type == kAstF64) {
            val = WasmVal(*reinterpret_cast<double*>(ptr));
          } else {
            UNREACHABLE();
          }
          Push(pc, val);
          len = 1 + operand.length;
          break;
        }
        case kExprSetGlobal: {
          GlobalIndexOperand operand(&decoder, code->at(pc));
          const WasmGlobal* global = &module()->globals[operand.index];
          byte* ptr = instance()->globals_start + global->offset;
          LocalType type = global->type;
          WasmVal val = Pop();
          if (type == kAstI32) {
            *reinterpret_cast<int32_t*>(ptr) = val.to<int32_t>();
          } else if (type == kAstI64) {
            *reinterpret_cast<int64_t*>(ptr) = val.to<int64_t>();
          } else if (type == kAstF32) {
            *reinterpret_cast<float*>(ptr) = val.to<float>();
          } else if (type == kAstF64) {
            *reinterpret_cast<double*>(ptr) = val.to<double>();
          } else {
            UNREACHABLE();
          }
          len = 1 + operand.length;
          break;
        }

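// Memory access bytecodes. The effective address is {index + offset}; the
// bounds check below is arranged against {mem_size - sizeof(mtype)} so that
// the addition cannot overflow.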
#define LOAD_CASE(name, ctype, mtype)                                       \
  case kExpr##name: {                                                       \
    MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype));     \
    uint32_t index = Pop().to<uint32_t>();                                  \
    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);       \
    if (operand.offset > effective_mem_size ||                              \
        index > (effective_mem_size - operand.offset)) {                    \
      return DoTrap(kTrapMemOutOfBounds, pc);                               \
    }                                                                       \
    byte* addr = instance()->mem_start + operand.offset + index;            \
    WasmVal result(static_cast<ctype>(ReadLittleEndianValue<mtype>(addr))); \
    Push(pc, result);                                                       \
    len = 1 + operand.length;                                               \
    break;                                                                  \
  }

          LOAD_CASE(I32LoadMem8S, int32_t, int8_t);
          LOAD_CASE(I32LoadMem8U, int32_t, uint8_t);
          LOAD_CASE(I32LoadMem16S, int32_t, int16_t);
          LOAD_CASE(I32LoadMem16U, int32_t, uint16_t);
          LOAD_CASE(I64LoadMem8S, int64_t, int8_t);
          LOAD_CASE(I64LoadMem8U, int64_t, uint8_t);
          LOAD_CASE(I64LoadMem16S, int64_t, int16_t);
          LOAD_CASE(I64LoadMem16U, int64_t, uint16_t);
          LOAD_CASE(I64LoadMem32S, int64_t, int32_t);
          LOAD_CASE(I64LoadMem32U, int64_t, uint32_t);
          LOAD_CASE(I32LoadMem, int32_t, int32_t);
          LOAD_CASE(I64LoadMem, int64_t, int64_t);
          LOAD_CASE(F32LoadMem, float, float);
          LOAD_CASE(F64LoadMem, double, double);
#undef LOAD_CASE

#define STORE_CASE(name, ctype, mtype)                                      \
  case kExpr##name: {                                                       \
    MemoryAccessOperand operand(&decoder, code->at(pc), sizeof(ctype));     \
    WasmVal val = Pop();                                                    \
    uint32_t index = Pop().to<uint32_t>();                                  \
    size_t effective_mem_size = instance()->mem_size - sizeof(mtype);       \
    if (operand.offset > effective_mem_size ||                              \
        index > (effective_mem_size - operand.offset)) {                    \
      return DoTrap(kTrapMemOutOfBounds, pc);                               \
    }                                                                       \
    byte* addr = instance()->mem_start + operand.offset + index;            \
    WriteLittleEndianValue<mtype>(addr, static_cast<mtype>(val.to<ctype>())); \
    len = 1 + operand.length;                                               \
    break;                                                                  \
  }

          STORE_CASE(I32StoreMem8, int32_t, int8_t);
          STORE_CASE(I32StoreMem16, int32_t, int16_t);
          STORE_CASE(I64StoreMem8, int64_t, int8_t);
          STORE_CASE(I64StoreMem16, int64_t, int16_t);
          STORE_CASE(I64StoreMem32, int64_t, int32_t);
          STORE_CASE(I32StoreMem, int32_t, int32_t);
          STORE_CASE(I64StoreMem, int64_t, int64_t);
          STORE_CASE(F32StoreMem, float, float);
          STORE_CASE(F64StoreMem, double, double);
#undef STORE_CASE

#define ASMJS_LOAD_CASE(name, ctype, mtype, defval)                 \
  case kExpr##name: {                                               \
    uint32_t index = Pop().to<uint32_t>();                          \
    ctype result;                                                   \
    if (index >= (instance()->mem_size - sizeof(mtype))) {          \
      result = defval;                                              \
    } else {                                                        \
      byte* addr = instance()->mem_start + index;                   \
      /* TODO(titzer): alignment for asmjs load mem? */             \
      result = static_cast<ctype>(*reinterpret_cast<mtype*>(addr)); \
    }                                                               \
    Push(pc, WasmVal(result));                                      \
    break;                                                          \
  }
          ASMJS_LOAD_CASE(I32AsmjsLoadMem8S, int32_t, int8_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem8U, int32_t, uint8_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem16S, int32_t, int16_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem16U, int32_t, uint16_t, 0);
          ASMJS_LOAD_CASE(I32AsmjsLoadMem, int32_t, int32_t, 0);
          ASMJS_LOAD_CASE(F32AsmjsLoadMem, float, float,
                          std::numeric_limits<float>::quiet_NaN());
          ASMJS_LOAD_CASE(F64AsmjsLoadMem, double, double,
                          std::numeric_limits<double>::quiet_NaN());
#undef ASMJS_LOAD_CASE

#define ASMJS_STORE_CASE(name, ctype, mtype)                                  \
  case kExpr##name: {                                                         \
    WasmVal val = Pop();                                                      \
    uint32_t index = Pop().to<uint32_t>();                                    \
    if (index < (instance()->mem_size - sizeof(mtype))) {                     \
      byte* addr = instance()->mem_start + index;                             \
      /* TODO(titzer): alignment for asmjs store mem? */                      \
      *(reinterpret_cast<mtype*>(addr)) = static_cast<mtype>(val.to<ctype>()); \
    }                                                                         \
    Push(pc, val);                                                            \
    break;                                                                    \
  }

          ASMJS_STORE_CASE(I32AsmjsStoreMem8, int32_t, int8_t);
          ASMJS_STORE_CASE(I32AsmjsStoreMem16, int32_t, int16_t);
          ASMJS_STORE_CASE(I32AsmjsStoreMem, int32_t, int32_t);
          ASMJS_STORE_CASE(F32AsmjsStoreMem, float, float);
          ASMJS_STORE_CASE(F64AsmjsStoreMem, double, double);
#undef ASMJS_STORE_CASE
1594 case kExprGrowMemory: {
1595 MemoryIndexOperand operand(&decoder, code->at(pc));
1596 uint32_t delta_pages = Pop().to<uint32_t>();
1597 Push(pc, WasmVal(ExecuteGrowMemory(delta_pages, instance())));
1598 len = 1 + operand.length;
1599 break;
1600 }
1601 case kExprMemorySize: {
1602 MemoryIndexOperand operand(&decoder, code->at(pc));
1603 Push(pc, WasmVal(static_cast<uint32_t>(instance()->mem_size /
1604 WasmModule::kPageSize)));
1605 len = 1 + operand.length;
1606 break;
1607 }
#define EXECUTE_SIMPLE_BINOP(name, ctype, op)             \
  case kExpr##name: {                                     \
    WasmVal rval = Pop();                                 \
    WasmVal lval = Pop();                                 \
    WasmVal result(lval.to<ctype>() op rval.to<ctype>()); \
    Push(pc, result);                                     \
    break;                                                \
  }
        FOREACH_SIMPLE_BINOP(EXECUTE_SIMPLE_BINOP)
#undef EXECUTE_SIMPLE_BINOP
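
// For illustration, the I32Add entry of FOREACH_SIMPLE_BINOP expands to the
// following (a sketch; the preprocessor emits the real code):
//
//   case kExprI32Add: {
//     WasmVal rval = Pop();
//     WasmVal lval = Pop();
//     WasmVal result(lval.to<uint32_t>() + rval.to<uint32_t>());
//     Push(pc, result);
//     break;
//   }
//
// The right operand is popped first because it was pushed last, and the
// unsigned type makes i32.add's wrap-around well-defined in C++.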

#define EXECUTE_SIMPLE_BINOP_NAN(name, ctype, op)        \
  case kExpr##name: {                                    \
    WasmVal rval = Pop();                                \
    WasmVal lval = Pop();                                \
    ctype result = lval.to<ctype>() op rval.to<ctype>(); \
    possible_nondeterminism_ |= std::isnan(result);      \
    WasmVal result_val(result);                          \
    Push(pc, result_val);                                \
    break;                                               \
  }
        FOREACH_SIMPLE_BINOP_NAN(EXECUTE_SIMPLE_BINOP_NAN)
#undef EXECUTE_SIMPLE_BINOP_NAN
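
// The _NAN variants above differ from the simple binops only in recording NaN
// results. Wasm does not fully specify NaN bit patterns, so a NaN produced
// here is a point where the interpreter and compiled code may legitimately
// differ bit-for-bit; possible_nondeterminism_ lets callers detect that a run
// may not be reproducible. E.g. 0.0f * infinity yields such a NaN.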

#define EXECUTE_OTHER_BINOP(name, ctype)              \
  case kExpr##name: {                                 \
    TrapReason trap = kTrapCount;                     \
    volatile ctype rval = Pop().to<ctype>();          \
    volatile ctype lval = Pop().to<ctype>();          \
    WasmVal result(Execute##name(lval, rval, &trap)); \
    if (trap != kTrapCount) return DoTrap(trap, pc);  \
    Push(pc, result);                                 \
    break;                                            \
  }
        FOREACH_OTHER_BINOP(EXECUTE_OTHER_BINOP)
#undef EXECUTE_OTHER_BINOP
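
// Here kTrapCount serves as a "no trap" sentinel: each Execute##name helper
// reports a real TrapReason through {trap} (division by zero, unrepresentable
// conversion, etc.), and the check above converts it into DoTrap before any
// result is pushed. The {volatile} qualifiers are presumably there to keep
// the compiler from folding or reordering the operation across the trap
// check.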

#define EXECUTE_OTHER_UNOP(name, ctype)              \
  case kExpr##name: {                                \
    TrapReason trap = kTrapCount;                    \
    volatile ctype val = Pop().to<ctype>();          \
    WasmVal result(Execute##name(val, &trap));       \
    if (trap != kTrapCount) return DoTrap(trap, pc); \
    Push(pc, result);                                \
    break;                                           \
  }
        FOREACH_OTHER_UNOP(EXECUTE_OTHER_UNOP)
#undef EXECUTE_OTHER_UNOP

#define EXECUTE_OTHER_UNOP_NAN(name, ctype)          \
  case kExpr##name: {                                \
    TrapReason trap = kTrapCount;                    \
    volatile ctype val = Pop().to<ctype>();          \
    ctype result = Execute##name(val, &trap);        \
    possible_nondeterminism_ |= std::isnan(result);  \
    WasmVal result_val(result);                      \
    if (trap != kTrapCount) return DoTrap(trap, pc); \
    Push(pc, result_val);                            \
    break;                                           \
  }
        FOREACH_OTHER_UNOP_NAN(EXECUTE_OTHER_UNOP_NAN)
#undef EXECUTE_OTHER_UNOP_NAN
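
// One subtlety in the _NAN unop case above: possible_nondeterminism_ is
// updated before the trap check, so the flag can be set even on a path that
// then traps. That is harmless, since the flag is only a conservative "this
// run may not be reproducible" signal, but it is worth knowing when reading
// traces.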

        default:
          V8_Fatal(__FILE__, __LINE__, "Unknown or unimplemented opcode #%d:%s",
                   code->start[pc], OpcodeName(code->start[pc]));
          UNREACHABLE();
      }

      pc += len;
    }
    UNREACHABLE();  // above decoding loop should run forever.
  }

  WasmVal Pop() {
    DCHECK_GT(stack_.size(), 0u);
    DCHECK_GT(frames_.size(), 0u);
    DCHECK_GT(stack_.size(), frames_.back().llimit());  // can't pop into locals
    WasmVal val = stack_.back();
    stack_.pop_back();
    return val;
  }

  void PopN(int n) {
    DCHECK_GE(stack_.size(), static_cast<size_t>(n));
    DCHECK_GT(frames_.size(), 0u);
    size_t nsize = stack_.size() - n;
    DCHECK_GE(nsize, frames_.back().llimit());  // can't pop into locals
    stack_.resize(nsize);
  }

  WasmVal PopArity(size_t arity) {
    if (arity == 0) return WasmVal();
    CHECK_EQ(1u, arity);  // only 0- or 1-value results are supported.
    return Pop();
  }

  void Push(pc_t pc, WasmVal val) {
    // TODO(titzer): store PC as well?
    if (val.type != kAstStmt) stack_.push_back(val);  // drop void results.
  }

  void TraceStack(const char* phase, pc_t pc) {
    if (FLAG_trace_wasm_interpreter) {
      PrintF("%s @%zu", phase, pc);
      UNIMPLEMENTED();
      PrintF("\n");
    }
  }

  void TraceValueStack() {
    Frame* top = frames_.size() > 0 ? &frames_.back() : nullptr;
    sp_t sp = top ? top->sp : 0;
    sp_t plimit = top ? top->plimit() : 0;
    sp_t llimit = top ? top->llimit() : 0;
    if (FLAG_trace_wasm_interpreter) {
      for (size_t i = sp; i < stack_.size(); ++i) {
        if (i < plimit)
          PrintF(" p%zu:", i);  // parameter slot
        else if (i < llimit)
          PrintF(" l%zu:", i);  // local slot
        else
          PrintF(" s%zu:", i);  // operand stack slot
        WasmVal val = stack_[i];
        switch (val.type) {
          case kAstI32:
            PrintF("i32:%d", val.to<int32_t>());
            break;
          case kAstI64:
            PrintF("i64:%" PRId64, val.to<int64_t>());
            break;
          case kAstF32:
            PrintF("f32:%f", val.to<float>());
            break;
          case kAstF64:
            PrintF("f64:%lf", val.to<double>());
            break;
          case kAstStmt:
            PrintF("void");
            break;
          default:
            UNREACHABLE();
            break;
        }
      }
    }
  }
};

//============================================================================
// The implementation details of the interpreter.
//============================================================================
class WasmInterpreterInternals : public ZoneObject {
 public:
  WasmInstance* instance_;
  CodeMap codemap_;
  ZoneVector<ThreadImpl*> threads_;

  WasmInterpreterInternals(Zone* zone, WasmInstance* instance)
      : instance_(instance),
        codemap_(instance_ ? instance_->module : nullptr, zone),
        threads_(zone) {
    threads_.push_back(new ThreadImpl(zone, &codemap_, instance));
  }

  void Delete() {
    // TODO(titzer): CFI doesn't like threads in the ZoneVector.
    for (auto t : threads_) delete t;
    threads_.resize(0);
  }
};

//============================================================================
// Implementation of the public interface of the interpreter.
//============================================================================
WasmInterpreter::WasmInterpreter(WasmInstance* instance,
                                 AccountingAllocator* allocator)
    : zone_(allocator, ZONE_NAME),
      internals_(new (&zone_) WasmInterpreterInternals(&zone_, instance)) {}

WasmInterpreter::~WasmInterpreter() { internals_->Delete(); }
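
// Ownership note: WasmInterpreterInternals is placement-new'd into {zone_},
// so the destructor never deletes it; the zone reclaims that memory
// wholesale. Only the ThreadImpl objects live on the C++ heap (see the TODO
// in Delete() above), which is why the destructor calls Delete() explicitly
// rather than relying on zone teardown.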

void WasmInterpreter::Run() { internals_->threads_[0]->Run(); }

void WasmInterpreter::Pause() { internals_->threads_[0]->Pause(); }

bool WasmInterpreter::SetBreakpoint(const WasmFunction* function, pc_t pc,
                                    bool enabled) {
  InterpreterCode* code = internals_->codemap_.FindCode(function);
  if (!code) return false;
  size_t size = static_cast<size_t>(code->end - code->start);
  // Check bounds for {pc}.
  if (pc < code->locals.decls_encoded_size || pc >= size) return false;
  // Make a copy of the code before enabling a breakpoint.
  if (enabled && code->orig_start == code->start) {
    code->start = reinterpret_cast<byte*>(zone_.New(size));
    memcpy(code->start, code->orig_start, size);
    code->end = code->start + size;
  }
  bool prev = code->start[pc] == kInternalBreakpoint;
  if (enabled) {
    code->start[pc] = kInternalBreakpoint;
  } else {
    code->start[pc] = code->orig_start[pc];
  }
  return prev;
}
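
// Breakpoints are patched copy-on-write: enabling the first breakpoint in a
// function clones its body into the zone, then the kInternalBreakpoint marker
// byte overwrites the opcode in the copy while {orig_start} keeps the
// pristine bytes for later restoration. A hedged usage sketch, assuming
// {interpreter}, {func}, and a valid {pc} are in scope:
//
//   bool was_set = interpreter.SetBreakpoint(func, pc, true);   // patch in
//   DCHECK(interpreter.GetBreakpoint(func, pc));
//   interpreter.SetBreakpoint(func, pc, false);                 // restore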

bool WasmInterpreter::GetBreakpoint(const WasmFunction* function, pc_t pc) {
  InterpreterCode* code = internals_->codemap_.FindCode(function);
  if (!code) return false;
  size_t size = static_cast<size_t>(code->end - code->start);
  // Check bounds for {pc}.
  if (pc < code->locals.decls_encoded_size || pc >= size) return false;
  // Check if a breakpoint is present at that place in the code.
  return code->start[pc] == kInternalBreakpoint;
}

bool WasmInterpreter::SetTracing(const WasmFunction* function, bool enabled) {
  UNIMPLEMENTED();
  return false;
}

int WasmInterpreter::GetThreadCount() {
  return 1;  // only one thread for now.
}

WasmInterpreter::Thread* WasmInterpreter::GetThread(int id) {
  CHECK_EQ(0, id);  // only one thread for now.
  return internals_->threads_[id];
}

WasmVal WasmInterpreter::GetLocalVal(const WasmFrame* frame, int index) {
  CHECK_GE(index, 0);
  UNIMPLEMENTED();
  WasmVal none;
  none.type = kAstStmt;
  return none;
}

WasmVal WasmInterpreter::GetExprVal(const WasmFrame* frame, int pc) {
  UNIMPLEMENTED();
  WasmVal none;
  none.type = kAstStmt;
  return none;
}

void WasmInterpreter::SetLocalVal(WasmFrame* frame, int index, WasmVal val) {
  UNIMPLEMENTED();
}

void WasmInterpreter::SetExprVal(WasmFrame* frame, int pc, WasmVal val) {
  UNIMPLEMENTED();
}

size_t WasmInterpreter::GetMemorySize() {
  return internals_->instance_->mem_size;
}

WasmVal WasmInterpreter::ReadMemory(size_t offset) {
  UNIMPLEMENTED();
  return WasmVal();
}

void WasmInterpreter::WriteMemory(size_t offset, WasmVal val) {
  UNIMPLEMENTED();
}

int WasmInterpreter::AddFunctionForTesting(const WasmFunction* function) {
  return internals_->codemap_.AddFunction(function, nullptr, nullptr);
}

bool WasmInterpreter::SetFunctionCodeForTesting(const WasmFunction* function,
                                                const byte* start,
                                                const byte* end) {
  return internals_->codemap_.SetFunctionCode(function, start, end);
}

ControlTransferMap WasmInterpreter::ComputeControlTransfersForTesting(
    Zone* zone, const byte* start, const byte* end) {
  ControlTransfers targets(zone, nullptr, nullptr, start, end);
  return targets.map_;
}

}  // namespace wasm
}  // namespace internal
}  // namespace v8