Lines Matching refs:SIMD128

1 …-registers -mattr=+unimplemented-simd128 | FileCheck %s --check-prefixes CHECK,SIMD128,SIMD128-SLOW
2 …-mattr=+unimplemented-simd128 -fast-isel | FileCheck %s --check-prefixes CHECK,SIMD128,SIMD128-FAST
5 …-wasm-disable-explicit-locals -wasm-keep-registers | FileCheck %s --check-prefixes CHECK,NO-SIMD128
6 …le-explicit-locals -wasm-keep-registers -fast-isel | FileCheck %s --check-prefixes CHECK,NO-SIMD128
12 ; Test that basic SIMD128 arithmetic operations assemble as expected.
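The CHECK blocks below are written against small single-operation IR functions. As a hedged illustration (not the verbatim test body), the add_v16i8 checks that follow would match the lowering of a function like:

define <16 x i8> @add_v16i8(<16 x i8> %x, <16 x i8> %y) {
  ; with +simd128 this selects to a single i8x16.add; the NO-SIMD128 runs
  ; check that no i8x16 instructions are emitted at all
  %a = add <16 x i8> %x, %y
  ret <16 x i8> %a
}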
21 ; NO-SIMD128-NOT: i8x16
22 ; SIMD128-NEXT: .functype add_v16i8 (v128, v128) -> (v128){{$}}
23 ; SIMD128-NEXT: i8x16.add $push[[R:[0-9]+]]=, $0, $1{{$}}
24 ; SIMD128-NEXT: return $pop[[R]]{{$}}
31 ; NO-SIMD128-NOT: i8x16
32 ; SIMD128-NEXT: .functype sub_v16i8 (v128, v128) -> (v128){{$}}
33 ; SIMD128-NEXT: i8x16.sub $push[[R:[0-9]+]]=, $0, $1{{$}}
34 ; SIMD128-NEXT: return $pop[[R]]{{$}}
42 ; NO-SIMD128-NOT: i8x16
43 ; SIMD128-NOT: i8x16.mul
44 ; SIMD128: i8x16.extract_lane_u
45 ; SIMD128: i32.mul
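There is no i8x16.mul instruction in WebAssembly SIMD, so the mul_v16i8 checks above expect the multiply to be scalarized through extract_lane_u and i32.mul rather than selected as a single vector instruction. A hedged sketch of the kind of IR involved:

define <16 x i8> @mul_v16i8_sketch(<16 x i8> %x, <16 x i8> %y) {
  ; no i8x16.mul exists, so this is lowered lane by lane via i32.mul
  %r = mul <16 x i8> %x, %y
  ret <16 x i8> %r
}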
52 ; NO-SIMD128-NOT: i8x16
53 ; SIMD128-NEXT: .functype min_s_v16i8 (v128, v128) -> (v128){{$}}
54 ; SIMD128-NEXT: i8x16.min_s $push[[R:[0-9]+]]=, $0, $1{{$}}
55 ; SIMD128-NEXT: return $pop[[R]]{{$}}
63 ; NO-SIMD128-NOT: i8x16
64 ; SIMD128-NEXT: .functype min_u_v16i8 (v128, v128) -> (v128){{$}}
65 ; SIMD128-NEXT: i8x16.min_u $push[[R:[0-9]+]]=, $0, $1{{$}}
66 ; SIMD128-NEXT: return $pop[[R]]{{$}}
74 ; NO-SIMD128-NOT: i8x16
75 ; SIMD128-NEXT: .functype max_s_v16i8 (v128, v128) -> (v128){{$}}
76 ; SIMD128-NEXT: i8x16.max_s $push[[R:[0-9]+]]=, $0, $1{{$}}
77 ; SIMD128-NEXT: return $pop[[R]]{{$}}
85 ; NO-SIMD128-NOT: i8x16
86 ; SIMD128-NEXT: .functype max_u_v16i8 (v128, v128) -> (v128){{$}}
87 ; SIMD128-NEXT: i8x16.max_u $push[[R:[0-9]+]]=, $0, $1{{$}}
88 ; SIMD128-NEXT: return $pop[[R]]{{$}}
96 ; NO-SIMD128-NOT: i8x16
97 ; SIMD128-NEXT: .functype avgr_u_v16i8 (v128, v128) -> (v128){{$}}
98 ; SIMD128-NEXT: i8x16.avgr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
99 ; SIMD128-NEXT: return $pop[[R]]{{$}}
110 ; NO-SIMD128-NOT: i8x16
111 ; SIMD128-NEXT: .functype avgr_u_v16i8_wrap (v128, v128) -> (v128){{$}}
112 ; SIMD128-NOT: i8x16.avgr_u
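i8x16.avgr_u is only selected when the rounding-average pattern is known not to overflow; the _wrap variant drops that guarantee, so the check above asserts the instruction is not used. A hedged sketch of the non-wrapping pattern, assuming nuw adds (not necessarily the exact test body):

define <16 x i8> @avgr_u_sketch(<16 x i8> %x, <16 x i8> %y) {
  ; (x + y + 1) >> 1 with no unsigned wrap can map to i8x16.avgr_u;
  ; without the no-wrap guarantee the backend must fall back to generic code
  %a = add nuw <16 x i8> %x, %y
  %b = add nuw <16 x i8> %a, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  %c = lshr <16 x i8> %b, <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
  ret <16 x i8> %c
}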
123 ; NO-SIMD128-NOT: i8x16
124 ; SIMD128-NEXT: .functype abs_v16i8 (v128) -> (v128){{$}}
125 ; SIMD128-NEXT: i8x16.abs $push[[R:[0-9]+]]=, $0{{$}}
126 ; SIMD128-NEXT: return $pop[[R]]{{$}}
135 ; NO-SIMD128-NOT: i8x16
136 ; SIMD128-NEXT: .functype neg_v16i8 (v128) -> (v128){{$}}
137 ; SIMD128-NEXT: i8x16.neg $push[[R:[0-9]+]]=, $0{{$}}
138 ; SIMD128-NEXT: return $pop[[R]]{{$}}
147 ; NO-SIMD128-NOT: i8x16
148 ; SIMD128-NEXT: .functype shl_v16i8 (v128, i32) -> (v128){{$}}
149 ; SIMD128-NEXT: i8x16.shl $push[[R:[0-9]+]]=, $0, $1{{$}}
150 ; SIMD128-NEXT: return $pop[[R]]{{$}}
161 ; NO-SIMD128-NOT: i8x16
162 ; SIMD128-NEXT: .functype shl_const_v16i8 (v128) -> (v128){{$}}
163 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5
164 ; SIMD128-NEXT: i8x16.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
165 ; SIMD128-NEXT: return $pop[[R]]{{$}}
174 ; NO-SIMD128-NOT: i8x16
175 ; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}}
176 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
177 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
178 ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
179 ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
180 ; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]
181 ; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]
183 ; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}}
184 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
185 ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
186 ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
187 ; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
188 ; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
189 ; SIMD128-NEXT: return $pop[[R]]{{$}}
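WebAssembly SIMD shift instructions take a single scalar i32 shift count, so a genuinely per-lane shift amount has no direct encoding; the checks above expect each lane to be extracted, masked to the 8-bit lane width (the i32.const 7 / i32.and pair), shifted as an i32, and reassembled with replace_lane. A hedged sketch of the IR shape that triggers this lowering:

define <16 x i8> @shl_vec_sketch(<16 x i8> %v, <16 x i8> %amt) {
  ; per-lane shift amounts cannot feed i8x16.shl directly, so this is scalarized
  %r = shl <16 x i8> %v, %amt
  ret <16 x i8> %r
}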
196 ; NO-SIMD128-NOT: i8x16
197 ; SIMD128-NEXT: .functype shr_s_v16i8 (v128, i32) -> (v128){{$}}
198 ; SIMD128-NEXT: i8x16.shr_s $push[[R:[0-9]+]]=, $0, $1{{$}}
199 ; SIMD128-NEXT: return $pop[[R]]{{$}}
210 ; NO-SIMD128-NOT: i8x16
211 ; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}}
212 ; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
213 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
214 ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
215 ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
216 ; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]
217 ; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]
219 ; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}}
220 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
221 ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
222 ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
223 ; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
224 ; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
225 ; SIMD128-NEXT: return $pop[[R]]{{$}}
232 ; NO-SIMD128-NOT: i8x16
233 ; SIMD128-NEXT: .functype shr_u_v16i8 (v128, i32) -> (v128){{$}}
234 ; SIMD128-NEXT: i8x16.shr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
235 ; SIMD128-NEXT: return $pop[[R]]{{$}}
246 ; NO-SIMD128-NOT: i8x16
247 ; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}}
248 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
249 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
250 ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
251 ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
252 ; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]
253 ; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]
255 ; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}}
256 ; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
257 ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
258 ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
259 ; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
260 ; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
261 ; SIMD128-NEXT: return $pop[[R]]{{$}}
268 ; NO-SIMD128-NOT: v128
269 ; SIMD128-NEXT: .functype and_v16i8 (v128, v128) -> (v128){{$}}
270 ; SIMD128-NEXT: v128.and $push[[R:[0-9]+]]=, $0, $1{{$}}
271 ; SIMD128-NEXT: return $pop[[R]]{{$}}
278 ; NO-SIMD128-NOT: v128
279 ; SIMD128-NEXT: .functype or_v16i8 (v128, v128) -> (v128){{$}}
280 ; SIMD128-NEXT: v128.or $push[[R:[0-9]+]]=, $0, $1{{$}}
281 ; SIMD128-NEXT: return $pop[[R]]{{$}}
288 ; NO-SIMD128-NOT: v128
289 ; SIMD128-NEXT: .functype xor_v16i8 (v128, v128) -> (v128){{$}}
290 ; SIMD128-NEXT: v128.xor $push[[R:[0-9]+]]=, $0, $1{{$}}
291 ; SIMD128-NEXT: return $pop[[R]]{{$}}
298 ; NO-SIMD128-NOT: v128
299 ; SIMD128-NEXT: .functype not_v16i8 (v128) -> (v128){{$}}
300 ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $0{{$}}
301 ; SIMD128-NEXT: return $pop[[R]]{{$}}
311 ; NO-SIMD128-NOT: v128
312 ; SIMD128-NEXT: .functype andnot_v16i8 (v128, v128) -> (v128){{$}}
313 ; SIMD128-SLOW-NEXT: v128.andnot $push[[R:[0-9]+]]=, $0, $1{{$}}
314 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
315 ; SIMD128-FAST-NEXT: v128.not
316 ; SIMD128-FAST-NEXT: v128.and
317 ; SIMD128-FAST-NEXT: return
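v128.andnot is matched from an and of one operand with the bitwise-not of the other; the SIMD128-SLOW (DAG ISel) run folds the pair into one instruction, while the SIMD128-FAST (fast-isel) run keeps the literal v128.not followed by v128.and. A hedged sketch of the pattern:

define <16 x i8> @andnot_sketch(<16 x i8> %x, <16 x i8> %y) {
  ; x & ~y folds to v128.andnot under normal instruction selection
  %inv = xor <16 x i8> %y, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %r = and <16 x i8> %x, %inv
  ret <16 x i8> %r
}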
327 ; NO-SIMD128-NOT: v128
328 ; SIMD128-NEXT: .functype bitselect_v16i8 (v128, v128, v128) -> (v128){{$}}
329 ; SIMD128-SLOW-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
330 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
331 ; SIMD128-FAST-NEXT: v128.and
332 ; SIMD128-FAST-NEXT: v128.not
333 ; SIMD128-FAST-NEXT: v128.and
334 ; SIMD128-FAST-NEXT: v128.or
335 ; SIMD128-FAST-NEXT: return
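v128.bitselect takes two data operands and a mask and is matched from the classic (mask & a) | (~mask & b) form, which fast-isel leaves as the separate and/not/and/or sequence checked above. A hedged sketch with the mask as the first parameter, matching the v128.bitselect $1, $2, $0 operand order in the check:

define <16 x i8> @bitselect_sketch(<16 x i8> %c, <16 x i8> %v1, <16 x i8> %v2) {
  ; (c & v1) | (~c & v2) folds to v128.bitselect under normal instruction selection
  %m1  = and <16 x i8> %c, %v1
  %inv = xor <16 x i8> %c, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
  %m2  = and <16 x i8> %inv, %v2
  %r   = or <16 x i8> %m1, %m2
  ret <16 x i8> %r
}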
350 ; NO-SIMD128-NOT: i16x8
351 ; SIMD128-NEXT: .functype add_v8i16 (v128, v128) -> (v128){{$}}
352 ; SIMD128-NEXT: i16x8.add $push[[R:[0-9]+]]=, $0, $1{{$}}
353 ; SIMD128-NEXT: return $pop[[R]]{{$}}
360 ; NO-SIMD128-NOT: i16x8
361 ; SIMD128-NEXT: .functype sub_v8i16 (v128, v128) -> (v128){{$}}
362 ; SIMD128-NEXT: i16x8.sub $push[[R:[0-9]+]]=, $0, $1{{$}}
363 ; SIMD128-NEXT: return $pop[[R]]{{$}}
370 ; NO-SIMD128-NOT: i16x8
371 ; SIMD128-NEXT: .functype mul_v8i16 (v128, v128) -> (v128){{$}}
372 ; SIMD128-NEXT: i16x8.mul $push[[R:[0-9]+]]=, $0, $1{{$}}
373 ; SIMD128-NEXT: return $pop[[R]]{{$}}
380 ; NO-SIMD128-NOT: i16x8
381 ; SIMD128-NEXT: .functype min_s_v8i16 (v128, v128) -> (v128){{$}}
382 ; SIMD128-NEXT: i16x8.min_s $push[[R:[0-9]+]]=, $0, $1{{$}}
383 ; SIMD128-NEXT: return $pop[[R]]{{$}}
391 ; NO-SIMD128-NOT: i16x8
392 ; SIMD128-NEXT: .functype min_u_v8i16 (v128, v128) -> (v128){{$}}
393 ; SIMD128-NEXT: i16x8.min_u $push[[R:[0-9]+]]=, $0, $1{{$}}
394 ; SIMD128-NEXT: return $pop[[R]]{{$}}
402 ; NO-SIMD128-NOT: i16x8
403 ; SIMD128-NEXT: .functype max_s_v8i16 (v128, v128) -> (v128){{$}}
404 ; SIMD128-NEXT: i16x8.max_s $push[[R:[0-9]+]]=, $0, $1{{$}}
405 ; SIMD128-NEXT: return $pop[[R]]{{$}}
413 ; NO-SIMD128-NOT: i16x8
414 ; SIMD128-NEXT: .functype max_u_v8i16 (v128, v128) -> (v128){{$}}
415 ; SIMD128-NEXT: i16x8.max_u $push[[R:[0-9]+]]=, $0, $1{{$}}
416 ; SIMD128-NEXT: return $pop[[R]]{{$}}
424 ; NO-SIMD128-NOT: i16x8
425 ; SIMD128-NEXT: .functype avgr_u_v8i16 (v128, v128) -> (v128){{$}}
426 ; SIMD128-NEXT: i16x8.avgr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
427 ; SIMD128-NEXT: return $pop[[R]]{{$}}
436 ; NO-SIMD128-NOT: i16x8
437 ; SIMD128-NEXT: .functype avgr_u_v8i16_wrap (v128, v128) -> (v128){{$}}
438 ; SIMD128-NOT: i16x8.avgr_u
447 ; NO-SIMD128-NOT: i16x8
448 ; SIMD128-NEXT: .functype abs_v8i16 (v128) -> (v128){{$}}
449 ; SIMD128-NEXT: i16x8.abs $push[[R:[0-9]+]]=, $0{{$}}
450 ; SIMD128-NEXT: return $pop[[R]]{{$}}
459 ; NO-SIMD128-NOT: i16x8
460 ; SIMD128-NEXT: .functype neg_v8i16 (v128) -> (v128){{$}}
461 ; SIMD128-NEXT: i16x8.neg $push[[R:[0-9]+]]=, $0{{$}}
462 ; SIMD128-NEXT: return $pop[[R]]{{$}}
470 ; NO-SIMD128-NOT: i16x8
471 ; SIMD128-NEXT: .functype shl_v8i16 (v128, i32) -> (v128){{$}}
472 ; SIMD128-NEXT: i16x8.shl $push[[R:[0-9]+]]=, $0, $1{{$}}
473 ; SIMD128-NEXT: return $pop[[R]]{{$}}
483 ; NO-SIMD128-NOT: i16x8
484 ; SIMD128-NEXT: .functype shl_const_v8i16 (v128) -> (v128){{$}}
485 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5
486 ; SIMD128-NEXT: i16x8.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
487 ; SIMD128-NEXT: return $pop[[R]]{{$}}
495 ; NO-SIMD128-NOT: i16x8
496 ; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}}
497 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
498 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
499 ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
500 ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
501 ; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
502 ; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
504 ; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}}
505 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
506 ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
507 ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
508 ; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
509 ; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
510 ; SIMD128-NEXT: return $pop[[R]]{{$}}
517 ; NO-SIMD128-NOT: i16x8
518 ; SIMD128-NEXT: .functype shr_s_v8i16 (v128, i32) -> (v128){{$}}
519 ; SIMD128-NEXT: i16x8.shr_s $push[[R:[0-9]+]]=, $0, $1{{$}}
520 ; SIMD128-NEXT: return $pop[[R]]{{$}}
530 ; NO-SIMD128-NOT: i16x8
531 ; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}}
532 ; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
533 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
534 ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
535 ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
536 ; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
537 ; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
539 ; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}}
540 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
541 ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
542 ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
543 ; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
544 ; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
545 ; SIMD128-NEXT: return $pop[[R]]{{$}}
552 ; NO-SIMD128-NOT: i16x8
553 ; SIMD128-NEXT: .functype shr_u_v8i16 (v128, i32) -> (v128){{$}}
554 ; SIMD128-NEXT: i16x8.shr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
555 ; SIMD128-NEXT: return $pop[[R]]{{$}}
565 ; NO-SIMD128-NOT: i16x8
566 ; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}}
567 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
568 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 0{{$}}
569 ; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
570 ; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
571 ; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
572 ; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
574 ; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}}
575 ; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
576 ; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
577 ; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
578 ; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
579 ; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
580 ; SIMD128-NEXT: return $pop[[R]]{{$}}
587 ; NO-SIMD128-NOT: v128
588 ; SIMD128-NEXT: .functype and_v8i16 (v128, v128) -> (v128){{$}}
589 ; SIMD128-NEXT: v128.and $push[[R:[0-9]+]]=, $0, $1{{$}}
590 ; SIMD128-NEXT: return $pop[[R]]{{$}}
597 ; NO-SIMD128-NOT: v128
598 ; SIMD128-NEXT: .functype or_v8i16 (v128, v128) -> (v128){{$}}
599 ; SIMD128-NEXT: v128.or $push[[R:[0-9]+]]=, $0, $1{{$}}
600 ; SIMD128-NEXT: return $pop[[R]]{{$}}
607 ; NO-SIMD128-NOT: v128
608 ; SIMD128-NEXT: .functype xor_v8i16 (v128, v128) -> (v128){{$}}
609 ; SIMD128-NEXT: v128.xor $push[[R:[0-9]+]]=, $0, $1{{$}}
610 ; SIMD128-NEXT: return $pop[[R]]{{$}}
617 ; NO-SIMD128-NOT: v128
618 ; SIMD128-NEXT: .functype not_v8i16 (v128) -> (v128){{$}}
619 ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $0{{$}}
620 ; SIMD128-NEXT: return $pop[[R]]{{$}}
628 ; NO-SIMD128-NOT: v128
629 ; SIMD128-NEXT: .functype andnot_v8i16 (v128, v128) -> (v128){{$}}
630 ; SIMD128-SLOW-NEXT: v128.andnot $push[[R:[0-9]+]]=, $0, $1{{$}}
631 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
632 ; SIMD128-FAST-NEXT: v128.not
633 ; SIMD128-FAST-NEXT: v128.and
634 ; SIMD128-FAST-NEXT: return
643 ; NO-SIMD128-NOT: v128
644 ; SIMD128-NEXT: .functype bitselect_v8i16 (v128, v128, v128) -> (v128){{$}}
645 ; SIMD128-SLOW-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
646 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
647 ; SIMD128-FAST-NEXT: v128.and
648 ; SIMD128-FAST-NEXT: v128.not
649 ; SIMD128-FAST-NEXT: v128.and
650 ; SIMD128-FAST-NEXT: v128.or
651 ; SIMD128-FAST-NEXT: return
666 ; NO-SIMD128-NOT: i32x4
667 ; SIMD128-NEXT: .functype add_v4i32 (v128, v128) -> (v128){{$}}
668 ; SIMD128-NEXT: i32x4.add $push[[R:[0-9]+]]=, $0, $1{{$}}
669 ; SIMD128-NEXT: return $pop[[R]]{{$}}
676 ; NO-SIMD128-NOT: i32x4
677 ; SIMD128-NEXT: .functype sub_v4i32 (v128, v128) -> (v128){{$}}
678 ; SIMD128-NEXT: i32x4.sub $push[[R:[0-9]+]]=, $0, $1{{$}}
679 ; SIMD128-NEXT: return $pop[[R]]{{$}}
686 ; NO-SIMD128-NOT: i32x4
687 ; SIMD128-NEXT: .functype mul_v4i32 (v128, v128) -> (v128){{$}}
688 ; SIMD128-NEXT: i32x4.mul $push[[R:[0-9]+]]=, $0, $1{{$}}
689 ; SIMD128-NEXT: return $pop[[R]]{{$}}
696 ; NO-SIMD128-NOT: i32x4
697 ; SIMD128-NEXT: .functype min_s_v4i32 (v128, v128) -> (v128){{$}}
698 ; SIMD128-NEXT: i32x4.min_s $push[[R:[0-9]+]]=, $0, $1{{$}}
699 ; SIMD128-NEXT: return $pop[[R]]{{$}}
707 ; NO-SIMD128-NOT: i32x4
708 ; SIMD128-NEXT: .functype min_u_v4i32 (v128, v128) -> (v128){{$}}
709 ; SIMD128-NEXT: i32x4.min_u $push[[R:[0-9]+]]=, $0, $1{{$}}
710 ; SIMD128-NEXT: return $pop[[R]]{{$}}
718 ; NO-SIMD128-NOT: i32x4
719 ; SIMD128-NEXT: .functype max_s_v4i32 (v128, v128) -> (v128){{$}}
720 ; SIMD128-NEXT: i32x4.max_s $push[[R:[0-9]+]]=, $0, $1{{$}}
721 ; SIMD128-NEXT: return $pop[[R]]{{$}}
729 ; NO-SIMD128-NOT: i32x4
730 ; SIMD128-NEXT: .functype max_u_v4i32 (v128, v128) -> (v128){{$}}
731 ; SIMD128-NEXT: i32x4.max_u $push[[R:[0-9]+]]=, $0, $1{{$}}
732 ; SIMD128-NEXT: return $pop[[R]]{{$}}
740 ; NO-SIMD128-NOT: i32x4
741 ; SIMD128-NEXT: .functype abs_v4i32 (v128) -> (v128){{$}}
742 ; SIMD128-NEXT: i32x4.abs $push[[R:[0-9]+]]=, $0{{$}}
743 ; SIMD128-NEXT: return $pop[[R]]{{$}}
752 ; NO-SIMD128-NOT: i32x4
753 ; SIMD128-NEXT: .functype neg_v4i32 (v128) -> (v128){{$}}
754 ; SIMD128-NEXT: i32x4.neg $push[[R:[0-9]+]]=, $0{{$}}
755 ; SIMD128-NEXT: return $pop[[R]]{{$}}
762 ; NO-SIMD128-NOT: i32x4
763 ; SIMD128-NEXT: .functype shl_v4i32 (v128, i32) -> (v128){{$}}
764 ; SIMD128-NEXT: i32x4.shl $push[[R:[0-9]+]]=, $0, $1{{$}}
765 ; SIMD128-NEXT: return $pop[[R]]{{$}}
775 ; NO-SIMD128-NOT: i32x4
776 ; SIMD128-NEXT: .functype shl_const_v4i32 (v128) -> (v128){{$}}
777 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5
778 ; SIMD128-NEXT: i32x4.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
779 ; SIMD128-NEXT: return $pop[[R]]{{$}}
786 ; NO-SIMD128-NOT: i32x4
787 ; SIMD128-NEXT: .functype shl_vec_v4i32 (v128, v128) -> (v128){{$}}
788 ; SIMD128-NEXT: i32x4.extract_lane $push[[L0:[0-9]+]]=, $0, 0{{$}}
789 ; SIMD128-NEXT: i32x4.extract_lane $push[[L1:[0-9]+]]=, $1, 0{{$}}
790 ; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
791 ; SIMD128-NEXT: i32x4.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
793 ; SIMD128: i32x4.extract_lane $push[[L4:[0-9]+]]=, $0, 3{{$}}
794 ; SIMD128-NEXT: i32x4.extract_lane $push[[L5:[0-9]+]]=, $1, 3{{$}}
795 ; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
796 ; SIMD128-NEXT: i32x4.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 3, $pop[[L6]]{{$}}
797 ; SIMD128-NEXT: return $pop[[R]]{{$}}
804 ; NO-SIMD128-NOT: i32x4
805 ; SIMD128-NEXT: .functype shr_s_v4i32 (v128, i32) -> (v128){{$}}
806 ; SIMD128-NEXT: i32x4.shr_s $push[[R:[0-9]+]]=, $0, $1{{$}}
807 ; SIMD128-NEXT: return $pop[[R]]{{$}}
817 ; NO-SIMD128-NOT: i32x4
818 ; SIMD128-NEXT: .functype shr_s_vec_v4i32 (v128, v128) -> (v128){{$}}
819 ; SIMD128-NEXT: i32x4.extract_lane $push[[L0:[0-9]+]]=, $0, 0{{$}}
820 ; SIMD128-NEXT: i32x4.extract_lane $push[[L1:[0-9]+]]=, $1, 0{{$}}
821 ; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
822 ; SIMD128-NEXT: i32x4.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
824 ; SIMD128: i32x4.extract_lane $push[[L4:[0-9]+]]=, $0, 3{{$}}
825 ; SIMD128-NEXT: i32x4.extract_lane $push[[L5:[0-9]+]]=, $1, 3{{$}}
826 ; SIMD128-NEXT: i32.shr_s $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
827 ; SIMD128-NEXT: i32x4.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 3, $pop[[L6]]{{$}}
828 ; SIMD128-NEXT: return $pop[[R]]{{$}}
835 ; NO-SIMD128-NOT: i32x4
836 ; SIMD128-NEXT: .functype shr_u_v4i32 (v128, i32) -> (v128){{$}}
837 ; SIMD128-NEXT: i32x4.shr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
838 ; SIMD128-NEXT: return $pop[[R]]{{$}}
848 ; NO-SIMD128-NOT: i32x4
849 ; SIMD128-NEXT: .functype shr_u_vec_v4i32 (v128, v128) -> (v128){{$}}
850 ; SIMD128-NEXT: i32x4.extract_lane $push[[L0:[0-9]+]]=, $0, 0{{$}}
851 ; SIMD128-NEXT: i32x4.extract_lane $push[[L1:[0-9]+]]=, $1, 0{{$}}
852 ; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
853 ; SIMD128-NEXT: i32x4.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
855 ; SIMD128: i32x4.extract_lane $push[[L4:[0-9]+]]=, $0, 3{{$}}
856 ; SIMD128-NEXT: i32x4.extract_lane $push[[L5:[0-9]+]]=, $1, 3{{$}}
857 ; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
858 ; SIMD128-NEXT: i32x4.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 3, $pop[[L6]]{{$}}
859 ; SIMD128-NEXT: return $pop[[R]]{{$}}
866 ; NO-SIMD128-NOT: v128
867 ; SIMD128-NEXT: .functype and_v4i32 (v128, v128) -> (v128){{$}}
868 ; SIMD128-NEXT: v128.and $push[[R:[0-9]+]]=, $0, $1{{$}}
869 ; SIMD128-NEXT: return $pop[[R]]{{$}}
876 ; NO-SIMD128-NOT: v128
877 ; SIMD128-NEXT: .functype or_v4i32 (v128, v128) -> (v128){{$}}
878 ; SIMD128-NEXT: v128.or $push[[R:[0-9]+]]=, $0, $1{{$}}
879 ; SIMD128-NEXT: return $pop[[R]]{{$}}
886 ; NO-SIMD128-NOT: v128
887 ; SIMD128-NEXT: .functype xor_v4i32 (v128, v128) -> (v128){{$}}
888 ; SIMD128-NEXT: v128.xor $push[[R:[0-9]+]]=, $0, $1{{$}}
889 ; SIMD128-NEXT: return $pop[[R]]{{$}}
896 ; NO-SIMD128-NOT: v128
897 ; SIMD128-NEXT: .functype not_v4i32 (v128) -> (v128){{$}}
898 ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $0{{$}}
899 ; SIMD128-NEXT: return $pop[[R]]{{$}}
906 ; NO-SIMD128-NOT: v128
907 ; SIMD128-NEXT: .functype andnot_v4i32 (v128, v128) -> (v128){{$}}
908 ; SIMD128-SLOW-NEXT: v128.andnot $push[[R:[0-9]+]]=, $0, $1{{$}}
909 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
910 ; SIMD128-FAST-NEXT: v128.not
911 ; SIMD128-FAST-NEXT: v128.and
912 ; SIMD128-FAST-NEXT: return
920 ; NO-SIMD128-NOT: v128
921 ; SIMD128-NEXT: .functype bitselect_v4i32 (v128, v128, v128) -> (v128){{$}}
922 ; SIMD128-SLOW-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
923 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
924 ; SIMD128-FAST-NEXT: v128.not
925 ; SIMD128-FAST-NEXT: v128.and
926 ; SIMD128-FAST-NEXT: v128.and
927 ; SIMD128-FAST-NEXT: v128.or
928 ; SIMD128-FAST-NEXT: return
941 ; NO-SIMD128-NOT: i64x2
942 ; SIMD128-NEXT: .functype add_v2i64 (v128, v128) -> (v128){{$}}
943 ; SIMD128-NEXT: i64x2.add $push[[R:[0-9]+]]=, $0, $1{{$}}
944 ; SIMD128-NEXT: return $pop[[R]]{{$}}
951 ; NO-SIMD128-NOT: i64x2
952 ; SIMD128-NEXT: .functype sub_v2i64 (v128, v128) -> (v128){{$}}
953 ; SIMD128-NEXT: i64x2.sub $push[[R:[0-9]+]]=, $0, $1{{$}}
954 ; SIMD128-NEXT: return $pop[[R]]{{$}}
961 ; NO-SIMD128-NOT: i64x2
962 ; SIMD128-NEXT: .functype mul_v2i64 (v128, v128) -> (v128){{$}}
963 ; SIMD128: i64x2.mul $push[[R:[0-9]+]]=, $0, $1{{$}}
964 ; SIMD128-NEXT: return $pop[[R]]{{$}}
971 ; NO-SIMD128-NOT: i64x2
972 ; SIMD128-NEXT: .functype neg_v2i64 (v128) -> (v128){{$}}
973 ; SIMD128-NEXT: i64x2.neg $push[[R:[0-9]+]]=, $0{{$}}
974 ; SIMD128-NEXT: return $pop[[R]]{{$}}
981 ; NO-SIMD128-NOT: i64x2
982 ; SIMD128-NEXT: .functype shl_v2i64 (v128, i32) -> (v128){{$}}
983 ; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $1{{$}}
984 ; SIMD128-NEXT: return $pop[[R]]{{$}}
994 ; NO-SIMD128-NOT: i64x2
995 ; SIMD128-NEXT: .functype shl_sext_v2i64 (v128, i32) -> (v128){{$}}
996 ; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $1{{$}}
997 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1007 ; NO-SIMD128-NOT: i64x2
1008 ; SIMD128-NEXT: .functype shl_noext_v2i64 (v128, i64) -> (v128){{$}}
1009 ; SIMD128-NEXT: i32.wrap_i64 $push[[L0:[0-9]+]]=, $1{{$}}
1010 ; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1011 ; SIMD128-NEXT: return $pop[[R]]{{$}}
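All WebAssembly SIMD shifts, including the i64x2 forms, take an i32 shift count. When the splatted shift amount arrives as an i64 with no narrower value to reuse, the checks above expect it to be narrowed with i32.wrap_i64 before i64x2.shl. A hedged sketch of IR with an i64 shift amount (names are illustrative, not the exact test body):

define <2 x i64> @shl_noext_sketch(<2 x i64> %v, i64 %amt) {
  ; the splatted i64 count is wrapped to i32, since i64x2.shl takes an i32 operand
  %ins = insertelement <2 x i64> undef, i64 %amt, i32 0
  %spl = shufflevector <2 x i64> %ins, <2 x i64> undef, <2 x i32> zeroinitializer
  %r = shl <2 x i64> %v, %spl
  ret <2 x i64> %r
}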
1020 ; NO-SIMD128-NOT: i64x2
1021 ; SIMD128-NEXT: .functype shl_const_v2i64 (v128) -> (v128){{$}}
1022 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}}
1023 ; SIMD128-NEXT: i64x2.shl $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1024 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1031 ; NO-SIMD128-NOT: i64x2
1032 ; SIMD128-NEXT: .functype shl_vec_v2i64 (v128, v128) -> (v128){{$}}
1033 ; SIMD128-NEXT: i64x2.extract_lane $push[[L0:[0-9]+]]=, $0, 0{{$}}
1034 ; SIMD128-NEXT: i64x2.extract_lane $push[[L1:[0-9]+]]=, $1, 0{{$}}
1035 ; SIMD128-NEXT: i64.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
1036 ; SIMD128-NEXT: i64x2.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
1037 ; SIMD128-NEXT: i64x2.extract_lane $push[[L4:[0-9]+]]=, $0, 1{{$}}
1038 ; SIMD128-NEXT: i64x2.extract_lane $push[[L5:[0-9]+]]=, $1, 1{{$}}
1039 ; SIMD128-NEXT: i64.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
1040 ; SIMD128-NEXT: i64x2.replace_lane $push[[R:[0-9]+]]=, $pop[[L3]], 1, $pop[[L6]]{{$}}
1041 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1048 ; NO-SIMD128-NOT: i64x2
1049 ; SIMD128-NEXT: .functype shr_s_v2i64 (v128, i32) -> (v128){{$}}
1050 ; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $1{{$}}
1051 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1061 ; NO-SIMD128-NOT: i64x2
1062 ; SIMD128-NEXT: .functype shr_s_sext_v2i64 (v128, i32) -> (v128){{$}}
1063 ; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $1{{$}}
1064 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1074 ; NO-SIMD128-NOT: i64x2
1075 ; SIMD128-NEXT: .functype shr_s_noext_v2i64 (v128, i64) -> (v128){{$}}
1076 ; SIMD128-NEXT: i32.wrap_i64 $push[[L0:[0-9]+]]=, $1{{$}}
1077 ; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1078 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1087 ; NO-SIMD128-NOT: i64x2
1088 ; SIMD128-NEXT: .functype shr_s_const_v2i64 (v128) -> (v128){{$}}
1089 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}}
1090 ; SIMD128-NEXT: i64x2.shr_s $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1091 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1098 ; NO-SIMD128-NOT: i64x2
1099 ; SIMD128-NEXT: .functype shr_s_vec_v2i64 (v128, v128) -> (v128){{$}}
1100 ; SIMD128-NEXT: i64x2.extract_lane $push[[L0:[0-9]+]]=, $0, 0{{$}}
1101 ; SIMD128-NEXT: i64x2.extract_lane $push[[L1:[0-9]+]]=, $1, 0{{$}}
1102 ; SIMD128-NEXT: i64.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
1103 ; SIMD128-NEXT: i64x2.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
1104 ; SIMD128-NEXT: i64x2.extract_lane $push[[L4:[0-9]+]]=, $0, 1{{$}}
1105 ; SIMD128-NEXT: i64x2.extract_lane $push[[L5:[0-9]+]]=, $1, 1{{$}}
1106 ; SIMD128-NEXT: i64.shr_s $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
1107 ; SIMD128-NEXT: i64x2.replace_lane $push[[R:[0-9]+]]=, $pop[[L3]], 1, $pop[[L6]]{{$}}
1108 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1115 ; NO-SIMD128-NOT: i64x2
1116 ; SIMD128-NEXT: .functype shr_u_v2i64 (v128, i32) -> (v128){{$}}
1117 ; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
1118 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1128 ; NO-SIMD128-NOT: i64x2
1129 ; SIMD128-NEXT: .functype shr_u_sext_v2i64 (v128, i32) -> (v128){{$}}
1130 ; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $1{{$}}
1131 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1141 ; NO-SIMD128-NOT: i64x2
1142 ; SIMD128-NEXT: .functype shr_u_noext_v2i64 (v128, i64) -> (v128){{$}}
1143 ; SIMD128-NEXT: i32.wrap_i64 $push[[L0:[0-9]+]]=, $1{{$}}
1144 ; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1145 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1154 ; NO-SIMD128-NOT: i64x2
1155 ; SIMD128-NEXT: .functype shr_u_const_v2i64 (v128) -> (v128){{$}}
1156 ; SIMD128-NEXT: i32.const $push[[L0:[0-9]+]]=, 5{{$}}
1157 ; SIMD128-NEXT: i64x2.shr_u $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1158 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1165 ; NO-SIMD128-NOT: i64x2
1166 ; SIMD128-NEXT: .functype shr_u_vec_v2i64 (v128, v128) -> (v128){{$}}
1167 ; SIMD128-NEXT: i64x2.extract_lane $push[[L0:[0-9]+]]=, $0, 0{{$}}
1168 ; SIMD128-NEXT: i64x2.extract_lane $push[[L1:[0-9]+]]=, $1, 0{{$}}
1169 ; SIMD128-NEXT: i64.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
1170 ; SIMD128-NEXT: i64x2.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
1171 ; SIMD128-NEXT: i64x2.extract_lane $push[[L4:[0-9]+]]=, $0, 1{{$}}
1172 ; SIMD128-NEXT: i64x2.extract_lane $push[[L5:[0-9]+]]=, $1, 1{{$}}
1173 ; SIMD128-NEXT: i64.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
1174 ; SIMD128-NEXT: i64x2.replace_lane $push[[R:[0-9]+]]=, $pop[[L3]], 1, $pop[[L6]]{{$}}
1175 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1182 ; NO-SIMD128-NOT: v128
1183 ; SIMD128-NEXT: .functype and_v2i64 (v128, v128) -> (v128){{$}}
1184 ; SIMD128-NEXT: v128.and $push[[R:[0-9]+]]=, $0, $1{{$}}
1185 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1192 ; NO-SIMD128-NOT: v128
1193 ; SIMD128-NEXT: .functype or_v2i64 (v128, v128) -> (v128){{$}}
1194 ; SIMD128-NEXT: v128.or $push[[R:[0-9]+]]=, $0, $1{{$}}
1195 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1202 ; NO-SIMD128-NOT: v128
1203 ; SIMD128-NEXT: .functype xor_v2i64 (v128, v128) -> (v128){{$}}
1204 ; SIMD128-NEXT: v128.xor $push[[R:[0-9]+]]=, $0, $1{{$}}
1205 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1212 ; NO-SIMD128-NOT: v128
1213 ; SIMD128-NEXT: .functype not_v2i64 (v128) -> (v128){{$}}
1214 ; SIMD128-NEXT: v128.not $push[[R:[0-9]+]]=, $0{{$}}
1215 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1222 ; NO-SIMD128-NOT: v128
1223 ; SIMD128-NEXT: .functype andnot_v2i64 (v128, v128) -> (v128){{$}}
1224 ; SIMD128-SLOW-NEXT: v128.andnot $push[[R:[0-9]+]]=, $0, $1{{$}}
1225 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
1226 ; SIMD128-FAST-NEXT: v128.not
1227 ; SIMD128-FAST-NEXT: v128.and
1228 ; SIMD128-FAST-NEXT: return
1236 ; NO-SIMD128-NOT: v128
1237 ; SIMD128-NEXT: .functype bitselect_v2i64 (v128, v128, v128) -> (v128){{$}}
1238 ; SIMD128-SLOW-NEXT: v128.bitselect $push[[R:[0-9]+]]=, $1, $2, $0{{$}}
1239 ; SIMD128-SLOW-NEXT: return $pop[[R]]{{$}}
1240 ; SIMD128-FAST-NEXT: v128.not
1241 ; SIMD128-FAST-NEXT: v128.and
1242 ; SIMD128-FAST-NEXT: v128.and
1243 ; SIMD128-FAST-NEXT: v128.or
1244 ; SIMD128-FAST-NEXT: return
1257 ; NO-SIMD128-NOT: f32x4
1258 ; SIMD128-NEXT: .functype neg_v4f32 (v128) -> (v128){{$}}
1259 ; SIMD128-NEXT: f32x4.neg $push[[R:[0-9]+]]=, $0{{$}}
1260 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1268 ; NO-SIMD128-NOT: f32x4
1269 ; SIMD128-NEXT: .functype abs_v4f32 (v128) -> (v128){{$}}
1270 ; SIMD128-NEXT: f32x4.abs $push[[R:[0-9]+]]=, $0{{$}}
1271 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1279 ; NO-SIMD128-NOT: f32x4
1280 ; SIMD128-NEXT: .functype min_unordered_v4f32 (v128) -> (v128){{$}}
1281 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2, 0x1.4p2, 0x1.4p2{{$}}
1282 ; SIMD128-NEXT: f32x4.min $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1283 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1292 ; NO-SIMD128-NOT: f32x4
1293 ; SIMD128-NEXT: .functype max_unordered_v4f32 (v128) -> (v128){{$}}
1294 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2, 0x1.4p2, 0x1.4p2
1295 ; SIMD128-NEXT: f32x4.max $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1296 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1305 ; NO-SIMD128-NOT: f32x4
1306 ; SIMD128-NEXT: .functype min_ordered_v4f32 (v128) -> (v128){{$}}
1307 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2, 0x1.4p2, 0x1.4p2{{$}}
1308 ; SIMD128-NEXT: f32x4.min $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1309 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1318 ; NO-SIMD128-NOT: f32x4
1319 ; SIMD128-NEXT: .functype max_ordered_v4f32 (v128) -> (v128){{$}}
1320 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2, 0x1.4p2, 0x1.4p2{{$}}
1321 ; SIMD128-NEXT: f32x4.max $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1322 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1331 ; NO-SIMD128-NOT: f32x4
1332 ; SIMD128-NEXT: .functype min_intrinsic_v4f32 (v128, v128) -> (v128){{$}}
1333 ; SIMD128-NEXT: f32x4.min $push[[R:[0-9]+]]=, $0, $1{{$}}
1334 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1342 ; NO-SIMD128-NOT: f32x4
1343 ; SIMD128-NEXT: .functype minnum_intrinsic_v4f32 (v128, v128) -> (v128){{$}}
1344 ; SIMD128-NEXT: f32x4.min $push[[R:[0-9]+]]=, $0, $1{{$}}
1345 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1353 ; NO-SIMD128-NOT: f32x4
1354 ; SIMD128-NEXT: .functype max_intrinsic_v4f32 (v128, v128) -> (v128){{$}}
1355 ; SIMD128-NEXT: f32x4.max $push[[R:[0-9]+]]=, $0, $1{{$}}
1356 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1364 ; NO-SIMD128-NOT: f32x4
1365 ; SIMD128-NEXT: .functype maxnum_intrinsic_v4f32 (v128, v128) -> (v128){{$}}
1366 ; SIMD128-NEXT: f32x4.max $push[[R:[0-9]+]]=, $0, $1{{$}}
1367 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1375 ; NO-SIMD128-NOT: f32x4
1376 ; SIMD128-NEXT: .functype min_const_intrinsic_v4f32 () -> (v128){{$}}
1377 ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 0x1.4p2, 0x1.4p2, 0x1.4p2, 0x1.4p2{{$}}
1378 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1388 ; NO-SIMD128-NOT: f32x4
1389 ; SIMD128-NEXT: .functype max_const_intrinsic_v4f32 () -> (v128){{$}}
1390 ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 0x1.5p5, 0x1.5p5, 0x1.5p5, 0x1.5p5{{$}}
1391 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1401 ; NO-SIMD128-NOT: f32x4
1402 ; SIMD128-NEXT: .functype add_v4f32 (v128, v128) -> (v128){{$}}
1403 ; SIMD128-NEXT: f32x4.add $push[[R:[0-9]+]]=, $0, $1{{$}}
1404 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1411 ; NO-SIMD128-NOT: f32x4
1412 ; SIMD128-NEXT: .functype sub_v4f32 (v128, v128) -> (v128){{$}}
1413 ; SIMD128-NEXT: f32x4.sub $push[[R:[0-9]+]]=, $0, $1{{$}}
1414 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1421 ; NO-SIMD128-NOT: f32x4
1422 ; SIMD128-NEXT: .functype div_v4f32 (v128, v128) -> (v128){{$}}
1423 ; SIMD128-NEXT: f32x4.div $push[[R:[0-9]+]]=, $0, $1{{$}}
1424 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1431 ; NO-SIMD128-NOT: f32x4
1432 ; SIMD128-NEXT: .functype mul_v4f32 (v128, v128) -> (v128){{$}}
1433 ; SIMD128-NEXT: f32x4.mul $push[[R:[0-9]+]]=, $0, $1{{$}}
1434 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1441 ; NO-SIMD128-NOT: f32x4
1442 ; SIMD128-NEXT: .functype sqrt_v4f32 (v128) -> (v128){{$}}
1443 ; SIMD128-NEXT: f32x4.sqrt $push[[R:[0-9]+]]=, $0{{$}}
1444 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1455 ; NO-SIMD128-NOT: f64x2
1456 ; SIMD128-NEXT: .functype neg_v2f64 (v128) -> (v128){{$}}
1457 ; SIMD128-NEXT: f64x2.neg $push[[R:[0-9]+]]=, $0{{$}}
1458 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1466 ; NO-SIMD128-NOT: f64x2
1467 ; SIMD128-NEXT: .functype abs_v2f64 (v128) -> (v128){{$}}
1468 ; SIMD128-NEXT: f64x2.abs $push[[R:[0-9]+]]=, $0{{$}}
1469 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1477 ; NO-SIMD128-NOT: f64x2
1478 ; SIMD128-NEXT: .functype min_unordered_v2f64 (v128) -> (v128){{$}}
1479 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2{{$}}
1480 ; SIMD128-NEXT: f64x2.min $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1481 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1490 ; NO-SIMD128-NOT: f64x2
1491 ; SIMD128-NEXT: .functype max_unordered_v2f64 (v128) -> (v128){{$}}
1492 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2{{$}}
1493 ; SIMD128-NEXT: f64x2.max $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1494 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1503 ; NO-SIMD128-NOT: f64x2
1504 ; SIMD128-NEXT: .functype min_ordered_v2f64 (v128) -> (v128){{$}}
1505 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2{{$}}
1506 ; SIMD128-NEXT: f64x2.min $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1507 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1516 ; NO-SIMD128-NOT: f64x2
1517 ; SIMD128-NEXT: .functype max_ordered_v2f64 (v128) -> (v128){{$}}
1518 ; SIMD128-NEXT: v128.const $push[[L0:[0-9]+]]=, 0x1.4p2, 0x1.4p2{{$}}
1519 ; SIMD128-NEXT: f64x2.max $push[[R:[0-9]+]]=, $0, $pop[[L0]]{{$}}
1520 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1529 ; NO-SIMD128-NOT: f64x2
1530 ; SIMD128-NEXT: .functype min_intrinsic_v2f64 (v128, v128) -> (v128){{$}}
1531 ; SIMD128-NEXT: f64x2.min $push[[R:[0-9]+]]=, $0, $1{{$}}
1532 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1540 ; NO-SIMD128-NOT: f64x2
1541 ; SIMD128-NEXT: .functype max_intrinsic_v2f64 (v128, v128) -> (v128){{$}}
1542 ; SIMD128-NEXT: f64x2.max $push[[R:[0-9]+]]=, $0, $1{{$}}
1543 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1551 ; NO-SIMD128-NOT: f64x2
1552 ; SIMD128-NEXT: .functype min_const_intrinsic_v2f64 () -> (v128){{$}}
1553 ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 0x1.4p2, 0x1.4p2{{$}}
1554 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1564 ; NO-SIMD128-NOT: f64x2
1565 ; SIMD128-NEXT: .functype max_const_intrinsic_v2f64 () -> (v128){{$}}
1566 ; SIMD128-NEXT: v128.const $push[[R:[0-9]+]]=, 0x1.5p5, 0x1.5p5{{$}}
1567 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1577 ; NO-SIMD128-NOT: f64x2
1578 ; SIMD128-NEXT: .functype add_v2f64 (v128, v128) -> (v128){{$}}
1579 ; SIMD128-NEXT: f64x2.add $push[[R:[0-9]+]]=, $0, $1{{$}}
1580 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1587 ; NO-SIMD128-NOT: f64x2
1588 ; SIMD128-NEXT: .functype sub_v2f64 (v128, v128) -> (v128){{$}}
1589 ; SIMD128-NEXT: f64x2.sub $push[[R:[0-9]+]]=, $0, $1{{$}}
1590 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1597 ; NO-SIMD128-NOT: f64x2
1598 ; SIMD128-NEXT: .functype div_v2f64 (v128, v128) -> (v128){{$}}
1599 ; SIMD128-NEXT: f64x2.div $push[[R:[0-9]+]]=, $0, $1{{$}}
1600 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1607 ; NO-SIMD128-NOT: f64x2
1608 ; SIMD128-NEXT: .functype mul_v2f64 (v128, v128) -> (v128){{$}}
1609 ; SIMD128-NEXT: f64x2.mul $push[[R:[0-9]+]]=, $0, $1{{$}}
1610 ; SIMD128-NEXT: return $pop[[R]]{{$}}
1617 ; NO-SIMD128-NOT: f64x2
1618 ; SIMD128-NEXT: .functype sqrt_v2f64 (v128) -> (v128){{$}}
1619 ; SIMD128-NEXT: f64x2.sqrt $push[[R:[0-9]+]]=, $0{{$}}
1620 ; SIMD128-NEXT: return $pop[[R]]{{$}}