; RUN: llc -mtriple=aarch64-linux-gnu -o - %s | FileCheck %s

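; Every operation in this function folds away to a zero lane, so the expected
; codegen is simply a zero materialized with movi and moved to x0 with fmov.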
define i64 @dotests_616() {
; CHECK-LABEL: dotests_616
; CHECK:       movi d0, #0000000000000000
; CHECK-NEXT:  fmov x0, d0
; CHECK-NEXT:  ret
entry:
  %0 = bitcast <2 x i64> zeroinitializer to <8 x i16>
  %1 = and <8 x i16> zeroinitializer, %0
  %2 = icmp ne <8 x i16> %1, zeroinitializer
  %3 = extractelement <8 x i1> %2, i32 2
  %vgetq_lane285 = sext i1 %3 to i16
  %vset_lane = insertelement <4 x i16> undef, i16 %vgetq_lane285, i32 0
  %4 = bitcast <4 x i16> %vset_lane to <1 x i64>
  %vget_lane = extractelement <1 x i64> %4, i32 0
  ret i64 %vget_lane
}

; PR25763 - folding constant vector comparisons with sign-extended result
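; The ctlz/lshr/or/icmp chain below folds to the constant mask
; <0, 0, -1, 0, 0, 0, 0, 0>, so codegen should materialize it with a byte-wise
; movi and sign-extend it to <8 x i16> with sshll.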
define <8 x i16> @dotests_458() {
; CHECK-LABEL: dotests_458
; CHECK:       movi d0, #0x00000000ff0000
; CHECK-NEXT:  sshll v0.8h, v0.8b, #0
; CHECK-NEXT:  ret
entry:
  %vclz_v.i = call <8 x i8> @llvm.ctlz.v8i8(<8 x i8> <i8 127, i8 38, i8 -1, i8 -128, i8 127, i8 0, i8 0, i8 0>, i1 false) #6
  %vsra_n = lshr <8 x i8> %vclz_v.i, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
  %name_6 = or <8 x i8> %vsra_n, <i8 127, i8 -128, i8 -1, i8 67, i8 84, i8 127, i8 -1, i8 0>
  %cmp.i603 = icmp slt <8 x i8> %name_6, <i8 -57, i8 -128, i8 127, i8 -128, i8 -1, i8 0, i8 -1, i8 -1>
  %vmovl.i4.i = sext <8 x i1> %cmp.i603 to <8 x i16>
  ret <8 x i16> %vmovl.i4.i
}
declare <8 x i8> @llvm.ctlz.v8i8(<8 x i8>, i1)