
Searched refs:shl (Results 1 – 25 of 1101) sorted by relevance


/external/llvm/test/CodeGen/X86/
narrow-shl-cst.ll
5 %and = shl i32 %x, 10
6 %shl = and i32 %and, 31744
7 ret i32 %shl
14 %or = shl i32 %x, 10
15 %shl = or i32 %or, 31744
16 ret i32 %shl
23 %xor = shl i32 %x, 10
24 %shl = xor i32 %xor, 31744
25 ret i32 %shl
32 %and = shl i64 %x, 40
[all …]
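
The mask constants here line up with the preceding shift: 31744 is 0x7C00, i.e. 31 << 10, so the and/or/xor touches only bits the shift produced and the combine can narrow the constant. A minimal standalone sketch of the and case (function name hypothetical):

    define i32 @shl_then_mask(i32 %x) {
      ; 31744 = 0x7C00 = 31 << 10, so this pair is equivalent to
      ; shl (and i32 %x, 31), 10 with a much smaller mask constant
      %and = shl i32 %x, 10
      %shl = and i32 %and, 31744
      ret i32 %shl
    }
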
dagcombine-shifts.ll
3 ; fold (shl (zext (lshr (A, X))), X) -> (zext (shl (lshr (A, X)), X))
5 ; Canonicalize the sequence shl/zext/lshr performing the zero-extend
16 %shl = shl i16 %ext, 4
17 ret i16 %shl
23 ; CHECK-NOT: shl
30 %shl = shl i32 %ext, 4
31 ret i32 %shl
37 ; CHECK-NOT: shl
44 %shl = shl i32 %ext, 4
45 ret i32 %shl
[all …]
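
The fold quoted at line 3 pushes the zext above the shl when both shifts use the same amount, so the lshr/shl pair can be simplified in the narrow type before widening. A minimal sketch of the input shape, assuming an i8-to-i16 widening:

    define i16 @zext_between_shifts(i8 %v) {
      ; lshr and shl by the same amount with a zext in between;
      ; the combine performs both shifts in i8, then zero-extends once
      %shr = lshr i8 %v, 4
      %ext = zext i8 %shr to i16
      %shl = shl i16 %ext, 4
      ret i16 %shl
    }
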
shift-bmi2.ll
6 %shl = shl i32 %x, %shamt
13 ret i32 %shl
18 %shl = shl i32 %x, 5
25 ret i32 %shl
31 %shl = shl i32 %x, %shamt
38 ret i32 %shl
44 %shl = shl i32 %x, 5
51 ret i32 %shl
56 %shl = shl i64 %x, %shamt
60 ret i64 %shl
[all …]
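
These functions appear to exercise BMI2's SHLX, which takes a variable shift amount in any register instead of pinning it to CL. A standalone sketch; the flags are illustrative, not copied from the test:

    ; llc -mtriple=x86_64-unknown-unknown -mattr=+bmi2
    define i32 @shl32_variable(i32 %x, i32 %shamt) {
      ; with +bmi2 a variable shl can lower to shlx instead of
      ; the mov-amount-to-cl + shl sequence
      %shl = shl i32 %x, %shamt
      ret i32 %shl
    }
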
shift-combine-crash.ll
15 %shl = shl <4 x i64> %A, <i64 undef, i64 undef, i64 1, i64 2>
16 ret <4 x i64> %shl
23 %shl = shl <4 x i64> %A, <i64 2, i64 3, i64 undef, i64 undef>
24 ret <4 x i64> %shl
28 %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 3, i64 undef>
29 ret <4 x i64> %shl
33 %shl = shl <4 x i64> %A, <i64 undef, i64 2, i64 undef, i64 3>
34 ret <4 x i64> %shl
38 %shl = shl <4 x i64> %A, <i64 2, i64 undef, i64 undef, i64 undef>
39 ret <4 x i64> %shl
[all …]
sse2-vector-shifts.ll
11 %shl = shl <8 x i16> %InVec, <i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0, i16 0>
12 ret <8 x i16> %shl
21 %shl = shl <8 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
22 ret <8 x i16> %shl
31 %shl = shl <8 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
32 ret <8 x i16> %shl
40 %shl = shl <4 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0>
41 ret <4 x i32> %shl
50 %shl = shl <4 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1>
51 ret <4 x i32> %shl
[all …]
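
The amounts in these vectors are uniform splats, the easy case under SSE2: a single psllw/pslld by immediate, and shift-by-zero folds away entirely. A sketch with an arbitrary splat amount:

    define <8 x i16> @splat_shl(<8 x i16> %v) {
      ; uniform amount: one psllw-by-immediate, no per-lane work
      %shl = shl <8 x i16> %v, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
      ret <8 x i16> %shl
    }
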
or-lea.ll
16 %shl = shl i32 %x, 1
18 %or = or i32 %and, %shl
29 %shl = shl i32 %x, 1
31 %or = or i32 %shl, %and
42 %shl = shl i32 %x, 2
44 %or = or i32 %shl, %and
55 %shl = shl i32 %x, 3
57 %or = or i32 %shl, %and
68 %shl = shl i32 %x, 3
70 %or = or i32 %shl, %and
[all …]
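
In these functions the or operands have disjoint bits: the shl clears the low bits and the and keeps only low bits, so the or behaves as an add and can fold into LEA's scaled-index form. A sketch of the shape (the disjointness comes from the mask):

    define i32 @or_as_lea(i32 %x, i32 %y) {
      ; %shl has bit 0 clear and %and is at most 1, so the or
      ; equals an add and matches lea's base + index*2 form
      %shl = shl i32 %x, 1
      %and = and i32 %y, 1
      %or = or i32 %and, %shl
      ret i32 %or
    }
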
rotate4.ll
12 %shl = shl i32 %a, %and
16 %or = or i32 %shl, %shr
26 %shl = lshr i32 %a, %and
29 %shr = shl i32 %a, %and3
30 %or = or i32 %shl, %shr
40 %shl = shl i64 %a, %and
44 %or = or i64 %shl, %shr
54 %shl = lshr i64 %a, %and
57 %shr = shl i64 %a, %and3
58 %or = or i64 %shl, %shr
[all …]
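
rotate4.ll feeds the backend the canonical rotate idiom: shift left by the amount, shift right by the negated amount, mask both amounts to the bit width, then or the halves; the masking keeps amount 0 well defined so a single rol/ror can be formed. A minimal rotate-left sketch:

    define i32 @rotl32(i32 %a, i32 %amt) {
      ; (a << (amt & 31)) | (a >> ((0 - amt) & 31)) == rotl(a, amt)
      %lo = and i32 %amt, 31
      %shl = shl i32 %a, %lo
      %neg = sub i32 0, %amt
      %hi = and i32 %neg, 31
      %shr = lshr i32 %a, %hi
      %or = or i32 %shl, %shr
      ret i32 %or
    }
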
vshift-1.ll
10 %shl = shl <2 x i64> %val, < i64 32, i64 32 >
11 store <2 x i64> %shl, <2 x i64>* %dst
22 %shl = shl <2 x i64> %val, %1
23 store <2 x i64> %shl, <2 x i64>* %dst
32 %shl = shl <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
33 store <4 x i32> %shl, <4 x i32>* %dst
46 %shl = shl <4 x i32> %val, %3
47 store <4 x i32> %shl, <4 x i32>* %dst
55 %shl = shl <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
56 store <8 x i16> %shl, <8 x i16>* %dst
[all …]
vec_shift6.ll
11 %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
12 ret <8 x i16> %shl
20 %shl = shl <8 x i16> %a, <i16 0, i16 undef, i16 0, i16 0, i16 1, i16 undef, i16 -1, i16 1>
21 ret <8 x i16> %shl
33 %shl = shl <4 x i32> %a, <i32 1, i32 -1, i32 2, i32 -3>
34 ret <4 x i32> %shl
44 %shl = shl <4 x i32> %a, <i32 0, i32 0, i32 1, i32 1>
45 ret <4 x i32> %shl
59 …%shl = shl <16 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11, i16 1, i16 1, …
60 ret <16 x i16> %shl
[all …]
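
Here the shift amounts vary per lane but are constants, so instead of scalarizing, the backend can rewrite the shl as a multiply by per-lane powers of two and emit a single pmullw (the negative amounts above make those lanes undefined, which the tests also probe). A sketch with in-range amounts:

    define <8 x i16> @shl_per_lane_const(<8 x i16> %a) {
      ; shl by <1,1,2,3,...> equals mul by <2,2,4,8,...>, which
      ; SSE2 can do in one pmullw
      %shl = shl <8 x i16> %a, <i16 1, i16 1, i16 2, i16 3, i16 7, i16 0, i16 9, i16 11>
      ret <8 x i16> %shl
    }
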
avx2-vector-shifts.ll
7 …%shl = shl <16 x i16> %InVec, <i16 0, i16 …
8 ret <16 x i16> %shl
17 …%shl = shl <16 x i16> %InVec, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 …
18 ret <16 x i16> %shl
27 …%shl = shl <16 x i16> %InVec, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16…
28 ret <16 x i16> %shl
37 %shl = shl <8 x i32> %InVec, <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
38 ret <8 x i32> %shl
47 %shl = shl <8 x i32> %InVec, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
48 ret <8 x i32> %shl
[all …]
vshift-4.ll
11 %shl = shl <2 x i64> %val, %shamt
12 store <2 x i64> %shl, <2 x i64>* %dst
27 %shl = shl <2 x i64> %val, %shamt
28 store <2 x i64> %shl, <2 x i64>* %dst
37 %shl = shl <4 x i32> %val, %shamt
38 store <4 x i32> %shl, <4 x i32>* %dst
47 %shl = shl <4 x i32> %val, %shamt
48 store <4 x i32> %shl, <4 x i32>* %dst
57 %shl = shl <4 x i32> %val, %shamt
58 store <4 x i32> %shl, <4 x i32>* %dst
[all …]
shl_undef.ll
6 ; the successor shl(s) become shl undef, 1. This pattern then matches
7 ; shl x, 1 -> add x, x. add undef, undef doesn't guarantee the low
13 ; Use intel syntax, or "shl" might hit "pushl".
15 ; CHECK-NOT: shl
26 %tmp1506 = shl i32 %tmp1220, 1
33 %tmp1618 = shl i32 %tmp1676, 1
40 ; CHECK-NOT: shl
41 ; shl undef, 0 -> undef
44 %tmp2 = shl i32 undef, 0;
48 ; CHECK-NOT: shl
[all …]
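
The comments spell out the hazard: shl %x, 1 is normally strength-reduced to add %x, %x, but shl undef, 1 guarantees a zero low bit while add undef, undef does not, so the compiler must not blindly apply the add rewrite to undef operands. A standalone reduction:

    define i32 @shl_undef_by_one() {
      ; the low bit of this result is provably 0; rewriting it
      ; as add undef, undef would lose that guarantee
      %r = shl i32 undef, 1
      ret i32 %r
    }
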
legalize-shift-64.ll
6 %shl = shl i64 %conv, %sh_prom
7 ret i64 %shl
18 %shl = shl i64 %xx, %sh_prom
19 ret i64 %shl
59 %shl = shl <2 x i64> %A, %B
60 ret <2 x i64> %shl
62 ; CHECK: shl
64 ; CHECK: shl
75 %shl = shl i32 %load, 8
76 %add = add i32 %shl, -224
[all …]
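
legalize-shift-64.ll targets 32-bit x86, where no native i64 shift exists: a variable shl i64 legalizes into a pair of 32-bit shifts (shl plus shld to carry bits across the register boundary) and a fixup for amounts of 32 or more, which is what the repeated CHECK: shl lines count. A sketch of the input:

    define i64 @shl64_variable(i64 %x, i64 %amt) {
      ; on a 32-bit target this expands to shl/shld plus a
      ; fixup path for amounts >= 32
      %shl = shl i64 %x, %amt
      ret i64 %shl
    }
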
/external/llvm/test/Analysis/CostModel/X86/
testshiftshl.ll
8 ; SSE2: cost of 4 {{.*}} shl
12 %0 = shl %shifttype %a , %b
20 ; SSE2: cost of 10 {{.*}} shl
24 %0 = shl %shifttype4i16 %a , %b
32 ; SSE2: cost of 32 {{.*}} shl
36 %0 = shl %shifttype8i16 %a , %b
44 ; SSE2: cost of 64 {{.*}} shl
48 %0 = shl %shifttype16i16 %a , %b
56 ; SSE2: cost of 128 {{.*}} shl
60 %0 = shl %shifttype32i16 %a , %b
[all …]
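
These CostModel tests run the IR through opt's cost analysis and FileCheck the per-instruction estimates; under SSE2 there is no variable-per-element i16 shift, so the 8 x i16 case reports the cost of scalarizing (32 above). A sketch of one case; the RUN line follows the usual style of LLVM cost-model tests of this era and is illustrative, not copied from the file:

    ; RUN: opt < %s -cost-model -analyze -mtriple=x86_64-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
    define <8 x i16> @shift8i16(<8 x i16> %a, <8 x i16> %b) {
      ; SSE2: cost of 32 {{.*}} shl
      %0 = shl <8 x i16> %a, %b
      ret <8 x i16> %0
    }
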
vshift-shl-cost.ll
23 %shift = shl <2 x i64> %a, %b
35 %shift = shl <4 x i64> %a, %b
47 %shift = shl <4 x i32> %a, %b
59 %shift = shl <8 x i32> %a, %b
70 %shift = shl <8 x i16> %a, %b
81 %shift = shl <16 x i16> %a, %b
92 %shift = shl <16 x i8> %a, %b
103 %shift = shl <32 x i8> %a, %b
120 %shift = shl <2 x i64> %a, %splat
133 %shift = shl <4 x i64> %a, %splat
[all …]
/external/llvm/test/CodeGen/AArch64/
xbfiz.ll
6 %shl = shl i64 %v, 48
7 %shr = ashr i64 %shl, 47
14 %shl = shl i32 %v, 18
15 %shr = ashr i32 %shl, 17
22 %shl = shl i64 %v, 53
23 %shr = lshr i64 %shl, 17
30 %shl = shl i32 %v, 8
31 %shr = lshr i32 %shl, 2
38 %shl = shl i64 %v, 36
39 %and = and i64 %shl, 140668768878592
[all …]
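
xbfiz.ll pairs a shl with an ashr or lshr of a compatible amount, a shape AArch64 can fold into a single SBFIZ/UBFIZ bitfield move: take a low field, extend it, and place it at a nonzero position. A sketch of the signed variant:

    define i64 @sbfiz_shape(i64 %v) {
      ; shl 48 then ashr 47: take the low 16 bits of %v,
      ; sign-extend them, and place them one bit up (an sbfiz shape)
      %shl = shl i64 %v, 48
      %shr = ashr i64 %shl, 47
      ret i64 %shr
    }
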
arm64-shifted-sext.ll
12 %shl = shl nsw i32 %conv1, 4
13 %conv2 = trunc i32 %shl to i16
36 %shl = shl nsw i32 %conv1, 8
37 %conv2 = trunc i32 %shl to i16
61 %shl = shl nsw i32 %conv, 4
62 ret i32 %shl
83 %shl = shl nsw i32 %conv, 8
84 ret i32 %shl
106 %shl = shl nsw i64 %conv, 4
107 ret i64 %shl
[all …]
/external/llvm/lib/Target/AMDGPU/
SIRegisterInfo.td
93 (add (decimate (shl SGPR_32, 1), 2))]>;
98 (add (decimate (shl SGPR_32, 1), 4)),
99 (add (decimate (shl SGPR_32, 2), 4)),
100 (add (decimate (shl SGPR_32, 3), 4))]>;
105 (add (decimate (shl SGPR_32, 1), 4)),
106 (add (decimate (shl SGPR_32, 2), 4)),
107 (add (decimate (shl SGPR_32, 3), 4)),
108 (add (decimate (shl SGPR_32, 4), 4)),
109 (add (decimate (shl SGPR_32, 5), 4)),
110 (add (decimate (shl SGPR_32, 6), 4)),
[all …]
/external/llvm/test/CodeGen/Mips/
mips64shift.ll
6 %shl = shl i64 %a0, %a1
7 ret i64 %shl
27 %shl = shl i64 %a0, 10
28 ret i64 %shl
48 %shl = shl i64 %a0, 40
49 ret i64 %shl
72 %shl = shl i64 %a0, %sub
73 %or = or i64 %shl, %shr
81 %shl = shl i64 %a0, %a1
84 %or = or i64 %shr, %shl
[all …]
rotate.ll
8 %shl = shl i32 %a, %b
11 %or = or i32 %shr, %shl
19 %shl = shl i32 %a, 10
21 %or = or i32 %shl, %shr
31 %shl = shl i32 %a, %sub
32 %or = or i32 %shl, %shr
41 %shl = shl i32 %a, 22
42 %or = or i32 %shr, %shl
/external/llvm/test/Transforms/InstCombine/
shift.ll
8 %B = shl i32 %A, 0 ; <i32> [#uses=1]
16 %B = shl i32 0, %shift.upgrd.1 ; <i32> [#uses=1]
67 %B = shl i32 %A, 32 ;; shift all bits out
74 %B = shl <4 x i32> %A, <i32 32, i32 32, i32 32, i32 32> ;; shift all bits out
81 %B = shl <4 x i32> %A, <i32 32, i32 1, i32 2, i32 3>
103 %B = shl i32 %A, 1 ;; convert to an mul instruction
113 %C = shl i32 %B, 1 ;; convert to an mul instruction
129 %B = shl i8 %A, 5 ; <i8> [#uses=1]
130 %C = shl i8 %B, 3 ; <i8> [#uses=1]
139 %B = shl i8 %A, 7 ; <i8> [#uses=1]
[all …]
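
Several of these InstCombine cases probe degenerate shifts: shl by 0 is the identity, and shl i32 %A, 32 shifts every bit out, which LLVM defines as an undefined value, so the instruction folds away instead of surviving to codegen. A reduction of the over-shift case:

    define i32 @shl_full_width(i32 %A) {
      ; amount >= bit width: the result is undefined and the
      ; shift folds away rather than being emitted
      %B = shl i32 %A, 32
      ret i32 %B
    }
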
bitreverse-recognize.ll
10 %1 = shl i8 %a, 7
11 %2 = shl i8 %a, 5
13 %4 = shl i8 %a, 3
15 %6 = shl i8 %a, 1
40 %1 = shl i8 %a, 7
41 %2 = shl i8 %a, 5
43 %4 = shl i8 %a, 3
45 %6 = shl i8 %a, 1
68 %1 = shl i16 %a, 15
69 %2 = shl i16 %a, 13
[all …]
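
Here InstCombine is expected to recognize a bit reversal assembled by hand: each bit i is moved to bit width-1-i with a shift and mask, and the or-tree over all of them matches @llvm.bitreverse. A full i8 sketch of that input shape (value names hypothetical):

    define i8 @rev8(i8 %a) {
      ; bit i of %a goes to bit 7-i; the whole or-tree is
      ; recognizable as a call to @llvm.bitreverse.i8
      %b7 = shl i8 %a, 7              ; bit 0 -> bit 7
      %t6 = shl i8 %a, 5
      %b6 = and i8 %t6, 64            ; bit 1 -> bit 6
      %t5 = shl i8 %a, 3
      %b5 = and i8 %t5, 32            ; bit 2 -> bit 5
      %t4 = shl i8 %a, 1
      %b4 = and i8 %t4, 16            ; bit 3 -> bit 4
      %t3 = lshr i8 %a, 1
      %b3 = and i8 %t3, 8             ; bit 4 -> bit 3
      %t2 = lshr i8 %a, 3
      %b2 = and i8 %t2, 4             ; bit 5 -> bit 2
      %t1 = lshr i8 %a, 5
      %b1 = and i8 %t1, 2             ; bit 6 -> bit 1
      %b0 = lshr i8 %a, 7             ; bit 7 -> bit 0
      %o1 = or i8 %b7, %b6
      %o2 = or i8 %o1, %b5
      %o3 = or i8 %o2, %b4
      %o4 = or i8 %o3, %b3
      %o5 = or i8 %o4, %b2
      %o6 = or i8 %o5, %b1
      %r = or i8 %o6, %b0
      ret i8 %r
    }
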
/external/llvm/test/CodeGen/AMDGPU/
sext-in-reg.ll
18 %shl = shl i32 %in, 31
19 %sext = ashr i32 %shl, 31
36 %shl = shl i32 %c, 24
37 %ashr = ashr i32 %shl, 24
54 %shl = shl i32 %c, 16
55 %ashr = ashr i32 %shl, 16
72 %shl = shl <1 x i32> %c, <i32 24>
73 %ashr = ashr <1 x i32> %shl, <i32 24>
85 %c = shl i64 %a, %b
86 %shl = shl i64 %c, 63
[all …]
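
sext-in-reg.ll uses matched shl/ashr pairs to sign-extend from a narrow width inside a full register (sext_inreg): shifting by 31 broadcasts bit 0, by 24 extends an i8, by 16 an i16. A reduction of the one-bit case:

    define i32 @sext_i1_in_i32(i32 %in) {
      ; shl 31 then ashr 31 copies bit 0 into every bit:
      ; the result is 0 or -1 (sext_inreg from i1)
      %shl = shl i32 %in, 31
      %sext = ashr i32 %shl, 31
      ret i32 %sext
    }
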
/external/llvm/test/CodeGen/ARM/
rev.ll
8 %tmp4 = shl i32 %X15, 8
26 %tmp4 = shl i16 %tmp3, 8
51 %shl = shl nuw nsw i32 %conv, 8
52 %or = or i32 %conv2, %shl
53 %sext = shl i32 %or, 16
63 %shl = shl i32 %i, 24
64 %shr = ashr exact i32 %shl, 16
76 %and = shl i32 %x, 8
77 %shl = and i32 %and, 65280
84 %or10 = or i32 %or6, %shl
[all …]
/external/llvm/test/CodeGen/SystemZ/
int-neg-02.ll
98 %shl = shl i64 %val, 32
99 %ashr = ashr i64 %shl, 32
101 %cmp = icmp slt i64 %shl, 0
112 %shl = shl i64 %val, 32
113 %ashr = ashr i64 %shl, 32
115 %cmp = icmp sle i64 %shl, 0
126 %shl = shl i64 %val, 32
127 %ashr = ashr i64 %shl, 32
129 %cmp = icmp sgt i64 %shl, 0
140 %shl = shl i64 %val, 32
[all …]
