Searched refs:ashr (Results 1 – 25 of 1285) sorted by relevance
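
For readers skimming the hits: ashr is LLVM IR's arithmetic right shift. It shifts the first operand right by the second and fills the vacated high bits with copies of the sign bit (lshr fills with zeros instead). The exact keyword seen in several results asserts that no non-zero bits are shifted out; shifting by the bit width or more yields poison. A minimal sketch of the semantics:

  define i8 @ashr_demo() {
    %r = ashr i8 -8, 2   ; -8 is 0b11111000, so sign bits shift in and %r = -2
    ret i8 %r
  }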


/external/llvm/test/Analysis/CostModel/X86/
testshiftashr.ll
8 ; SSE2: cost of 12 {{.*}} ashr
12 %0 = ashr %shifttype %a , %b
20 ; SSE2: cost of 16 {{.*}} ashr
24 %0 = ashr %shifttype4i16 %a , %b
32 ; SSE2: cost of 32 {{.*}} ashr
36 %0 = ashr %shifttype8i16 %a , %b
44 ; SSE2: cost of 64 {{.*}} ashr
48 %0 = ashr %shifttype16i16 %a , %b
56 ; SSE2: cost of 128 {{.*}} ashr
60 %0 = ashr %shifttype32i16 %a , %b
[all …]
/external/llvm-project/llvm/test/Analysis/CostModel/X86/
testshiftashr.ll
8 ; SSE2: cost of 32 {{.*}} ashr
12 %0 = ashr %shifttype %a , %b
20 ; SSE2: cost of 32 {{.*}} ashr
24 %0 = ashr %shifttype4i16 %a , %b
32 ; SSE2: cost of 32 {{.*}} ashr
36 %0 = ashr %shifttype8i16 %a , %b
44 ; SSE2: cost of 64 {{.*}} ashr
48 %0 = ashr %shifttype16i16 %a , %b
56 ; SSE2: cost of 128 {{.*}} ashr
60 %0 = ashr %shifttype32i16 %a , %b
[all …]
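
Both testshiftashr.ll hits exercise the X86 vector cost model: each function performs a vector ashr, and a FileCheck line pins the cost TargetTransformInfo reports under SSE2. A hypothetical reduction of the pattern (the %shifttype* names are presumably type aliases for the vector types):

  define <8 x i16> @ashr_v8i16(<8 x i16> %a, <8 x i16> %b) {
  ; SSE2: cost of 32 {{.*}} ashr
    %0 = ashr <8 x i16> %a, %b
    ret <8 x i16> %0
  }
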
/external/llvm-project/llvm/test/CodeGen/NVPTX/
sext-in-reg.ll
11 %conv1 = ashr exact i64 %sext, 56
13 %conv4 = ashr exact i64 %sext1, 56
14 %shr = ashr i64 %a, 16
15 %shr9 = ashr i64 %b, 16
29 %conv1 = ashr exact i64 %sext, 32
31 %conv4 = ashr exact i64 %sext1, 32
32 %shr = ashr i64 %a, 16
33 %shr9 = ashr i64 %b, 16
47 %conv1 = ashr exact i64 %sext, 48
49 %conv4 = ashr exact i64 %sext1, 48
[all …]
/external/llvm/test/CodeGen/NVPTX/
sext-in-reg.ll
11 %conv1 = ashr exact i64 %sext, 56
13 %conv4 = ashr exact i64 %sext1, 56
14 %shr = ashr i64 %a, 16
15 %shr9 = ashr i64 %b, 16
29 %conv1 = ashr exact i64 %sext, 32
31 %conv4 = ashr exact i64 %sext1, 32
32 %shr = ashr i64 %a, 16
33 %shr9 = ashr i64 %b, 16
47 %conv1 = ashr exact i64 %sext, 48
49 %conv4 = ashr exact i64 %sext1, 48
[all …]
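
Both sext-in-reg.ll copies revolve around one idiom: shl followed by ashr with the same constant sign-extends a narrow value held in a wider register, which the backend should collapse into a single sign-extend-in-register operation. A sketch of the i8-in-i64 case (amount 56, as in the hits):

  define i64 @sext_i8_in_i64(i64 %a) {
    %shl  = shl i64 %a, 56
    %conv = ashr exact i64 %shl, 56   ; exact holds: the shl zeroed those bits
    ret i64 %conv
  }
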
/external/llvm-project/llvm/test/Transforms/InstCombine/
shift-sra.ll
14 %Y = ashr i32 %X, %shift.upgrd.1
28 %tmp5 = ashr i32 %tmp4, 3
39 ; CHECK-NEXT: [[Y2:%.*]] = ashr i64 %Y, 63
50 %Y2 = ashr i64 %Y, 63
54 %S = ashr i64 %P, 12
65 ; CHECK-NEXT: [[Y2:%.*]] = ashr i64 %Y, 63
76 %Y2 = ashr i64 %Y, 63
81 %S = ashr i64 %R, 12
97 ; CHECK-NEXT: [[S:%.*]] = ashr i32 [[P]], 16
111 %S = ashr i32 %P, 16
[all …]
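
shift-sra.ll covers InstCombine folds on sra patterns. One representative fold in this area (not necessarily this file's exact cases): consecutive constant ashrs combine by summing their amounts, clamped at bitwidth - 1 because ashr saturates once only sign bits remain.

  define i32 @ashr_ashr(i32 %X) {
    %a = ashr i32 %X, 5
    %b = ashr i32 %a, 3   ; folds to: ashr i32 %X, 8
    ret i32 %b
  }
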
canonicalize-ashr-shl-to-masking.ll
22 %tmp0 = ashr i8 %x, %y
32 %tmp0 = ashr i8 %x, 3
39 ; CHECK-NEXT: [[TMP1:%.*]] = ashr i8 [[X:%.*]], 3
43 %tmp0 = ashr i8 %x, 6
54 %tmp0 = ashr i8 %x, 3
69 %tmp0 = ashr i8 %x, %y
79 %tmp0 = ashr i8 %x, 3
86 ; CHECK-NEXT: [[TMP1:%.*]] = ashr i8 [[X:%.*]], 3
90 %tmp0 = ashr i8 %x, 6
101 %tmp0 = ashr i8 %x, 3
[all …]
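
The transform behind this file's name: an ashr whose result feeds a shl by the same constant reconstructs the original high bits, so the pair canonicalizes to a mask of the low bits. A sketch of the constant-3 case:

  define i8 @ashr_shl_to_mask(i8 %x) {
    %t = ashr i8 %x, 3
    %r = shl i8 %t, 3   ; canonicalizes to: and i8 %x, -8
    ret i8 %r
  }
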
narrow-math.ll
10 ; CHECK-NEXT: [[B:%.*]] = ashr i32 [[A:%.*]], 7
11 ; CHECK-NEXT: [[C:%.*]] = ashr i32 [[A]], 9
16 %B = ashr i32 %A, 7
17 %C = ashr i32 %A, 9
28 ; CHECK-NEXT: [[B:%.*]] = ashr i32 [[A:%.*]], 7
35 %B = ashr i32 %A, 7
47 ; CHECK-NEXT: [[B:%.*]] = ashr i16 [[A:%.*]], 7
48 ; CHECK-NEXT: [[C:%.*]] = ashr i32 [[X:%.*]], 9
54 %B = ashr i16 %A, 7
55 %C = ashr i32 %x, 9
[all …]
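
Why ashr shows up in narrow-math.ll: ashr i32 %A, 7 leaves at least eight identical top bits, so ComputeNumSignBits can prove that arithmetic on such values cannot wrap, which is what enables the narrowing. A sketch of that reasoning, not the file's exact test:

  define i32 @narrow_add_sketch(i32 %A) {
    %B = ashr i32 %A, 7   ; at least 8 sign bits
    %C = ashr i32 %A, 9   ; at least 10 sign bits
    %D = add i32 %B, %C   ; |%B| <= 2^24 and |%C| <= 2^22, so no signed wrap
    ret i32 %D
  }
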
select-bitext-bitwise-ops.ll
9 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
16 %5 = ashr i64 %y, %3
26 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
33 %5 = ashr i64 %y, %3
43 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
50 %5 = ashr i64 %y, %3
60 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
67 %5 = ashr i64 %y, %3
74 ; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 [[X:%.*]], 2
77 ; CHECK-NEXT: [[TMP4:%.*]] = ashr i64 [[Y:%.*]], [[TMP3]]
[all …]
shift-amount-reassociation-with-truncation-ashr.ll
15 ; CHECK-NEXT: [[TMP1:%.*]] = ashr i32 [[X:%.*]], 31
21 %t2 = ashr i32 %x, %t1
24 %t5 = ashr i16 %t3, %t4
32 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <2 x i32> [[X:%.*]], <i32 31, i32 31>
38 %t2 = ashr <2 x i32> %x, %t1
41 %t5 = ashr <2 x i16> %t3, %t4
47 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], <i32 31, i32 0, i32 31>
53 %t2 = ashr <3 x i32> %x, %t1
56 %t5 = ashr <3 x i16> %t3, %t4
62 ; CHECK-NEXT: [[TMP1:%.*]] = ashr <3 x i32> [[X:%.*]], <i32 31, i32 0, i32 31>
[all …]
ashr-lshr.ll
6 ; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[X:%.*]], [[Y:%.*]]
11 %r = ashr exact i32 %x, %y
18 ; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[X:%.*]], [[Y:%.*]]
23 %r = ashr i32 %x, %y
30 ; CHECK-NEXT: [[CMP1:%.*]] = ashr exact i32 [[X:%.*]], [[Y:%.*]]
35 %r = ashr exact i32 %x, %y
42 ; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[X:%.*]], [[Y:%.*]]
47 %r = ashr i32 %x, %y
54 ; CHECK-NEXT: [[CMP1:%.*]] = ashr i32 [[X:%.*]], [[Y:%.*]]
59 %r = ashr exact i32 %x, %y
[all …]
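
ashr-lshr.ll checks that a select between lshr and ashr of the same operands, keyed on the sign of the shifted value, collapses to a single shift: when %x is non-negative the two shifts agree, and when it is negative the ashr arm is taken anyway. A sketch consistent with the CHECK lines above:

  define i32 @ashr_lshr_select(i32 %x, i32 %y) {
    %cmp = icmp sgt i32 %x, -1
    %l = lshr i32 %x, %y
    %r = ashr i32 %x, %y
    %ret = select i1 %cmp, i32 %l, i32 %r   ; folds to: ashr i32 %x, %y
    ret i32 %ret
  }
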
/external/llvm/test/CodeGen/X86/
vshift-3.ll
6 ; Note that x86 does have ashr
12 %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
13 store <2 x i64> %ashr, <2 x i64>* %dst
21 %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
22 store <4 x i32> %ashr, <4 x i32>* %dst
35 %ashr = ashr <4 x i32> %val, %3
36 store <4 x i32> %ashr, <4 x i32>* %dst
44 %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
45 store <8 x i16> %ashr, <8 x i16>* %dst
63 %ashr = ashr <8 x i16> %val, %7
[all …]
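
vshift-3.ll feeds the backend vector ashrs with both constant and non-constant shift amounts; a uniform constant amount lets instruction selection use a single packed shift. The <4 x i32> case, taken almost verbatim from the hits:

  define void @ashr_v4i32(<4 x i32>* %dst, <4 x i32> %val) nounwind {
    %ashr = ashr <4 x i32> %val, <i32 5, i32 5, i32 5, i32 5>
    store <4 x i32> %ashr, <4 x i32>* %dst
    ret void
  }
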
/external/llvm/test/CodeGen/SystemZ/
int-neg-02.ll
99 %ashr = ashr i64 %shl, 32
100 %neg = sub i64 0, %ashr
102 %abs = select i1 %cmp, i64 %neg, i64 %ashr
113 %ashr = ashr i64 %shl, 32
114 %neg = sub i64 0, %ashr
116 %abs = select i1 %cmp, i64 %neg, i64 %ashr
127 %ashr = ashr i64 %shl, 32
128 %neg = sub i64 0, %ashr
130 %abs = select i1 %cmp, i64 %ashr, i64 %neg
141 %ashr = ashr i64 %shl, 32
[all …]
int-abs-01.ll
91 %ashr = ashr i64 %shl, 32
92 %neg = sub i64 0, %ashr
94 %abs = select i1 %cmp, i64 %neg, i64 %ashr
104 %ashr = ashr i64 %shl, 32
105 %neg = sub i64 0, %ashr
107 %abs = select i1 %cmp, i64 %neg, i64 %ashr
117 %ashr = ashr i64 %shl, 32
118 %neg = sub i64 0, %ashr
120 %abs = select i1 %cmp, i64 %ashr, i64 %neg
130 %ashr = ashr i64 %shl, 32
[all …]
/external/llvm-project/llvm/test/CodeGen/SystemZ/
int-neg-02.ll
99 %ashr = ashr i64 %shl, 32
100 %neg = sub i64 0, %ashr
102 %abs = select i1 %cmp, i64 %neg, i64 %ashr
113 %ashr = ashr i64 %shl, 32
114 %neg = sub i64 0, %ashr
116 %abs = select i1 %cmp, i64 %neg, i64 %ashr
127 %ashr = ashr i64 %shl, 32
128 %neg = sub i64 0, %ashr
130 %abs = select i1 %cmp, i64 %ashr, i64 %neg
141 %ashr = ashr i64 %shl, 32
[all …]
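
int-neg-02.ll and int-abs-01.ll repeat one pattern: sign-extend the low 32 bits in register (shl 32 + ashr 32), then form abs or its negation with a compare and select, which the SystemZ backend matches to its dedicated absolute-value instructions. A sketch assuming %cmp tests the sign of %ashr:

  define i64 @abs_sketch(i64 %val) {
    %shl  = shl i64 %val, 32
    %ashr = ashr i64 %shl, 32   ; sext of the low 32 bits of %val
    %neg  = sub i64 0, %ashr
    %cmp  = icmp slt i64 %ashr, 0
    %abs  = select i1 %cmp, i64 %neg, i64 %ashr   ; abs(%ashr)
    ret i64 %abs
  }
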
/external/llvm/test/CodeGen/XCore/
ashr.ll
2 define i32 @ashr(i32 %a, i32 %b) nounwind {
3 %1 = ashr i32 %a, %b
6 ; CHECK-LABEL: ashr:
7 ; CHECK-NEXT: ashr r0, r0, r1
10 %1 = ashr i32 %a, 24
14 ; CHECK-NEXT: ashr r0, r0, 24
17 %1 = ashr i32 %a, 31
21 ; CHECK-NEXT: ashr r0, r0, 32
32 ; CHECK-NEXT: ashr r0, r0, 32
44 ; CHECK-NEXT: ashr r0, r0, 32
[all …]
/external/llvm-project/llvm/test/CodeGen/XCore/
ashr.ll
2 define i32 @ashr(i32 %a, i32 %b) nounwind {
3 %1 = ashr i32 %a, %b
6 ; CHECK-LABEL: ashr:
7 ; CHECK-NEXT: ashr r0, r0, r1
10 %1 = ashr i32 %a, 24
14 ; CHECK-NEXT: ashr r0, r0, 24
17 %1 = ashr i32 %a, 31
21 ; CHECK-NEXT: ashr r0, r0, 32
32 ; CHECK-NEXT: ashr r0, r0, 32
44 ; CHECK-NEXT: ashr r0, r0, 32
[all …]
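
The XCore test is nearly self-describing: each IR ashr should select to the target's native ashr instruction, in register and immediate forms. The register form, reconstructed from the hits (CHECK placement approximate):

  define i32 @ashr(i32 %a, i32 %b) nounwind {
  ; CHECK-LABEL: ashr:
  ; CHECK-NEXT: ashr r0, r0, r1
    %1 = ashr i32 %a, %b
    ret i32 %1
  }
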
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
sext-in-reg.ll
20 %sext = ashr i32 %shl, 31
38 %ashr = ashr i32 %shl, 24
39 store i32 %ashr, i32 addrspace(1)* %out, align 4
56 %ashr = ashr i32 %shl, 16
57 store i32 %ashr, i32 addrspace(1)* %out, align 4
74 %ashr = ashr <1 x i32> %shl, <i32 24>
75 store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
88 %ashr = ashr i64 %shl, 63
89 store i64 %ashr, i64 addrspace(1)* %out, align 8
102 %ashr = ashr i64 %shl, 56
[all …]
mad_int24.ll
17 %a_24 = ashr i32 %0, 8
19 %b_24 = ashr i32 %1, 8
34 %sra.0 = ashr i32 %shl.0, 8
36 %sra.1 = ashr i32 %shl.1, 8
42 %sra.2 = ashr i32 %shl.2, 8
45 %sra.3 = ashr i32 %shl.3, 8
58 %sra.0 = ashr i32 %shl.0, 8
60 %sra.1 = ashr i32 %shl.1, 8
66 %sra.2 = ashr i32 %shl.2, 8
69 %sra.3 = ashr i32 %shl.3, 8
[all …]
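
mad_int24.ll uses shl/ashr by 8 to squeeze operands into signed 24-bit range before a multiply-add, the shape the AMDGPU backend can match to its 24-bit mad. A sketch of one such chain:

  define i32 @mad24_sketch(i32 %a, i32 %b, i32 %c) {
    %a.shl = shl i32 %a, 8
    %a.24  = ashr i32 %a.shl, 8   ; signed 24-bit value of %a
    %b.shl = shl i32 %b, 8
    %b.24  = ashr i32 %b.shl, 8   ; signed 24-bit value of %b
    %mul   = mul i32 %a.24, %b.24
    %mad   = add i32 %mul, %c
    ret i32 %mad
  }
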
/external/llvm/test/CodeGen/AMDGPU/
sext-in-reg.ll
19 %sext = ashr i32 %shl, 31
37 %ashr = ashr i32 %shl, 24
38 store i32 %ashr, i32 addrspace(1)* %out, align 4
55 %ashr = ashr i32 %shl, 16
56 store i32 %ashr, i32 addrspace(1)* %out, align 4
73 %ashr = ashr <1 x i32> %shl, <i32 24>
74 store <1 x i32> %ashr, <1 x i32> addrspace(1)* %out, align 4
87 %ashr = ashr i64 %shl, 63
88 store i64 %ashr, i64 addrspace(1)* %out, align 8
101 %ashr = ashr i64 %shl, 56
[all …]
/external/llvm/test/Transforms/InstSimplify/
shr-nop.ll
12 %t = ashr i32 %n, 17
29 %shr = ashr exact i8 0, %a
38 %shr = ashr i8 0, %a
56 %shr = ashr exact i8 0, %a
74 %shr = ashr i8 0, %a
92 %shr = ashr exact i8 -128, %a
119 %shr = ashr exact i8 -128, %a
137 %shr = ashr i8 -128, %a
155 %shr = ashr i8 -128, %a
173 %shr = ashr i8 0, %a
[all …]
/external/llvm-project/llvm/test/Transforms/InstSimplify/
shr-nop.ll
12 %t = ashr i32 %n, 17
29 %shr = ashr exact i8 0, %a
38 %shr = ashr i8 0, %a
56 %shr = ashr exact i8 0, %a
74 %shr = ashr i8 0, %a
92 %shr = ashr exact i8 -128, %a
119 %shr = ashr exact i8 -128, %a
137 %shr = ashr i8 -128, %a
155 %shr = ashr i8 -128, %a
173 %shr = ashr i8 0, %a
[all …]
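
shr-nop.ll targets InstSimplify rather than InstCombine: shifting a known constant such as 0 or -128 produces a value about which comparisons fold outright. The simplest instance:

  define i1 @ashr_zero_foldable(i8 %a) {
    %shr = ashr i8 0, %a    ; always 0 (poison if %a is out of range)
    %cmp = icmp eq i8 %shr, 0
    ret i1 %cmp             ; simplifies to true
  }
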
/external/llvm-project/llvm/test/CodeGen/PowerPC/
fast-isel-shifter.ll
35 define i32 @ashr() nounwind {
37 ; ELF64: ashr
39 %ashr = ashr i32 -1, 2
40 ret i32 %ashr
47 %ashr = ashr i32 %src1, %src2
48 ret i32 %ashr
/external/llvm/test/CodeGen/PowerPC/
fast-isel-shifter.ll
35 define i32 @ashr() nounwind {
37 ; ELF64: ashr
39 %ashr = ashr i32 -1, 2
40 ret i32 %ashr
47 %ashr = ashr i32 %src1, %src2
48 ret i32 %ashr
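
fast-isel-shifter.ll makes sure PowerPC fast-isel handles both the immediate and register forms; note that ashr i32 -1, 2 still yields -1, since only sign bits shift in. The constant case from the hits:

  define i32 @ashr() nounwind {
    %ashr = ashr i32 -1, 2   ; constant-folds to -1
    ret i32 %ashr
  }
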
/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/
combine-shift-imm-chain.ll
64 %z1 = ashr i32 %arg1, 2
65 %z2 = ashr i32 %z1, 3
75 %z1 = ashr i32 %arg1, 1
76 %z2 = ashr i32 %z1, 2
77 %z3 = ashr i32 %z2, 3
78 %z4 = ashr i32 %z3, 4
88 %z1 = ashr i32 %arg1, 10
89 %z2 = ashr i32 %z1, 10
90 %z3 = ashr i32 %z2, 10
91 %z4 = ashr i32 %z3, 10
[all …]
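
The GlobalISel combine these hits exercise merges a chain of constant-amount shifts into one by summing the amounts. The four-step chain from the hits:

  define i32 @ashr_chain(i32 %arg1) {
    %z1 = ashr i32 %arg1, 1
    %z2 = ashr i32 %z1, 2
    %z3 = ashr i32 %z2, 3
    %z4 = ashr i32 %z3, 4   ; combines to: ashr i32 %arg1, 10
    ret i32 %z4
  }
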
/external/llvm-project/llvm/test/CodeGen/X86/
vshift-3.ll
8 ; Note that x86 does have ashr
30 %ashr = ashr <2 x i64> %val, < i64 32, i64 32 >
31 store <2 x i64> %ashr, <2 x i64>* %dst
49 %ashr = ashr <4 x i32> %val, < i32 5, i32 5, i32 5, i32 5 >
50 store <4 x i32> %ashr, <4 x i32>* %dst
74 %ashr = ashr <4 x i32> %val, %3
75 store <4 x i32> %ashr, <4 x i32>* %dst
93 %ashr = ashr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
94 store <8 x i16> %ashr, <8 x i16>* %dst
124 %ashr = ashr <8 x i16> %val, %7
[all …]
