
Searched refs:TMP1 (Results 1 – 25 of 1193), sorted by relevance


/external/llvm-project/llvm/test/Transforms/InstCombine/
pull-conditional-binop-through-shift.ll
8 ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 8
9 ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -16777216
10 ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND:%.*]], i32 [[TMP2]], i32 [[TMP1]]
20 ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 8
21 ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], -16777216
22 ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND:%.*]], i32 [[TMP2]], i32 [[TMP1]]
33 ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 8
34 ; CHECK-NEXT: [[TMP2:%.*]] = or i32 [[TMP1]], -16777216
35 ; CHECK-NEXT: [[R:%.*]] = select i1 [[COND:%.*]], i32 [[TMP2]], i32 [[TMP1]]
45 ; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X:%.*]], 8
[all …]
vector-xor.ll
8 ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]]
9 ; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
20 ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[B:%.*]], [[C:%.*]]
21 ; CHECK-NEXT: [[TMP2:%.*]] = and <4 x i32> [[TMP1]], [[A:%.*]]
36 ; CHECK-NEXT: [[TMP1:%.*]] = xor <4 x i32> [[A0:%.*]], <i32 -16777216, i32 -16777216, i32 -16777…
37 ; CHECK-NEXT: [[TMP2:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[TMP1]])
47 ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[A0:%.*]])
48 ; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 0, i32 -16777216, i32 2, i32 3>
58 ; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i32> @llvm.bswap.v4i32(<4 x i32> [[A0:%.*]])
59 ; CHECK-NEXT: [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 undef, i32 0, i32 2, i32 3>
[all …]
or-fcmp.ll
6 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno double [[X:%.*]], [[Y:%.*]]
7 ; CHECK-NEXT: ret i1 [[TMP1]]
17 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno <2 x double> [[X:%.*]], [[Y:%.*]]
18 ; CHECK-NEXT: ret <2 x i1> [[TMP1]]
29 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno double [[D:%.*]], [[C:%.*]]
30 ; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[UNO1]]
44 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno double [[D:%.*]], [[C:%.*]]
45 ; CHECK-NEXT: [[R:%.*]] = or i1 [[TMP1]], [[UNO1]]
58 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp uno <2 x float> [[D:%.*]], [[C:%.*]]
59 ; CHECK-NEXT: [[R:%.*]] = or <2 x i1> [[TMP1]], [[Z:%.*]]
[all …]
and-fcmp.ll
6 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord double [[X:%.*]], [[Y:%.*]]
7 ; CHECK-NEXT: ret i1 [[TMP1]]
17 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord <2 x double> [[X:%.*]], [[Y:%.*]]
18 ; CHECK-NEXT: ret <2 x i1> [[TMP1]]
28 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord float [[D:%.*]], [[C:%.*]]
29 ; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[Z:%.*]]
41 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ninf ord float [[D:%.*]], [[C:%.*]]
42 ; CHECK-NEXT: [[R:%.*]] = and i1 [[TMP1]], [[Z:%.*]]
57 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ord <2 x double> [[D:%.*]], [[C:%.*]]
58 ; CHECK-NEXT: [[R:%.*]] = and <2 x i1> [[TMP1]], [[ORD1]]
[all …]
trunc-extractelement.ll
7 ; LE-NEXT: [[TMP1:%.*]] = bitcast <3 x i64> [[X:%.*]] to <6 x i32>
8 ; LE-NEXT: [[T:%.*]] = extractelement <6 x i32> [[TMP1]], i32 0
12 ; BE-NEXT: [[TMP1:%.*]] = bitcast <3 x i64> [[X:%.*]] to <6 x i32>
13 ; BE-NEXT: [[T:%.*]] = extractelement <6 x i32> [[TMP1]], i32 1
23 ; LE-NEXT: [[TMP1:%.*]] = bitcast <3 x i64> [[X:%.*]] to <6 x i32>
24 ; LE-NEXT: [[T:%.*]] = extractelement <6 x i32> [[TMP1]], i32 2
28 ; BE-NEXT: [[TMP1:%.*]] = bitcast <3 x i64> [[X:%.*]] to <6 x i32>
29 ; BE-NEXT: [[T:%.*]] = extractelement <6 x i32> [[TMP1]], i32 3
39 ; LE-NEXT: [[TMP1:%.*]] = bitcast <3 x i64> [[X:%.*]] to <6 x i32>
40 ; LE-NEXT: [[T:%.*]] = extractelement <6 x i32> [[TMP1]], i32 4
[all …]
pow-4.ll
16 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast double [[SQUARE]], [[X]]
17 ; CHECK-NEXT: ret double [[TMP1]]
27 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[SQUARE]], [[SQUARE]]
28 ; CHECK-NEXT: ret float [[TMP1]]
38 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast double [[SQUARE]], [[SQUARE]]
39 ; CHECK-NEXT: ret double [[TMP1]]
49 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <2 x float> [[SQUARE]], [[X]]
50 ; CHECK-NEXT: [[TMP2:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP1]]
52 ; CHECK-NEXT: [[TMP4:%.*]] = fmul fast <2 x float> [[TMP1]], [[TMP3]]
63 ; CHECK-NEXT: [[TMP1:%.*]] = fmul fast <2 x double> [[SQUARE]], [[SQUARE]]
[all …]
canonicalize-low-bit-mask-and-icmp-eq-to-icmp-ule.ll
18 ; CHECK-NEXT: [[TMP1:%.*]] = icmp uge i8 [[TMP0]], [[X:%.*]]
19 ; CHECK-NEXT: ret i1 [[TMP1]]
34 ; CHECK-NEXT: [[TMP1:%.*]] = icmp uge <2 x i8> [[TMP0]], [[X:%.*]]
35 ; CHECK-NEXT: ret <2 x i1> [[TMP1]]
46 ; CHECK-NEXT: [[TMP1:%.*]] = icmp uge <3 x i8> [[TMP0]], [[X:%.*]]
47 ; CHECK-NEXT: ret <3 x i1> [[TMP1]]
65 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i8 [[X]], [[TMP0]]
66 ; CHECK-NEXT: ret i1 [[TMP1]]
79 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ule i8 [[X]], [[TMP0]]
80 ; CHECK-NEXT: ret i1 [[TMP1]]
[all …]
canonicalize-low-bit-mask-and-icmp-ne-to-icmp-ugt.ll
18 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult i8 [[TMP0]], [[X:%.*]]
19 ; CHECK-NEXT: ret i1 [[TMP1]]
34 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <2 x i8> [[TMP0]], [[X:%.*]]
35 ; CHECK-NEXT: ret <2 x i1> [[TMP1]]
46 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ult <3 x i8> [[TMP0]], [[X:%.*]]
47 ; CHECK-NEXT: ret <3 x i1> [[TMP1]]
65 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], [[TMP0]]
66 ; CHECK-NEXT: ret i1 [[TMP1]]
79 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i8 [[X]], [[TMP0]]
80 ; CHECK-NEXT: ret i1 [[TMP1]]
[all …]
trunc-binop-ext.ll
5 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %y32 to i16
6 ; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], %x16
17 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %y32 to i16
18 ; CHECK-NEXT: [[R:%.*]] = and i16 [[TMP1]], %x16
29 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %y32 to i16
30 ; CHECK-NEXT: [[R:%.*]] = or i16 [[TMP1]], %x16
41 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %y32 to i16
42 ; CHECK-NEXT: [[R:%.*]] = or i16 [[TMP1]], %x16
53 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i32 %y32 to i16
54 ; CHECK-NEXT: [[R:%.*]] = xor i16 [[TMP1]], %x16
[all …]
sign-test-and-or.ll
8 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 %a, %b
9 ; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
20 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %a, %b
21 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
32 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %a, %b
33 ; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
44 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 %a, %b
45 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i32 [[TMP1]], -1
56 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %a, -2013265920
57 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 0
[all …]
canonicalize-signed-truncation-check.ll
18 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 4
19 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt i8 [[TMP1]], 7
31 ; CHECK-NEXT: [[TMP1:%.*]] = add i65 [[X:%.*]], 9223372036854775808
32 ; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i65 [[TMP1]], 0
47 ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 4, i8 4>
48 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ugt <2 x i8> [[TMP1]], <i8 7, i8 7>
60 ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 6>
61 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <2 x i8> [[TMP1]], [[X]]
73 ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 5, i8 5>
74 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne <3 x i8> [[TMP1]], [[X]]
[all …]
canonicalize-lack-of-signed-truncation-check.ll
18 ; CHECK-NEXT: [[TMP1:%.*]] = add i8 [[X:%.*]], 4
19 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i8 [[TMP1]], 8
31 ; CHECK-NEXT: [[TMP1:%.*]] = add i65 [[X:%.*]], 9223372036854775808
32 ; CHECK-NEXT: [[TMP2:%.*]] = icmp sgt i65 [[TMP1]], -1
47 ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i8> [[X:%.*]], <i8 4, i8 4>
48 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult <2 x i8> [[TMP1]], <i8 8, i8 8>
60 ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <2 x i8> [[TMP0]], <i8 5, i8 6>
61 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <2 x i8> [[TMP1]], [[X]]
73 ; CHECK-NEXT: [[TMP1:%.*]] = ashr exact <3 x i8> [[TMP0]], <i8 5, i8 5, i8 5>
74 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq <3 x i8> [[TMP1]], [[X]]
[all …]
shl-factor.ll
8 ; CHECK-NEXT: [[TMP1:%.*]] = add i6 [[X:%.*]], [[Y:%.*]]
9 ; CHECK-NEXT: [[DIFF:%.*]] = shl i6 [[TMP1]], [[Z:%.*]]
20 ; CHECK-NEXT: [[TMP1:%.*]] = add nsw <2 x i4> [[X:%.*]], [[Y:%.*]]
21 ; CHECK-NEXT: [[DIFF:%.*]] = shl nsw <2 x i4> [[TMP1]], [[Z:%.*]]
32 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw i64 [[X:%.*]], [[Y:%.*]]
33 ; CHECK-NEXT: [[DIFF:%.*]] = shl nuw i64 [[TMP1]], [[Z:%.*]]
46 ; CHECK-NEXT: [[TMP1:%.*]] = add nsw i8 [[X]], [[Y:%.*]]
47 ; CHECK-NEXT: [[DIFF:%.*]] = shl nsw i8 [[TMP1]], [[Z]]
61 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw i8 [[X:%.*]], [[Y]]
62 ; CHECK-NEXT: [[DIFF:%.*]] = shl nuw i8 [[TMP1]], [[Z]]
[all …]
and-or-icmps.ll
30 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[B:%.*]], [[A:%.*]]
31 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[TMP1]], 8
46 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], -2
47 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 50
60 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], -2
61 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 50
74 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[X:%.*]], -33
75 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 65
86 ; CHECK-NEXT: [[TMP1:%.*]] = and i19 [[X:%.*]], -129
87 ; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i19 [[TMP1]], 65
[all …]
bit-checks.ll
6 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %argc, 3
7 ; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 3
22 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %argc, 3
23 ; CHECK-NEXT: [[NOT_:%.*]] = icmp eq i32 [[TMP1]], 3
43 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %argc, 55
44 ; CHECK-NEXT: [[NOT_:%.*]] = icmp ne i32 [[TMP1]], 0
59 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 %argc, 23
60 ; CHECK-NEXT: [[NOT_:%.*]] = icmp ne i32 [[TMP1]], 0
75 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 %argc2, %argc3
76 ; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], %argc
[all …]
adjust-for-minmax.ll
62 ; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[N:%.*]], 0
63 ; CHECK-NEXT: [[M:%.*]] = select i1 [[TMP1]], i32 [[N]], i32 0
75 ; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt <2 x i32> [[N:%.*]], zeroinitializer
76 ; CHECK-NEXT: [[M:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[N]], <2 x i32> zeroinitializer
88 ; CHECK-NEXT: [[TMP1:%.*]] = icmp slt i32 [[N:%.*]], 0
89 ; CHECK-NEXT: [[M:%.*]] = select i1 [[TMP1]], i32 [[N]], i32 0
101 ; CHECK-NEXT: [[TMP1:%.*]] = icmp slt <2 x i32> [[N:%.*]], zeroinitializer
102 ; CHECK-NEXT: [[M:%.*]] = select <2 x i1> [[TMP1]], <2 x i32> [[N]], <2 x i32> zeroinitializer
114 ; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i32 [[N:%.*]], 5
115 ; CHECK-NEXT: [[M:%.*]] = select i1 [[TMP1]], i32 [[N]], i32 5
[all …]
canonicalize-clamp-with-select-of-constant-threshold-pattern.ll
10 ; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[X:%.*]], -32768
11 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 -32768
25 ; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[X:%.*]], -32768
26 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 -32768
43 ; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[X:%.*]], -32768
44 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 -32768
58 ; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[X:%.*]], -32768
59 ; CHECK-NEXT: [[TMP2:%.*]] = select i1 [[TMP1]], i32 [[X]], i32 -32768
77 ; CHECK-NEXT: [[TMP1:%.*]] = select i1 [[DOTINV1]], i32 [[X]], i32 -32768
78 ; CHECK-NEXT: [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 32767
[all …]
/external/pcre/dist2/src/
pcre2_jit_compile.c
554 #define TMP1 SLJIT_R0 macro
2148 OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); in init_frame()
2151 OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); in init_frame()
2165 OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); in init_frame()
2168 OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); in init_frame()
2178 OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), OVECTOR(0)); in init_frame()
2181 OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); in init_frame()
2187 OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->mark_ptr); in init_frame()
2190 OP1(SLJIT_MOV, SLJIT_MEM1(STACK_TOP), stackpos, TMP1, 0); in init_frame()
2196 OP1(SLJIT_MOV, TMP1, 0, SLJIT_MEM1(SLJIT_SP), common->capture_last_ptr); in init_frame()
[all …]
/external/boringssl/src/crypto/cipher_extra/asm/
aes128gcmsiv-x86_64.pl
85 my $TMP1 = "%xmm2";
95 vpclmulqdq \$0x00, $TMP0, $T, $TMP1
102 vpxor $TMP3, $TMP1, $TMP1
105 vpclmulqdq \$0x10, poly(%rip), $TMP1, $TMP2
106 vpshufd \$78, $TMP1, $TMP3
107 vpxor $TMP3, $TMP2, $TMP1
109 vpclmulqdq \$0x10, poly(%rip), $TMP1, $TMP2
110 vpshufd \$78, $TMP1, $TMP3
111 vpxor $TMP3, $TMP2, $TMP1
113 vpxor $TMP4, $TMP1, $T
[all …]
/external/llvm/test/Transforms/InstCombine/
or-fcmp.ll
27 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq double %a, %b
28 ; CHECK-NEXT: ret i1 [[TMP1]]
49 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oge double %a, %b
50 ; CHECK-NEXT: ret i1 [[TMP1]]
60 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt double %a, %b
61 ; CHECK-NEXT: ret i1 [[TMP1]]
82 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oge double %a, %b
83 ; CHECK-NEXT: ret i1 [[TMP1]]
93 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oge double %a, %b
94 ; CHECK-NEXT: ret i1 [[TMP1]]
[all …]
and-fcmp.ll
53 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq double %a, %b
54 ; CHECK-NEXT: ret i1 [[TMP1]]
84 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt double %a, %b
85 ; CHECK-NEXT: ret i1 [[TMP1]]
105 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oeq double %a, %b
106 ; CHECK-NEXT: ret i1 [[TMP1]]
116 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp ogt double %a, %b
117 ; CHECK-NEXT: ret i1 [[TMP1]]
127 ; CHECK-NEXT: [[TMP1:%.*]] = fcmp oge double %a, %b
128 ; CHECK-NEXT: ret i1 [[TMP1]]
[all …]
/external/llvm-project/llvm/test/Transforms/Attributor/
heap_to_stack.ll
47 ; CHECK-NEXT: [[TMP1:%.*]] = tail call noalias i8* @malloc(i64 noundef 4)
48 ; CHECK-NEXT: tail call void @nocapture_func_frees_pointer(i8* noalias nocapture [[TMP1]])
50 ; CHECK-NEXT: tail call void @free(i8* noalias nocapture [[TMP1]])
64 ; CHECK-NEXT: [[TMP1:%.*]] = tail call noalias i8* @malloc(i64 noundef 4)
65 ; CHECK-NEXT: tail call void @sync_func(i8* [[TMP1]])
66 ; CHECK-NEXT: tail call void @free(i8* nocapture [[TMP1]])
79 ; IS________OPM-NEXT: [[TMP1:%.*]] = tail call noalias i8* @malloc(i64 noundef 4)
80 ; IS________OPM-NEXT: tail call void @no_sync_func(i8* noalias nocapture nofree [[TMP1]])
81 ; IS________OPM-NEXT: tail call void @free(i8* noalias nocapture [[TMP1]])
85 ; IS________NPM-NEXT: [[TMP1:%.*]] = alloca i8, i64 4, align 1
[all …]
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/
arith-fp.ll
16 ; CHECK-NEXT: [[TMP1:%.*]] = fadd <2 x double> [[A:%.*]], [[B:%.*]]
17 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
19 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
36 ; CHECK-NEXT: [[TMP1:%.*]] = fsub <2 x double> [[A:%.*]], [[B:%.*]]
37 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
39 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
56 ; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x double> [[A:%.*]], [[B:%.*]]
57 ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x double> [[TMP1]], i32 0
59 ; CHECK-NEXT: [[TMP3:%.*]] = extractelement <2 x double> [[TMP1]], i32 1
76 ; SSE-NEXT: [[TMP1:%.*]] = fdiv <2 x double> [[A:%.*]], [[B:%.*]]
[all …]
/external/llvm-project/llvm/test/Instrumentation/PoisonChecking/
basic-flag-validation.ll
17 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 [[…
18 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
30 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[A:%.*]], i32 [[…
31 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
43 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[A:%.*]], i32 [[…
44 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
68 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 [[A:%.*]], i32 [[…
69 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
81 ; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[A:%.*]], i32 [[…
82 ; CHECK-NEXT: [[TMP2:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
[all …]
/external/llvm-project/llvm/test/Transforms/CodeGenPrepare/X86/
overflow-intrinsics.ll
10 ; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[…
11 ; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
12 ; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
24 ; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[…
25 ; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
26 ; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
40 ; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[…
41 ; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
42 ; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
54 ; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[…
[all …]
