
Searched refs:tmp6 (Results 1 – 25 of 1193) sorted by relevance


/external/llvm-project/polly/test/ScopInfo/NonAffine/
non_affine_conditional_surrounding_affine_loop.ll
27 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { Stmt_bb4[] -> MemRef_A[p_2] };
28 …NEXT: Execution Context: [tmp6, N, p_2] -> { : (tmp6 > 0 and p_2 >= N) or (tmp6 < 0 and…
31 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { : -2147483648 <= tmp6 <= 2147483647 and -2147483648 <= N …
33 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { : }
35 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { : p_2 < N and (tmp6 < 0 or tmp6 > 0) }
36 ; INNERMOST-NEXT: p0: %tmp6
52 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { Stmt_bb11[i0] : 0 <= i0 < N and (tmp6 < 0 or t…
54 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { Stmt_bb11[i0] -> [0, i0] : tmp6 < 0 or tmp6 > …
56 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { Stmt_bb11[i0] -> MemRef_A[i0] };
58 ; INNERMOST-NEXT: [tmp6, N, p_2] -> { Stmt_bb11[i0] -> MemRef_A[i0] };
[all …]
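
A minimal C sketch of the shape this Polly test analyzes, as suggested by the sets above (names are hypothetical, not taken from the .ll file): a guard value loaded at run time surrounds an affine loop, so the statements only execute under the context (tmp6 < 0 or tmp6 > 0) seen in the hits.

/* Hypothetical C analogue: a non-affine conditional (tmp6 is a runtime
 * value, hence a SCoP parameter) surrounding an affine loop over
 * A[0..N-1], the statement Polly names Stmt_bb11[i0]. */
void guarded_loop(int *A, int N) {
  int tmp6 = A[0];                 /* runtime guard value */
  if (tmp6 != 0)                   /* non-affine: tmp6 < 0 or tmp6 > 0 */
    for (int i = 0; i < N; i++)    /* affine loop */
      A[i] = 0;
}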
non_affine_conditional_surrounding_non_affine_loop.ll
31 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { Stmt_bb4[] -> MemRef_A[p_2] };
32 …T: Execution Context: [tmp6, p_1, p_2] -> { : (tmp6 > 0 and p_2 >= p_1) or (tmp6 < 0 an…
35 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { : -2147483648 <= tmp6 <= 2147483647 and -2199023255552 …
37 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { : }
39 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { : p_2 < p_1 and (tmp6 < 0 or tmp6 > 0) }
40 ; INNERMOST-NEXT: p0: %tmp6
56 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { Stmt_bb12[i0] : 0 <= i0 < p_1 and (tmp6 < 0 …
58 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { Stmt_bb12[i0] -> [0, i0] : tmp6 < 0 or tmp6
60 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { Stmt_bb12[i0] -> MemRef_A[i0] };
62 ; INNERMOST-NEXT: [tmp6, p_1, p_2] -> { Stmt_bb12[i0] -> MemRef_A[i0] };
[all …]
/external/llvm/test/CodeGen/ARM/
dyn-stackalloc.ll
22 %tmp6 = load i32, i32* null
23 %tmp8 = alloca float, i32 %tmp6
50 %tmp6 = alloca i8, i32 %tmp5
51 %tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag)
52 %tmp6.len = call i32 @strlen(i8* %tmp6)
53 %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
54 …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x…
55 %tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents)
56 call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6)
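
Read together, hits 50–56 build a string in a variable-length stack buffer. A rough C analogue, assuming that intent (comment_add's signature and the separator literal are placeholders, not taken from the test):

#include <string.h>
#include <alloca.h>   /* glibc location; alloca is non-standard */

struct comment;                                  /* opaque, as in the IR */
void comment_add(struct comment *vc, char *s);   /* hypothetical prototype */

void make_comment(struct comment *vc, const char *tag,
                  const char *contents) {
  size_t n = strlen(tag) + strlen(contents) + 2;
  char *buf = alloca(n);        /* %tmp6 = alloca i8, i32 %tmp5 */
  strcpy(buf, tag);             /* %tmp9 */
  strcat(buf, " ");             /* stands in for the [2 x i8] memcpy */
  strcat(buf, contents);        /* %tmp15 */
  comment_add(vc, buf);         /* the fastcc call */
}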
vbsl-constant.ll
12 %tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
13 %tmp7 = or <8 x i8> %tmp4, %tmp6
26 %tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
27 %tmp7 = or <4 x i16> %tmp4, %tmp6
40 %tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
41 %tmp7 = or <2 x i32> %tmp4, %tmp6
55 %tmp6 = and <1 x i64> %tmp3, <i64 -4>
56 %tmp7 = or <1 x i64> %tmp4, %tmp6
69 …%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4…
70 %tmp7 = or <16 x i8> %tmp4, %tmp6
[all …]
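
Each pair of hits here is half of a bit-select: with a constant mask m, (x & m) | (y & ~m) folds to a single NEON VBSL, which is what these tests check per vector type. The i8 -4 vectors are ~3, the complement of the mask. One lane in scalar C (operand roles assumed):

#include <stdint.h>
uint8_t bsl_lane(uint8_t x, uint8_t y) {
  const uint8_t m = 0x03;                        /* low two bits */
  return (uint8_t)((x & m) | (y & (uint8_t)~m)); /* ~m == 0xFC == (i8)-4 */
}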
uxtb.ll
23 %tmp6 = and i32 %tmp1, 16711935 ; <i32> [#uses=1]
24 ret i32 %tmp6
38 %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
39 ret i32 %tmp6
47 %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
48 ret i32 %tmp6
55 %tmp6 = or i32 %tmp2, %tmp5 ; <i32> [#uses=1]
56 ret i32 %tmp6
63 %tmp6 = or i32 %tmp5, %tmp1 ; <i32> [#uses=1]
64 ret i32 %tmp6
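
The recurring constant 16711935 is 0x00FF00FF; masking alternate bytes of a shifted or rotated word is what ARM's UXTB16 instruction computes. One plausible shape among the test's variants, in C:

#include <stdint.h>
uint32_t uxtb16_ror8(uint32_t x) {
  uint32_t r = (x >> 8) | (x << 24);  /* rotate right by 8 */
  return r & 0x00FF00FFu;             /* %tmp6 = and i32 %tmp1, 16711935 */
}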
/external/llvm-project/llvm/test/CodeGen/ARM/
dyn-stackalloc.ll
22 %tmp6 = load i32, i32* null
23 %tmp8 = alloca float, i32 %tmp6
50 %tmp6 = alloca i8, i32 %tmp5
51 %tmp9 = call i8* @strcpy(i8* %tmp6, i8* %tag)
52 %tmp6.len = call i32 @strlen(i8* %tmp6)
53 %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
54 …call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp6.indexed, i8* align 1 getelementptr inbounds…
55 %tmp15 = call i8* @strcat(i8* %tmp6, i8* %contents)
56 call fastcc void @comment_add(%struct.comment* %vc, i8* %tmp6)
uxtb.ll
38 %tmp6 = and i32 %tmp1, 16711935
39 ret i32 %tmp6
61 %tmp6 = or i32 %tmp2, %tmp5
62 ret i32 %tmp6
74 %tmp6 = or i32 %tmp2, %tmp5
75 ret i32 %tmp6
86 %tmp6 = or i32 %tmp2, %tmp5
87 ret i32 %tmp6
98 %tmp6 = or i32 %tmp5, %tmp1
99 ret i32 %tmp6
vbsl-constant.ll
17 %tmp6 = and <8 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4>
18 %tmp7 = or <8 x i8> %tmp4, %tmp6
35 %tmp6 = and <4 x i16> %tmp3, <i16 -4, i16 -4, i16 -4, i16 -4>
36 %tmp7 = or <4 x i16> %tmp4, %tmp6
53 %tmp6 = and <2 x i32> %tmp3, <i32 -4, i32 -4>
54 %tmp7 = or <2 x i32> %tmp4, %tmp6
71 %tmp6 = and <1 x i64> %tmp3, <i64 -4>
72 %tmp7 = or <1 x i64> %tmp4, %tmp6
90 …%tmp6 = and <16 x i8> %tmp3, <i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4, i8 -4…
91 %tmp7 = or <16 x i8> %tmp4, %tmp6
[all …]
/external/libjpeg-turbo/
jfdctint.c
145 JLONG tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; variable
161 tmp6 = dataptr[1] - dataptr[6];
191 z2 = tmp5 + tmp6;
192 z3 = tmp4 + tmp6;
198 tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
210 dataptr[3] = (DCTELEM)DESCALE(tmp6 + z2 + z3, CONST_BITS - PASS1_BITS);
226 tmp6 = dataptr[DCTSIZE * 1] - dataptr[DCTSIZE * 6];
258 z2 = tmp5 + tmp6;
259 z3 = tmp4 + tmp6;
265 tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
[all …]
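
The MULTIPLY/DESCALE hits use libjpeg-turbo's 13-bit fixed-point scheme: DCT constants are pre-scaled by 2^CONST_BITS and results are rounded back down. A self-contained sketch with the same constant (value recomputed here as FIX(3.072711026); the macros are simplified stand-ins for the library's):

#include <stdint.h>
#define CONST_BITS 13
#define FIX_3_072711026 ((int32_t)25172)  /* 3.072711026 * 2^13, rounded */
#define DESCALE(x, n)   (((x) + ((int32_t)1 << ((n) - 1))) >> (n))
#define MULTIPLY(v, c)  ((int32_t)(v) * (c))

int32_t odd_dct_term(int32_t tmp6) {
  /* tmp6 * sqrt(2)*(c1+c3+c5-c7), rounded back to integer precision */
  return DESCALE(MULTIPLY(tmp6, FIX_3_072711026), CONST_BITS);
}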
jidctflt.c
76 FAST_FLOAT tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; variable
148 tmp6 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5] * _0_125);
151 z13 = tmp6 + tmp5; /* phase 6 */
152 z10 = tmp6 - tmp5;
163 tmp6 = tmp12 - tmp7; /* phase 2 */
164 tmp5 = tmp11 - tmp6;
169 wsptr[DCTSIZE * 1] = tmp1 + tmp6;
170 wsptr[DCTSIZE * 6] = tmp1 - tmp6;
221 tmp6 = tmp12 - tmp7;
222 tmp5 = tmp11 - tmp6;
[all …]
jidctfst.c
175 DCTELEM tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; variable
247 tmp6 = DEQUANTIZE(inptr[DCTSIZE * 5], quantptr[DCTSIZE * 5]);
250 z13 = tmp6 + tmp5; /* phase 6 */
251 z10 = tmp6 - tmp5;
262 tmp6 = tmp12 - tmp7; /* phase 2 */
263 tmp5 = tmp11 - tmp6;
268 wsptr[DCTSIZE * 1] = (int)(tmp1 + tmp6);
269 wsptr[DCTSIZE * 6] = (int)(tmp1 - tmp6);
344 tmp6 = tmp12 - tmp7; /* phase 2 */
345 tmp5 = tmp11 - tmp6;
[all …]
/external/llvm/test/CodeGen/Thumb/
dyn-stackalloc.ll
15 %tmp6 = load i32, i32* null
16 %tmp8 = alloca float, i32 %tmp6
60 %tmp6 = alloca i8, i32 %tmp5
61 %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
62 %tmp6.len = call i32 @strlen( i8* %tmp6 )
63 %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
64 …call void @llvm.memcpy.p0i8.p0i8.i32(i8* %tmp6.indexed, i8* getelementptr inbounds ([2 x i8], [2 x…
65 %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
66 call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
/external/llvm-project/llvm/test/CodeGen/Thumb/
dyn-stackalloc.ll
15 %tmp6 = load i32, i32* null
16 %tmp8 = alloca float, i32 %tmp6
60 %tmp6 = alloca i8, i32 %tmp5
61 %tmp9 = call i8* @strcpy( i8* %tmp6, i8* %tag )
62 %tmp6.len = call i32 @strlen( i8* %tmp6 )
63 %tmp6.indexed = getelementptr i8, i8* %tmp6, i32 %tmp6.len
64 …call void @llvm.memcpy.p0i8.p0i8.i32(i8* align 1 %tmp6.indexed, i8* align 1 getelementptr inbounds…
65 %tmp15 = call i8* @strcat( i8* %tmp6, i8* %contents )
66 call fastcc void @comment_add( %struct.comment* %vc, i8* %tmp6 )
/external/llvm-project/llvm/test/CodeGen/AMDGPU/
alignbit-pat.ll
14 %tmp6 = trunc i64 %tmp5 to i32
15 store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
34 %tmp6 = trunc i64 %tmp5 to i32
35 store i32 %tmp6, i32 addrspace(1)* %gep2, align 4
50 %tmp6 = trunc i64 %tmp5 to i32
51 store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
66 %tmp6 = trunc i64 %tmp5 to i32
67 store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
79 %tmp6 = trunc i64 %tmp5 to i32
80 store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
[all …]
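
Every hit is the same tail: a 64-bit shift result truncated to 32 bits and stored. The C-level shape (assumed from the hits) that the AMDGPU backend can match to a single v_alignbit_b32:

#include <stdint.h>
uint32_t align_shift(uint64_t x, unsigned s) {  /* s assumed in 1..31 */
  uint64_t tmp5 = x >> s;
  return (uint32_t)tmp5;   /* %tmp6 = trunc i64 %tmp5 to i32 */
}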
promote-alloca-array-aggregate.ll
30 %tmp6 = load float, float* %tmp5
33 %tmp9 = insertelement <4 x float> %tmp8, float %tmp6, i32 0
34 %tmp10 = insertelement <4 x float> %tmp9, float %tmp6, i32 1
35 %tmp11 = insertelement <4 x float> %tmp10, float %tmp6, i32 2
36 %tmp12 = insertelement <4 x float> %tmp11, float %tmp6, i32 3
44 ; OPT: %tmp6 = load [2 x float], [2 x float]* %f1
61 %tmp6 = load [2 x float], [2 x float]* %f1
63 store [2 x float] %tmp6, [2 x float] addrspace(1)* %tmp7
86 %tmp6 = load float, float* %tmp5
89 %tmp9 = insertelement <4 x float> %tmp8, float %tmp6, i32 0
[all …]
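
The allocas here appear to qualify for AMDGPU's PromoteAlloca because every use is a known element access or a whole-aggregate copy, letting the array live in vector registers instead of scratch memory. A hypothetical C analogue of such a promotable alloca:

float sum_pair(void) {
  float f1[2] = {1.0f, 2.0f};  /* %f1 = alloca [2 x float] */
  return f1[0] + f1[1];        /* all uses are element loads */
}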
/external/llvm/test/CodeGen/PowerPC/
2006-01-20-ShiftPartsCrash.ll
10 %tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
11 %tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
13 %tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
14 %shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
/external/llvm-project/llvm/test/CodeGen/PowerPC/
2006-01-20-ShiftPartsCrash.ll
10 %tmp6.u = add i32 %tmp5, 32 ; <i32> [#uses=1]
11 %tmp6 = bitcast i32 %tmp6.u to i32 ; <i32> [#uses=1]
13 %tmp6.upgrd.1 = trunc i32 %tmp6 to i8 ; <i8> [#uses=1]
14 %shift.upgrd.2 = zext i8 %tmp6.upgrd.1 to i64 ; <i64> [#uses=1]
/external/llvm-project/llvm/test/Transforms/InstCombine/
2007-03-21-SignedRangeTest.ll
6 define i1 @test(i32 %tmp6) {
8 ; CHECK-NEXT: [[TMP6_OFF:%.*]] = add i32 %tmp6, 83
12 %tmp7 = sdiv i32 %tmp6, 12
17 define <2 x i1> @test_vec(<2 x i32> %tmp6) {
19 ; CHECK-NEXT: [[TMP6_OFF:%.*]] = add <2 x i32> %tmp6, <i32 83, i32 83>
23 %tmp7 = sdiv <2 x i32> %tmp6, <i32 12, i32 12>
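
The fold this test pins down turns a signed-division range check into one add and one unsigned compare; the compared quotient is inferred here from the +83 offset in the CHECK lines. x / 12 == -6 holds exactly for x in [-83, -72], which is (unsigned)x + 83 < 12 (the vector case is the same per lane). In C, with wrap-safe unsigned arithmetic:

#include <stdbool.h>
bool quot_is_minus6_div(int x) { return x / 12 == -6; }
bool quot_is_minus6_cmp(int x) { return (unsigned)x + 83u < 12u; }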
/external/libjpeg-turbo/simd/arm/
jidctfst-neon.c
149 int16x4_t tmp6 = vmul_s16(vget_high_s16(row5), quant_row5); in jsimd_idct_ifast_neon() local
152 int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */ in jsimd_idct_ifast_neon()
153 int16x4_t neg_z10 = vsub_s16(tmp5, tmp6); in jsimd_idct_ifast_neon()
172 tmp6 = vsub_s16(tmp12, tmp7); /* phase 2 */ in jsimd_idct_ifast_neon()
173 tmp5 = vsub_s16(tmp11, tmp6); in jsimd_idct_ifast_neon()
178 row1 = vcombine_s16(dcval, vadd_s16(tmp1, tmp6)); in jsimd_idct_ifast_neon()
179 row6 = vcombine_s16(dcval, vsub_s16(tmp1, tmp6)); in jsimd_idct_ifast_neon()
224 int16x4_t tmp6 = vmul_s16(vget_low_s16(row5), quant_row5); in jsimd_idct_ifast_neon() local
227 int16x4_t z13 = vadd_s16(tmp6, tmp5); /* phase 6 */ in jsimd_idct_ifast_neon()
228 int16x4_t neg_z10 = vsub_s16(tmp5, tmp6); in jsimd_idct_ifast_neon()
[all …]
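
The NEON port runs the same "phase 2" chain as the scalar IDCT on four 16-bit lanes at once; tmp6 is recomputed and immediately consumed by tmp5, which is why both lines recur in each pass. A minimal sketch with the intrinsics seen above (compiles for an ARM target):

#include <arm_neon.h>
int16x4_t phase2_tmp5(int16x4_t tmp11, int16x4_t tmp12, int16x4_t tmp7) {
  int16x4_t tmp6 = vsub_s16(tmp12, tmp7);  /* phase 2 */
  return vsub_s16(tmp11, tmp6);            /* tmp5 uses the new tmp6 */
}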
/external/llvm/test/Transforms/IndVarSimplify/
iv-fold.ll
17 %tmp6 = load i32, i32* %arrayidx, align 4
21 %tmp6.1 = load i32, i32* %arrayidx.1, align 4
27 %r = add i32 %tmp6, %tmp6.1
44 %tmp6 = load i32, i32* %arrayidx, align 4
48 %tmp6.1 = load i32, i32* %arrayidx.1, align 4
54 %r = add i32 %tmp6, %tmp6.1
/external/llvm-project/llvm/test/Transforms/IndVarSimplify/
iv-fold.ll
17 %tmp6 = load i32, i32* %arrayidx, align 4
21 %tmp6.1 = load i32, i32* %arrayidx.1, align 4
27 %r = add i32 %tmp6, %tmp6.1
44 %tmp6 = load i32, i32* %arrayidx, align 4
48 %tmp6.1 = load i32, i32* %arrayidx.1, align 4
54 %r = add i32 %tmp6, %tmp6.1
/external/llvm/test/Transforms/InstCombine/
and-or-not.ll
10 %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
11 %tmp7 = or i32 %tmp6, %tmp3not ; <i32> [#uses=1]
22 %tmp6 = and i32 %b, %a ; <i32> [#uses=1]
23 %tmp6not = xor i32 %tmp6, -1 ; <i32> [#uses=1]
35 %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
36 %tmp7 = or <4 x i32> %tmp6, %tmp3not ; <<4 x i32>> [#uses=1]
47 %tmp6 = and <4 x i32> %a, %b ; <<4 x i32>> [#uses=1]
48 …%tmp6not = xor <4 x i32> %tmp6, < i32 -1, i32 -1, i32 -1, i32 -1 > ; <<4 x i32>> [#us…
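
The identity behind these hits: (a & b) | ~(a | b) is bitwise XNOR (per bit, both-set or both-clear yields 1; mixed yields 0), so the simplified form InstCombine presumably checks for is ~(a ^ b). In C:

#include <stdint.h>
uint32_t xnor_long(uint32_t a, uint32_t b)  { return (a & b) | ~(a | b); }
uint32_t xnor_short(uint32_t a, uint32_t b) { return ~(a ^ b); }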
/external/llvm-project/llvm/test/Transforms/Reassociate/
repeats.ll
86 %tmp6 = mul i3 %tmp5, %x
87 ret i3 %tmp6
101 %tmp6 = mul i4 %tmp5, %x
102 %tmp7 = mul i4 %tmp6, %x
118 %tmp6 = mul i4 %tmp5, %x
119 %tmp7 = mul i4 %tmp6, %x
136 %tmp6 = mul i4 %tmp5, %x
137 %tmp7 = mul i4 %tmp6, %x
156 %tmp6 = mul i4 %tmp5, %x
157 %tmp7 = mul i4 %tmp6, %x
[all …]
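
repeats.ll feeds Reassociate long chains of one repeated factor; the pass can rebalance x*x*...*x into powers built by squaring (the narrow i3/i4 types appear to exercise the repeat-count bookkeeping at small bit widths). A C-level picture of the rewrite itself, not of the pass:

unsigned pow6_chain(unsigned x)   { return x * x * x * x * x * x; }
unsigned pow6_squared(unsigned x) { unsigned x3 = x * x * x; return x3 * x3; }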
/external/llvm/test/Transforms/Reassociate/
repeats.ll
86 %tmp6 = mul i3 %tmp5, %x
87 ret i3 %tmp6
101 %tmp6 = mul i4 %tmp5, %x
102 %tmp7 = mul i4 %tmp6, %x
118 %tmp6 = mul i4 %tmp5, %x
119 %tmp7 = mul i4 %tmp6, %x
136 %tmp6 = mul i4 %tmp5, %x
137 %tmp7 = mul i4 %tmp6, %x
156 %tmp6 = mul i4 %tmp5, %x
157 %tmp7 = mul i4 %tmp6, %x
[all …]
/external/libyuv/files/source/
rotate_mmi.cc
27 uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; in TransposeWx8_MMI() local
140 [tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8), in TransposeWx8_MMI()
156 uint64_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6; in TransposeUVWx8_MMI() local
277 [tmp6] "=&f"(tmp6), [tmp7] "=&f"(tmp7), [tmp8] "=&f"(tmp8), in TransposeUVWx8_MMI()
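
TransposeWx8_MMI transposes 8 rows of bytes using MIPS MMI inline assembly; tmp0..tmp8 are the scratch registers bound by the "=&f" output constraints in the hits. For orientation, a portable scalar kernel of the same shape (signature modeled on libyuv's C fallback; treat the details as an assumption):

#include <stdint.h>
void TransposeWx8_C(const uint8_t *src, int src_stride,
                    uint8_t *dst, int dst_stride, int width) {
  for (int i = 0; i < width; ++i)   /* source column i ... */
    for (int j = 0; j < 8; ++j)     /* ... becomes 8 bytes of dst row i */
      dst[i * dst_stride + j] = src[j * src_stride + i];
}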
