/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | lea-recursion.ll |
    19   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
    20   store i32 %tmp10, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 1)
    22   %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
    23   %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
    24   store i32 %tmp10.1, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 2)
    26   %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
    27   %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
    28   store i32 %tmp10.2, i32* getelementptr ([1000 x i32]* @g0, i32 0, i32 3)
    30   %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
    31   %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
    [all …]
|
D | xor.ll |
    55    %tmp10 = icmp eq i32 %tmp8, 0
    56    br i1 %tmp10, label %bb12, label %bb
    78    %tmp10 = icmp eq i16 %tmp8, 0
    79    br i1 %tmp10, label %bb12, label %bb
    100   %tmp10 = icmp eq i8 %tmp8, 0
    101   br i1 %tmp10, label %bb12, label %bb
    122   %tmp10 = icmp eq i32 %tmp8, 0
    123   br i1 %tmp10, label %bb12, label %bb
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | lea-recursion.ll |
    19   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
    20   store i32 %tmp10, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 1)
    22   %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
    23   %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
    24   store i32 %tmp10.1, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 2)
    26   %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
    27   %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
    28   store i32 %tmp10.2, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 3)
    30   %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
    31   %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | lea-recursion.ll |
    19   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=2]
    20   store i32 %tmp10, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 1)
    22   %tmp9.1 = add i32 %tmp10, 1 ; <i32> [#uses=1]
    23   %tmp10.1 = add i32 %tmp9.1, %tmp8.1 ; <i32> [#uses=2]
    24   store i32 %tmp10.1, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 2)
    26   %tmp9.2 = add i32 %tmp10.1, 1 ; <i32> [#uses=1]
    27   %tmp10.2 = add i32 %tmp9.2, %tmp8.2 ; <i32> [#uses=2]
    28   store i32 %tmp10.2, i32* getelementptr ([1000 x i32], [1000 x i32]* @g0, i32 0, i32 3)
    30   %tmp9.3 = add i32 %tmp10.2, 1 ; <i32> [#uses=1]
    31   %tmp10.3 = add i32 %tmp9.3, %tmp8.3 ; <i32> [#uses=2]
    [all …]
|
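The three lea-recursion.ll copies above feed the X86 backend a long, explicitly unrolled chain of dependent adds whose results are stored back into a global array. As a rough orientation only, the excerpted IR corresponds to C along the following lines; the %tmp8.* operands are not visible in the excerpt, so the second source array (g1), the loop bound, and all names below are assumptions, not the test's actual source.

    /* Hypothetical C analogue of the add/store chain excerpted from
     * lea-recursion.ll above.  The real test unrolls the chain explicitly
     * (%tmp10, %tmp10.1, %tmp10.2, ...) so the backend sees one long
     * dependent sequence of adds feeding LEA formation. */
    int g0[1000];
    int g1[1000];

    void dep_chain(void)
    {
      int t = g0[0];
      for (int k = 1; k <= 4; k++) {
        t = (t + 1) + g1[k];   /* %tmp9.k = add %tmp10.(k-1), 1 ; %tmp10.k = add %tmp9.k, %tmp8.k */
        g0[k] = t;             /* store i32 %tmp10.k, ... @g0, i32 0, i32 k */
      }
    }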
/external/libjpeg-turbo/ |
D | jidctint.c |
    178   JLONG tmp10, tmp11, tmp12, tmp13; variable
    245   tmp10 = tmp0 + tmp3;
    284   wsptr[DCTSIZE * 0] = (int)DESCALE(tmp10 + tmp3, CONST_BITS - PASS1_BITS);
    285   wsptr[DCTSIZE * 7] = (int)DESCALE(tmp10 - tmp3, CONST_BITS - PASS1_BITS);
    347   tmp10 = tmp0 + tmp3;
    386   outptr[0] = range_limit[(int)DESCALE(tmp10 + tmp3,
    389   outptr[7] = range_limit[(int)DESCALE(tmp10 - tmp3,
    431   JLONG tmp0, tmp1, tmp2, tmp10, tmp11, tmp12, tmp13; in jpeg_idct_7x7() local
    459   tmp10 = MULTIPLY(z2 - z3, FIX(0.881747734)); /* c4 */ in jpeg_idct_7x7()
    461   tmp11 = tmp10 + tmp12 + tmp13 - MULTIPLY(z2, FIX(1.841218003)); /* c2+c4-c6 */ in jpeg_idct_7x7()
    [all …]
|
D | jfdctflt.c |
    63    FAST_FLOAT tmp10, tmp11, tmp12, tmp13; variable
    83    tmp10 = tmp0 + tmp3; /* phase 2 */
    88    dataptr[0] = tmp10 + tmp11; /* phase 3 */
    89    dataptr[4] = tmp10 - tmp11;
    97    tmp10 = tmp4 + tmp5; /* phase 2 */
    102   z5 = (tmp10 - tmp12) * ((FAST_FLOAT)0.382683433); /* c6 */
    103   z2 = ((FAST_FLOAT)0.541196100) * tmp10 + z5; /* c2-c6 */
    133   tmp10 = tmp0 + tmp3; /* phase 2 */
    138   dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    139   dataptr[DCTSIZE * 4] = tmp10 - tmp11;
    [all …]
|
D | jfdctfst.c |
    120   DCTELEM tmp10, tmp11, tmp12, tmp13; variable
    141   tmp10 = tmp0 + tmp3; /* phase 2 */
    146   dataptr[0] = tmp10 + tmp11; /* phase 3 */
    147   dataptr[4] = tmp10 - tmp11;
    155   tmp10 = tmp4 + tmp5; /* phase 2 */
    160   z5 = MULTIPLY(tmp10 - tmp12, FIX_0_382683433); /* c6 */
    161   z2 = MULTIPLY(tmp10, FIX_0_541196100) + z5; /* c2-c6 */
    191   tmp10 = tmp0 + tmp3; /* phase 2 */
    196   dataptr[DCTSIZE * 0] = tmp10 + tmp11; /* phase 3 */
    197   dataptr[DCTSIZE * 4] = tmp10 - tmp11;
    [all …]
|
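The jfdctflt.c and jfdctfst.c hits above all fall in the even half of the AAN-style forward DCT butterfly, where tmp10..tmp13 hold the phase-2 sums and differences. As an illustrative sketch only (plain float, one row pass, odd part and column pass omitted; the function name and the absence of libjpeg-turbo's FAST_FLOAT/DCTELEM typedefs are my simplifications, not the library's code):

    /* Sketch of the even part of one 1-D forward DCT row pass, mirroring the
     * tmp10/tmp11 "phase 2" / "phase 3" lines quoted above. */
    static void fdct_even_part_row(float dataptr[8])
    {
      /* Butterfly the eight inputs into the four even-part sums. */
      float tmp0 = dataptr[0] + dataptr[7];
      float tmp1 = dataptr[1] + dataptr[6];
      float tmp2 = dataptr[2] + dataptr[5];
      float tmp3 = dataptr[3] + dataptr[4];

      float tmp10 = tmp0 + tmp3;                  /* phase 2 */
      float tmp13 = tmp0 - tmp3;
      float tmp11 = tmp1 + tmp2;
      float tmp12 = tmp1 - tmp2;

      dataptr[0] = tmp10 + tmp11;                 /* phase 3: DC term */
      dataptr[4] = tmp10 - tmp11;

      float z1 = (tmp12 + tmp13) * 0.707106781f;  /* c4 */
      dataptr[2] = tmp13 + z1;
      dataptr[6] = tmp13 - z1;
    }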
D | jidctflt.c |
    77    FAST_FLOAT tmp10, tmp11, tmp12, tmp13; variable
    133   tmp10 = tmp0 + tmp2; /* phase 3 */
    139   tmp0 = tmp10 + tmp13; /* phase 2 */
    140   tmp3 = tmp10 - tmp13;
    160   tmp10 = z5 - z12 * ((FAST_FLOAT)1.082392200); /* 2*(c2-c6) */
    165   tmp4 = tmp10 - tmp5;
    196   tmp10 = z5 + wsptr[4];
    202   tmp0 = tmp10 + tmp13;
    203   tmp3 = tmp10 - tmp13;
    218   tmp10 = z5 - z12 * ((FAST_FLOAT)1.082392200); /* 2*(c2-c6) */
    [all …]
|
D | jidctred.c |
    125   JLONG tmp0, tmp2, tmp10, tmp12; variable
    170   tmp10 = tmp0 + tmp2;
    193   (int)DESCALE(tmp10 + tmp2, CONST_BITS - PASS1_BITS + 1);
    195   (int)DESCALE(tmp10 - tmp2, CONST_BITS - PASS1_BITS + 1);
    233   tmp10 = tmp0 + tmp2;
    255   outptr[0] = range_limit[(int)DESCALE(tmp10 + tmp2,
    258   outptr[3] = range_limit[(int)DESCALE(tmp10 - tmp2,
    283   JLONG tmp0, tmp10, z1; in jpeg_idct_2x2() local
    317   tmp10 = LEFT_SHIFT(z1, CONST_BITS + 2); in jpeg_idct_2x2()
    333   (int)DESCALE(tmp10 + tmp0, CONST_BITS - PASS1_BITS + 2); in jpeg_idct_2x2()
    [all …]
|
D | jidctfst.c |
    176   DCTELEM tmp10, tmp11, tmp12, tmp13; variable
    232   tmp10 = tmp0 + tmp2; /* phase 3 */
    238   tmp0 = tmp10 + tmp13; /* phase 2 */
    239   tmp3 = tmp10 - tmp13;
    259   tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */
    264   tmp4 = tmp10 + tmp5;
    318   tmp10 = ((DCTELEM)wsptr[0] + (DCTELEM)wsptr[4]);
    325   tmp0 = tmp10 + tmp13;
    326   tmp3 = tmp10 - tmp13;
    341   tmp10 = MULTIPLY(z12, FIX_1_082392200) - z5; /* 2*(c2-c6) */
    [all …]
|
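On the inverse side, the jidctflt.c and jidctfst.c hits show the matching even-part recombination: tmp10/tmp11 are built from coefficients 0 and 4, tmp13/tmp12 from coefficients 2 and 6, and the four even outputs are their sums and differences. A plain-float sketch of just that step, under my own naming (dequantization, the odd part, and the final descale/range-limit stage are left out):

    /* Even-part recombination of one 1-D inverse DCT pass, mirroring the
     * "phase 3" / "phase 2" tmp10..tmp13 lines quoted from jidctflt.c and
     * jidctfst.c above.  Illustrative sketch only. */
    static void idct_even_part(const float in[8], float even[4])
    {
      float tmp10 = in[0] + in[4];                            /* phase 3 */
      float tmp11 = in[0] - in[4];
      float tmp13 = in[2] + in[6];
      float tmp12 = (in[2] - in[6]) * 1.414213562f - tmp13;   /* 2*c4 */

      even[0] = tmp10 + tmp13;                                /* phase 2 */
      even[3] = tmp10 - tmp13;
      even[1] = tmp11 + tmp12;
      even[2] = tmp11 - tmp12;
    }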
/external/libjpeg-turbo/simd/powerpc/ |
D | jfdctfst-altivec.c |
    48    tmp10 = vec_add(tmp0, tmp3); \
    53    out0 = vec_add(tmp10, tmp11); \
    54    out4 = vec_sub(tmp10, tmp11); \
    65    tmp10 = vec_add(tmp4, tmp5); \
    69    tmp10 = vec_sl(tmp10, pre_multiply_scale_bits); \
    71    z5 = vec_sub(tmp10, tmp12); \
    74    z2 = vec_madds(tmp10, pw_0541, z5); \
    94    tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp10, tmp11, tmp12, tmp13, in jsimd_fdct_ifast_altivec() local
|
D | jidctfst-altivec.c |
    50    tmp10 = vec_add(in##0, in##4); \
    59    tmp0 = vec_add(tmp10, tmp13); \
    60    tmp3 = vec_sub(tmp10, tmp13); \
    92    tmp10 = vec_madds(z12s, pw_F1082, pw_zero); \
    93    tmp10 = vec_sub(tmp10, z5); \
    99    tmp4 = vec_add(tmp10, tmp5); \
    121   tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp10, tmp11, tmp12, tmp13, in jsimd_idct_ifast_altivec() local
|
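The two AltiVec files repeat the same butterflies with each tmpN widened to a vector of eight 16-bit lanes, so the scalar adds and subtracts quoted earlier become vec_add / vec_sub inside the quoted macros. A minimal sketch of that mapping, assuming a PowerPC compiler with AltiVec enabled (e.g. gcc -maltivec); the helper name and parameter list are illustrative, not libjpeg-turbo's own macro:

    #include <altivec.h>

    /* Sketch: the scalar "tmp10 = tmp0 + tmp3; out0 = tmp10 + tmp11;
     * out4 = tmp10 - tmp11;" even-part step, done eight 16-bit lanes at a
     * time, as in the jfdctfst-altivec.c macro lines quoted above. */
    static void fdct_even_phase(vector signed short tmp0, vector signed short tmp1,
                                vector signed short tmp2, vector signed short tmp3,
                                vector signed short *out0, vector signed short *out4)
    {
      vector signed short tmp10 = vec_add(tmp0, tmp3);   /* phase 2 */
      vector signed short tmp11 = vec_add(tmp1, tmp2);

      *out0 = vec_add(tmp10, tmp11);                     /* phase 3 */
      *out4 = vec_sub(tmp10, tmp11);
    }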
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/TailCallElim/ |
D | reorder_load.ll |
    34    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    35    ret i32 %tmp10
    65    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    66    ret i32 %tmp10
    89    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    90    ret i32 %tmp10
    123   %tmp10 = add i32 %second, %tmp8 ; <i32> [#uses=1]
    124   ret i32 %tmp10
    146   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    147   ret i32 %tmp10
    [all …]
|
D | dont_reorder_load.ll |
    25    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    26    ret i32 %tmp10
    44    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    45    ret i32 %tmp10
    62    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    63    ret i32 %tmp10
    80    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    81    ret i32 %tmp10
|
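The TailCallElim hits above and below all end with the same two-instruction tail: an add of a loaded value to the recursive call's result, followed by a return of that sum. Roughly, that is the shape of a recursion like the hypothetical one below, where the pass can only form a tail call if it is safe to hoist the load above the recursive call (reorder_load.ll covers cases where it is, dont_reorder_load.ll cases where it is not). All names here are illustrative, not the tests' own.

    /* Hypothetical C shape of the pattern in reorder_load.ll /
     * dont_reorder_load.ll: %tmp9 = recursive call, %tmp8 = load,
     * %tmp10 = add i32 %tmp9, %tmp8, ret i32 %tmp10. */
    static int sum_rec(int n, const int *q)
    {
      if (n <= 0)
        return 0;
      /* Not a tail call as written: the load of *q and the add happen after
       * the recursive call returns, unless the optimizer can safely reorder
       * the load to before the call. */
      return sum_rec(n - 1, q) + *q;
    }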
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Inline/ |
D | nested-inline.ll |
    30    %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    31    %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    32    br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
    73    %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    74    %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    75    br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
|
/external/llvm/test/Transforms/TailCallElim/ |
D | dont_reorder_load.ll |
    25    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    26    ret i32 %tmp10
    44    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    45    ret i32 %tmp10
    62    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    63    ret i32 %tmp10
    80    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    81    ret i32 %tmp10
|
D | reorder_load.ll |
    33    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    34    ret i32 %tmp10
    64    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    65    ret i32 %tmp10
    88    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    89    ret i32 %tmp10
    122   %tmp10 = add i32 %second, %tmp8 ; <i32> [#uses=1]
    123   ret i32 %tmp10
    145   %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    146   ret i32 %tmp10
|
/external/swiftshader/third_party/LLVM/test/Transforms/Inline/ |
D | nested-inline.ll |
    29    %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    30    %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    31    br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
    72    %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    73    %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    74    br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
|
/external/llvm/test/Transforms/Inline/ |
D | nested-inline.ll |
    29    %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    30    %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    31    br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
    72    %tmp10 = and i32 %X, 4 ; <i32> [#uses=1]
    73    %tmp10.upgrd.3 = icmp eq i32 %tmp10, 0 ; <i1> [#uses=1]
    74    br i1 %tmp10.upgrd.3, label %cond_next13, label %cond_true11
|
/external/swiftshader/third_party/LLVM/test/Transforms/TailCallElim/ |
D | reorder_load.ll |
    26    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    27    ret i32 %tmp10
    52    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    53    ret i32 %tmp10
    71    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    72    ret i32 %tmp10
    99    %tmp10 = add i32 %second, %tmp8 ; <i32> [#uses=1]
    100   ret i32 %tmp10
|
D | dont_reorder_load.ll |
    25    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    26    ret i32 %tmp10
    44    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    45    ret i32 %tmp10
    62    %tmp10 = add i32 %tmp9, %tmp8 ; <i32> [#uses=1]
    63    ret i32 %tmp10
|
/external/llvm/test/Analysis/ScalarEvolution/ |
D | pr25369.ll |
    15    %tmp = add i32 %tmp10, -1
    20    %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    24    %tmp10 = add i32 undef, %tmp9
    52    %tmp = add i32 %tmp10, -1
    57    %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    61    %tmp10 = add i32 undef, %tmp9
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/ScalarEvolution/ |
D | pr25369.ll |
    15    %tmp = add i32 %tmp10, -1
    20    %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    24    %tmp10 = add i32 undef, %tmp9
    52    %tmp = add i32 %tmp10, -1
    57    %tmp6 = phi i32 [ %tmp10, %bb4 ], [ 0, %bb2 ], [ 0, %bb ]
    61    %tmp10 = add i32 undef, %tmp9
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | llvm.SI.load.dword.ll |
    20    %tmp10 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
    22    …%tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 0, i32 0, i32 0, i32 0,…
    24    …%tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 %tmp11, i32 0, i32 0, i…
    26    …%tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<4 x i32> %tmp10, i32 %tmp11, i32 0, i32 0, i…
    28    …%tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer…
    31    …%tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer…
    34    …%tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<4 x i32> %tmp10, <2 x i32> zeroinitializer…
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | llvm.SI.load.dword.ll |
    20    %tmp10 = load <16 x i8>, <16 x i8> addrspace(2)* %tmp, !tbaa !0
    22    …%tmp12 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 0, i32 0, i32 0, i32 0,…
    24    …%tmp14 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i…
    26    …%tmp16 = call i32 @llvm.SI.buffer.load.dword.i32.i32(<16 x i8> %tmp10, i32 %tmp11, i32 0, i32 0, i…
    28    …%tmp18 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer…
    31    …%tmp20 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer…
    34    …%tmp22 = call i32 @llvm.SI.buffer.load.dword.i32.v2i32(<16 x i8> %tmp10, <2 x i32> zeroinitializer…
|