/art/runtime/arch/arm64/
D | memcmp16_arm64.S
     41  #define tmp1 x8                                   macro
     53  eor   tmp1, src1, src2
     54  tst   tmp1, #7
     56  ands  tmp1, src1, #7
    111  add   limit, limit, tmp1   /* Adjust the limit for the extra. */
    112  lsl   tmp1, tmp1, #3       /* Bytes beyond alignment -> bits. */
    114  neg   tmp1, tmp1           /* Bits to alignment -64. */
    118  lsr   tmp2, tmp2, tmp1     /* Shift (tmp1 & 63). */
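The excerpt is the routine's alignment fix-up: tmp1 carries the byte offset past 8-byte alignment, converted to a bit shift so the first misaligned doubleword can still be compared. For reference, a minimal scalar sketch of the contract the assembly implements, assuming the usual __memcmp16 semantics (compare count 16-bit units, return the difference at the first mismatch):

    #include <cstddef>
    #include <cstdint>

    // Scalar sketch, not ART's actual implementation: compare 'count'
    // uint16_t elements; return s0[i] - s1[i] at the first mismatch, else 0.
    int32_t Memcmp16Sketch(const uint16_t* s0, const uint16_t* s1, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        if (s0[i] != s1[i]) {
          return static_cast<int32_t>(s0[i]) - static_cast<int32_t>(s1[i]);
        }
      }
      return 0;
    }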
/art/compiler/optimizing/
D | code_generator_vector_mips64.cc
    1115  VectorRegister tmp1 = VectorRegisterFrom(locations->GetTemp(0));   in VisitVecSADAccumulate() local
    1134  __ Hadd_sH(tmp1, left, tmp);                                       in VisitVecSADAccumulate()
    1136  __ Asub_sH(tmp1, tmp1, tmp2);                                      in VisitVecSADAccumulate()
    1137  __ AddvH(acc, acc, tmp1);                                          in VisitVecSADAccumulate()
    1138  __ Hadd_sH(tmp1, tmp, left);                                       in VisitVecSADAccumulate()
    1140  __ Asub_sH(tmp1, tmp1, tmp2);                                      in VisitVecSADAccumulate()
    1141  __ AddvH(acc, acc, tmp1);                                          in VisitVecSADAccumulate()
    1148  __ Hadd_sH(tmp1, left, tmp);                                       in VisitVecSADAccumulate()
    1150  __ Asub_sH(tmp1, tmp1, tmp2);                                      in VisitVecSADAccumulate()
    1151  __ Hadd_sW(tmp1, tmp1, tmp1);                                      in VisitVecSADAccumulate()
    [all …]
D | code_generator_vector_mips.cc
    1117  VectorRegister tmp1 = VectorRegisterFrom(locations->GetTemp(0));   in VisitVecSADAccumulate() local
    1136  __ Hadd_sH(tmp1, left, tmp);                                       in VisitVecSADAccumulate()
    1138  __ Asub_sH(tmp1, tmp1, tmp2);                                      in VisitVecSADAccumulate()
    1139  __ AddvH(acc, acc, tmp1);                                          in VisitVecSADAccumulate()
    1140  __ Hadd_sH(tmp1, tmp, left);                                       in VisitVecSADAccumulate()
    1142  __ Asub_sH(tmp1, tmp1, tmp2);                                      in VisitVecSADAccumulate()
    1143  __ AddvH(acc, acc, tmp1);                                          in VisitVecSADAccumulate()
    1150  __ Hadd_sH(tmp1, left, tmp);                                       in VisitVecSADAccumulate()
    1152  __ Asub_sH(tmp1, tmp1, tmp2);                                      in VisitVecSADAccumulate()
    1153  __ Hadd_sW(tmp1, tmp1, tmp1);                                      in VisitVecSADAccumulate()
    [all …]
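Both MIPS back ends (the mips64 and mips excerpts are identical apart from line numbers) lower HVecSADAccumulate through MSA horizontal-add (Hadd_s), absolute-subtract (Asub_s), and vector-add (Addv) steps. Independent of the instruction selection, the operation being vectorized is sum-of-absolute-differences accumulation; a scalar model, assuming for illustration byte inputs accumulated into 32-bit lanes:

    #include <cstdint>
    #include <cstdlib>

    // Scalar model of SAD accumulation: each 32-bit accumulator lane absorbs
    // the absolute differences of four consecutive byte lanes. The lane
    // mapping is an assumption for illustration, not the back end's layout.
    void SadAccumulateSketch(int32_t* acc, const int8_t* left,
                             const int8_t* right, size_t num_bytes) {
      for (size_t i = 0; i < num_bytes; ++i) {
        acc[i / 4] += std::abs(static_cast<int32_t>(left[i]) -
                               static_cast<int32_t>(right[i]));
      }
    }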
D | code_generator_vector_arm64.cc
    1175  VRegister tmp1 = VRegisterFrom(locations->GetTemp(0));   in VisitVecSADAccumulate() local
    1177  __ Sxtl(tmp1.V8H(), left.V8B());                         in VisitVecSADAccumulate()
    1179  __ Sabal(acc.V4S(), tmp1.V4H(), tmp2.V4H());             in VisitVecSADAccumulate()
    1180  __ Sabal2(acc.V4S(), tmp1.V8H(), tmp2.V8H());            in VisitVecSADAccumulate()
    1181  __ Sxtl2(tmp1.V8H(), left.V16B());                       in VisitVecSADAccumulate()
    1183  __ Sabal(acc.V4S(), tmp1.V4H(), tmp2.V4H());             in VisitVecSADAccumulate()
    1184  __ Sabal2(acc.V4S(), tmp1.V8H(), tmp2.V8H());            in VisitVecSADAccumulate()
    1189  VRegister tmp1 = VRegisterFrom(locations->GetTemp(0));   in VisitVecSADAccumulate() local
    1193  __ Sxtl(tmp1.V8H(), left.V8B());                         in VisitVecSADAccumulate()
    1195  __ Sxtl(tmp3.V4S(), tmp1.V4H());                         in VisitVecSADAccumulate()
    [all …]
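The ARM64 back end gets the same result from NEON widening instructions: Sxtl/Sxtl2 sign-extend the low and high byte halves of the input to halfwords, then Sabal/Sabal2 accumulate the signed absolute differences into 32-bit lanes. Widening before subtracting is what keeps the difference exact; a one-lane scalar mirror of the idea:

    #include <cstdint>
    #include <cstdlib>

    // One-lane mirror of the Sxtl + Sabal pattern (illustrative). Subtracting
    // after sign extension keeps the difference exact in [-255, 255]; an
    // 8-bit subtract of, say, -128 and 127 would wrap instead.
    int32_t SabalLaneSketch(int32_t acc, int8_t a, int8_t b) {
      int16_t a16 = a;              // Sxtl: sign-extend to 16 bits.
      int16_t b16 = b;
      int32_t diff = a16 - b16;     // Exact signed difference.
      return acc + std::abs(diff);  // Sabal: accumulate |diff| into 32 bits.
    }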
D | intrinsics_arm64.cc
    1956  Register tmp1 = XRegisterFrom(locations->GetTemp(2));                in VisitStringGetCharsNoCheck() local
    1989  __ Subs(tmp1, num_chr, 8);                                           in VisitStringGetCharsNoCheck()
    1993  __ Mov(num_chr, tmp1);                                               in VisitStringGetCharsNoCheck()
    1998  __ Ldp(tmp1, tmp2, MemOperand(src_ptr, char_size * 8, PostIndex));   in VisitStringGetCharsNoCheck()
    2000  __ Stp(tmp1, tmp2, MemOperand(dst_ptr, char_size * 8, PostIndex));   in VisitStringGetCharsNoCheck()
    2009  __ Ldrh(tmp1, MemOperand(src_ptr, char_size, PostIndex));            in VisitStringGetCharsNoCheck()
    2011  __ Strh(tmp1, MemOperand(dst_ptr, char_size, PostIndex));            in VisitStringGetCharsNoCheck()
    2022  __ Ldrb(tmp1, MemOperand(src_ptr, c_char_size, PostIndex));          in VisitStringGetCharsNoCheck()
    2023  __ Strh(tmp1, MemOperand(dst_ptr, char_size, PostIndex));            in VisitStringGetCharsNoCheck()
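The three copy loops in this intrinsic are a bulk path (Ldp/Stp moves 8 chars per iteration), a one-char Ldrh/Strh tail, and a Ldrb/Strh path that widens each stored byte of a compressed (Latin-1) string to a 16-bit char. A scalar sketch of that last branch, with names that are illustrative rather than ART's actual String internals:

    #include <cstddef>
    #include <cstdint>

    // Sketch of the compressed-string branch: zero-extend each Latin-1 byte
    // to a UTF-16 code unit, matching the Ldrb (zero-extending load) / Strh
    // pair in the excerpt. Illustrative only.
    void CopyCompressedCharsSketch(const uint8_t* src, uint16_t* dst, size_t n) {
      for (size_t i = 0; i < n; ++i) {
        dst[i] = src[i];
      }
    }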
/art/test/527-checker-array-access-split/src/
D | Main.java
    620  int tmp1 = a[index];    in checkObjectArrayGet() local
    621  tmp1 += a[index + 1];   in checkObjectArrayGet()
    623  tmp1 += a[index + 2];   in checkObjectArrayGet()
    626  return tmp1;            in checkObjectArrayGet()
/art/runtime/interpreter/mterp/arm/
D | main.S
    287  .macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
    288  mov \tmp1, #0
    290  SET_VREG_SHADOW \tmp1, \vreg
    291  SET_VREG_SHADOW \tmp1, \tmp2
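CLEAR_SHADOW_PAIR zeroes the shadow (reference) slots for both halves of a wide vreg pair so the GC never sees a stale reference there. Line 289 is elided in the excerpt; the second SET_VREG_SHADOW suggests \tmp2 is set to the pair's second slot index, and a hypothetical C mirror under that assumption is:

    #include <cstdint>

    // Hypothetical mirror of CLEAR_SHADOW_PAIR, assuming the elided line 289
    // computes tmp2 = vreg + 1. 'shadow' stands in for the shadow frame's
    // reference slots; the name is illustrative.
    void ClearShadowPairSketch(uint32_t* shadow, uint32_t vreg) {
      shadow[vreg] = 0;      // SET_VREG_SHADOW \tmp1, \vreg  (tmp1 == 0)
      shadow[vreg + 1] = 0;  // SET_VREG_SHADOW \tmp1, \tmp2
    }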
/art/test/160-read-barrier-stress/src/
D | Main.java
    128  Object tmp1 = la[i0];            in testArrayReadsWithNonConstIndex() local
    135  assertSameObject(f0000, tmp1);   in testArrayReadsWithNonConstIndex()