/art/runtime/interpreter/mterp/arm/

  arithmetic.S
      22: GET_VREG r1, r3 @ r1<- vCC
      25: cmp r1, #0 @ is second operand zero?
      55: GET_VREG r1, r3 @ r1<- vB
      58: cmp r1, #0 @ is second operand zero?
      84: FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
      89: cmp r1, #0 @ is second operand zero?
      123: $extract @ optional; typically r1<- ssssssCC (sign extended)
      125: @cmp r1, #0 @ is second operand zero?
      160: GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1<- vBB/vBB+1
      191: mov r1, rINST, lsr #12 @ r1<- B
      [all …]
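The recurring "cmp r1, #0 @ is second operand zero?" lines are the divide-by-zero guard in the binary-op handlers: the divisor is fetched into r1 and checked before the division is attempted. A minimal C++ sketch of the behaviour that guard enforces (the function name and the exception used here are illustrative, not ART's internal API):

    #include <cstdint>
    #include <limits>
    #include <stdexcept>

    // Illustrative only: the check performed before an integer division in the
    // div/rem handlers. The real assembly branches to common_errDivideByZero
    // instead of throwing a C++ exception.
    int32_t DivInt(int32_t dividend, int32_t divisor) {
      if (divisor == 0) {  // mirrors "cmp r1, #0"
        throw std::runtime_error("divide by zero");
      }
      // Dex semantics: Integer.MIN_VALUE / -1 yields Integer.MIN_VALUE.
      if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
        return dividend;
      }
      return dividend / divisor;
    }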
  other.S
      9: mov r1, rINST, lsr #8 @ r1<- AA
      30: FETCH r1, 2 @ r1<- BBBB (high)
      32: orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
      48: sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
      52: SET_VREG r1, r0 @ fp[A]<- r1
      82: mov r1, rINST, lsr #8 @ r1<- AA
      83: orr r0, r0, r2, lsl #16 @ r1<- BBBBbbbb
      97: FETCH r1, 2 @ r1<- BBBB (low middle)
      99: orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
      102: orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
      [all …]

  array.S
      18: GET_VREG r1, r3 @ r1<- vCC (requested index)
      22: add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
      23: cmp r1, r3 @ compare unsigned index, length
      52: GET_VREG r1, r3 @ r1<- vCC (requested index)
      54: ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
      56: cmp r1, #0
      78: GET_VREG r1, r3 @ r1<- vCC (requested index)
      82: add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
      83: cmp r1, r3 @ compare unsigned index, length
      110: GET_VREG r1, r3 @ r1<- vCC (requested index)
      [all …]
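In the array.S handlers, "cmp r1, r3 @ compare unsigned index, length" is the bounds check: the requested index is compared against the array length as an unsigned value, so a negative index fails the same comparison as an index that is too large. A sketch of that trick (helper name is illustrative):

    #include <cstdint>

    // A single unsigned comparison rejects both negative indices (which wrap to
    // huge unsigned values) and indices >= length.
    bool IndexInBounds(int32_t index, uint32_t length) {
      return static_cast<uint32_t>(index) < length;
    }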
  object.S
      7: mov r1, rINST @ arg1: uint16_t inst_data
      25: mov r1, rINST, lsr #8 @ r1<- AA
      26: VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
      40: ubfx r1, rPC, #2, #THREAD_INTERPRETER_CACHE_SIZE_LOG2 @ entry index
      41: add r0, r0, r1, lsl #3 @ entry address within the cache
      42: ldrd r0, r1, [r0] @ entry key (pc) and value (offset)
      130: FETCH r1, 1 @ r1<- field byte offset
      163: SET_VREG_WIDE_BY_ADDR r0, r1, r3 @ fp[A]<- r0/r1
      176: mov r1, rINST, lsr #12 @ r1<- B
      177: VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
      [all …]

  control_flow.S
      9: mov r1, rINST, lsr #12 @ r1<- B
      11: GET_VREG r3, r1 @ r3<- vB
      77: FETCH r3, 2 @ r1<- AAAA (hi)
      129: FETCH r1, 2 @ r1<- BBBB (hi)
      131: orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
      132: GET_VREG r1, r3 @ r1<- vAA
      153: mov r1, #0
      167: mov r1, #0
      176: mov r1, #0
      192: GET_VREG_WIDE_BY_ADDR r0, r1, r2 @ r0/r1 <- vAA/vAA+1
      [all …]

  floating_point.S
      337: ubfx r2, r1, #20, #11 @ grab the exponent
      347: adds r1, r1, r1 @ sign bit to carry
      349: mov r1, #0x7fffffff @ assume maxlong for msw
      351: adc r1, r1, #0 @ convert maxlong to minlong if exp negative
      354: orrs r3, r0, r1, lsl #12
      389: mov r1, #0x7fffffff @ assume maxlong for msw
      391: adc r1, r1, #0 @ convert maxlong to minlong if exp negative
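The floating_point.S lines belong to the saturating double-to-long conversion: the exponent field and sign bit are inspected directly, and out-of-range inputs are clamped by materialising 0x7fffffff as the most significant word and adding the carried sign bit so that maxlong becomes minlong for negative inputs. A C++ sketch of the semantics those instructions implement (function name is illustrative; the clamping and NaN rules are the dex-defined ones):

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Dex semantics for double-to-long: NaN becomes 0, values beyond the
    // representable range clamp to Long.MIN_VALUE / Long.MAX_VALUE.
    int64_t DoubleToLong(double value) {
      if (std::isnan(value)) {
        return 0;
      }
      if (value >= static_cast<double>(std::numeric_limits<int64_t>::max())) {
        return std::numeric_limits<int64_t>::max();
      }
      if (value <= static_cast<double>(std::numeric_limits<int64_t>::min())) {
        return std::numeric_limits<int64_t>::min();
      }
      return static_cast<int64_t>(value);
    }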
  main.S
      402: str r1, [r2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]
      410: add rPC, r1, r0, lsl #1 @ Create direct pointer to 1st dex opcode
      419: add r1, rFP, #OFF_FP_SHADOWFRAME
      436: add r1, rFP, #OFF_FP_SHADOWFRAME
      473: add r1, rFP, #OFF_FP_SHADOWFRAME
      482: add r1, rFP, #OFF_FP_SHADOWFRAME
      491: add r1, rFP, #OFF_FP_SHADOWFRAME
      500: add r1, rFP, #OFF_FP_SHADOWFRAME
      509: add r1, rFP, #OFF_FP_SHADOWFRAME
      518: add r1, rFP, #OFF_FP_SHADOWFRAME
      [all …]

  invoke.S
      10: add r1, rFP, #OFF_FP_SHADOWFRAME
      33: add r1, rFP, #OFF_FP_SHADOWFRAME
/art/runtime/arch/arm/

  memcmp16_arm.S
      33: pld [r1, #0]
      36: cmp r0, r1
      56: pld [r1, #32]
      59: ldrh ip, [r1], #2
      78: ldrh ip, [r1], #2
      91: eor r0, r3, r1
      101: ldr ip, [r1]
      107: pld [r1, #64]
      109: ldr lr, [r1, #4]!
      112: ldreq ip, [r1, #4]!
      [all …]
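memcmp16_arm.S is a hand-tuned comparison over arrays of 16-bit units; the pld instructions prefetch ahead of the loads, and the word-sized ldr paths compare two halfwords at a time. A plain C++ reference for the behaviour (name and signature are illustrative; the real routine is the assembly above):

    #include <cstddef>
    #include <cstdint>

    // Reference behaviour: compare 'count' 16-bit units and return the difference
    // of the first pair that differs, or 0 if all are equal. The assembly version
    // adds prefetching and word-at-a-time fast paths.
    int32_t MemCmp16(const uint16_t* s0, const uint16_t* s1, size_t count) {
      for (size_t i = 0; i < count; ++i) {
        if (s0[i] != s1[i]) {
          return static_cast<int32_t>(s0[i]) - static_cast<int32_t>(s1[i]);
        }
      }
      return 0;
    }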
  quick_entrypoints_arm.S
      122: push {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves and args.
      124: .cfi_rel_offset r1, 0
      167: pop {r1-r3, r5-r8, r10-r11, lr} @ 10 words of callee saves
      168: .cfi_restore r1
      212: .cfi_rel_offset r1, 4
      235: .cfi_restore r1
      641: strd r0, [r10] @ Store r0/r1 into result pointer
      656: mov r1, r0
      658: bl memcpy @ memcpy (dest r0, src r1, bytes r2)
      668: vldm r1, {s0-s31} @ Load all fprs from argument fprs_.
      [all …]

  instruction_set_features_assembly_tests.S
      30: mov r1,#1
      54: vmov r1, s0
      62: vmov s0, r1

  jni_entrypoints_arm.S
      24: push {r0, r1, r2, r3, lr} @ spill regs
      27: .cfi_rel_offset r1, 4
      38: pop {r0, r1, r2, r3, lr} @ restore regs
      41: .cfi_restore r1
/art/runtime/interpreter/mterp/arm64/

  floating_point.S
      21: %def fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2"):
      31: GET_VREG_DOUBLE $r1, w1 // w1<- vBB
      57: %def fbinopWide2addr(instr="fadd d0, d0, d1", r0="d0", r1="d1"):
      64: GET_VREG_DOUBLE $r1, w1 // x1<- vB
      72: %def fcmp(wide="", r1="s1", r2="s2", cond="lt"):
      82: % if r1.startswith("d"):
      83: GET_VREG_DOUBLE $r1, w2
      86: GET_VREG $r1, w2
      89: fcmp $r1, $r2
      179: % fbinopWide(instr="fadd d0, d1, d2", result="d0", r1="d1", r2="d2")
      [all …]

  arithmetic.S
      130: %def binopWide(preinstr="", instr="add x0, x1, x2", result="x0", r1="x1", r2="x2", chkzero="0"):
      149: GET_VREG_WIDE $r1, w1 // w1<- vBB
      161: %def binopWide2addr(preinstr="", instr="add x0, x0, x1", r0="x0", r1="x1", chkzero="0"):
      178: GET_VREG_WIDE $r1, w1 // x1<- vB
      181: cbz $r1, common_errDivideByZero
/art/libartbase/base/

  data_hash.h
      51: static constexpr uint32_t r1 = 15; (in operator(), local)
      65: k = (k << r1) | (k >> (32 - r1)); (in operator())
      86: k1 = (k1 << r1) | (k1 >> (32 - r1)); (in operator())
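Here r1 is not a register but the first rotation constant of a MurmurHash3-style block mix; the matched expression is a 32-bit rotate-left by 15. A sketch of that mixing step (the multiplication constants shown are the standard MurmurHash3 ones and are given only for illustration):

    #include <cstdint>

    // 32-bit rotate-left; with shift = 15 this is exactly the expression in the
    // matches above. (Valid for shifts in 1..31.)
    constexpr uint32_t RotateLeft(uint32_t value, uint32_t shift) {
      return (value << shift) | (value >> (32u - shift));
    }

    // One MurmurHash3-style block-mixing step, assuming the usual constants.
    constexpr uint32_t MixBlock(uint32_t k) {
      k *= 0xcc9e2d51u;
      k = RotateLeft(k, 15u);  // the r1 = 15 rotation
      k *= 0x1b873593u;
      return k;
    }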
/art/test/1922-owned-monitors-info/src/art/

  Test1922.java
      194: for (Function<Runnable, Runnable> r1 = li1.next(); li1.hasNext(); r1 = li1.next()) { (in runTestsOtherThread())
      203: r1.apply(null).getClass(), (in runTestsOtherThread())
      209: final Thread thr = new Thread(r1.apply(r2.apply(r3.apply(pause)))); (in runTestsOtherThread())
      238: for (Function<Runnable, Runnable> r1 = li1.next(); li1.hasNext(); r1 = li1.next()) { (in runTestsCurrentThread())
      247: r1.apply(null).getClass(), (in runTestsCurrentThread())
      251: r1.apply(r2.apply(r3.apply(printer))).run(); (in runTestsCurrentThread())
/art/runtime/gc/

  allocation_record.h
      149: bool operator()(const T* r1, const T* r2) const { (in operator())
      150: if (r1 == r2) return true; (in operator())
      151: if (r1 == nullptr || r2 == nullptr) return false; (in operator())
      152: return *r1 == *r2; (in operator())
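This comparator follows the usual three-step pattern for comparing through pointers: pointer identity first, then a null check, then a deep comparison of the pointees. A generic restatement (illustrative template, not the ART functor itself):

    template <typename T>
    bool PointeeEquals(const T* r1, const T* r2) {
      if (r1 == r2) return true;                         // same object, or both null
      if (r1 == nullptr || r2 == nullptr) return false;  // exactly one is null
      return *r1 == *r2;                                 // compare the pointed-to values
    }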
/art/test/140-field-packing/src/

  GapOrder.java
      39: public Object r1; (field in GapOrder)

/art/openjdkjvmti/

  jvmti_weak_table.h
      206: bool operator()(const art::GcRoot<art::mirror::Object>& r1, (in operator())
      209: return r1.Read<art::kWithoutReadBarrier>() == r2.Read<art::kWithoutReadBarrier>(); (in operator())
/art/compiler/optimizing/

  code_generator_arm_vixl.h
      52: vixl::aarch32::r1,
      101: vixl::aarch32::r1,
      197: return helpers::LocationFrom(vixl::aarch32::r1); (in GetObjectLocation())
      204: ? helpers::LocationFrom(vixl::aarch32::r0, vixl::aarch32::r1) (in GetReturnLocation())
      212: : helpers::LocationFrom(vixl::aarch32::r1)); (in GetSetValueLocation())
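The GetReturnLocation()/GetSetValueLocation() matches reflect the AArch32 convention the code generator models: 64-bit primitive values occupy the r0/r1 register pair, while 32-bit core values use a single register. A toy sketch of that choice (the struct and function below are illustrative stand-ins, not the ART Location API):

    // Pick r0/r1 for wide values, r0 alone otherwise.
    struct RegLocation {
      int low_reg;
      int high_reg;  // -1 when the value fits in one register
    };

    RegLocation ReturnLocationFor(bool is_wide) {
      constexpr int kR0 = 0;
      constexpr int kR1 = 1;
      return is_wide ? RegLocation{kR0, kR1}   // long/double: r0/r1 pair
                     : RegLocation{kR0, -1};   // int/reference: r0 only
    }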
  code_generator_mips64.cc
      1403: GpuRegister r1 = loc1.AsRegister<GpuRegister>(); (in SwapLocations(), local)
      1406: __ Move(r2, r1); (in SwapLocations())
      1407: __ Move(r1, TMP); (in SwapLocations())
      1415: FpuRegister r1 = loc1.AsFpuRegister<FpuRegister>(); (in SwapLocations(), local)
      1418: __ MovS(FTMP, r1); (in SwapLocations())
      1419: __ MovS(r1, r2); (in SwapLocations())
      1423: __ MovD(FTMP, r1); (in SwapLocations())
      1424: __ MovD(r1, r2); (in SwapLocations())
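SwapLocations() exchanges two register assignments by routing one of them through a scratch register (TMP for core registers, FTMP for floating-point ones); the excerpt shows part of that three-move sequence. A plain C++ restatement of the pattern (not the ART assembler API):

    // Swap two values using an explicit temporary, the same shape as the emitted
    // three-move sequence through TMP/FTMP.
    template <typename T>
    void SwapThroughTemp(T& a, T& b, T& temp) {
      temp = a;
      a = b;
      b = temp;
    }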
/art/runtime/interpreter/mterp/x86_64/

  object.S
      196: movq %rax, (%rcx) # obj.field<- r0/r1

/art/runtime/verifier/

  reg_type_test.cc
      937: for (auto r1 : all) { (in TEST_F(), local)
      939: if (r1 == r2) { (in TEST_F())
      981: bfs(compute_grey, r1); (in TEST_F())
      1011: ASSERT_EQ(no_in_edge.size(), 1u) << r1->Dump() << " u " << r2->Dump() (in TEST_F())
      1015: expectations.emplace_back(*r1, *r2, **no_in_edge.begin()); (in TEST_F())

/art/runtime/gc/allocator/

  rosalloc.h
      642: bool operator()(const RosAlloc::Run* r1, const RosAlloc::Run* r2) const { (in operator())
      643: return r1 == r2; (in operator())

/art/compiler/utils/

  assembler_thumb_test_expected.cc.inc
      6: " c: 9121 str r1, [sp, #132] ; 0x84\n",