/art/disassembler/ |
D | disassembler_arm64.cc |
     39  TR = 19,  enumerator
     50  if (reg.GetCode() == TR) {  in AppendRegisterNameToOutput()
    129  if (instr->GetRn() == TR) {  in VisitLoadStoreUnsignedOffsetInstr()
    140  target->GetRn() == TR &&  in VisitUnconditionalBranchInstr()
|
D | disassembler_arm.cc | 40 static const vixl::aarch32::Register tr(TR);
|
D | disassembler_riscv64.cc |
     47  TR = 9,  enumerator
    365  if (rs1 == TR && offset >= 0) {  in PrintLoadStoreAddress()
|
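Note: all three disassemblers special-case the thread register so it prints under its runtime alias ("tr") rather than its architectural name. A minimal sketch of that substitution, with hypothetical names (not the ART API), assuming the arm64 encoding above where TR is x19:

    #include <string>

    // Hypothetical sketch: arm64 GPR naming with the ART alias substituted.
    // The listing above shows ART reserves x19 as the Thread Register (TR).
    constexpr unsigned kThreadRegister = 19;

    std::string RegisterName(unsigned code) {
      if (code == kThreadRegister) {
        return "tr";  // print the runtime alias instead of "x19"
      }
      return "x" + std::to_string(code);
    }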
/art/compiler/utils/riscv64/ |
D | jni_macro_assembler_riscv64.cc |
    199  __ Stored(src, TR, offs.Int32Value());  in StoreStackPointerToThread()
    238  Riscv64ManagedRegister tr = Riscv64ManagedRegister::FromXRegister(TR);  in LoadRawPtrFromThread()
    403  __ Mv(dest.AsRiscv64().AsXRegister(), TR);  in GetCurrentThread()
    407  __ Stored(TR, SP, offset.Int32Value());  in GetCurrentThread()
    454  Call(Riscv64ManagedRegister::FromXRegister(TR), offset);  in CallFromThread()
    474  __ LrW(scratch, TR, AqRl::kNone);  in TryToTransitionFromRunnableToNative()
    482  __ ScW(scratch, scratch2, TR, AqRl::kRelease);  in TryToTransitionFromRunnableToNative()
    486  __ Stored(Zero, TR, thread_held_mutex_mutator_lock_offset.Int32Value());  in TryToTransitionFromRunnableToNative()
    511  __ LrW(scratch, TR, AqRl::kAcquire);  in TryToTransitionFromNativeToRunnable()
    521  __ ScW(scratch, Zero, TR, AqRl::kNone);  in TryToTransitionFromNativeToRunnable()
    [all …]
|
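The TryToTransitionFrom… hits are the interesting ones: the assembler emits an lr.w/sc.w loop on the state-and-flags word addressed through TR to flip the thread between the runnable and native states. A conceptual C++ analogue of the runnable-to-native direction, using a strong CAS with the same release ordering as the ScW(..., AqRl::kRelease) above; the state encodings and field layout are hypothetical:

    #include <atomic>
    #include <cstdint>

    enum : uint32_t { kRunnable = 0, kNative = 1 };  // hypothetical encodings

    struct ThreadSketch {
      std::atomic<uint32_t> state_and_flags;  // the word the LrW/ScW pair targets
    };

    // Succeeds only if the old value is exactly "runnable, no flags set",
    // mirroring the load-reserved / check / store-conditional loop.
    bool TryToTransitionFromRunnableToNative(ThreadSketch* self) {
      uint32_t expected = kRunnable;
      return self->state_and_flags.compare_exchange_strong(
          expected, kNative, std::memory_order_release, std::memory_order_relaxed);
    }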
/art/compiler/utils/arm64/ |
D | jni_macro_assembler_arm64.cc |
     68  ___ Mov(reg_x(dest.AsArm64().AsXRegister()), reg_x(TR));  in GetCurrentThread()
     72  StoreToOffset(TR, SP, offset.Int32Value());  in GetCurrentThread()
    210  ___ Str(scratch, MEM_OP(reg_x(TR), tr_offs.Int32Value()));  in StoreStackPointerToThread()
    314  LoadFromOffset(dst.AsXRegister(), TR, offs.Int32Value());  in LoadRawPtrFromThread()
    645  ___ Ldr(lr, MEM_OP(reg_x(TR), offset.Int32Value()));  in CallFromThread()
    725  ___ Ldxr(scratch, MEM_OP(reg_x(TR)));  in TryToTransitionFromRunnableToNative()
    730  ___ Stlxr(scratch, scratch2, MEM_OP(reg_x(TR)));  in TryToTransitionFromRunnableToNative()
    734  ___ Str(xzr, MEM_OP(reg_x(TR), thread_held_mutex_mutator_lock_offset.Int32Value()));  in TryToTransitionFromRunnableToNative()
    757  ___ Ldaxr(scratch, MEM_OP(reg_x(TR)));  in TryToTransitionFromNativeToRunnable()
    765  ___ Stxr(scratch, wzr, MEM_OP(reg_x(TR)));  in TryToTransitionFromNativeToRunnable()
    [all …]
|
D | assembler_arm64.cc | 192 vixl::aarch64::Register tr = reg_x(TR); // Thread Register. in GenerateMarkingRegisterCheck()
|
D | managed_register_arm64_test.cc | 628 EXPECT_TRUE(vixl::aarch64::x19.Is(Arm64Assembler::reg_x(TR))); in TEST()
|
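Common to the hits in this directory: Thread fields are reached as a constant offset from TR, e.g. Str/Ldr on MEM_OP(reg_x(TR), offs). A sketch of the addressing idiom with a hypothetical field layout:

    #include <cstddef>
    #include <cstdint>

    struct ThreadSketch {          // stand-in for art::Thread
      uint32_t state_and_flags;
      void* top_of_managed_stack;  // hypothetical field
    };

    // Equivalent of: Ldr dst, [TR, #offset]
    uintptr_t LoadRawPtrFromThread(const ThreadSketch* tr, size_t offset) {
      return *reinterpret_cast<const uintptr_t*>(
          reinterpret_cast<const char*>(tr) + offset);
    }

    // Usage: LoadRawPtrFromThread(self, offsetof(ThreadSketch, top_of_managed_stack));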
/art/cmdline/detail/ |
D | cmdline_parser_detail.h |
     54  template <typename TL, typename TR>
     55  static std::true_type EqualityOperatorTest(const TL& left, const TR& right,
|
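Unlike every other hit in this list, TR here is just a template parameter name ("type of the right operand"), not the thread register. The surrounding code is the classic SFINAE probe for operator==; a self-contained sketch of the same idiom:

    #include <type_traits>
    #include <utility>

    // Overload resolution picks the first overload only when `left == right`
    // is well-formed; the int/long third parameter breaks the tie.
    template <typename TL, typename TR>
    static auto EqualityOperatorTest(const TL& left, const TR& right, int)
        -> decltype(void(left == right), std::true_type{});

    template <typename TL, typename TR>
    static std::false_type EqualityOperatorTest(const TL&, const TR&, long);

    template <typename TL, typename TR>
    constexpr bool kHasEqualityOperator =
        decltype(EqualityOperatorTest(std::declval<TL>(), std::declval<TR>(), 0))::value;

    static_assert(kHasEqualityOperator<int, int>, "int == int is well-formed");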
/art/runtime/arch/arm/ |
D | registers_arm.h | 46 TR = 9, // ART Thread Register enumerator
|
D | context_arm.cc | 112 DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]); in DoLongJump()
|
/art/runtime/arch/riscv64/ |
D | registers_riscv64.h | 70 TR = S1, // ART Thread Register - managed runtime enumerator
|
D | context_riscv64.cc | 140 gprs[TR] = reinterpret_cast<uintptr_t>(Thread::Current()); in DoLongJump()
|
/art/compiler/trampolines/ |
D | trampoline_compiler.cc |
    109  __ JumpTo(Arm64ManagedRegister::FromXRegister(TR), Offset(offset.Int32Value()),  in CreateTrampoline()
    142  __ Loadd(tmp, TR, offset.Int32Value());  in CreateTrampoline()
|
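Both trampoline flavors do the same job: fetch an entrypoint out of the Thread object via TR and transfer control to it. A sketch under a hypothetical layout (the generated code jumps rather than calls, so nothing returns here):

    #include <cstddef>

    using Entrypoint = void (*)();

    // Equivalent of: Loadd tmp, TR, #offset ; Jr tmp
    void Trampoline(const void* tr, size_t entrypoint_offset) {
      Entrypoint target = *reinterpret_cast<const Entrypoint*>(
          static_cast<const char*>(tr) + entrypoint_offset);
      target();
    }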
/art/runtime/arch/arm64/ |
D | registers_arm64.h | 65 TR = X19, // ART Thread Register - Managed Runtime (Callee Saved Reg) enumerator
|
D | context_arm64.cc | 162 DCHECK_EQ(reinterpret_cast<uintptr_t>(Thread::Current()), gprs[TR]); in DoLongJump()
|
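The registers_*.h hits pin down the cross-architecture convention: one callee-saved GPR per architecture is reserved as TR (r9 on arm, s1/x9 on riscv64, x19 on arm64). The DoLongJump hits show the two flavors of keeping the saved context consistent with it; both are sketched below with stand-in types:

    #include <cassert>
    #include <cstdint>

    enum Register { R0 = 0, /* ... */ TR = 9, /* ... */ kNumberOfCoreRegisters = 16 };

    struct ThreadSketch {};          // stand-in for art::Thread
    ThreadSketch* CurrentThread() {  // stand-in for Thread::Current()
      static ThreadSketch self;
      return &self;
    }

    // arm/arm64 flavor: assert the context carries the current thread.
    void CheckLongJumpContext(const uintptr_t gprs[kNumberOfCoreRegisters]) {
      assert(reinterpret_cast<uintptr_t>(CurrentThread()) == gprs[TR]);
    }

    // riscv64 flavor: reinstall the current thread into the TR slot.
    void FixupLongJumpContext(uintptr_t gprs[kNumberOfCoreRegisters]) {
      gprs[TR] = reinterpret_cast<uintptr_t>(CurrentThread());
    }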
/art/compiler/optimizing/ |
D | code_generator_riscv64.cc |
    161  DCHECK(T0 <= reg && reg <= T6 && reg != TR) << reg;  in ReadBarrierMarkEntrypointOffset()
    286  __ Loadd(RA, TR, entrypoint_offset);  in EmitNativeCode()
    722  DCHECK(ref_reg >= T0 && ref_reg != TR);  in EmitNativeCode()
   1181  __ Loadw(tmp, TR, Thread::ThreadFlagsOffset<kRiscv64PointerSize>().Int32Value());  in GenerateSuspendCheck()
   1279  __ Loadd(temp.AsRegister<XRegister>(), TR, entry_point_offset);  in EmitBakerReadBarierMarkingCheck()
   1936  __ Loadd(tmp, TR, entry_point_offset);  in GenerateReferenceLoadWithBakerReadBarrier()
   2448  __ Loadd(card, TR, Thread::CardTableOffset<kRiscv64PointerSize>().Int32Value());  in MarkGCCard()
   2475  __ Loadd(card, TR, Thread::CardTableOffset<kRiscv64PointerSize>().Int32Value());  in CheckGCCardIsValid()
   2484  __ Loadw(temp, TR, Thread::IsGcMarkingOffset<kRiscv64PointerSize>().Int32Value());  in CheckGCCardIsValid()
   2646  __ Loadd(tmp, TR, trace_buffer_index_offset);  in GenerateMethodEntryExitHook()
    [all …]
|
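GenerateSuspendCheck above is representative of the codegen's TR usage: load the 32-bit state-and-flags word from [TR + ThreadFlagsOffset] and branch to a slow path when a flag is set. A conceptual analogue; the flag bit is hypothetical:

    #include <atomic>
    #include <cstdint>

    constexpr uint32_t kSuspendRequestFlag = 1u << 0;  // hypothetical bit

    struct ThreadSketch {
      std::atomic<uint32_t> state_and_flags;  // the word Loadw reads via TR
    };

    // True when the compiled code should call into the suspend slow path.
    bool NeedsSuspendSlowPath(const ThreadSketch* self) {
      return (self->state_and_flags.load(std::memory_order_relaxed) &
              kSuspendRequestFlag) != 0;
    }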
D | intrinsics_riscv64.cc |
    766  __ Loadwu(temp, TR, Thread::WeakRefAccessEnabledOffset<kRiscv64PointerSize>().Int32Value());  in VisitReferenceGetReferent()
    838  __ Loadwu(tmp, TR, Thread::IsGcMarkingOffset<kRiscv64PointerSize>().Int32Value());  in VisitReferenceRefersTo()
   4723  __ Loadwu(out, TR, Thread::PeerOffset<kRiscv64PointerSize>().Int32Value());  in VisitThreadCurrentThread()
   4739  __ Loadw(out, TR, Thread::InterruptedOffset<kRiscv64PointerSize>().Int32Value());  in VisitThreadInterrupted()
   4741  __ Storew(Zero, TR, Thread::InterruptedOffset<kRiscv64PointerSize>().Int32Value());  in VisitThreadInterrupted()
|
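VisitThreadCurrentThread compiles java.lang.Thread.currentThread() down to the single Loadwu above: the managed peer reference lives at a fixed 32-bit slot in the native Thread, so the intrinsic is one unsigned load off TR. Sketch with a stand-in layout:

    #include <cstdint>

    struct ThreadSketch {
      uint32_t state_and_flags;
      uint32_t peer;  // hypothetical slot for the java.lang.Thread reference
    };

    // Equivalent of: Loadwu out, TR, #PeerOffset
    uint32_t CurrentThreadPeer(const ThreadSketch* tr) {
      return tr->peer;
    }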
D | code_generator_arm64.cc | 5457 assembler.JumpTo(ManagedRegister(arm64::TR), offset, ManagedRegister(arm64::IP0)); in EmitThunkCode()
|
D | code_generator_arm_vixl.cc | 2136 blocked_core_registers_[TR] = true; in SetupBlockedRegisters()
|
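The code_generator_arm_vixl.cc hit shows the allocator side of the contract: TR is marked blocked up front so it is never handed out as a general-purpose register. Minimal sketch with hypothetical numbering:

    constexpr int kNumberOfCoreRegisters = 16;
    constexpr int TR = 9;  // r9 on arm

    bool blocked_core_registers[kNumberOfCoreRegisters] = {};

    void SetupBlockedRegisters() {
      blocked_core_registers[TR] = true;  // thread register is reserved
    }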
/art/compiler/utils/arm/ |
D | assembler_arm_vixl.cc | 39 extern const vixl32::Register tr(TR);
|