Searched refs:r_src (Results 1 – 16 of 16) sorted by relevance

/art/compiler/dex/quick/mips/
utility_mips.cc
32 LIR* MipsMir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) { in OpFpRegCopy() argument
35 DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit()); in OpFpRegCopy()
38 if (r_src.IsDouble()) { in OpFpRegCopy()
42 RegStorage t_opnd = r_src; in OpFpRegCopy()
43 r_src = r_dest; in OpFpRegCopy()
48 DCHECK(r_src.IsDouble()); in OpFpRegCopy()
53 if (r_src.IsSingle()) { in OpFpRegCopy()
57 RegStorage t_opnd = r_src; in OpFpRegCopy()
58 r_src = r_dest; in OpFpRegCopy()
63 DCHECK(r_src.IsSingle()); in OpFpRegCopy()
[all …]
int_mips.cc
181 LIR* MipsMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) { in OpRegCopyNoInsert() argument
190 if (r_src.IsPair()) { in OpRegCopyNoInsert()
191 r_src = r_src.GetLow(); in OpRegCopyNoInsert()
194 DCHECK(!r_dest.IsPair() && !r_src.IsPair()); in OpRegCopyNoInsert()
197 if (r_dest.IsFloat() || r_src.IsFloat()) in OpRegCopyNoInsert()
198 return OpFpRegCopy(r_dest, r_src); in OpRegCopyNoInsert()
201 if (r_dest.Is64Bit() || r_src.Is64Bit()) { in OpRegCopyNoInsert()
209 res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg()); in OpRegCopyNoInsert()
210 if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) { in OpRegCopyNoInsert()
216 void MipsMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) { in OpRegCopy() argument
[all …]
codegen_mips.h
90 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
92 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
95 LIR* GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src);
200 LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
206 void OpRegCopy(RegStorage r_dest, RegStorage r_src);
207 LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
211 LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
212 LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
223 LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
target_mips.cc
811 LIR* MipsMir2Lir::GenAtomic64Store(RegStorage r_base, int displacement, RegStorage r_src) { in GenAtomic64Store() argument
812 DCHECK(!r_src.IsFloat()); // See RegClassForFieldLoadStore(). in GenAtomic64Store()
814 DCHECK(!r_src.IsPair()); in GenAtomic64Store()
816 DCHECK(r_src.IsPair()); in GenAtomic64Store()
824 OpRegCopyWide(temp_value, r_src); in GenAtomic64Store()
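Note on the utility_mips.cc fragment above: it swaps r_src and r_dest before emitting (lines 42–43 and 57–58) because copies between the core and FP register files use mtc1/dmtc1, whose operand order is (core, fp) and therefore reversed relative to a core-to-FP copy; normalizing the operands once lets a single emit call serve every direction. Below is a minimal standalone C++ sketch of that normalization; Reg, the opcode names, and Emit are simplified stand-ins for ART's RegStorage and NewLIR2, not the real API.

#include <cstdio>
#include <utility>

// Simplified stand-ins for ART's RegStorage and opcode enum (illustrative only).
struct Reg { int num; bool is_fp; };
enum Opcode { kFmovs, kMtc1, kMfc1 };

// Emit stub: prints instead of building a LIR node.
void Emit(Opcode op, Reg dest, Reg src) {
  const char* names[] = {"mov.s", "mtc1", "mfc1"};
  std::printf("%s r%d, r%d\n", names[op], dest.num, src.num);
}

// Pick the opcode for a single-precision copy and normalize the operand
// order, mirroring the swap in MipsMir2Lir::OpFpRegCopy: mtc1 encodes
// (core, fp), i.e. (src, dest) for a core->FP copy, so we swap once here
// and emit through one code path. mfc1 already encodes (dest, src).
void FpRegCopy(Reg dest, Reg src) {
  Opcode op;
  if (dest.is_fp) {
    if (src.is_fp) {
      op = kFmovs;               // FP -> FP
    } else {
      op = kMtc1;                // core -> FP: operands swapped
      std::swap(dest, src);
    }
  } else {
    op = kMfc1;                  // FP -> core: no swap needed
  }
  Emit(op, dest, src);
}

int main() {
  FpRegCopy({4, true}, {2, false});  // core -> FP: prints "mtc1 r2, r4"
  FpRegCopy({5, true}, {4, true});   // FP -> FP:   prints "mov.s r5, r4"
}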
/art/compiler/dex/quick/x86/
utility_x86.cc
33 LIR* X86Mir2Lir::OpFpRegCopy(RegStorage r_dest, RegStorage r_src) { in OpFpRegCopy() argument
36 DCHECK(r_dest.IsFloat() || r_src.IsFloat()); in OpFpRegCopy()
37 DCHECK_EQ(r_dest.IsDouble(), r_src.IsDouble()); in OpFpRegCopy()
42 if (r_src.IsSingle()) { in OpFpRegCopy()
48 DCHECK(r_src.IsSingle()) << "Raw: 0x" << std::hex << r_src.GetRawBits(); in OpFpRegCopy()
53 LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg()); in OpFpRegCopy()
54 if (r_dest == r_src) { in OpFpRegCopy()
309 LIR* X86Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) { in OpMovMemReg() argument
311 int src = r_src.IsPair() ? r_src.GetLowReg() : r_src.GetReg(); in OpMovMemReg()
316 CHECK(!r_src.IsFloat()); in OpMovMemReg()
[all …]
int_x86.cc
126 LIR* X86Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) { in OpRegCopyNoInsert() argument
131 if (r_src.IsPair()) { in OpRegCopyNoInsert()
132 r_src = r_src.GetLow(); in OpRegCopyNoInsert()
134 if (r_dest.IsFloat() || r_src.IsFloat()) in OpRegCopyNoInsert()
135 return OpFpRegCopy(r_dest, r_src); in OpRegCopyNoInsert()
137 r_dest.GetReg(), r_src.GetReg()); in OpRegCopyNoInsert()
138 if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) { in OpRegCopyNoInsert()
144 void X86Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) { in OpRegCopy() argument
145 if (r_dest != r_src) { in OpRegCopy()
146 LIR *res = OpRegCopyNoInsert(r_dest, r_src); in OpRegCopy()
[all …]
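Across the int_*.cc fragments, each backend pairs OpRegCopyNoInsert, which builds a copy LIR without appending it and flags a self-copy as a nop when kSafeOptimizations is enabled, with OpRegCopy, which appends the node (the x86 version above skips emission entirely when r_dest == r_src). A minimal standalone sketch of that split follows; the LIR struct and plain-int register ids are simplified stand-ins for ART's types.

#include <cstdio>
#include <vector>

// Simplified stand-ins for ART's LIR node and register ids (illustrative only).
struct LIR { int opcode; int dest; int src; bool is_nop; };
std::vector<LIR> code;  // the instruction stream

// Build a copy LIR without appending it, mirroring OpRegCopyNoInsert:
// callers that need to splice the copy at a specific point get a node;
// a self-copy is still built but flagged as a nop so later passes skip it
// (the kSafeOptimizations path in the fragments above).
LIR MakeCopyNoInsert(int dest, int src) {
  LIR res{/*opcode=*/1, dest, src, /*is_nop=*/false};
  if (dest == src) {
    res.is_nop = true;
  }
  return res;
}

// Convenience wrapper mirroring the x86 OpRegCopy: build and append in one
// step, skipping emission entirely when the copy would be a no-op.
void Copy(int dest, int src) {
  if (dest != src) {
    code.push_back(MakeCopyNoInsert(dest, src));
  }
}

int main() {
  Copy(1, 2);  // emitted
  Copy(3, 3);  // skipped entirely
  std::printf("emitted %zu instruction(s)\n", code.size());
}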
codegen_x86.h
99 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
101 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
299 LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
305 void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
306 LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
310 LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
311 LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
432 RegStorage r_src, OpSize size, int opt_flags = 0);
target_x86.cc
748 RegStorage r_src = cu_->target64 ? RegStorage::Solo64(reg) : RegStorage::Solo32(reg); in SpillCoreRegs() local
749 StoreBaseDisp(rs_rSP, offset, r_src, size, kNotVolatile); in SpillCoreRegs()
/art/compiler/dex/quick/arm/
utility_arm.cc
428 LIR* ArmMir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) { in OpMovMemReg() argument
429 UNUSED(r_base, offset, r_src, move_type); in OpMovMemReg()
434 LIR* ArmMir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) { in OpCondRegReg() argument
435 UNUSED(op, cc, r_dest, r_src); in OpCondRegReg()
814 LIR* ArmMir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, in StoreBaseIndexed() argument
816 bool all_low_regs = r_base.Low8() && r_index.Low8() && r_src.Low8(); in StoreBaseIndexed()
822 if (r_src.IsFloat()) { in StoreBaseIndexed()
823 if (r_src.IsSingle()) { in StoreBaseIndexed()
828 DCHECK(r_src.IsDouble()); in StoreBaseIndexed()
830 DCHECK_EQ((r_src.GetReg() & 0x1), 0); in StoreBaseIndexed()
[all …]
int_arm.cc
414 LIR* ArmMir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) { in OpRegCopyNoInsert() argument
421 if (r_src.IsPair()) { in OpRegCopyNoInsert()
422 r_src = r_src.GetLow(); in OpRegCopyNoInsert()
424 if (r_dest.IsFloat() || r_src.IsFloat()) in OpRegCopyNoInsert()
425 return OpFpRegCopy(r_dest, r_src); in OpRegCopyNoInsert()
426 if (r_dest.Low8() && r_src.Low8()) in OpRegCopyNoInsert()
428 else if (!r_dest.Low8() && !r_src.Low8()) in OpRegCopyNoInsert()
434 res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg()); in OpRegCopyNoInsert()
435 if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) { in OpRegCopyNoInsert()
441 void ArmMir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) { in OpRegCopy() argument
[all …]
codegen_arm.h
77 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
79 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
202 LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src);
209 void OpRegCopy(RegStorage r_dest, RegStorage r_src);
210 LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src);
214 LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type);
215 LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src);
224 LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
302 void GenEasyMultiplyTwoOps(RegStorage r_dest, RegStorage r_src, EasyMultiplyOp* ops);
/art/compiler/dex/quick/arm64/
utility_arm64.cc
682 LIR* Arm64Mir2Lir::OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, in OpMovMemReg() argument
684 UNUSED(r_base, offset, r_src, move_type); in OpMovMemReg()
689 LIR* Arm64Mir2Lir::OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) { in OpCondRegReg() argument
690 UNUSED(op, cc, r_dest, r_src); in OpCondRegReg()
1109 LIR* Arm64Mir2Lir::StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, in StoreBaseIndexed() argument
1124 if (r_src.IsFloat()) { in StoreBaseIndexed()
1125 if (r_src.IsDouble()) { in StoreBaseIndexed()
1130 DCHECK(r_src.IsSingle()); in StoreBaseIndexed()
1137 return NewLIR4(opcode, r_src.GetReg(), r_base.GetReg(), r_index.GetReg(), in StoreBaseIndexed()
1145 r_src = Check64BitReg(r_src); in StoreBaseIndexed()
[all …]
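The arm64 fragments run r_src through Check64BitReg (StoreBaseIndexed above) and Check32BitReg (int_arm64.cc below) before emitting: on arm64 the same physical register can be referenced as a 32-bit w view or a 64-bit x view, and these helpers verify the operand carries the width the chosen encoding expects. A minimal sketch of the idea, with a simplified Reg stand-in; the real helpers can also log and convert the view rather than assert.

#include <cassert>
#include <cstdio>

// Simplified stand-in for ART's RegStorage: one physical register number
// plus the width of the view we currently hold on it.
struct Reg { int num; bool is64; };

// Mirror the Check64BitReg/Check32BitReg idea: verify the operand has the
// width the instruction encoding needs, then hand back the matching view.
Reg Check64BitReg(Reg r) {
  assert(r.is64 && "expected a 64-bit (x) register view");
  return r;
}
Reg Check32BitReg(Reg r) {
  assert(!r.is64 && "expected a 32-bit (w) register view");
  return r;
}

int main() {
  Reg src{5, true};
  src = Check64BitReg(src);  // ok: x5
  std::printf("using %c%d\n", src.is64 ? 'x' : 'w', src.num);
}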
codegen_arm64.h
73 LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src, OpSize size,
75 LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale,
206 LIR* OpFpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
212 void OpRegCopy(RegStorage r_dest, RegStorage r_src) OVERRIDE;
213 LIR* OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) OVERRIDE;
217 LIR* OpMovMemReg(RegStorage r_base, int offset, RegStorage r_src, MoveType move_type) OVERRIDE;
218 LIR* OpCondRegReg(OpKind op, ConditionCode cc, RegStorage r_dest, RegStorage r_src) OVERRIDE;
385 LIR* StoreBaseDispBody(RegStorage r_base, int displacement, RegStorage r_src, OpSize size);
int_arm64.cc
314 LIR* Arm64Mir2Lir::OpRegCopyNoInsert(RegStorage r_dest, RegStorage r_src) { in OpRegCopyNoInsert() argument
316 bool src_is_fp = r_src.IsFloat(); in OpRegCopyNoInsert()
322 DCHECK_EQ(r_dest.Is64Bit(), r_src.Is64Bit()); in OpRegCopyNoInsert()
333 if (r_dest.Is64Bit() && r_src.Is64Bit()) { in OpRegCopyNoInsert()
339 bool src_is_double = r_src.IsDouble(); in OpRegCopyNoInsert()
353 r_src = Check32BitReg(r_src); in OpRegCopyNoInsert()
357 if (r_src.IsDouble()) { in OpRegCopyNoInsert()
366 res = RawLIR(current_dalvik_offset_, opcode, r_dest.GetReg(), r_src.GetReg()); in OpRegCopyNoInsert()
368 if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) { in OpRegCopyNoInsert()
375 void Arm64Mir2Lir::OpRegCopy(RegStorage r_dest, RegStorage r_src) { in OpRegCopy() argument
[all …]
/art/compiler/dex/quick/
mir_to_lir.h
1005 LIR* StoreWordDisp(RegStorage r_base, int displacement, RegStorage r_src) { in StoreWordDisp() argument
1006 return StoreBaseDisp(r_base, displacement, r_src, kWord, kNotVolatile); in StoreWordDisp()
1009 LIR* StoreRefDisp(RegStorage r_base, int displacement, RegStorage r_src, in StoreRefDisp() argument
1011 return StoreBaseDisp(r_base, displacement, r_src, kReference, is_volatile); in StoreRefDisp()
1014 LIR* StoreRefIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src, int scale) { in StoreRefIndexed() argument
1015 return StoreBaseIndexed(r_base, r_index, r_src, scale, kReference); in StoreRefIndexed()
1018 LIR* Store32Disp(RegStorage r_base, int displacement, RegStorage r_src) { in Store32Disp() argument
1019 return StoreBaseDisp(r_base, displacement, r_src, k32, kNotVolatile); in Store32Disp()
1157 virtual LIR* StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r_src,
1159 virtual LIR* StoreBaseIndexed(RegStorage r_base, RegStorage r_index, RegStorage r_src,
[all …]
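The mir_to_lir.h fragment shows the shared pattern behind all the per-target StoreBaseDisp/StoreBaseIndexed overrides above: the typed helpers (StoreWordDisp, StoreRefDisp, Store32Disp) are thin non-virtual wrappers that fix the OpSize and volatility, so each backend overrides only the virtual primitives. A minimal standalone sketch of that layering follows; the enums, Reg, and the printing backend are simplified stand-ins, not ART's real classes.

#include <cstdio>

enum OpSize { kWord, k32, kReference };
enum Volatility { kNotVolatile, kVolatile };
struct Reg { int num; };

// Mirrors the Mir2Lir pattern: typed convenience wrappers are non-virtual
// and only the store primitive is virtual, so every call funnels through
// the one method each backend implements.
class CodeGen {
 public:
  virtual ~CodeGen() {}
  void StoreWordDisp(Reg base, int disp, Reg src) {
    StoreBaseDisp(base, disp, src, kWord, kNotVolatile);
  }
  void StoreRefDisp(Reg base, int disp, Reg src, Volatility v) {
    StoreBaseDisp(base, disp, src, kReference, v);
  }
  void Store32Disp(Reg base, int disp, Reg src) {
    StoreBaseDisp(base, disp, src, k32, kNotVolatile);
  }
  virtual void StoreBaseDisp(Reg base, int disp, Reg src,
                             OpSize size, Volatility v) = 0;
};

// A toy backend that prints what it would emit.
class PrintingCodeGen : public CodeGen {
 public:
  void StoreBaseDisp(Reg base, int disp, Reg src,
                     OpSize size, Volatility v) override {
    std::printf("store r%d -> [r%d + %d] size=%d volatile=%d\n",
                src.num, base.num, disp, size, v);
  }
};

int main() {
  PrintingCodeGen cg;
  cg.Store32Disp({29}, 8, {4});  // routes through the virtual primitive
}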
gen_common.cc
581 RegStorage r_src = AllocTempRef(); in GenFilledNewArray() local
603 OpRegRegImm(kOpAdd, r_src, TargetPtrReg(kSp), SRegOffset(rl_first.s_reg_low)); in GenFilledNewArray()
614 LoadBaseIndexed(r_src, r_idx, r_val, 2, k32); in GenFilledNewArray()
628 FreeTemp(r_src); in GenFilledNewArray()