
Searched refs:imm (Results 1 – 25 of 34) sorted by relevance


/art/test/442-checker-constant-folding/src/
Main.java:1336 long imm = 33L; in ReturnInt33() local
1337 return (int) imm; in ReturnInt33()
1353 float imm = 1.0e34f; in ReturnIntMax() local
1354 return (int) imm; in ReturnIntMax()
1370 double imm = Double.NaN; in ReturnInt0() local
1371 return (int) imm; in ReturnInt0()
1387 int imm = 33; in ReturnLong33() local
1388 return (long) imm; in ReturnLong33()
1404 float imm = 34.0f; in ReturnLong34() local
1405 return (long) imm; in ReturnLong34()
[all …]
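
A note on what the checker test above pins down: Java's narrowing conversions on constants are fully defined (a long immediate truncates to int, an oversized float clamps to Integer.MAX_VALUE, NaN converts to 0), so the constant folder must reproduce exactly those rules at compile time. A minimal C++ sketch of the float-to-int rule, using a hypothetical FoldFloatToInt helper that is not part of ART:

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Mirrors Java's float->int narrowing: NaN -> 0, out-of-range values
    // clamp to INT32_MIN/INT32_MAX, everything else truncates toward zero.
    int32_t FoldFloatToInt(float value) {
      if (std::isnan(value)) return 0;
      if (value >= static_cast<float>(std::numeric_limits<int32_t>::max()))
        return std::numeric_limits<int32_t>::max();
      if (value <= static_cast<float>(std::numeric_limits<int32_t>::min()))
        return std::numeric_limits<int32_t>::min();
      return static_cast<int32_t>(value);
    }

Under these rules the 1.0e34f constant folds to Integer.MAX_VALUE and the NaN constant folds to 0, which matches the method names ReturnIntMax() and ReturnInt0() above.
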
/art/compiler/optimizing/
scheduler_arm64.cc:94 int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant()); in VisitDiv() local
95 if (imm == 0) { in VisitDiv()
98 } else if (imm == 1 || imm == -1) { in VisitDiv()
101 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in VisitDiv()
105 DCHECK(imm <= -2 || imm >= 2); in VisitDiv()
162 int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant()); in VisitRem() local
163 if (imm == 0) { in VisitRem()
166 } else if (imm == 1 || imm == -1) { in VisitRem()
169 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in VisitRem()
173 DCHECK(imm <= -2 || imm >= 2); in VisitRem()
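
The two visitors above assign latencies by the shape of the constant divisor: zero (will throw), +/-1 (a move or negate), a power of two (a shift sequence), or anything else (a magic-number multiply). A minimal sketch of that case split, with a hypothetical Classify helper rather than the ART scheduler code:

    #include <cstdint>

    enum class DivByConstantKind { kZero, kOneOrMinusOne, kPowerOfTwo, kGeneric };

    DivByConstantKind Classify(int64_t imm) {
      if (imm == 0) return DivByConstantKind::kZero;                        // throws at runtime
      if (imm == 1 || imm == -1) return DivByConstantKind::kOneOrMinusOne;  // copy or negate
      // AbsOrMin-style absolute value: INT64_MIN maps to 2^63, itself a power of two.
      uint64_t abs = (imm == INT64_MIN) ? (uint64_t{1} << 63)
                                        : static_cast<uint64_t>(imm < 0 ? -imm : imm);
      if ((abs & (abs - 1)) == 0) return DivByConstantKind::kPowerOfTwo;    // shift sequence
      return DivByConstantKind::kGeneric;                                   // magic-number multiply
    }
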
scheduler_arm.cc:815 void SchedulingLatencyVisitorARM::HandleDivRemConstantIntegralLatencies(int32_t imm) { in HandleDivRemConstantIntegralLatencies() argument
816 if (imm == 0) { in HandleDivRemConstantIntegralLatencies()
819 } else if (imm == 1 || imm == -1) { in HandleDivRemConstantIntegralLatencies()
821 } else if (IsPowerOfTwo(AbsOrMin(imm))) { in HandleDivRemConstantIntegralLatencies()
836 int32_t imm = Int32ConstantFrom(rhs->AsConstant()); in VisitDiv() local
837 HandleDivRemConstantIntegralLatencies(imm); in VisitDiv()
899 int32_t imm = Int32ConstantFrom(rhs->AsConstant()); in VisitRem() local
900 HandleDivRemConstantIntegralLatencies(imm); in VisitRem()
code_generator_x86_64.cc:3183 Immediate imm(second.GetConstant()->AsIntConstant()->GetValue()); in VisitSub() local
3184 __ subl(first.AsRegister<CpuRegister>(), imm); in VisitSub()
3286 Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); in VisitMul() local
3287 __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm); in VisitMul()
3444 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemOneOrMinusOne() local
3446 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3454 if (imm == -1) { in DivRemOneOrMinusOne()
3466 if (imm == -1) { in DivRemOneOrMinusOne()
3485 int64_t imm = Int64FromConstant(second.GetConstant()); in DivByPowerOfTwo() local
3486 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in DivByPowerOfTwo()
[all …]
code_generator_mips.cc:1991 int32_t imm = CodeGenerator::GetInt32ValueOf(right->AsConstant()); in HandleBinaryOp() local
1993 can_use_imm = IsUint<16>(imm); in HandleBinaryOp()
1997 imm = -imm; in HandleBinaryOp()
2001 int16_t imm_high = High16Bits(imm); in HandleBinaryOp()
2002 int16_t imm_low = Low16Bits(imm); in HandleBinaryOp()
2008 can_use_imm = IsInt<16>(imm); in HandleBinaryOp()
3706 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemOneOrMinusOne() local
3707 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3716 if (imm == -1) { in DivRemOneOrMinusOne()
3733 if (imm == -1) { in DivRemOneOrMinusOne()
[all …]
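
HandleBinaryOp above decides whether the right-hand constant can ride along as an instruction immediate: unsigned 16-bit for logical ops, signed 16-bit for addition (after negating a subtrahend), otherwise the constant is split into 16-bit halves and materialized. A rough sketch of those checks, with names that mirror but are not the ART helpers:

    #include <cstdint>

    bool FitsInSigned16(int32_t imm)   { return imm >= -32768 && imm <= 32767; }  // addiu-style ops
    bool FitsInUnsigned16(int32_t imm) { return imm >= 0 && imm <= 0xFFFF; }      // andi/ori/xori

    // If neither fits, the constant is loaded in two halves (lui + ori).
    int16_t High16Bits(int32_t imm) { return static_cast<int16_t>(imm >> 16); }
    int16_t Low16Bits(int32_t imm)  { return static_cast<int16_t>(imm & 0xFFFF); }
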
scheduler_arm.h:126 void HandleDivRemConstantIntegralLatencies(int32_t imm);
code_generator_mips64.cc:1835 int64_t imm = CodeGenerator::GetInt64ValueOf(right->AsConstant()); in HandleBinaryOp() local
1837 can_use_imm = IsUint<16>(imm); in HandleBinaryOp()
1842 if (!(type == DataType::Type::kInt32 && imm == INT32_MIN)) { in HandleBinaryOp()
1843 imm = -imm; in HandleBinaryOp()
1847 can_use_imm = IsInt<16>(imm) || (Low16Bits(imm) == 0) || single_use; in HandleBinaryOp()
1849 can_use_imm = IsInt<16>(imm) || (IsInt<32>(imm) && (Low16Bits(imm) == 0)) || single_use; in HandleBinaryOp()
3218 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemOneOrMinusOne() local
3219 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3224 if (imm == -1) { in DivRemOneOrMinusOne()
3247 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemByPowerOfTwo() local
[all …]
code_generator_x86.cc:3218 Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue()); in VisitMul() local
3219 __ imull(out.AsRegister<Register>(), first.AsRegister<Register>(), imm); in VisitMul()
3460 int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); in DivRemOneOrMinusOne() local
3462 DCHECK(imm == 1 || imm == -1); in DivRemOneOrMinusOne()
3468 if (imm == -1) { in DivRemOneOrMinusOne()
3480 int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue(); in DivByPowerOfTwo() local
3481 DCHECK(IsPowerOfTwo(AbsOrMin(imm))); in DivByPowerOfTwo()
3482 uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm)); in DivByPowerOfTwo()
3489 int shift = CTZ(imm); in DivByPowerOfTwo()
3492 if (imm < 0) { in DivByPowerOfTwo()
[all …]
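
DivByPowerOfTwo in both x86 back ends (and the MIPS/ARM64 equivalents above) reduces division by +/-2^k to an arithmetic shift, with a bias of 2^k - 1 added to negative dividends so the shift truncates toward zero, and a final negate when the divisor is negative. A hedged sketch of just the arithmetic; it is illustrative only and skips the INT32_MIN divisor that AbsOrMin handles in the real code:

    #include <cassert>
    #include <cstdint>

    int32_t DivByPowerOfTwoConstant(int32_t n, int32_t divisor) {
      uint32_t abs = static_cast<uint32_t>(divisor < 0 ? -divisor : divisor);
      int shift = __builtin_ctz(abs);                             // log2(|divisor|); GCC/Clang builtin
      int32_t bias = (n >> 31) & static_cast<int32_t>(abs - 1);   // 2^k - 1 when n < 0, else 0
      int32_t q = (n + bias) >> shift;                            // arithmetic shift now rounds toward zero
      return divisor < 0 ? -q : q;
    }

    int main() {
      assert(DivByPowerOfTwoConstant(-7, 4) == -7 / 4);   // -1, not -2
      assert(DivByPowerOfTwoConstant(7, -4) == 7 / -4);   // -1
      return 0;
    }
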
code_generator_arm64.cc:3272 int64_t imm = Int64FromConstant(second.GetConstant()); in FOR_EACH_CONDITION_INSTRUCTION() local
3273 DCHECK(imm == 1 || imm == -1); in FOR_EACH_CONDITION_INSTRUCTION()
3278 if (imm == 1) { in FOR_EACH_CONDITION_INSTRUCTION()
3295 int64_t imm = Int64FromConstant(second.GetConstant()); in DivRemByPowerOfTwo() local
3296 uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm)); in DivRemByPowerOfTwo()
3306 if (imm > 0) { in DivRemByPowerOfTwo()
3330 int64_t imm = Int64FromConstant(second.GetConstant()); in GenerateDivRemWithAnyConstant() local
3338 imm, type == DataType::Type::kInt64 /* is_long */, &magic, &shift); in GenerateDivRemWithAnyConstant()
3352 if (imm > 0 && magic < 0) { in GenerateDivRemWithAnyConstant()
3354 } else if (imm < 0 && magic > 0) { in GenerateDivRemWithAnyConstant()
[all …]
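
GenerateDivRemWithAnyConstant above falls back to the classic magic-number technique for divisors that are neither +/-1 nor powers of two: multiply by a precomputed constant, keep the high half of the product, then shift and correct. A self-contained sketch specialized to signed 32-bit division by 7, using the published Hacker's Delight constants (magic 0x92492493, shift 2); this is not the ART helper, which derives magic and shift for an arbitrary divisor:

    #include <cassert>
    #include <cstdint>

    int32_t DivideBy7(int32_t n) {
      const int32_t magic = static_cast<int32_t>(0x92492493);  // negative, so add n back below
      const int shift = 2;
      int64_t product = static_cast<int64_t>(magic) * n;
      int32_t q = static_cast<int32_t>(product >> 32);          // high 32 bits of the product
      q += n;                                                   // correction: magic < 0, divisor > 0
      q >>= shift;                                              // arithmetic shift
      q += static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);  // +1 for negative n (round toward zero)
      return q;
    }

    int main() {
      const int32_t tests[] = {-100, -14, -7, -1, 0, 1, 6, 7, 13, 14, 100};
      for (int32_t n : tests) assert(DivideBy7(n) == n / 7);
      return 0;
    }
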
/art/compiler/utils/x86_64/
assembler_x86_64.h:369 void pushq(const Immediate& imm);
385 void movq(const Address& dst, const Immediate& imm);
387 void movl(const Address& dst, const Immediate& imm);
399 void movb(const Address& dst, const Immediate& imm);
407 void movw(const Address& dst, const Immediate& imm);
520 void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
521 void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
587 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
588 void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
589 void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
[all …]
assembler_x86_64.cc:106 void X86_64Assembler::pushq(const Immediate& imm) { in pushq() argument
108 CHECK(imm.is_int32()); // pushq only supports 32b immediate. in pushq()
109 if (imm.is_int8()) { in pushq()
111 EmitUint8(imm.value() & 0xFF); in pushq()
114 EmitImmediate(imm); in pushq()
134 void X86_64Assembler::movq(CpuRegister dst, const Immediate& imm) { in movq() argument
136 if (imm.is_int32()) { in movq()
141 EmitInt32(static_cast<int32_t>(imm.value())); in movq()
145 EmitInt64(imm.value()); in movq()
150 void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) { in movl() argument
[all …]
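
pushq above picks the shorter x86 form when the constant fits in a signed byte, and otherwise emits a 32-bit immediate; movq likewise branches on is_int32 to avoid the 8-byte form when it can. A toy sketch of that size-based selection using the raw opcodes (0x6A = push imm8, 0x68 = push imm32); the helper is invented for illustration and is not the ART assembler:

    #include <cstdint>
    #include <vector>

    void EmitPushImmediate(std::vector<uint8_t>* code, int32_t imm) {
      if (imm >= -128 && imm <= 127) {
        code->push_back(0x6A);                        // push imm8
        code->push_back(static_cast<uint8_t>(imm));
      } else {
        code->push_back(0x68);                        // push imm32
        for (int i = 0; i < 4; ++i) {
          code->push_back(static_cast<uint8_t>(imm >> (8 * i)));  // little-endian bytes
        }
      }
    }
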
jni_macro_assembler_x86_64.h:64 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
jni_macro_assembler_x86_64.cc:201 uint32_t imm, in StoreImmediateToFrame() argument
203 __ movl(Address(CpuRegister(RSP), dest), Immediate(imm)); // TODO(64) movq? in StoreImmediateToFrame()
/art/compiler/utils/x86/
assembler_x86.cc:108 void X86Assembler::pushl(const Immediate& imm) { in pushl() argument
110 if (imm.is_int8()) { in pushl()
112 EmitUint8(imm.value() & 0xFF); in pushl()
115 EmitImmediate(imm); in pushl()
133 void X86Assembler::movl(Register dst, const Immediate& imm) { in movl() argument
136 EmitImmediate(imm); in movl()
161 void X86Assembler::movl(const Address& dst, const Immediate& imm) { in movl() argument
165 EmitImmediate(imm); in movl()
276 void X86Assembler::movb(const Address& dst, const Immediate& imm) { in movb() argument
280 CHECK(imm.is_int8()); in movb()
[all …]
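
The 32-bit assembler applies the same width discipline: pushl takes the short form when is_int8 holds, and movb CHECKs is_int8 because the instruction only has a byte immediate. A generic sketch of such width predicates, with illustrative names rather than the ART Immediate API:

    #include <cstdint>
    #include <limits>

    template <typename NarrowType>
    bool FitsIn(int64_t value) {
      return value >= std::numeric_limits<NarrowType>::min() &&
             value <= std::numeric_limits<NarrowType>::max();
    }

    bool IsInt8(int64_t v)  { return FitsIn<int8_t>(v); }   // movb / push imm8 operands
    bool IsInt32(int64_t v) { return FitsIn<int32_t>(v); }  // movq's sign-extended imm32 path
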
assembler_x86.h:325 void pushl(const Immediate& imm);
335 void movl(const Address& dst, const Immediate& imm);
350 void rorl(Register reg, const Immediate& imm);
352 void roll(Register reg, const Immediate& imm);
361 void movb(const Address& dst, const Immediate& imm);
369 void movw(const Address& dst, const Immediate& imm);
476 void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
477 void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
544 void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
545 void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
[all …]
jni_macro_assembler_x86.h:63 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
jni_macro_assembler_x86.cc:163 void X86JNIMacroAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister) { in StoreImmediateToFrame() argument
164 __ movl(Address(ESP, dest), Immediate(imm)); in StoreImmediateToFrame()
/art/compiler/utils/arm/
assembler_arm_vixl.h:142 void Vmov(vixl32::DRegister rd, double imm) { in Vmov() argument
143 if (vixl::VFP::IsImmFP64(imm)) { in Vmov()
144 MacroAssembler::Vmov(rd, imm); in Vmov()
146 MacroAssembler::Vldr(rd, imm); in Vmov()
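
Vmov above takes the literal-pool route (Vldr) whenever the double cannot be encoded directly in the instruction. A VFP/NEON floating-point immediate packs a sign bit, a small exponent range and a 4-bit mantissa into 8 bits, so only values of the form +/-(16..31)/16 * 2^(-3..4) qualify. A sketch of that encodability test, following the bit pattern that vixl::VFP::IsImmFP64 checks (written here for illustration, not copied from VIXL):

    #include <cstdint>
    #include <cstring>

    bool IsEncodableFP64Immediate(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));                   // view the double as raw bits
      if ((bits & 0xffffffffffffULL) != 0) return false;          // low 48 mantissa bits must be zero
      uint64_t b_pattern = (bits >> 48) & 0x3fc0;                 // bits [61:54]
      if (b_pattern != 0 && b_pattern != 0x3fc0) return false;    // must be all ones or all zeros
      return ((bits ^ (bits << 1)) & 0x4000000000000000ULL) != 0; // bit 62 must differ from bit 61
    }
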
jni_macro_assembler_arm_vixl.h:70 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
jni_macro_assembler_arm_vixl.cc:296 uint32_t imm, in StoreImmediateToFrame() argument
302 asm_.LoadImmediate(mscratch.AsVIXLRegister(), imm); in StoreImmediateToFrame()
/art/compiler/utils/
assembler_test.h:195 for (int64_t imm : imms) { variable
196 ImmType new_imm = CreateImmediate(imm);
217 sreg << imm * multiplier + bias;
251 for (int64_t imm : imms) { in RepeatTemplatedRegistersImmBits() local
252 ImmType new_imm = CreateImmediate(imm); in RepeatTemplatedRegistersImmBits()
279 sreg << imm + bias; in RepeatTemplatedRegistersImmBits()
312 for (int64_t imm : imms) { in RepeatTemplatedImmBitsRegisters() local
313 ImmType new_imm = CreateImmediate(imm); in RepeatTemplatedImmBitsRegisters()
334 sreg << imm; in RepeatTemplatedImmBitsRegisters()
362 for (int64_t imm : imms) { in RepeatTemplatedRegisterImmBits() local
[all …]
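
The test harness above drives a single assembler entry point across a whole list of immediates, emitting the instruction and building the expected disassembly text in lockstep. A stripped-down sketch of that pattern; the emit callback and the expected-text format are invented placeholders, not the ART RepeatTemplated* machinery:

    #include <cstdint>
    #include <sstream>
    #include <string>
    #include <vector>

    template <typename EmitFn>
    std::string RepeatOverImmediates(const std::vector<int64_t>& imms,
                                     EmitFn&& emit,
                                     const std::string& mnemonic) {
      std::ostringstream expected;
      for (int64_t imm : imms) {
        emit(imm);                                            // e.g. calls assembler->op(reg, reg, imm)
        expected << mnemonic << " r0, r0, #" << imm << "\n";  // expected text for this immediate
      }
      return expected.str();                                  // later diffed against the disassembly
    }
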
jni_macro_assembler.h:86 virtual void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) = 0;
/art/compiler/linker/arm/
relative_patcher_thumb2.cc:96 uint32_t imm = (diff16 >> 11) & 0x1u; in PatchPcRelativeReference() local
99 insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8; in PatchPcRelativeReference()
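
The patcher above rewrites a Thumb2 MOVW/MOVT-style instruction in place: a 16-bit value is scattered across the imm4, i, imm3 and imm8 fields, which live at bits [19:16], [26], [14:12] and [7:0] of the 32-bit encoding. A sketch of that field split (it mirrors the ARM architecture encoding, not the ART patcher itself):

    #include <cstdint>

    uint32_t PatchThumb2Movw16(uint32_t insn, uint32_t value16) {
      uint32_t imm4 = (value16 >> 12) & 0xfu;   // -> bits [19:16]
      uint32_t i    = (value16 >> 11) & 0x1u;   // -> bit  [26]
      uint32_t imm3 = (value16 >> 8) & 0x7u;    // -> bits [14:12]
      uint32_t imm8 = value16 & 0xffu;          // -> bits [7:0]
      insn &= 0xfbf08f00u;                      // clear all four immediate fields
      return insn | (i << 26) | (imm4 << 16) | (imm3 << 12) | imm8;
    }
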
/art/compiler/utils/arm64/
jni_macro_assembler_arm64.h:72 void StoreImmediateToFrame(FrameOffset dest, uint32_t imm, ManagedRegister scratch) OVERRIDE;
/art/runtime/interpreter/mterp/mips/
header.S:675 #define LOAD_IMM(dest, imm) li dest, imm argument
