/art/test/442-checker-constant-folding/src/ |
D | Main.java |
    1451  long imm = 33L;  in ReturnInt33() local
    1452  return (int) imm;  in ReturnInt33()
    1468  float imm = 1.0e34f;  in ReturnIntMax() local
    1469  return (int) imm;  in ReturnIntMax()
    1485  double imm = Double.NaN;  in ReturnInt0() local
    1486  return (int) imm;  in ReturnInt0()
    1502  int imm = 33;  in ReturnLong33() local
    1503  return (long) imm;  in ReturnLong33()
    1519  float imm = 34.0f;  in ReturnLong34() local
    1520  return (long) imm;  in ReturnLong34()
    [all …]
|
/art/compiler/utils/x86_64/ |
D | assembler_x86_64.h |
    439  void pushq(const Immediate& imm);
    455  void movq(const Address& dst, const Immediate& imm);
    457  void movl(const Address& dst, const Immediate& imm);
    469  void movb(const Address& dst, const Immediate& imm);
    477  void movw(const Address& dst, const Immediate& imm);
    644  void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
    645  void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
    725  void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
    726  void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
    727  void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm);
    [all …]
|
D | assembler_x86_64.cc |
    114  void X86_64Assembler::pushq(const Immediate& imm) {  in pushq() argument
    116  CHECK(imm.is_int32());  // pushq only supports 32b immediate.  in pushq()
    117  if (imm.is_int8()) {  in pushq()
    119  EmitUint8(imm.value() & 0xFF);  in pushq()
    122  EmitImmediate(imm);  in pushq()
    142  void X86_64Assembler::movq(CpuRegister dst, const Immediate& imm) {  in movq() argument
    144  if (imm.is_int32()) {  in movq()
    149  EmitInt32(static_cast<int32_t>(imm.value()));  in movq()
    153  EmitInt64(imm.value());  in movq()
    158  void X86_64Assembler::movl(CpuRegister dst, const Immediate& imm) {  in movl() argument
    [all …]
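The pushq/movq excerpts above hinge on first classifying how many bytes the immediate actually needs, then picking the shortest encoding that can hold it. A minimal standalone sketch of that classification follows; FitsInt8/FitsInt32 are illustrative stand-ins for Immediate::is_int8()/is_int32(), not ART's API.

    #include <cstdint>
    #include <cstdio>

    // A value fits in N bits iff truncating to N bits and sign-extending
    // reproduces it; this is what the is_int8()/is_int32() checks express.
    constexpr bool FitsInt8(int64_t v)  { return v == static_cast<int8_t>(v); }
    constexpr bool FitsInt32(int64_t v) { return v == static_cast<int32_t>(v); }

    // Mirrors the shape of pushq(): reject immediates wider than 32 bits,
    // use the one-byte form (opcode 0x6A) when the value fits in 8 bits,
    // otherwise the four-byte form (opcode 0x68).
    void DescribePushEncoding(int64_t imm) {
      if (!FitsInt32(imm)) {
        std::printf("%lld: not encodable as a push immediate\n", static_cast<long long>(imm));
      } else if (FitsInt8(imm)) {
        std::printf("%lld: 6A ib (1 immediate byte)\n", static_cast<long long>(imm));
      } else {
        std::printf("%lld: 68 id (4 immediate bytes)\n", static_cast<long long>(imm));
      }
    }

    int main() {
      DescribePushEncoding(42);         // short form
      DescribePushEncoding(0x12345);    // needs the 32-bit form
      DescribePushEncoding(1LL << 40);  // too wide, caller must materialize it first
    }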
|
D | assembler_x86_64_test.cc |
    114  x86_64::Immediate imm(value);  in TEST() local
    115  EXPECT_FALSE(imm.is_int8());  in TEST()
    116  EXPECT_FALSE(imm.is_int16());  in TEST()
    117  EXPECT_FALSE(imm.is_int32());  in TEST()
|
/art/compiler/utils/x86/ |
D | assembler_x86.cc |
    115  void X86Assembler::pushl(const Immediate& imm) {  in pushl() argument
    117  if (imm.is_int8()) {  in pushl()
    119  EmitUint8(imm.value() & 0xFF);  in pushl()
    122  EmitImmediate(imm);  in pushl()
    140  void X86Assembler::movl(Register dst, const Immediate& imm) {  in movl() argument
    143  EmitImmediate(imm);  in movl()
    168  void X86Assembler::movl(const Address& dst, const Immediate& imm) {  in movl() argument
    172  EmitImmediate(imm);  in movl()
    325  void X86Assembler::movb(const Address& dst, const Immediate& imm) {  in movb() argument
    329  CHECK(imm.is_int8());  in movb()
    [all …]
|
D | assembler_x86.h |
    395  void pushl(const Immediate& imm);
    405  void movl(const Address& dst, const Immediate& imm);
    424  void rorl(Register reg, const Immediate& imm);
    426  void roll(Register reg, const Immediate& imm);
    435  void movb(const Address& dst, const Immediate& imm);
    443  void movw(const Address& dst, const Immediate& imm);
    605  void roundsd(XmmRegister dst, XmmRegister src, const Immediate& imm);
    606  void roundss(XmmRegister dst, XmmRegister src, const Immediate& imm);
    687  void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm);
    688  void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm);
    [all …]
|
/art/compiler/optimizing/ |
D | scheduler_arm64.cc |
    203  int64_t imm = Int64FromConstant(instr->GetRight()->AsConstant());  in VisitDiv() local
    204  if (imm == 0) {  in VisitDiv()
    207  } else if (imm == 1 || imm == -1) {  in VisitDiv()
    210  } else if (IsPowerOfTwo(AbsOrMin(imm))) {  in VisitDiv()
    214  DCHECK(imm <= -2 || imm >= 2);  in VisitDiv()
    271  int64_t imm = Int64FromConstant(instruction->GetRight()->AsConstant());  in VisitRem() local
    272  if (imm == 0) {  in VisitRem()
    275  } else if (imm == 1 || imm == -1) {  in VisitRem()
    278  } else if (IsPowerOfTwo(AbsOrMin(imm))) {  in VisitRem()
    282  DCHECK(imm <= -2 || imm >= 2);  in VisitRem()
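Both visitors above, like their ARM32 counterpart in scheduler_arm.cc below, apply the same four-way split on the constant divisor, because each case is lowered differently and therefore has a different latency: zero (the ArithmeticException path), ±1 (move/negate), a power of two (shift sequence), anything else (magic-number multiply). A compact sketch of that classification; the enum and helper names are made up for illustration.

    #include <cstdint>

    enum class DivKind { kByZero, kTrivial, kPowerOfTwo, kGeneric };

    // Stand-ins for ART's AbsOrMin()/IsPowerOfTwo(): a magnitude that maps
    // INT64_MIN to itself, and a power-of-two test on that magnitude.
    constexpr uint64_t Magnitude(int64_t v) {
      return v == INT64_MIN ? static_cast<uint64_t>(v)
                            : static_cast<uint64_t>(v < 0 ? -v : v);
    }
    constexpr bool IsPowerOfTwo(uint64_t v) { return v != 0 && (v & (v - 1)) == 0; }

    // Mirrors the if/else chain in VisitDiv()/VisitRem(): the scheduler only
    // needs to know which lowering the code generator will choose.
    constexpr DivKind ClassifyConstantDivisor(int64_t imm) {
      if (imm == 0) return DivKind::kByZero;
      if (imm == 1 || imm == -1) return DivKind::kTrivial;
      if (IsPowerOfTwo(Magnitude(imm))) return DivKind::kPowerOfTwo;
      return DivKind::kGeneric;                   // magic-number multiply
    }

    static_assert(ClassifyConstantDivisor(-1) == DivKind::kTrivial);
    static_assert(ClassifyConstantDivisor(16) == DivKind::kPowerOfTwo);
    static_assert(ClassifyConstantDivisor(7)  == DivKind::kGeneric);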
|
D | scheduler_arm.cc |
    131  void HandleDivRemConstantIntegralLatencies(int32_t imm);
    926  void SchedulingLatencyVisitorARM::HandleDivRemConstantIntegralLatencies(int32_t imm) {  in HandleDivRemConstantIntegralLatencies() argument
    927  if (imm == 0) {  in HandleDivRemConstantIntegralLatencies()
    930  } else if (imm == 1 || imm == -1) {  in HandleDivRemConstantIntegralLatencies()
    932  } else if (IsPowerOfTwo(AbsOrMin(imm))) {  in HandleDivRemConstantIntegralLatencies()
    947  int32_t imm = Int32ConstantFrom(rhs->AsConstant());  in VisitDiv() local
    948  HandleDivRemConstantIntegralLatencies(imm);  in VisitDiv()
    1010  int32_t imm = Int32ConstantFrom(rhs->AsConstant());  in VisitRem() local
    1011  HandleDivRemConstantIntegralLatencies(imm);  in VisitRem()
|
D | code_generator_riscv64.cc |
    1437  int64_t imm = Int64FromConstant(second.GetConstant());  in DivRemOneOrMinusOne() local
    1438  DCHECK(imm == 1 || imm == -1);  in DivRemOneOrMinusOne()
    1443  if (imm == -1) {  in DivRemOneOrMinusOne()
    1467  int64_t imm = Int64FromConstant(second.GetConstant());  in DivRemByPowerOfTwo() local
    1468  int64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));  in DivRemByPowerOfTwo()
    1490  if (imm < 0) {  in DivRemByPowerOfTwo()
    1512  int64_t imm = Int64FromConstant(second.GetConstant());  in GenerateDivRemWithAnyConstant() local
    1518  __ LoadConst64(tmp, imm);  in GenerateDivRemWithAnyConstant()
    1544  int64_t imm = Int64FromConstant(second.GetConstant());  in GenerateDivRemIntegral() local
    1545  if (imm == 0) {  in GenerateDivRemIntegral()
    [all …]
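DivRemByPowerOfTwo above reduces a signed division by ±2^k to shift/adjust sequences. The underlying transformation, independent of any target, is: bias a negative dividend by 2^k − 1 so the arithmetic shift rounds toward zero (Java semantics), then negate if the divisor was negative. A plain C++ sketch of the general technique, not a transcription of the RISC-V lowering:

    #include <cassert>
    #include <cstdint>

    // dividend / (±2^ctz), rounding toward zero, without a divide instruction.
    int64_t DivByPowerOfTwo(int64_t dividend, int ctz, bool negative_divisor) {
      // (dividend >> 63) is all-ones for negative dividends, so the bias
      // 2^ctz - 1 is added only when the shift would otherwise round toward -inf.
      int64_t bias = (dividend >> 63) & ((int64_t{1} << ctz) - 1);
      int64_t quotient = (dividend + bias) >> ctz;
      return negative_divisor ? -quotient : quotient;
    }

    // dividend % 2^ctz, via a - (a / b) * b (the divisor's sign cancels out).
    int64_t RemByPowerOfTwo(int64_t dividend, int ctz) {
      int64_t q = DivByPowerOfTwo(dividend, ctz, /*negative_divisor=*/ false);
      return dividend - q * (int64_t{1} << ctz);
    }

    int main() {
      assert(DivByPowerOfTwo(-7, 2, false) == -1);  // -7 / 4  == -1
      assert(DivByPowerOfTwo(-7, 2, true)  ==  1);  // -7 / -4 ==  1
      assert(RemByPowerOfTwo(-7, 2)        == -3);  // -7 % 4  == -3
    }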
|
D | code_generator_x86_64.cc |
    3970  Immediate imm(second.GetConstant()->AsIntConstant()->GetValue());  in VisitSub() local
    3971  __ subl(first.AsRegister<CpuRegister>(), imm);  in VisitSub()
    4073  Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue());  in VisitMul() local
    4074  __ imull(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>(), imm);  in VisitMul()
    4231  int64_t imm = Int64FromConstant(second.GetConstant());  in DivRemOneOrMinusOne() local
    4233  DCHECK(imm == 1 || imm == -1);  in DivRemOneOrMinusOne()
    4241  if (imm == -1) {  in DivRemOneOrMinusOne()
    4253  if (imm == -1) {  in DivRemOneOrMinusOne()
    4269  int64_t imm = Int64FromConstant(second.GetConstant());  in RemByPowerOfTwo() local
    4270  DCHECK(IsPowerOfTwo(AbsOrMin(imm)));  in RemByPowerOfTwo()
    [all …]
|
D | code_generator_arm64.cc |
    3348  int64_t imm = Int64FromLocation(instruction->GetLocations()->InAt(1));  in FOR_EACH_CONDITION_INSTRUCTION() local
    3349  uint64_t abs_imm = static_cast<uint64_t>(AbsOrMin(imm));  in FOR_EACH_CONDITION_INSTRUCTION()
    3395  if (imm > 0) {  in FOR_EACH_CONDITION_INSTRUCTION()
    3454  int64_t imm = Int64FromConstant(second.GetConstant());  in GenerateInt64UnsignedDivRemWithAnyPositiveConstant() local
    3455  DCHECK_GT(imm, 0);  in GenerateInt64UnsignedDivRemWithAnyPositiveConstant()
    3459  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift);  in GenerateInt64UnsignedDivRemWithAnyPositiveConstant()
    3488  GenerateResultRemWithAnyConstant(out, dividend, temp, imm, &temps);  in GenerateInt64UnsignedDivRemWithAnyPositiveConstant()
    3505  int64_t imm = Int64FromConstant(second.GetConstant());  in GenerateInt64DivRemWithAnyConstant() local
    3509  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ true, &magic, &shift);  in GenerateInt64DivRemWithAnyConstant()
    3529  if (NeedToAddDividend(magic, imm)) {  in GenerateInt64DivRemWithAnyConstant()
    [all …]
|
D | code_generator_x86.cc |
    3882  Immediate imm(mul->InputAt(1)->AsIntConstant()->GetValue());  in VisitMul() local
    3883  __ imull(out.AsRegister<Register>(), first.AsRegister<Register>(), imm);  in VisitMul()
    4124  int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();  in DivRemOneOrMinusOne() local
    4126  DCHECK(imm == 1 || imm == -1);  in DivRemOneOrMinusOne()
    4132  if (imm == -1) {  in DivRemOneOrMinusOne()
    4145  int32_t imm = Int64FromConstant(second.GetConstant());  in RemByPowerOfTwo() local
    4146  DCHECK(IsPowerOfTwo(AbsOrMin(imm)));  in RemByPowerOfTwo()
    4147  uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));  in RemByPowerOfTwo()
    4165  int32_t imm = locations->InAt(1).GetConstant()->AsIntConstant()->GetValue();  in DivByPowerOfTwo() local
    4166  DCHECK(IsPowerOfTwo(AbsOrMin(imm)));  in DivByPowerOfTwo()
    [all …]
|
D | code_generator_arm_vixl.cc |
    4489  int32_t imm = Int32ConstantFrom(second);  in DivRemOneOrMinusOne() local
    4490  DCHECK(imm == 1 || imm == -1);  in DivRemOneOrMinusOne()
    4495  if (imm == 1) {  in DivRemOneOrMinusOne()
    4513  int32_t imm = Int32ConstantFrom(second);  in DivRemByPowerOfTwo() local
    4514  uint32_t abs_imm = static_cast<uint32_t>(AbsOrMin(imm));  in DivRemByPowerOfTwo()
    4517  auto generate_div_code = [this, imm, ctz_imm](vixl32::Register out, vixl32::Register in) {  in DivRemByPowerOfTwo()
    4519  if (imm < 0) {  in DivRemByPowerOfTwo()
    4595  int32_t imm = Int32ConstantFrom(second);  in GenerateDivRemWithAnyConstant() local
    4599  CalculateMagicAndShiftForDivRem(imm, /* is_long= */ false, &magic, &shift);  in GenerateDivRemWithAnyConstant()
    4633  if (imm > 0 && HasNonNegativeInputAt(instruction, 0)) {  in GenerateDivRemWithAnyConstant()
    [all …]
|
/art/compiler/utils/ |
D | assembler_test.h |
    216  for (int64_t imm : imms) {  variable
    217  ImmType new_imm = CreateImmediate(imm);
    225  ReplaceImm(imm, bias, multiplier, &base);
    252  for (int64_t imm : imms) {  in RepeatTemplatedRegistersImmBits() local
    253  ImmType new_imm = CreateImmediate(imm);  in RepeatTemplatedRegistersImmBits()
    262  ReplaceImm(imm, bias, /*multiplier=*/ 1, &base);  in RepeatTemplatedRegistersImmBits()
    288  for (int64_t imm : imms) {  in RepeatTemplatedImmBitsRegisters() local
    289  ImmType new_imm = CreateImmediate(imm);  in RepeatTemplatedImmBitsRegisters()
    297  ReplaceImm(imm, /*bias=*/ 0, /*multiplier=*/ 1, &base);  in RepeatTemplatedImmBitsRegisters()
    318  for (int64_t imm : imms) {  in RepeatTemplatedRegisterImmBits() local
    [all …]
|
D | assembler_thumb_test_expected.cc.inc |
    81   " f4: d16f bne 0x1d6 @ imm = #222\n"
    137  " 1d2: f000 b803 b.w 0x1dc @ imm = #6\n"
    138  " 1d6: f000 b81e b.w 0x216 @ imm = #60\n"
|
/art/disassembler/ |
D | disassembler_riscv64.cc |
    108  uint32_t imm = (bits5_11 << 5) + bits0_4;  in Decode32StoreOffset() local
    109  return static_cast<int32_t>(imm) - static_cast<int32_t>(bit11 << 12);  // Sign-extend.  in Decode32StoreOffset()
    398  uint32_t imm = (bits1_10 << 1) + (bit11 << 11) + (bits12_19 << 12) + (bit20 << 20);  in Print32Jal() local
    399  int32_t offset = static_cast<int32_t>(imm) - static_cast<int32_t>(bit20 << 21);  // Sign-extend.  in Print32Jal()
    464  uint32_t imm = (bit12 << 12) + (bit11 << 11) + (bits5_10 << 5) + (bits1_4 << 1);  in Print32BCond() local
    465  int32_t offset = static_cast<int32_t>(imm) - static_cast<int32_t>(bit12 << 13);  // Sign-extend.  in Print32BCond()
    713  int32_t imm = Decode32Imm12(insn32);  in Print32BinOpImm() local
    716  if (funct3 == /*ADDI*/ 0u && imm == 0u) {  in Print32BinOpImm()
    724  } else if (!narrow && funct3 == /*XORI*/ 4u && imm == -1) {  in Print32BinOpImm()
    726  } else if (!narrow && funct3 == /*ANDI*/ 7u && imm == 0xff) {  in Print32BinOpImm()
    [all …]
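Decode32StoreOffset above shows the recurring trick in this disassembler: reassemble the scattered unsigned bit groups of an immediate, then sign-extend by subtracting the sign bit shifted one place past the field width. A self-contained version for the S-type (store) offset, with the field extraction spelled out per the RISC-V base encoding; the helper names are illustrative.

    #include <cstdint>

    // Extract 'width' bits of 'insn' starting at bit 'pos'.
    constexpr uint32_t BitField(uint32_t insn, int pos, int width) {
      return (insn >> pos) & ((1u << width) - 1u);
    }

    // S-type immediate: imm[11:5] lives in insn[31:25], imm[4:0] in insn[11:7].
    constexpr int32_t DecodeSTypeOffset(uint32_t insn) {
      uint32_t bits0_4  = BitField(insn, 7, 5);
      uint32_t bits5_11 = BitField(insn, 25, 7);
      uint32_t bit11    = BitField(insn, 31, 1);  // sign bit of the immediate
      uint32_t imm = (bits5_11 << 5) + bits0_4;
      // Subtracting (sign_bit << 12) maps [0x800, 0xfff] onto [-0x800, -1].
      return static_cast<int32_t>(imm) - static_cast<int32_t>(bit11 << 12);
    }

    // "sd a1, -8(a0)" encodes as 0xfeb53c23; its offset decodes back to -8.
    static_assert(DecodeSTypeOffset(0xfeb53c23u) == -8);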
|
/art/compiler/utils/arm/ |
D | assembler_arm_vixl.h |
    168  void Vmov(vixl32::DRegister rd, double imm) {  in Vmov() argument
    169  if (vixl::VFP::IsImmFP64(imm)) {  in Vmov()
    170  MacroAssembler::Vmov(rd, imm);  in Vmov()
    172  MacroAssembler::Vldr(rd, imm);  in Vmov()
|
/art/dex2oat/linker/arm/ |
D | relative_patcher_thumb2.cc |
    79  uint32_t imm = (diff16 >> 11) & 0x1u;  in PatchPcRelativeReference() local
    82  insn = (insn & 0xfbf08f00u) | (imm << 26) | (imm4 << 16) | (imm3 << 12) | imm8;  in PatchPcRelativeReference()
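The patcher above rewrites the 16-bit immediate carried by a Thumb2 MOVW/MOVT instruction in place; the excerpt shows only the single `i` bit being extracted plus the final field merge. For reference, the full split of a 16-bit value into the encoding's four fields looks like this (field layout per the T32 MOVW/MOVT encoding; the helpers are illustrative, not ART's code):

    #include <cstdint>

    // Insert a 16-bit value into a 32-bit T32 MOVW/MOVT instruction word:
    // imm4 -> bits 19:16, i -> bit 26, imm3 -> bits 14:12, imm8 -> bits 7:0.
    constexpr uint32_t PatchMovwMovtImm16(uint32_t insn, uint32_t value16) {
      uint32_t imm4 = (value16 >> 12) & 0xfu;
      uint32_t i    = (value16 >> 11) & 0x1u;
      uint32_t imm3 = (value16 >> 8) & 0x7u;
      uint32_t imm8 = value16 & 0xffu;
      // 0xfbf08f00 keeps every bit of the instruction except those four fields.
      return (insn & 0xfbf08f00u) | (i << 26) | (imm4 << 16) | (imm3 << 12) | imm8;
    }

    // Round trip: pull the fields back out and reassemble the 16-bit value.
    constexpr uint32_t ExtractMovwMovtImm16(uint32_t insn) {
      return (((insn >> 16) & 0xfu) << 12) | (((insn >> 26) & 0x1u) << 11) |
             (((insn >> 12) & 0x7u) << 8) | (insn & 0xffu);
    }

    static_assert(ExtractMovwMovtImm16(PatchMovwMovtImm16(0u, 0xbeefu)) == 0xbeefu);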
|
/art/compiler/utils/riscv64/ |
D | assembler_riscv64_test.cc |
    841  for (int64_t imm : kImm12s) {  in TestAddConst() local
    842  emit_op(rd, rs1, imm);  in TestAddConst()
    843  expected += ART_FORMAT("{}{}, {}\n", addi_rd, rs1_name, std::to_string(imm));  in TestAddConst()
    847  for (int64_t imm : imms) {  in TestAddConst() local
    848  emit_op(rd, rs1, imm);  in TestAddConst()
    851  ART_FORMAT("{}{}, {}\n", addi_rd, tmp_name, std::to_string(imm - adjustment));  in TestAddConst()
    857  for (int64_t imm : large_values) {  in TestAddConst() local
    858  emit_op(rd, rs1, imm);  in TestAddConst()
    859  expected += ART_FORMAT("li {}, {}\n", tmp_name, std::to_string(imm));  in TestAddConst()
    911  for (int64_t imm : kImm12s) {  in RepeatLoadStoreArbitraryOffset() local
    [all …]
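TestAddConst above checks three expansions of "register + constant": a single addi when the value fits in a signed 12-bit immediate, two chained addis when it is only slightly out of range (the "adjustment" in the expected strings), and li into a temporary followed by a register-register add otherwise. A rough sketch of that decision; the Addi/Add/Li emitters just print what they would encode, and the exact split point (±2047/2048) is an assumption, not necessarily the assembler's choice.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical emitters; the real assembler writes machine code instead.
    void Addi(int rd, int rs1, int64_t imm) { std::printf("addi x%d, x%d, %lld\n", rd, rs1, static_cast<long long>(imm)); }
    void Add(int rd, int rs1, int rs2)      { std::printf("add x%d, x%d, x%d\n", rd, rs1, rs2); }
    void Li(int rd, int64_t imm)            { std::printf("li x%d, %lld\n", rd, static_cast<long long>(imm)); }

    constexpr bool IsInt12(int64_t v) { return v >= -2048 && v <= 2047; }

    // rd = rs1 + imm, using tmp as a scratch register when needed.
    void AddConst(int rd, int rs1, int tmp, int64_t imm) {
      if (IsInt12(imm)) {
        Addi(rd, rs1, imm);                        // one ADDI
      } else if (imm > 0 && IsInt12(imm - 2047)) {
        Addi(tmp, rs1, 2047);                      // split across two ADDIs
        Addi(rd, tmp, imm - 2047);
      } else if (imm < 0 && IsInt12(imm + 2048)) {
        Addi(tmp, rs1, -2048);
        Addi(rd, tmp, imm + 2048);
      } else {
        Li(tmp, imm);                              // materialize, then ADD
        Add(rd, rs1, tmp);
      }
    }

    int main() {
      AddConst(10, 11, 5, 100);      // fits in imm12: one addi
      AddConst(10, 11, 5, 3000);     // slightly out of range: two addis
      AddConst(10, 11, 5, 1 << 20);  // large: li + add
    }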
|
D | assembler_riscv64.h |
    557  void CLi(XRegister rd, int32_t imm);
    560  void CAddiw(XRegister rd, int32_t imm);
    566  void CAndi(XRegister rd_s, int32_t imm);
    1724  void Li(XRegister rd, int64_t imm);
    2172  void LoadImmediate(XRegister rd, int64_t imm, bool can_use_tmp);
    2235  static constexpr uint32_t EncodeIntWidth(const int32_t imm) {  in EncodeIntWidth() argument
    2236  DCHECK(IsInt<kWidth>(imm));  in EncodeIntWidth()
    2237  return static_cast<uint32_t>(imm) & MaskLeastSignificant<uint32_t>(kWidth);  in EncodeIntWidth()
    2240  static constexpr uint32_t EncodeInt5(const int32_t imm) { return EncodeIntWidth<5>(imm); }  in EncodeInt5() argument
    2241  static constexpr uint32_t EncodeInt6(const int32_t imm) { return EncodeIntWidth<6>(imm); }  in EncodeInt6() argument
    [all …]
|
D | assembler_riscv64.cc |
    1313  void Riscv64Assembler::CLi(XRegister rd, int32_t imm) {  in CLi() argument
    1316  DCHECK(IsInt<6>(imm));  in CLi()
    1317  EmitCI(0b010u, rd, EncodeInt6(imm), 0b01u);  in CLi()
    1335  void Riscv64Assembler::CAddiw(XRegister rd, int32_t imm) {  in CAddiw() argument
    1338  EmitCI(0b001u, rd, EncodeInt6(imm), 0b01u);  in CAddiw()
    1396  void Riscv64Assembler::CAndi(XRegister rd_s, int32_t imm) {  in CAndi() argument
    1398  DCHECK(IsInt<6>(imm));  in CAndi()
    1399  EmitCBArithmetic(0b100u, 0b10u, imm, rd_s, 0b01u);  in CAndi()
    6115  void Riscv64Assembler::Li(XRegister rd, int64_t imm) {  in Li() argument
    6116  LoadImmediate(rd, imm, /*can_use_tmp=*/ false);  in Li()
    [all …]
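The compressed forms above (c.li, c.addiw, c.andi) all carry a 6-bit signed immediate: the assembler range-checks it (IsInt<6>) and then keeps only its low 6 bits for the encoding (EncodeInt6, whose body appears in the header excerpt). A self-contained sketch of that check/encode pair, together with the inverse a decoder would apply; the template names here are stand-ins, not ART's real helpers.

    #include <cstdint>

    // Does 'value' fit in a signed kWidth-bit field?
    template <int kWidth>
    constexpr bool IsIntN(int64_t value) {
      return value >= -(int64_t{1} << (kWidth - 1)) && value < (int64_t{1} << (kWidth - 1));
    }

    // Keep only the low kWidth bits, i.e. the two's-complement field contents.
    template <int kWidth>
    constexpr uint32_t EncodeIntWidth(int32_t imm) {
      return static_cast<uint32_t>(imm) & ((1u << kWidth) - 1u);
    }

    // Sign-extend a kWidth-bit field back to an integer (what a decoder does).
    template <int kWidth>
    constexpr int32_t DecodeIntWidth(uint32_t field) {
      uint32_t sign = 1u << (kWidth - 1);
      return static_cast<int32_t>(field ^ sign) - static_cast<int32_t>(sign);
    }

    static_assert(IsIntN<6>(-32) && IsIntN<6>(31) && !IsIntN<6>(32));
    static_assert(EncodeIntWidth<6>(-1) == 0b111111u);
    static_assert(DecodeIntWidth<6>(EncodeIntWidth<6>(-17)) == -17);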
|