1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
// The original source code covered by the above license has been
// modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36
37 #include "src/arm/assembler-arm.h"
38
39 #if V8_TARGET_ARCH_ARM
40
41 #include "src/arm/assembler-arm-inl.h"
42 #include "src/base/bits.h"
43 #include "src/base/cpu.h"
44 #include "src/macro-assembler.h"
45
46 namespace v8 {
47 namespace internal {
48
// Cumulative CPU feature bit masks for each supported ARM architecture
// level. Each level is a strict superset of the previous one, so masks can
// be OR-ed and intersected directly: armv6 < armv7 < armv7+sudiv < armv8.
static const unsigned kArmv6 = 0u;
static const unsigned kArmv7 = kArmv6 | (1u << ARMv7);
static const unsigned kArmv7WithSudiv = kArmv7 | (1u << ARMv7_SUDIV);
static const unsigned kArmv8 = kArmv7WithSudiv | (1u << ARMv8);
53
// Translate the --arm-arch flag (and, for compatibility, the deprecated
// --enable-* flags) into a feature bit mask. Aborts with an error message on
// an unrecognised --arm-arch value.
static unsigned CpuFeaturesFromCommandLine() {
  unsigned result;
  if (strcmp(FLAG_arm_arch, "armv8") == 0) {
    result = kArmv8;
  } else if (strcmp(FLAG_arm_arch, "armv7+sudiv") == 0) {
    result = kArmv7WithSudiv;
  } else if (strcmp(FLAG_arm_arch, "armv7") == 0) {
    result = kArmv7;
  } else if (strcmp(FLAG_arm_arch, "armv6") == 0) {
    result = kArmv6;
  } else {
    fprintf(stderr, "Error: unrecognised value for --arm-arch ('%s').\n",
            FLAG_arm_arch);
    fprintf(stderr,
            "Supported values are: armv8\n"
            "                      armv7+sudiv\n"
            "                      armv7\n"
            "                      armv6\n");
    CHECK(false);
  }

  // If any of the old (deprecated) flags are specified, print a warning, but
  // otherwise try to respect them for now.
  // TODO(jbramley): When all the old bots have been updated, remove this.
  if (FLAG_enable_armv7.has_value || FLAG_enable_vfp3.has_value ||
      FLAG_enable_32dregs.has_value || FLAG_enable_neon.has_value ||
      FLAG_enable_sudiv.has_value || FLAG_enable_armv8.has_value) {
    // As an approximation of the old behaviour, set the default values from the
    // arm_arch setting, then apply the flags over the top.
    // Note: ARMv7 implied vfp3, 32dregs and neon in the old scheme, so all
    // four defaults derive from the ARMv7 bit.
    bool enable_armv7 = (result & (1u << ARMv7)) != 0;
    bool enable_vfp3 = (result & (1u << ARMv7)) != 0;
    bool enable_32dregs = (result & (1u << ARMv7)) != 0;
    bool enable_neon = (result & (1u << ARMv7)) != 0;
    bool enable_sudiv = (result & (1u << ARMv7_SUDIV)) != 0;
    bool enable_armv8 = (result & (1u << ARMv8)) != 0;
    if (FLAG_enable_armv7.has_value) {
      fprintf(stderr,
              "Warning: --enable_armv7 is deprecated. "
              "Use --arm_arch instead.\n");
      enable_armv7 = FLAG_enable_armv7.value;
    }
    if (FLAG_enable_vfp3.has_value) {
      fprintf(stderr,
              "Warning: --enable_vfp3 is deprecated. "
              "Use --arm_arch instead.\n");
      enable_vfp3 = FLAG_enable_vfp3.value;
    }
    if (FLAG_enable_32dregs.has_value) {
      fprintf(stderr,
              "Warning: --enable_32dregs is deprecated. "
              "Use --arm_arch instead.\n");
      enable_32dregs = FLAG_enable_32dregs.value;
    }
    if (FLAG_enable_neon.has_value) {
      fprintf(stderr,
              "Warning: --enable_neon is deprecated. "
              "Use --arm_arch instead.\n");
      enable_neon = FLAG_enable_neon.value;
    }
    if (FLAG_enable_sudiv.has_value) {
      fprintf(stderr,
              "Warning: --enable_sudiv is deprecated. "
              "Use --arm_arch instead.\n");
      enable_sudiv = FLAG_enable_sudiv.value;
    }
    if (FLAG_enable_armv8.has_value) {
      fprintf(stderr,
              "Warning: --enable_armv8 is deprecated. "
              "Use --arm_arch instead.\n");
      enable_armv8 = FLAG_enable_armv8.value;
    }
    // Emulate the old implications.
    if (enable_armv8) {
      enable_vfp3 = true;
      enable_neon = true;
      enable_32dregs = true;
      enable_sudiv = true;
    }
    // Select the best available configuration. Anything short of the full
    // ARMv7 feature set degrades to plain ARMv6.
    if (enable_armv7 && enable_vfp3 && enable_32dregs && enable_neon) {
      if (enable_sudiv) {
        if (enable_armv8) {
          result = kArmv8;
        } else {
          result = kArmv7WithSudiv;
        }
      } else {
        result = kArmv7;
      }
    } else {
      result = kArmv6;
    }
  }
  return result;
}
149
// Get the CPU features enabled by the build.
// For cross compilation the preprocessor symbols such as
// CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS can be used to
// enable ARMv7 and VFPv3 instructions when building the snapshot. However,
// these flags should be consistent with a supported ARM configuration:
//  "armv6":       ARMv6 + VFPv2
//  "armv7":       ARMv7 + VFPv3-D32 + NEON
//  "armv7+sudiv": ARMv7 + VFPv4-D32 + NEON + SUDIV
//  "armv8":       ARMv8 (+ all of the above)
static constexpr unsigned CpuFeaturesFromCompiler() {
  // TODO(jbramley): Once the build flags are simplified, these tests should
  // also be simplified.

  // Check *architectural* implications. Inconsistent macro combinations are
  // build configuration errors, caught at compile time.
#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_ARMV7_INSTRUCTIONS"
#endif
#if defined(CAN_USE_ARMV8_INSTRUCTIONS) && !defined(CAN_USE_SUDIV)
#error "CAN_USE_ARMV8_INSTRUCTIONS should imply CAN_USE_SUDIV"
#endif
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) != defined(CAN_USE_VFP3_INSTRUCTIONS)
// V8 requires VFP, and all ARMv7 devices with VFP have VFPv3. Similarly,
// VFPv3 isn't available before ARMv7.
#error "CAN_USE_ARMV7_INSTRUCTIONS should match CAN_USE_VFP3_INSTRUCTIONS"
#endif
#if defined(CAN_USE_NEON) && !defined(CAN_USE_ARMV7_INSTRUCTIONS)
#error "CAN_USE_NEON should imply CAN_USE_ARMV7_INSTRUCTIONS"
#endif

  // Find compiler-implied features, returning the highest architecture level
  // whose full macro set is present.
#if defined(CAN_USE_ARMV8_INSTRUCTIONS) &&                           \
    defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
  return kArmv8;
#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_SUDIV) && \
    defined(CAN_USE_NEON) && defined(CAN_USE_VFP3_INSTRUCTIONS)
  return kArmv7WithSudiv;
#elif defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(CAN_USE_NEON) && \
    defined(CAN_USE_VFP3_INSTRUCTIONS)
  return kArmv7;
#else
  return kArmv6;
#endif
}
194
195
// Determine the supported CPU feature set by combining the command-line
// flags, the compile-time configuration and (on real ARM hardware) runtime
// probing. Also tunes dcache_line_size_ for known cores.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  dcache_line_size_ = 64;  // Default; refined below for known Cortex cores.

  unsigned command_line = CpuFeaturesFromCommandLine();
  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) {
    supported_ |= command_line & CpuFeaturesFromCompiler();
    return;
  }

#ifndef __arm__
  // For the simulator build, use whatever the flags specify.
  supported_ |= command_line;

#else  // __arm__
  // Probe for additional features at runtime.
  base::CPU cpu;
  // Runtime detection is slightly fuzzy, and some inferences are necessary.
  unsigned runtime = kArmv6;
  // NEON and VFPv3 imply at least ARMv7-A.
  if (cpu.has_neon() && cpu.has_vfp3_d32()) {
    DCHECK(cpu.has_vfp3());
    runtime |= kArmv7;
    if (cpu.has_idiva()) {
      runtime |= kArmv7WithSudiv;
      if (cpu.architecture() >= 8) {
        runtime |= kArmv8;
      }
    }
  }

  // Use the best of the features found by CPU detection and those inferred from
  // the build system. In both cases, restrict available features using the
  // command-line. Note that the command-line flags are very permissive (kArmv8)
  // by default.
  supported_ |= command_line & CpuFeaturesFromCompiler();
  supported_ |= command_line & runtime;

  // Additional tuning options.

  // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
  if (cpu.implementer() == base::CPU::ARM &&
      (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
       cpu.part() == base::CPU::ARM_CORTEX_A9)) {
    dcache_line_size_ = 32;
  }
#endif

  // The resulting feature set must respect the architecture hierarchy.
  DCHECK_IMPLIES(IsSupported(ARMv7_SUDIV), IsSupported(ARMv7));
  DCHECK_IMPLIES(IsSupported(ARMv8), IsSupported(ARMv7_SUDIV));
}
247
248
PrintTarget()249 void CpuFeatures::PrintTarget() {
250 const char* arm_arch = NULL;
251 const char* arm_target_type = "";
252 const char* arm_no_probe = "";
253 const char* arm_fpu = "";
254 const char* arm_thumb = "";
255 const char* arm_float_abi = NULL;
256
257 #if !defined __arm__
258 arm_target_type = " simulator";
259 #endif
260
261 #if defined ARM_TEST_NO_FEATURE_PROBE
262 arm_no_probe = " noprobe";
263 #endif
264
265 #if defined CAN_USE_ARMV8_INSTRUCTIONS
266 arm_arch = "arm v8";
267 #elif defined CAN_USE_ARMV7_INSTRUCTIONS
268 arm_arch = "arm v7";
269 #else
270 arm_arch = "arm v6";
271 #endif
272
273 #if defined CAN_USE_NEON
274 arm_fpu = " neon";
275 #elif defined CAN_USE_VFP3_INSTRUCTIONS
276 # if defined CAN_USE_VFP32DREGS
277 arm_fpu = " vfp3";
278 # else
279 arm_fpu = " vfp3-d16";
280 # endif
281 #else
282 arm_fpu = " vfp2";
283 #endif
284
285 #ifdef __arm__
286 arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
287 #elif USE_EABI_HARDFLOAT
288 arm_float_abi = "hard";
289 #else
290 arm_float_abi = "softfp";
291 #endif
292
293 #if defined __arm__ && (defined __thumb__) || (defined __thumb2__)
294 arm_thumb = " thumb";
295 #endif
296
297 printf("target%s%s %s%s%s %s\n",
298 arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
299 arm_float_abi);
300 }
301
302
PrintFeatures()303 void CpuFeatures::PrintFeatures() {
304 printf("ARMv8=%d ARMv7=%d VFPv3=%d VFP32DREGS=%d NEON=%d SUDIV=%d",
305 CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
306 CpuFeatures::IsSupported(VFPv3), CpuFeatures::IsSupported(VFP32DREGS),
307 CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV));
308 #ifdef __arm__
309 bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
310 #elif USE_EABI_HARDFLOAT
311 bool eabi_hardfloat = true;
312 #else
313 bool eabi_hardfloat = false;
314 #endif
315 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
316 }
317
318
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// static
// No relocation mode on ARM requires special "apply" handling when code
// moves, so the mask is empty.
const int RelocInfo::kApplyMask = 0;
324
325
IsCodedSpecially()326 bool RelocInfo::IsCodedSpecially() {
327 // The deserializer needs to know whether a pointer is specially coded. Being
328 // specially coded on ARM means that it is a movw/movt instruction, or is an
329 // embedded constant pool entry. These only occur if
330 // FLAG_enable_embedded_constant_pool is true.
331 return FLAG_enable_embedded_constant_pool;
332 }
333
334
IsInConstantPool()335 bool RelocInfo::IsInConstantPool() {
336 return Assembler::is_constant_pool_load(pc_);
337 }
338
wasm_memory_reference()339 Address RelocInfo::wasm_memory_reference() {
340 DCHECK(IsWasmMemoryReference(rmode_));
341 return Assembler::target_address_at(pc_, host_);
342 }
343
wasm_memory_size_reference()344 uint32_t RelocInfo::wasm_memory_size_reference() {
345 DCHECK(IsWasmMemorySizeReference(rmode_));
346 return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
347 }
348
wasm_global_reference()349 Address RelocInfo::wasm_global_reference() {
350 DCHECK(IsWasmGlobalReference(rmode_));
351 return Assembler::target_address_at(pc_, host_);
352 }
353
unchecked_update_wasm_memory_reference(Address address,ICacheFlushMode flush_mode)354 void RelocInfo::unchecked_update_wasm_memory_reference(
355 Address address, ICacheFlushMode flush_mode) {
356 Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
357 }
358
unchecked_update_wasm_memory_size(uint32_t size,ICacheFlushMode flush_mode)359 void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
360 ICacheFlushMode flush_mode) {
361 Assembler::set_target_address_at(isolate_, pc_, host_,
362 reinterpret_cast<Address>(size), flush_mode);
363 }
364
365 // -----------------------------------------------------------------------------
366 // Implementation of Operand and MemOperand
367 // See assembler-arm-inl.h for inlined constructors
368
Operand(Handle<Object> handle)369 Operand::Operand(Handle<Object> handle) {
370 AllowDeferredHandleDereference using_raw_address;
371 rm_ = no_reg;
372 // Verify all Objects referred by code are NOT in new space.
373 Object* obj = *handle;
374 if (obj->IsHeapObject()) {
375 imm32_ = reinterpret_cast<intptr_t>(handle.location());
376 rmode_ = RelocInfo::EMBEDDED_OBJECT;
377 } else {
378 // no relocation needed
379 imm32_ = reinterpret_cast<intptr_t>(obj);
380 rmode_ = RelocInfo::NONE32;
381 }
382 }
383
384
Operand(Register rm,ShiftOp shift_op,int shift_imm)385 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
386 DCHECK(is_uint5(shift_imm));
387
388 rm_ = rm;
389 rs_ = no_reg;
390 shift_op_ = shift_op;
391 shift_imm_ = shift_imm & 31;
392
393 if ((shift_op == ROR) && (shift_imm == 0)) {
394 // ROR #0 is functionally equivalent to LSL #0 and this allow us to encode
395 // RRX as ROR #0 (See below).
396 shift_op = LSL;
397 } else if (shift_op == RRX) {
398 // encoded as ROR with shift_imm == 0
399 DCHECK(shift_imm == 0);
400 shift_op_ = ROR;
401 shift_imm_ = 0;
402 }
403 }
404
405
Operand(Register rm,ShiftOp shift_op,Register rs)406 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
407 DCHECK(shift_op != RRX);
408 rm_ = rm;
409 rs_ = no_reg;
410 shift_op_ = shift_op;
411 rs_ = rs;
412 }
413
414
MemOperand(Register rn,int32_t offset,AddrMode am)415 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
416 rn_ = rn;
417 rm_ = no_reg;
418 offset_ = offset;
419 am_ = am;
420
421 // Accesses below the stack pointer are not safe, and are prohibited by the
422 // ABI. We can check obvious violations here.
423 if (rn.is(sp)) {
424 if (am == Offset) DCHECK_LE(0, offset);
425 if (am == NegOffset) DCHECK_GE(0, offset);
426 }
427 }
428
429
MemOperand(Register rn,Register rm,AddrMode am)430 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
431 rn_ = rn;
432 rm_ = rm;
433 shift_op_ = LSL;
434 shift_imm_ = 0;
435 am_ = am;
436 }
437
438
MemOperand(Register rn,Register rm,ShiftOp shift_op,int shift_imm,AddrMode am)439 MemOperand::MemOperand(Register rn, Register rm,
440 ShiftOp shift_op, int shift_imm, AddrMode am) {
441 DCHECK(is_uint5(shift_imm));
442 rn_ = rn;
443 rm_ = rm;
444 shift_op_ = shift_op;
445 shift_imm_ = shift_imm & 31;
446 am_ = am;
447 }
448
449
NeonMemOperand(Register rn,AddrMode am,int align)450 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
451 DCHECK((am == Offset) || (am == PostIndex));
452 rn_ = rn;
453 rm_ = (am == Offset) ? pc : sp;
454 SetAlignment(align);
455 }
456
457
NeonMemOperand(Register rn,Register rm,int align)458 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
459 rn_ = rn;
460 rm_ = rm;
461 SetAlignment(align);
462 }
463
464
SetAlignment(int align)465 void NeonMemOperand::SetAlignment(int align) {
466 switch (align) {
467 case 0:
468 align_ = 0;
469 break;
470 case 64:
471 align_ = 1;
472 break;
473 case 128:
474 align_ = 2;
475 break;
476 case 256:
477 align_ = 3;
478 break;
479 default:
480 UNREACHABLE();
481 align_ = 0;
482 break;
483 }
484 }
485
486
NeonListOperand(DoubleRegister base,int registers_count)487 NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
488 base_ = base;
489 switch (registers_count) {
490 case 1:
491 type_ = nlt_1;
492 break;
493 case 2:
494 type_ = nlt_2;
495 break;
496 case 3:
497 type_ = nlt_3;
498 break;
499 case 4:
500 type_ = nlt_4;
501 break;
502 default:
503 UNREACHABLE();
504 type_ = nlt_1;
505 break;
506 }
507 }
508
509
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | Register::kCode_sp * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | Register::kCode_sp * B16;
// ldr rd, [pc, #offset]
const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPCImmedPattern = 5 * B24 | L | Register::kCode_pc * B16;
// ldr rd, [pp, #offset]  (pp is r8, see the kCode_r8 base register)
const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpImmedPattern = 5 * B24 | L | Register::kCode_r8 * B16;
// ldr rd, [pp, rn]
const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
const Instr kLdrPpRegPattern = 7 * B24 | L | Register::kCode_r8 * B16;
// vldr dd, [pc, #offset]
const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPCPattern = 13 * B24 | L | Register::kCode_pc * B16 | 11 * B8;
// vldr dd, [pp, #offset]
const Instr kVldrDPpMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
const Instr kVldrDPpPattern = 13 * B24 | L | Register::kCode_r8 * B16 | 11 * B8;
// blxcc rm
const Instr kBlxRegMask =
    15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
const Instr kBlxRegPattern =
    B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
const Instr kBlxIp = al | kBlxRegPattern | ip.code();
// NOTE(review): the names of the following mask/pattern/flip constants
// suggest they are used to rewrite between paired instruction forms
// (mov<->mvn, movw CC handling, cmp<->cmn, add<->sub, and<->bic) when
// fitting immediates — confirm at their use sites.
const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
const Instr kMovMvnPattern = 0xd * B21;
const Instr kMovMvnFlip = B22;
const Instr kMovLeaveCCMask = 0xdff * B16;
const Instr kMovLeaveCCPattern = 0x1a0 * B16;
const Instr kMovwPattern = 0x30 * B20;
const Instr kMovtPattern = 0x34 * B20;
const Instr kMovwLeaveCCFlip = 0x5 * B21;
const Instr kMovImmedMask = 0x7f * B21;
const Instr kMovImmedPattern = 0x1d * B21;
const Instr kOrrImmedMask = 0x7f * B21;
const Instr kOrrImmedPattern = 0x1c * B21;
const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
const Instr kCmpCmnPattern = 0x15 * B20;
const Instr kCmpCmnFlip = B21;
const Instr kAddSubFlip = 0x6 * B21;
const Instr kAndBicFlip = 0xe * B21;

// Patterns for frame-pointer-relative loads/stores, matched by the
// Is{Ldr,Str}RegFp*Offset helpers below with kLdrStrInstrTypeMask (which
// masks out the Rd field and the offset).
const Instr kLdrRegFpOffsetPattern =
    al | B26 | L | Offset | Register::kCode_fp * B16;
const Instr kStrRegFpOffsetPattern =
    al | B26 | Offset | Register::kCode_fp * B16;
const Instr kLdrRegFpNegOffsetPattern =
    al | B26 | L | NegOffset | Register::kCode_fp * B16;
const Instr kStrRegFpNegOffsetPattern =
    al | B26 | NegOffset | Register::kCode_fp * B16;
const Instr kLdrStrInstrTypeMask = 0xffff0000;
570
// Sets up an assembler writing into the given code buffer. Constant pool
// bookkeeping starts empty; relocation info is written backwards from the
// end of the buffer.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      pending_32_bit_constants_(),
      pending_64_bit_constants_(),
      constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits) {
  pending_32_bit_constants_.reserve(kMinNumPendingConstants);
  pending_64_bit_constants_.reserve(kMinNumPendingConstants);
  // Reloc info grows down from the buffer end while code grows up from pc_.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
  next_buffer_check_ = 0;
  const_pool_blocked_nesting_ = 0;
  no_const_pool_before_ = 0;
  // -1 marks "no pending constant of this width yet".
  first_const_pool_32_use_ = -1;
  first_const_pool_64_use_ = -1;
  last_bound_pos_ = 0;
  ClearRecordedAstId();
  if (CpuFeatures::IsSupported(VFP32DREGS)) {
    // Register objects tend to be abstracted and survive between scopes, so
    // it's awkward to use CpuFeatures::VFP32DREGS with CpuFeatureScope. To make
    // its use consistent with other features, we always enable it if we can.
    EnableCpuFeature(VFP32DREGS);
  }
}
594
595
Assembler::~Assembler() {
  // A BlockConstPoolScope must not still be open when the assembler dies.
  DCHECK(const_pool_blocked_nesting_ == 0);
}
599
600
// Finalizes code generation: flushes any pending constant pool and fills in
// the code descriptor (buffer, instruction/reloc/constant-pool sizes).
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = 0;
  if (FLAG_enable_embedded_constant_pool) {
    constant_pool_offset = EmitEmbeddedConstantPool();
  } else {
    // NOTE(review): arguments look like (force_emit, require_jump) — confirm
    // against the declaration.
    CheckConstPool(true, false);
    DCHECK(pending_32_bit_constants_.empty());
    DCHECK(pending_64_bit_constants_.empty());
  }
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written downwards from the end of the buffer, so its size
  // is the distance from the writer's position to the buffer end.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  // A zero offset means no embedded constant pool was emitted.
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
622
623
Align(int m)624 void Assembler::Align(int m) {
625 DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
626 DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
627 while ((pc_offset() & (m - 1)) != 0) {
628 nop();
629 }
630 }
631
632
CodeTargetAlign()633 void Assembler::CodeTargetAlign() {
634 // Preferred alignment of jump targets on some ARM chips.
635 Align(8);
636 }
637
638
GetCondition(Instr instr)639 Condition Assembler::GetCondition(Instr instr) {
640 return Instruction::ConditionField(instr);
641 }
642
643
IsBranch(Instr instr)644 bool Assembler::IsBranch(Instr instr) {
645 return (instr & (B27 | B25)) == (B27 | B25);
646 }
647
648
GetBranchOffset(Instr instr)649 int Assembler::GetBranchOffset(Instr instr) {
650 DCHECK(IsBranch(instr));
651 // Take the jump offset in the lower 24 bits, sign extend it and multiply it
652 // with 4 to get the offset in bytes.
653 return ((instr & kImm24Mask) << 8) >> 6;
654 }
655
656
IsLdrRegisterImmediate(Instr instr)657 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
658 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
659 }
660
661
IsVldrDRegisterImmediate(Instr instr)662 bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
663 return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
664 }
665
666
GetLdrRegisterImmediateOffset(Instr instr)667 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
668 DCHECK(IsLdrRegisterImmediate(instr));
669 bool positive = (instr & B23) == B23;
670 int offset = instr & kOff12Mask; // Zero extended offset.
671 return positive ? offset : -offset;
672 }
673
674
GetVldrDRegisterImmediateOffset(Instr instr)675 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
676 DCHECK(IsVldrDRegisterImmediate(instr));
677 bool positive = (instr & B23) == B23;
678 int offset = instr & kOff8Mask; // Zero extended offset.
679 offset <<= 2;
680 return positive ? offset : -offset;
681 }
682
683
SetLdrRegisterImmediateOffset(Instr instr,int offset)684 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
685 DCHECK(IsLdrRegisterImmediate(instr));
686 bool positive = offset >= 0;
687 if (!positive) offset = -offset;
688 DCHECK(is_uint12(offset));
689 // Set bit indicating whether the offset should be added.
690 instr = (instr & ~B23) | (positive ? B23 : 0);
691 // Set the actual offset.
692 return (instr & ~kOff12Mask) | offset;
693 }
694
695
SetVldrDRegisterImmediateOffset(Instr instr,int offset)696 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
697 DCHECK(IsVldrDRegisterImmediate(instr));
698 DCHECK((offset & ~3) == offset); // Must be 64-bit aligned.
699 bool positive = offset >= 0;
700 if (!positive) offset = -offset;
701 DCHECK(is_uint10(offset));
702 // Set bit indicating whether the offset should be added.
703 instr = (instr & ~B23) | (positive ? B23 : 0);
704 // Set the actual offset. Its bottom 2 bits are zero.
705 return (instr & ~kOff8Mask) | (offset >> 2);
706 }
707
708
IsStrRegisterImmediate(Instr instr)709 bool Assembler::IsStrRegisterImmediate(Instr instr) {
710 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
711 }
712
713
SetStrRegisterImmediateOffset(Instr instr,int offset)714 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
715 DCHECK(IsStrRegisterImmediate(instr));
716 bool positive = offset >= 0;
717 if (!positive) offset = -offset;
718 DCHECK(is_uint12(offset));
719 // Set bit indicating whether the offset should be added.
720 instr = (instr & ~B23) | (positive ? B23 : 0);
721 // Set the actual offset.
722 return (instr & ~kOff12Mask) | offset;
723 }
724
725
IsAddRegisterImmediate(Instr instr)726 bool Assembler::IsAddRegisterImmediate(Instr instr) {
727 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
728 }
729
730
SetAddRegisterImmediateOffset(Instr instr,int offset)731 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
732 DCHECK(IsAddRegisterImmediate(instr));
733 DCHECK(offset >= 0);
734 DCHECK(is_uint12(offset));
735 // Set the offset.
736 return (instr & ~kOff12Mask) | offset;
737 }
738
739
GetRd(Instr instr)740 Register Assembler::GetRd(Instr instr) {
741 Register reg;
742 reg.reg_code = Instruction::RdValue(instr);
743 return reg;
744 }
745
746
GetRn(Instr instr)747 Register Assembler::GetRn(Instr instr) {
748 Register reg;
749 reg.reg_code = Instruction::RnValue(instr);
750 return reg;
751 }
752
753
GetRm(Instr instr)754 Register Assembler::GetRm(Instr instr) {
755 Register reg;
756 reg.reg_code = Instruction::RmValue(instr);
757 return reg;
758 }
759
760
GetConsantPoolLoadPattern()761 Instr Assembler::GetConsantPoolLoadPattern() {
762 if (FLAG_enable_embedded_constant_pool) {
763 return kLdrPpImmedPattern;
764 } else {
765 return kLdrPCImmedPattern;
766 }
767 }
768
769
GetConsantPoolLoadMask()770 Instr Assembler::GetConsantPoolLoadMask() {
771 if (FLAG_enable_embedded_constant_pool) {
772 return kLdrPpImmedMask;
773 } else {
774 return kLdrPCImmedMask;
775 }
776 }
777
778
IsPush(Instr instr)779 bool Assembler::IsPush(Instr instr) {
780 return ((instr & ~kRdMask) == kPushRegPattern);
781 }
782
783
IsPop(Instr instr)784 bool Assembler::IsPop(Instr instr) {
785 return ((instr & ~kRdMask) == kPopRegPattern);
786 }
787
788
IsStrRegFpOffset(Instr instr)789 bool Assembler::IsStrRegFpOffset(Instr instr) {
790 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
791 }
792
793
IsLdrRegFpOffset(Instr instr)794 bool Assembler::IsLdrRegFpOffset(Instr instr) {
795 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
796 }
797
798
IsStrRegFpNegOffset(Instr instr)799 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
800 return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
801 }
802
803
IsLdrRegFpNegOffset(Instr instr)804 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
805 return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
806 }
807
808
IsLdrPcImmediateOffset(Instr instr)809 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
810 // Check the instruction is indeed a
811 // ldr<cond> <Rd>, [pc +/- offset_12].
812 return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
813 }
814
815
IsLdrPpImmediateOffset(Instr instr)816 bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
817 // Check the instruction is indeed a
818 // ldr<cond> <Rd>, [pp +/- offset_12].
819 return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
820 }
821
822
IsLdrPpRegOffset(Instr instr)823 bool Assembler::IsLdrPpRegOffset(Instr instr) {
824 // Check the instruction is indeed a
825 // ldr<cond> <Rd>, [pp, +/- <Rm>].
826 return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
827 }
828
829
GetLdrPpRegOffsetPattern()830 Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
831
832
IsVldrDPcImmediateOffset(Instr instr)833 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
834 // Check the instruction is indeed a
835 // vldr<cond> <Dd>, [pc +/- offset_10].
836 return (instr & kVldrDPCMask) == kVldrDPCPattern;
837 }
838
839
IsVldrDPpImmediateOffset(Instr instr)840 bool Assembler::IsVldrDPpImmediateOffset(Instr instr) {
841 // Check the instruction is indeed a
842 // vldr<cond> <Dd>, [pp +/- offset_10].
843 return (instr & kVldrDPpMask) == kVldrDPpPattern;
844 }
845
846
IsBlxReg(Instr instr)847 bool Assembler::IsBlxReg(Instr instr) {
848 // Check the instruction is indeed a
849 // blxcc <Rm>
850 return (instr & kBlxRegMask) == kBlxRegPattern;
851 }
852
853
IsBlxIp(Instr instr)854 bool Assembler::IsBlxIp(Instr instr) {
855 // Check the instruction is indeed a
856 // blx ip
857 return instr == kBlxIp;
858 }
859
860
IsTstImmediate(Instr instr)861 bool Assembler::IsTstImmediate(Instr instr) {
862 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
863 (I | TST | S);
864 }
865
866
IsCmpRegister(Instr instr)867 bool Assembler::IsCmpRegister(Instr instr) {
868 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
869 (CMP | S);
870 }
871
872
IsCmpImmediate(Instr instr)873 bool Assembler::IsCmpImmediate(Instr instr) {
874 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
875 (I | CMP | S);
876 }
877
878
GetCmpImmediateRegister(Instr instr)879 Register Assembler::GetCmpImmediateRegister(Instr instr) {
880 DCHECK(IsCmpImmediate(instr));
881 return GetRn(instr);
882 }
883
884
GetCmpImmediateRawImmediate(Instr instr)885 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
886 DCHECK(IsCmpImmediate(instr));
887 return instr & kOff12Mask;
888 }
889
890
891 // Labels refer to positions in the (to be) generated code.
892 // There are bound, linked, and unused labels.
893 //
894 // Bound labels refer to known positions in the already
895 // generated code. pos() is the position the label refers to.
896 //
897 // Linked labels refer to unknown positions in the code
898 // to be generated; pos() is the position of the last
899 // instruction using the label.
900 //
901 // The linked labels form a link chain by making the branch offset
// in the instruction stream to point to the previous branch
903 // instruction using the same label.
904 //
905 // The link chain is terminated by a branch offset pointing to the
906 // same position.
907
908
// Returns the target position encoded in the instruction at pos. For an
// unbound label the instruction is a raw 24-bit link to the previous chain
// entry; for a branch the 24-bit signed word offset is decoded relative to
// pos plus the pipeline prefetch delta.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    // Emitted link to a label, not part of a branch.
    return instr;
  }
  DCHECK_EQ(5 * B25, instr & 7 * B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and scale by 4 (<<8 then >>6).
  int imm26 = ((instr & kImm24Mask) << 8) >> 6;
  if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
      ((instr & B24) != 0)) {
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;
  }
  return pos + kPcLoadDelta + imm26;
}
924
925
// Patches the instruction(s) at pos so they resolve to target_pos. Handles
// two cases: (1) a label-chain link emitted by mov_label_offset, which is
// replaced with an immediate-load sequence of the label's code offset, and
// (2) a b/bl/blx branch, whose imm24 field is rewritten.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if (is_uint24(instr)) {
    DCHECK(target_pos == pos || target_pos >= 0);
    // Emitted link to a label, not part of a branch.
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // The existing code must be a single 24-bit label chain link, followed by
    // nops encoding the destination register. See mov_label_offset.

    // Extract the destination register from the first nop instructions.
    Register dst =
        Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
    // In addition to the 24-bit label chain link, we expect to find one nop for
    // ARMv7 and above, or two nops for ARMv6. See mov_label_offset.
    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
    if (!CpuFeatures::IsSupported(ARMv7)) {
      DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
    }

    // Here are the instructions we need to emit:
    //   For ARMv7: target24 => target16_1:target16_0
    //      movw dst, #target16_0
    //      movt dst, #target16_1
    //   For ARMv6: target24 => target8_2:target8_1:target8_0
    //      mov dst, #target8_0
    //      orr dst, dst, #target8_1 << 8
    //      orr dst, dst, #target8_2 << 16

    // Offset is relative to the code object pointer, not the code start.
    uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    DCHECK(is_uint24(target24));
    if (is_uint8(target24)) {
      // If the target fits in a byte then only patch with a mov
      // instruction.
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 1,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->mov(dst, Operand(target24));
    } else {
      uint16_t target16_0 = target24 & kImm16Mask;
      uint16_t target16_1 = target24 >> 16;
      if (CpuFeatures::IsSupported(ARMv7)) {
        // Patch with movw/movt.
        if (target16_1 == 0) {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              1, CodePatcher::DONT_FLUSH);
          CpuFeatureScope scope(patcher.masm(), ARMv7);
          patcher.masm()->movw(dst, target16_0);
        } else {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              2, CodePatcher::DONT_FLUSH);
          CpuFeatureScope scope(patcher.masm(), ARMv7);
          patcher.masm()->movw(dst, target16_0);
          patcher.masm()->movt(dst, target16_1);
        }
      } else {
        // Patch with a sequence of mov/orr/orr instructions.
        uint8_t target8_0 = target16_0 & kImm8Mask;
        uint8_t target8_1 = target16_0 >> 8;
        uint8_t target8_2 = target16_1 & kImm8Mask;
        if (target8_2 == 0) {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              2, CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
        } else {
          CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                              3, CodePatcher::DONT_FLUSH);
          patcher.masm()->mov(dst, Operand(target8_0));
          patcher.masm()->orr(dst, dst, Operand(target8_1 << 8));
          patcher.masm()->orr(dst, dst, Operand(target8_2 << 16));
        }
      }
    }
    return;
  }
  // Branch case: rewrite the 24-bit pc-relative word offset.
  int imm26 = target_pos - (pos + kPcLoadDelta);
  DCHECK_EQ(5 * B25, instr & 7 * B25);  // b, bl, or blx imm24
  if (Instruction::ConditionField(instr) == kSpecialCondition) {
    // blx uses bit 24 to encode bit 2 of imm26
    DCHECK_EQ(0, imm26 & 1);
    instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1) * B24;
  } else {
    DCHECK_EQ(0, imm26 & 3);
    instr &= ~kImm24Mask;
  }
  int imm24 = imm26 >> 2;
  DCHECK(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & kImm24Mask));
}
1016
1017
// Debugging helper: prints the state of label L. For a linked label, walks
// the link chain on a local copy and prints each pending entry (a raw link
// value or the mnemonic of the pending b/bl/blx).
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;  // copy so the real label's chain is not consumed
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~kImm24Mask) == 0) {
        PrintF("value\n");
      } else {
        DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx
        Condition cond = Instruction::ConditionField(instr);
        const char* b;
        const char* c;
        if (cond == kSpecialCondition) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
1074
1075
// Binds label L to position pos, patching every instruction on its link
// chain to resolve to pos.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
1090
1091
bind(Label * L)1092 void Assembler::bind(Label* L) {
1093 DCHECK(!L->is_bound()); // label can only be bound once
1094 bind_to(L, pc_offset());
1095 }
1096
1097
// Advances label L one step along its link chain: reads the previous link
// from the instruction at L->pos(), and on reaching the chain terminator
// (an entry linking to itself) marks the label unused.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == L->pos()) {
    // Branch target points to the same instruction. This is the end of the
    // link chain.
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
1110
1111
1112 // Low-level code emission routines depending on the addressing mode.
1113 // If this returns true then you have to use the rotate_imm and immed_8
1114 // that it returns, because it may have already changed the instruction
1115 // to match them!
// Tries to encode imm32 as an ARM "rotated 8-bit" shifter immediate,
// returning the rotation and byte via *rotate_imm / *immed_8. If instr is
// non-NULL and imm32 itself does not fit, the routine may flip the opcode in
// *instr to a complementary instruction (mov<->mvn, cmp<->cmn, add<->sub,
// and<->bic, or mov->movw) whose complementary immediate does fit.
static bool fits_shifter(uint32_t imm32,
                         uint32_t* rotate_imm,
                         uint32_t* immed_8,
                         Instr* instr) {
  // imm32 must be unsigned.
  for (int rot = 0; rot < 16; rot++) {
    // Rotating left by 2*rot undoes the encoding's rotate-right by 2*rot.
    uint32_t imm8 = base::bits::RotateLeft32(imm32, 2 * rot);
    if ((imm8 <= 0xff)) {
      *rotate_imm = rot;
      *immed_8 = imm8;
      return true;
    }
  }
  // If the opcode is one with a complementary version and the complementary
  // immediate fits, change the opcode.
  if (instr != NULL) {
    if ((*instr & kMovMvnMask) == kMovMvnPattern) {
      if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
        *instr ^= kMovMvnFlip;
        return true;
      } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
        if (CpuFeatures::IsSupported(ARMv7)) {
          if (imm32 < 0x10000) {
            // Encode as a movw instead (ARMv7 only, CC-preserving mov only).
            *instr ^= kMovwLeaveCCFlip;
            *instr |= Assembler::EncodeMovwImmediate(imm32);
            *rotate_imm = *immed_8 = 0;  // Not used for movw.
            return true;
          }
        }
      }
    } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
      if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
        *instr ^= kCmpCmnFlip;
        return true;
      }
    } else {
      Instr alu_insn = (*instr & kALUMask);
      if (alu_insn == ADD ||
          alu_insn == SUB) {
        if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
          *instr ^= kAddSubFlip;
          return true;
        }
      } else if (alu_insn == AND ||
                 alu_insn == BIC) {
        if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
          *instr ^= kAndBicFlip;
          return true;
        }
      }
    }
  }
  return false;
}
1170
1171
1172 // We have to use the temporary register for things that can be relocated even
1173 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
1174 // space. There is no guarantee that the relocated location can be similarly
1175 // encoded.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != NULL && assembler->predictable_code_size()) return true;
    // NOTE(review): assembler is dereferenced unconditionally below, so a
    // NULL assembler combined with an external reference would crash.
    // Callers in this file always pass a valid assembler — confirm before
    // relying on NULL being acceptable here.
    return assembler->serializer_enabled();
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}
1185
1186
use_mov_immediate_load(const Operand & x,const Assembler * assembler)1187 static bool use_mov_immediate_load(const Operand& x,
1188 const Assembler* assembler) {
1189 DCHECK(assembler != nullptr);
1190 if (FLAG_enable_embedded_constant_pool &&
1191 !assembler->is_constant_pool_available()) {
1192 return true;
1193 } else if (x.must_output_reloc_info(assembler)) {
1194 // Prefer constant pool if data is likely to be patched.
1195 return false;
1196 } else {
1197 // Otherwise, use immediate load if movw / movt is available.
1198 return CpuFeatures::IsSupported(ARMv7);
1199 }
1200 }
1201
1202
// Returns the number of instructions needed to use this operand with the
// given data-processing instruction, accounting for constant pool loads or
// multi-instruction immediate materialization when the value does not fit
// in a shifter immediate.
int Operand::instructions_required(const Assembler* assembler,
                                   Instr instr) const {
  DCHECK(assembler != nullptr);
  if (rm_.is_valid()) return 1;
  uint32_t dummy1, dummy2;
  if (must_output_reloc_info(assembler) ||
      !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    // The immediate operand cannot be encoded as a shifter operand, or use of
    // constant pool is required. First account for the instructions required
    // for the constant pool or immediate load
    int instructions;
    if (use_mov_immediate_load(*this, assembler)) {
      // A movw / movt or mov / orr immediate load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
    } else if (assembler->ConstantPoolAccessIsInOverflow()) {
      // An overflowed constant pool load.
      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
    } else {
      // A small constant pool load.
      instructions = 1;
    }

    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
      // For a mov or mvn instruction which doesn't set the condition
      // code, the constant pool or immediate load is enough, otherwise we need
      // to account for the actual instruction being requested.
      instructions += 1;
    }
    return instructions;
  } else {
    // No use of constant pool and the immediate operand can be encoded as a
    // shifter operand.
    return 1;
  }
}
1238
1239
// Loads the full 32-bit immediate of x into rd, either via an immediate-load
// sequence (movw/movt on ARMv7, mov/orr/orr/orr otherwise) or via a constant
// pool entry. When rd is pc, the value is staged in ip first.
void Assembler::move_32_bit_immediate(Register rd,
                                      const Operand& x,
                                      Condition cond) {
  uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
  if (x.must_output_reloc_info(this)) {
    RecordRelocInfo(x.rmode_);
  }

  if (use_mov_immediate_load(x, this)) {
    // Loading pc directly with mov/orr is not possible; build in ip and
    // transfer at the end.
    Register target = rd.code() == pc.code() ? ip : rd;
    if (CpuFeatures::IsSupported(ARMv7)) {
      CpuFeatureScope scope(this, ARMv7);
      if (!FLAG_enable_embedded_constant_pool &&
          x.must_output_reloc_info(this)) {
        // Make sure the movw/movt doesn't get separated.
        BlockConstPoolFor(2);
      }
      movw(target, imm32 & 0xffff, cond);
      movt(target, imm32 >> 16, cond);
    } else {
      DCHECK(FLAG_enable_embedded_constant_pool);
      // Build the value one byte at a time so each piece fits in a shifter
      // immediate.
      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
    }
    if (target.code() != rd.code()) {
      mov(rd, target, LeaveCC, cond);
    }
  } else {
    DCHECK(!FLAG_enable_embedded_constant_pool || is_constant_pool_available());
    ConstantPoolEntry::Access access =
        ConstantPoolAddEntry(pc_offset(), x.rmode_, x.imm32_);
    if (access == ConstantPoolEntry::OVERFLOWED) {
      DCHECK(FLAG_enable_embedded_constant_pool);
      Register target = rd.code() == pc.code() ? ip : rd;
      // Emit instructions to load constant pool offset.
      // The zero immediates are placeholders patched when the pool is emitted.
      if (CpuFeatures::IsSupported(ARMv7)) {
        CpuFeatureScope scope(this, ARMv7);
        movw(target, 0, cond);
        movt(target, 0, cond);
      } else {
        mov(target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
        orr(target, target, Operand(0), LeaveCC, cond);
      }
      // Load from constant pool at offset.
      ldr(rd, MemOperand(pp, target), cond);
    } else {
      DCHECK(access == ConstantPoolEntry::REGULAR);
      ldr(rd, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0),
          cond);
    }
  }
}
1296
1297
// Addressing mode 1 (data-processing): completes and emits instr with an
// immediate, immediate-shifted-register, or register-shifted-register
// operand. Immediates that cannot be encoded are materialized via ip (or
// directly into rd for a plain mov).
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (x.must_output_reloc_info(this) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = Instruction::ConditionField(instr);
      if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
        move_32_bit_immediate(rd, x, cond);
      } else {
        mov(ip, x, LeaveCC, cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc)) {
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolFor(1);
  }
}
1339
1340
// Addressing mode 2 (word / byte load-store): completes and emits instr with
// a 12-bit immediate offset or a (possibly shifted) register offset.
// Unencodable immediate offsets are staged through ip.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      // Negative offsets are encoded as a positive offset with U flipped.
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    DCHECK(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1371
1372
// Addressing mode 3 (halfword / signed-byte / doubleword load-store):
// completes and emits instr with a split 8-bit immediate offset or a plain
// register offset. Unencodable immediates and scaled register offsets are
// staged through ip.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
  DCHECK(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      // Negative offsets are encoded as a positive offset with U flipped.
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    DCHECK(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split into a high and a low nibble.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        Instruction::ConditionField(instr));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    DCHECK((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
1410
1411
addrmod4(Instr instr,Register rn,RegList rl)1412 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
1413 DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
1414 DCHECK(rl != 0);
1415 DCHECK(!rn.is(pc));
1416 emit(instr | rn.code()*B16 | rl);
1417 }
1418
1419
// Addressing mode 5 (coprocessor load-store): completes and emits instr with
// an 8-bit word offset. Unindexed addressing is not handled here.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  DCHECK_EQ((B27 | B26),
            (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
  DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  DCHECK((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;               // encoded as a word count, not bytes
  if (offset_8 < 0) {
    // Negative offsets are encoded as a positive offset with U flipped.
    offset_8 = -offset_8;
    am ^= U;
  }
  DCHECK(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  DCHECK(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
1443
1444
// Computes the pc-relative branch offset to label L. For an unbound label,
// links the current position into the label's chain (a first entry links to
// itself as the chain terminator) and returns the offset to the previous
// link instead.
int Assembler::branch_offset(Label* L) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      // Point to previous instruction that uses the link.
      target_pos = L->pos();
    } else {
      // First entry of the link chain points to itself.
      target_pos = pc_offset();
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  if (!is_const_pool_blocked()) BlockConstPoolFor(1);

  return target_pos - (pc_offset() + kPcLoadDelta);
}
1466
1467
1468 // Branch instructions.
// Branch: emits a (conditional) b with a word-aligned byte offset encoded
// into the signed 24-bit immediate field.
void Assembler::b(int branch_offset, Condition cond) {
  DCHECK((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  CHECK(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & kImm24Mask));

  if (cond == al) {
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
  }
}
1480
1481
// Branch with link: like b, but also records the return address (B24 set).
void Assembler::bl(int branch_offset, Condition cond) {
  DCHECK((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  CHECK(is_int24(imm24));
  emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
}
1488
// Branch with link and exchange (immediate form): the offset may be halfword
// aligned; bit 1 of the offset is carried in the h bit (B24).
void Assembler::blx(int branch_offset) {
  DCHECK((branch_offset & 1) == 0);
  int h = ((branch_offset & 2) >> 1)*B24;
  int imm24 = branch_offset >> 2;
  CHECK(is_int24(imm24));
  emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
}
1496
blx(Register target,Condition cond)1497 void Assembler::blx(Register target, Condition cond) {
1498 DCHECK(!target.is(pc));
1499 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
1500 }
1501
bx(Register target,Condition cond)1502 void Assembler::bx(Register target, Condition cond) {
1503 DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged
1504 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
1505 }
1506
1507
b(Label * L,Condition cond)1508 void Assembler::b(Label* L, Condition cond) {
1509 CheckBuffer();
1510 b(branch_offset(L), cond);
1511 }
1512
1513
bl(Label * L,Condition cond)1514 void Assembler::bl(Label* L, Condition cond) {
1515 CheckBuffer();
1516 bl(branch_offset(L), cond);
1517 }
1518
1519
blx(Label * L)1520 void Assembler::blx(Label* L) {
1521 CheckBuffer();
1522 blx(branch_offset(L));
1523 }
1524
1525
1526 // Data-processing instructions.
1527
and_(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1528 void Assembler::and_(Register dst, Register src1, const Operand& src2,
1529 SBit s, Condition cond) {
1530 addrmod1(cond | AND | s, src1, dst, src2);
1531 }
1532
1533
eor(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1534 void Assembler::eor(Register dst, Register src1, const Operand& src2,
1535 SBit s, Condition cond) {
1536 addrmod1(cond | EOR | s, src1, dst, src2);
1537 }
1538
1539
sub(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1540 void Assembler::sub(Register dst, Register src1, const Operand& src2,
1541 SBit s, Condition cond) {
1542 addrmod1(cond | SUB | s, src1, dst, src2);
1543 }
1544
1545
rsb(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1546 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
1547 SBit s, Condition cond) {
1548 addrmod1(cond | RSB | s, src1, dst, src2);
1549 }
1550
1551
add(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1552 void Assembler::add(Register dst, Register src1, const Operand& src2,
1553 SBit s, Condition cond) {
1554 addrmod1(cond | ADD | s, src1, dst, src2);
1555 }
1556
1557
adc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1558 void Assembler::adc(Register dst, Register src1, const Operand& src2,
1559 SBit s, Condition cond) {
1560 addrmod1(cond | ADC | s, src1, dst, src2);
1561 }
1562
1563
sbc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1564 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
1565 SBit s, Condition cond) {
1566 addrmod1(cond | SBC | s, src1, dst, src2);
1567 }
1568
1569
rsc(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1570 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
1571 SBit s, Condition cond) {
1572 addrmod1(cond | RSC | s, src1, dst, src2);
1573 }
1574
1575
tst(Register src1,const Operand & src2,Condition cond)1576 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
1577 addrmod1(cond | TST | S, src1, r0, src2);
1578 }
1579
1580
teq(Register src1,const Operand & src2,Condition cond)1581 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
1582 addrmod1(cond | TEQ | S, src1, r0, src2);
1583 }
1584
1585
cmp(Register src1,const Operand & src2,Condition cond)1586 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
1587 addrmod1(cond | CMP | S, src1, r0, src2);
1588 }
1589
1590
// CMP with a verbatim 12-bit immediate field: no shifter-operand encoding is
// attempted, so the value can be recovered later with
// GetCmpImmediateRawImmediate.
void Assembler::cmp_raw_immediate(
    Register src, int raw_immediate, Condition cond) {
  DCHECK(is_uint12(raw_immediate));
  emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
}
1596
1597
cmn(Register src1,const Operand & src2,Condition cond)1598 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
1599 addrmod1(cond | CMN | S, src1, r0, src2);
1600 }
1601
1602
orr(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1603 void Assembler::orr(Register dst, Register src1, const Operand& src2,
1604 SBit s, Condition cond) {
1605 addrmod1(cond | ORR | s, src1, dst, src2);
1606 }
1607
1608
// MOV: dst = src.
void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
  // Don't allow nop instructions in the form mov rn, rn to be generated using
  // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
  // or MarkCode(int/NopMarkerTypes) pseudo instructions.
  DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
  addrmod1(cond | MOV | s, r0, dst, src);
}
1616
1617
// Loads dst with the offset of label relative to the code object pointer.
// For an unbound label, emits a raw link word plus nop(s) that encode dst;
// target_at_put later patches this sequence into movw/movt (ARMv7) or
// mov/orr/orr (ARMv6).
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag)));
  } else {
    // Emit the link to the label in the code stream followed by extra nop
    // instructions.
    // If the label is not linked, then start a new link chain by linking it to
    // itself, emitting pc_offset().
    int link = label->is_linked() ? label->pos() : pc_offset();
    label->link_to(pc_offset());

    // When the label is bound, these instructions will be patched with a
    // sequence of movw/movt or mov/orr/orr instructions. They will load the
    // destination register with the position of the label from the beginning
    // of the code.
    //
    // The link will be extracted from the first instruction and the destination
    // register from the second.
    //   For ARMv7:
    //      link
    //      mov dst, dst
    //   For ARMv6:
    //      link
    //      mov dst, dst
    //      mov dst, dst
    //
    // When the label gets bound: target_at extracts the link and target_at_put
    // patches the instructions.
    CHECK(is_uint24(link));
    BlockConstPoolScope block_const_pool(this);
    emit(link);
    nop(dst.code());
    if (!CpuFeatures::IsSupported(ARMv7)) {
      nop(dst.code());
    }
  }
}
1655
1656
// MOVW (ARMv7+): loads reg with a 16-bit immediate, clearing the upper half.
void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1661
1662
// MOVT (ARMv7+): loads the top 16 bits of reg, leaving the lower half intact.
void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
}
1667
1668
bic(Register dst,Register src1,const Operand & src2,SBit s,Condition cond)1669 void Assembler::bic(Register dst, Register src1, const Operand& src2,
1670 SBit s, Condition cond) {
1671 addrmod1(cond | BIC | s, src1, dst, src2);
1672 }
1673
1674
mvn(Register dst,const Operand & src,SBit s,Condition cond)1675 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
1676 addrmod1(cond | MVN | s, r0, dst, src);
1677 }
1678
1679
1680 // Multiply instructions.
// MLA (multiply-accumulate): dst = src1 * src2 + srcA.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1687
1688
// MLS (multiply-subtract, ARMv7+): dst = srcA - src1 * src2.
void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                    Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  DCHECK(IsEnabled(ARMv7));
  emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1696
1697
// SDIV (requires SUDIV): dst = src1 / src2, signed integer division.
void Assembler::sdiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
       src2.code()*B8 | B4 | src1.code());
}
1705
1706
// UDIV (requires SUDIV): dst = src1 / src2, unsigned integer division.
void Assembler::udiv(Register dst, Register src1, Register src2,
                     Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(IsEnabled(SUDIV));
  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
       src2.code() * B8 | B4 | src1.code());
}
1714
1715
// MUL: dst = src1 * src2 (low 32 bits).
void Assembler::mul(Register dst, Register src1, Register src2, SBit s,
                    Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code() * B16 | src2.code() * B8 | B7 | B4 | src1.code());
}
1722
1723
// SMMLA: dst = srcA + high 32 bits of the signed product src1 * src2.
void Assembler::smmla(Register dst, Register src1, Register src2, Register srcA,
                      Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 |
       srcA.code() * B12 | src2.code() * B8 | B4 | src1.code());
}
1730
1731
// SMMUL: dst = high 32 bits of the signed product src1 * src2.
void Assembler::smmul(Register dst, Register src1, Register src2,
                      Condition cond) {
  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  emit(cond | B26 | B25 | B24 | B22 | B20 | dst.code() * B16 | 0xf * B12 |
       src2.code() * B8 | B4 | src1.code());
}
1738
1739
// SMLAL (signed multiply-accumulate long):
// dstH:dstL = dstH:dstL + src1 * src2 (64-bit signed).
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1751
1752
// SMULL (signed multiply long): dstH:dstL = src1 * src2 (64-bit signed).
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1764
1765
// UMLAL (unsigned multiply-accumulate long):
// dstH:dstL = dstH:dstL + src1 * src2 (64-bit unsigned).
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1777
1778
// UMULL (unsigned multiply long): dstH:dstL = src1 * src2 (64-bit unsigned).
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1790
1791
1792 // Miscellaneous arithmetic instructions.
clz(Register dst,Register src,Condition cond)1793 void Assembler::clz(Register dst, Register src, Condition cond) {
1794 DCHECK(!dst.is(pc) && !src.is(pc));
1795 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
1796 15*B8 | CLZ | src.code());
1797 }
1798
1799
1800 // Saturating instructions.
1801
1802 // Unsigned saturate.
// Unsigned saturate: dst = src (LSL/ASR-shifted) clamped to [0, 2^satpos - 1]
// (USAT). Only an immediate-shifted register source is encodable.
void Assembler::usat(Register dst,
                     int satpos,
                     const Operand& src,
                     Condition cond) {
  // pc operands are UNPREDICTABLE; register-shifted sources are not valid.
  DCHECK(!dst.is(pc) && !src.rm_.is(pc));
  DCHECK((satpos >= 0) && (satpos <= 31));
  DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
  DCHECK(src.rs_.is(no_reg));

  int sh = 0;  // sh bit (bit 6): 0 = LSL, 1 = ASR.
  if (src.shift_op_ == ASR) {
    sh = 1;
  }

  emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
       src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
}
1820
1821
1822 // Bitfield manipulation instructions.
1823
1824 // Unsigned bit field extract.
1825 // Extracts #width adjacent bits from position #lsb in a register, and
1826 // writes them to the low bits of a destination register.
1827 // ubfx dst, src, #lsb, #width
// Unsigned bit field extract: dst = src[lsb + width - 1 : lsb], zero-extended
// (UBFX, ARMv7+). The width is encoded as (width - 1) in bits 20:16.
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));  // Field must fit in 32 bits.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1840
1841
1842 // Signed bit field extract.
1843 // Extracts #width adjacent bits from position #lsb in a register, and
1844 // writes them to the low bits of a destination register. The extracted
1845 // value is sign extended to fill the destination register.
1846 // sbfx dst, src, #lsb, #width
// Signed bit field extract: dst = src[lsb + width - 1 : lsb], sign-extended
// (SBFX, ARMv7+). Same encoding as UBFX except bit 22 is clear.
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));  // Field must fit in 32 bits.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1859
1860
1861 // Bit field clear.
1862 // Sets #width adjacent bits at position #lsb in the destination register
1863 // to zero, preserving the value of the other bits.
1864 // bfc dst, #lsb, #width
// Bit field clear: zeroes dst[lsb + width - 1 : lsb], preserving other bits
// (BFC, ARMv7+). Encoded as BFI with Rm == 0xf.
void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(!dst.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;  // The encoding carries msb, not the width.
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
}
1873
1874
1875 // Bit field insert.
1876 // Inserts #width adjacent bits from the low bits of the source register
1877 // into position #lsb of the destination register.
1878 // bfi dst, src, #lsb, #width
// Bit field insert: copies src[width - 1 : 0] into dst[lsb + width - 1 : lsb],
// preserving the remaining dst bits (BFI, ARMv7+).
void Assembler::bfi(Register dst,
                    Register src,
                    int lsb,
                    int width,
                    Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  int msb = lsb + width - 1;  // The encoding carries msb, not the width.
  emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
       src.code());
}
1892
1893
// Pack halfword bottom-top (PKHBT): dst = {shifted src2[31:16], src1[15:0]}.
// src2 may only be LSL-shifted by an immediate in [0, 31].
void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond ) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  DCHECK(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}


// Pack halfword top-bottom (PKHTB): dst = {src1[31:16], shifted src2[15:0]}.
// src2 may only be ASR-shifted by an immediate in [1, 32].
void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  DCHECK(src2.shift_op() == ASR);
  // ASR #32 is encoded as an immediate of 0 in the shift field.
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}
1931
1932
// Sign-extend byte (SXTB): dst = signext((src ror rotate)[7:0]).
// rotate must be 0, 8, 16 or 24; it is encoded as rotate/8 in bits 11:10,
// which is what ((rotate >> 1) & 0xC) * B8 computes.
void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.233.
  // cond(31-28) | 01101010(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}


// Sign-extend byte and add (SXTAB): dst = src1 + signext((src2 ror rotate)[7:0]).
void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.233.
  // cond(31-28) | 01101010(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}


// Sign-extend halfword (SXTH): dst = signext((src ror rotate)[15:0]).
void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.235.
  // cond(31-28) | 01101011(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}


// Sign-extend halfword and add (SXTAH):
// dst = src1 + signext((src2 ror rotate)[15:0]).
void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.235.
  // cond(31-28) | 01101011(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}


// Zero-extend byte (UXTB): dst = zeroext((src ror rotate)[7:0]).
void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}


// Zero-extend byte and add (UXTAB):
// dst = src1 + zeroext((src2 ror rotate)[7:0]).
void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}


// Zero-extend two bytes to two halfwords (UXTB16, per A8.8.275):
// dst[15:0] and dst[31:16] are the zero-extended low/high bytes of the
// rotated source.
void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}


// Zero-extend halfword (UXTH): dst = zeroext((src ror rotate)[15:0]).
void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.276.
  // cond(31-28) | 01101111(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
}


// Zero-extend halfword and add (UXTAH):
// dst = src1 + zeroext((src2 ror rotate)[15:0]).
void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.273.
  // cond(31-28) | 01101111(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.is(pc));
  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
  emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
}
2047
2048
// Reverse bits (RBIT, ARMv7+): dst = bit-reversal of src.
void Assembler::rbit(Register dst, Register src, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.144.
  // cond(31-28) | 011011111111(27-16) | Rd(15-12) | 11110011(11-4) | Rm(3-0)
  DCHECK(IsEnabled(ARMv7));
  DCHECK(!dst.is(pc));
  DCHECK(!src.is(pc));
  emit(cond | 0x6FF * B16 | dst.code() * B12 | 0xF3 * B4 | src.code());
}
2057
2058
2059 // Status register access instructions.
// Move from status register (MRS): dst <- CPSR or SPSR, selected by s.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  DCHECK(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
2064
2065
// Move to status register (MSR): writes the selected CPSR/SPSR fields from an
// immediate or a plain register operand. Immediates that cannot be encoded as
// an 8-bit value with rotation are materialized in ip first.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  DCHECK((fields & 0x000f0000) != 0);  // At least one field must be set.
  DCHECK(((fields & 0xfff0ffff) == CPSR) || ((fields & 0xfff0ffff) == SPSR));
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_output_reloc_info(this) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      move_32_bit_immediate(ip, src);
      msr(fields, Operand(ip), cond);  // Retry with the register form.
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
2089
2090
2091 // Load/Store instructions.
// ldr: load a 32-bit word from memory into dst (addressing mode 2).
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | L, dst, src);
}


// str: store the 32-bit word in src to memory (addressing mode 2).
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
}


// ldrb: load a byte, zero-extended, into dst (B = byte access).
void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
  addrmod2(cond | B26 | B | L, dst, src);
}


// strb: store the low byte of src to memory.
void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26 | B, src, dst);
}


// ldrh: load a halfword, zero-extended, into dst (addressing mode 3).
void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | H | B4, dst, src);
}


// strh: store the low halfword of src to memory (addressing mode 3).
void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
  addrmod3(cond | B7 | H | B4, src, dst);
}


// ldrsb: load a byte, sign-extended, into dst (S6 = signed).
void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | B4, dst, src);
}


// ldrsh: load a halfword, sign-extended, into dst.
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
2130
2131
// Load doubleword (LDRD): loads two consecutive words into the register pair
// {dst1, dst2}. dst1 must be even and dst2 must be dst1 + 1; only the
// immediate-offset addressing form is supported here.
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  DCHECK(src.rm().is(no_reg));  // No register-offset form.
  DCHECK(!dst1.is(lr));  // r14.
  DCHECK_EQ(0, dst1.code() % 2);
  DCHECK_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}


// Store doubleword (STRD): stores the register pair {src1, src2} to two
// consecutive words. Same even/odd pairing constraints as ldrd.
void Assembler::strd(Register src1, Register src2,
                     const MemOperand& dst, Condition cond) {
  DCHECK(dst.rm().is(no_reg));  // No register-offset form.
  DCHECK(!src1.is(lr));  // r14.
  DCHECK_EQ(0, src1.code() % 2);
  DCHECK_EQ(src1.code() + 1, src2.code());
  addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
}
2150
2151 // Load/Store exclusive instructions.
// ldrex: load word with exclusive-access marking of [src] (LDREX).
void Assembler::ldrex(Register dst, Register src, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.75.
  // cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
  emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
}

// strex: conditionally store src2 to [dst]; src1 receives 0 on success,
// 1 if the exclusive monitor was lost (STREX).
void Assembler::strex(Register src1, Register src2, Register dst,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.212.
  // cond(31-28) | 00011000(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
  // Rt(3-0)
  emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
       src2.code());
}

// ldrexb: byte variant of ldrex (LDREXB).
void Assembler::ldrexb(Register dst, Register src, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.76.
  // cond(31-28) | 00011101(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
  emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
       0xf9f);
}

// strexb: byte variant of strex (STREXB).
void Assembler::strexb(Register src1, Register src2, Register dst,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.213.
  // cond(31-28) | 00011100(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
  // Rt(3-0)
  emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
       0xf9 * B4 | src2.code());
}

// ldrexh: halfword variant of ldrex (LDREXH).
void Assembler::ldrexh(Register dst, Register src, Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.78.
  // cond(31-28) | 00011111(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
  emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
       dst.code() * B12 | 0xf9f);
}

// strexh: halfword variant of strex (STREXH).
void Assembler::strexh(Register src1, Register src2, Register dst,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.215.
  // cond(31-28) | 00011110(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
  // Rt(3-0)
  emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
       0xf9 * B4 | src2.code());
}
2198
2199 // Preload instructions.
// Preload data hint (PLD): hints that [rn +/- offset] will be accessed soon.
// Only the immediate-offset form with |offset| < 4096 is supported; the sign
// is folded into the U (add/subtract) bit.
void Assembler::pld(const MemOperand& address) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.128.
  // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
  // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
  DCHECK(address.rm().is(no_reg));
  DCHECK(address.am() == Offset);
  int U = B23;  // Assume add-offset; cleared below for a negative offset.
  int offset = address.offset();
  if (offset < 0) {
    offset = -offset;
    U = 0;
  }
  DCHECK(offset < 4096);
  emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
       0xf*B12 | offset);
}
2216
2217
2218 // Load/Store multiple instructions.
// Load multiple (LDM): loads the registers in dst from consecutive memory
// starting at base, using block addressing mode am.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  DCHECK(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}


// Store multiple (STM): stores the registers in src to consecutive memory
// starting at base, using block addressing mode am.
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
2246
2247
2248 // Exception-generating instructions and debugging support.
2249 // Stops with a non-negative code less than kNumOfWatchedStops support
2250 // enabling/disabling and a counter feature. See simulator-arm.h .
// Emits a stop. In simulator builds (host is not ARM) this is an svc carrying
// an encoded stop code that the simulator intercepts; on real ARM hardware it
// degrades to a (possibly conditional) bkpt. msg is currently unused at
// runtime (see the note about non-determinism below).
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  DCHECK(code >= kDefaultStopCode);
  {
    // The Simulator will handle the stop instruction and get the message
    // address. It expects to find the address just after the svc instruction.
    BlockConstPoolScope block_const_pool(this);
    if (code >= 0) {
      svc(kStopCode + code, cond);
    } else {
      // Negative codes are mapped to the maximum (unwatched) stop code.
      svc(kStopCode + kMaxStopCode, cond);
    }
    // Do not embed the message string address! We used to do this, but that
    // made snapshots created from position-independent executable builds
    // non-deterministic.
    // TODO(yangguo): remove this field entirely.
    nop();
  }
#else  // def __arm__
  if (cond != al) {
    // bkpt is unconditional, so branch over it when the condition fails.
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#endif  // def __arm__
}
2280
bkpt(uint32_t imm16)2281 void Assembler::bkpt(uint32_t imm16) {
2282 DCHECK(is_uint16(imm16));
2283 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
2284 }
2285
2286
svc(uint32_t imm24,Condition cond)2287 void Assembler::svc(uint32_t imm24, Condition cond) {
2288 DCHECK(is_uint24(imm24));
2289 emit(cond | 15*B24 | imm24);
2290 }
2291
2292
// Data memory barrier (DMB). On pre-ARMv7 targets the equivalent legacy
// CP15 operation is used instead.
void Assembler::dmb(BarrierOption option) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    // Details available in ARM DDI 0406C.b, A8-378.
    emit(kSpecialCondition | 0x57ff * B12 | 5 * B4 | option);
  } else {
    // Details available in ARM DDI 0406C.b, B3-1750.
    // CP15DMB: CRn=c7, opc1=0, CRm=c10, opc2=5, Rt is ignored.
    mcr(p15, 0, r0, cr7, cr10, 5);
  }
}


// Data synchronization barrier (DSB), with the same CP15 fallback.
void Assembler::dsb(BarrierOption option) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    // Details available in ARM DDI 0406C.b, A8-380.
    emit(kSpecialCondition | 0x57ff * B12 | 4 * B4 | option);
  } else {
    // Details available in ARM DDI 0406C.b, B3-1750.
    // CP15DSB: CRn=c7, opc1=0, CRm=c10, opc2=4, Rt is ignored.
    mcr(p15, 0, r0, cr7, cr10, 4);
  }
}


// Instruction synchronization barrier (ISB), with the same CP15 fallback.
void Assembler::isb(BarrierOption option) {
  if (CpuFeatures::IsSupported(ARMv7)) {
    // Details available in ARM DDI 0406C.b, A8-389.
    emit(kSpecialCondition | 0x57ff * B12 | 6 * B4 | option);
  } else {
    // Details available in ARM DDI 0406C.b, B3-1750.
    // CP15ISB: CRn=c7, opc1=0, CRm=c5, opc2=4, Rt is ignored.
    mcr(p15, 0, r0, cr7, cr5, 4);
  }
}
2327
2328
2329 // Coprocessor instructions.
// Coprocessor data processing (CDP).
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}

// CDP2: the unconditional (0b1111-prefixed) variant of cdp.
void Assembler::cdp2(Coprocessor coproc, int opcode_1, CRegister crd,
                     CRegister crn, CRegister crm, int opcode_2) {
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}


// Move to coprocessor from ARM register (MCR).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}

// MCR2: the unconditional variant of mcr.
void Assembler::mcr2(Coprocessor coproc, int opcode_1, Register rd,
                     CRegister crn, CRegister crm, int opcode_2) {
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}


// Move to ARM register from coprocessor (MRC). Differs from mcr by the
// L (load) bit.
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}

// MRC2: the unconditional variant of mrc.
void Assembler::mrc2(Coprocessor coproc, int opcode_1, Register rd,
                     CRegister crn, CRegister crm, int opcode_2) {
  mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
2382
2383
// Load coprocessor register from memory (LDC), addressing-mode-5 form.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    const MemOperand& src,
                    LFlag l,
                    Condition cond) {
  addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
}


// LDC, unindexed-addressing form: base register rn with an 8-bit
// coprocessor-defined option field.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  DCHECK(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}

// LDC2: the unconditional variant of ldc (MemOperand form).
void Assembler::ldc2(Coprocessor coproc, CRegister crd, const MemOperand& src,
                     LFlag l) {
  ldc(coproc, crd, src, l, kSpecialCondition);
}

// LDC2: the unconditional variant of ldc (unindexed form).
void Assembler::ldc2(Coprocessor coproc, CRegister crd, Register rn, int option,
                     LFlag l) {
  ldc(coproc, crd, rn, option, l, kSpecialCondition);
}
2414
2415
2416 // Support for VFP.
2417
// VFP load of a double register: dst = MEM(base + offset) (VLDR).
// The encoding holds an unsigned word-multiple offset (imm8 * 4) plus a sign
// bit; other offsets fall back to computing the address in ip.
void Assembler::vldr(const DwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Ddst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406C.b, A8-924.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1011(11-8) | offset
  DCHECK(VfpRegisterIsAvailable(dst));
  int u = 1;  // U bit: 1 = add offset, 0 = subtract.
  if (offset < 0) {
    CHECK(offset != kMinInt);  // -kMinInt overflows.
    offset = -offset;
    u = 0;
  }
  int vd, d;
  dst.split_code(&vd, &d);  // D:Vd encoding of the register number.

  DCHECK(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
         0xB*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
  }
}
2452
2453
// MemOperand overload of vldr (double): resolves a register-offset operand by
// first computing the effective address into ip, since VLDR itself only
// supports immediate offsets.
void Assembler::vldr(const DwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(operand.am_ == Offset);  // No pre/post-index for VFP loads here.
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vldr(dst, ip, 0, cond);
  } else {
    vldr(dst, operand.rn(), operand.offset(), cond);
  }
}
2467
2468
// VFP load of a single register: dst = MEM(base + offset) (VLDR, S form).
// Same imm8 * 4 offset encoding and ip fallback as the double variant.
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  int u = 1;  // U bit: 1 = add offset, 0 = subtract.
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);  // Vd:D encoding of the register number.
  DCHECK(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
2501
2502
// MemOperand overload of vldr (single): computes register-offset addresses
// into ip first, as VLDR only supports immediate offsets.
void Assembler::vldr(const SwVfpRegister dst,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);  // No pre/post-index for VFP loads here.
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vldr(dst, ip, 0, cond);
  } else {
    vldr(dst, operand.rn(), operand.offset(), cond);
  }
}
2515
2516
// VFP store of a double register: MEM(base + offset) = src (VSTR).
// Offset encoding and ip fallback mirror vldr.
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406C.b, A8-1082.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1011(11-8) | (offset/4)
  DCHECK(VfpRegisterIsAvailable(src));
  int u = 1;  // U bit: 1 = add offset, 0 = subtract.
  if (offset < 0) {
    CHECK(offset != kMinInt);  // -kMinInt overflows.
    offset = -offset;
    u = 0;
  }
  DCHECK(offset >= 0);
  int vd, d;
  src.split_code(&vd, &d);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
         ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
  }
}
2551
2552
// MemOperand overload of vstr (double): computes register-offset addresses
// into ip first, as VSTR only supports immediate offsets.
void Assembler::vstr(const DwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(VfpRegisterIsAvailable(src));
  DCHECK(operand.am_ == Offset);  // No pre/post-index for VFP stores here.
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vstr(src, ip, 0, cond);
  } else {
    vstr(src, operand.rn(), operand.offset(), cond);
  }
}
2566
2567
// VFP store of a single register: MEM(base + offset) = src (VSTR, S form).
void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | (offset/4)
  int u = 1;  // U bit: 1 = add offset, 0 = subtract.
  if (offset < 0) {
    CHECK(offset != kMinInt);  // -kMinInt overflows.
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  DCHECK(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
2600
2601
// MemOperand overload of vstr (single): computes register-offset addresses
// into ip first, as VSTR only supports immediate offsets.
void Assembler::vstr(const SwVfpRegister src,
                     const MemOperand& operand,
                     const Condition cond) {
  DCHECK(operand.am_ == Offset);  // No pre/post-index for VFP stores here.
  if (operand.rm().is_valid()) {
    add(ip, operand.rn(),
        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
    vstr(src, ip, 0, cond);
  } else {
    vstr(src, operand.rn(), operand.offset(), cond);
  }
}
2614
void Assembler::vldm(BlockAddrMode am, Register base, DwVfpRegister first,
                     DwVfpRegister last, Condition cond) {
  // Load the consecutive double registers [first, last] from memory at
  // |base| using block addressing mode |am|.
  // Instruction details available in ARM DDI 0406C.b, A8-922.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  DCHECK_LE(first.code(), last.code());
  DCHECK(VfpRegisterIsAvailable(last));
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  // Each double register transfers two words, hence count * 2 below.
  int count = last.code() - first.code() + 1;
  DCHECK(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
2632
void Assembler::vstm(BlockAddrMode am, Register base, DwVfpRegister first,
                     DwVfpRegister last, Condition cond) {
  // Store the consecutive double registers [first, last] to memory at
  // |base| using block addressing mode |am|.
  // Instruction details available in ARM DDI 0406C.b, A8-1080.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  DCHECK_LE(first.code(), last.code());
  DCHECK(VfpRegisterIsAvailable(last));
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  // Each double register transfers two words, hence count * 2 below.
  int count = last.code() - first.code() + 1;
  DCHECK(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
2650
void Assembler::vldm(BlockAddrMode am, Register base, SwVfpRegister first,
                     SwVfpRegister last, Condition cond) {
  // Load the consecutive single registers [first, last] from memory at
  // |base| using block addressing mode |am|.
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | (count)
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  // For single registers imm8 is simply the register count.
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
2666
void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
                     SwVfpRegister last, Condition cond) {
  // Store the consecutive single registers [first, last] to memory at
  // |base| using block addressing mode |am|.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | (count)
  DCHECK_LE(first.code(), last.code());
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  // For single registers imm8 is simply the register count.
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
2682
2683
// Splits the IEEE-754 bit pattern of |d| into its low and high 32-bit
// halves. memcpy is the portable, well-defined way to type-pun.
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  *lo = static_cast<uint32_t>(bits);
  *hi = static_cast<uint32_t>(bits >> 32);
}
2691
2692
2693 // Only works for little endian floating point formats.
2694 // We don't support VFP on the mixed endian floating point platform.
// Returns true if |d| can be encoded as a VMOV (floating-point immediate),
// and stores the instruction's imm4H:imm4L fields in |encoding|.
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
  // VMOV accepts an immediate of the form:
  //
  //   +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
  //
  // encoded as an 8-bit quantity [abcdefgh] (a is the MSB) that expands
  // to the 64-bit double:
  //
  //   [aBbbbbbb,bbcdefgh,00000000,00000000,
  //    00000000,00000000,00000000,00000000]   where B = ~b.
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  const uint32_t lo = static_cast<uint32_t>(bits);
  const uint32_t hi = static_cast<uint32_t>(bits >> 32);

  // The most obvious constraint: bits 47:0 must all be zero.
  if (lo != 0 || (hi & 0xffff) != 0) {
    return false;
  }

  // Bits 61:54 must be all clear or all set.
  const uint32_t mid_bits = hi & 0x3fc00000;
  if (mid_bits != 0 && mid_bits != 0x3fc00000) {
    return false;
  }

  // Bit 62 must be NOT bit 61.
  if (((hi ^ (hi << 1)) & 0x40000000) == 0) {
    return false;
  }

  // Assemble the encoded immediate in the form:
  //   [00000000,0000abcd,00000000,0000efgh]
  *encoding = ((hi >> 16) & 0xf)        // efgh (low nybble).
              | ((hi >> 4) & 0x70000)   // bcd (low bits of high nybble).
              | ((hi >> 12) & 0x80000); // a (top bit of high nybble).

  return true;
}
2740
2741
void Assembler::vmov(const SwVfpRegister dst, float imm) {
  uint32_t enc;
  // Note: |imm| is widened to double for the immediate-fit check.
  if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
    CpuFeatureScope scope(this, VFPv3);
    // The float can be encoded in the instruction.
    //
    // Sd = immediate
    // Instruction details available in ARM DDI 0406C.b, A8-936.
    // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
    // Vd(15-12) | 101(11-9) | sz=0(8) | imm4L(3-0)
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | enc);
  } else {
    // Fall back: move the raw bit pattern through a core register.
    mov(ip, Operand(bit_cast<int32_t>(imm)));
    vmov(dst, ip);
  }
}
2760
2761
// Loads the double constant |imm| into |dst|, choosing the cheapest
// strategy available: an encoded VMOV immediate, a constant-pool load,
// or synthesis from one or two 32-bit immediates (optionally using
// |scratch| to avoid a second transfer).
void Assembler::vmov(const DwVfpRegister dst,
                     double imm,
                     const Register scratch) {
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(!scratch.is(ip));
  uint32_t enc;
  // If the embedded constant pool is disabled, we can use the normal, inline
  // constant pool. If the embedded constant pool is enabled (via
  // FLAG_enable_embedded_constant_pool), we can only use it where the pool
  // pointer (pp) is valid.
  bool can_use_pool =
      !FLAG_enable_embedded_constant_pool || is_constant_pool_available();
  if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
    CpuFeatureScope scope(this, VFPv3);
    // The double can be encoded in the instruction.
    //
    // Dd = immediate
    // Instruction details available in ARM DDI 0406C.b, A8-936.
    // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
    // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
    int vd, d;
    dst.split_code(&vd, &d);
    emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
  } else if (CpuFeatures::IsSupported(ARMv7) && FLAG_enable_vldr_imm &&
             can_use_pool) {
    CpuFeatureScope scope(this, ARMv7);
    // TODO(jfb) Temporarily turned off until we have constant blinding or
    //           some equivalent mitigation: an attacker can otherwise control
    //           generated data which also happens to be executable, a Very Bad
    //           Thing indeed.
    //           Blinding gets tricky because we don't have xor, we probably
    //           need to add/subtract without losing precision, which requires a
    //           cookie value that Lithium is probably better positioned to
    //           choose.
    //           We could also add a few peepholes here like detecting 0.0 and
    //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
    //           to zero (we set flush-to-zero), and normalizing NaN values.
    //           We could also detect redundant values.
    //           The code could also randomize the order of values, though
    //           that's tricky because vldr has a limited reach. Furthermore
    //           it breaks load locality.
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(pc_offset(), imm);
    if (access == ConstantPoolEntry::OVERFLOWED) {
      DCHECK(FLAG_enable_embedded_constant_pool);
      // Emit instructions to load constant pool offset.
      // movw/movt are patched later with the real offset.
      movw(ip, 0);
      movt(ip, 0);
      // Load from constant pool at offset.
      vldr(dst, MemOperand(pp, ip));
    } else {
      DCHECK(access == ConstantPoolEntry::REGULAR);
      vldr(dst, MemOperand(FLAG_enable_embedded_constant_pool ? pp : pc, 0));
    }
  } else {
    // Synthesise the double from ARM immediates.
    uint32_t lo, hi;
    DoubleAsTwoUInt32(imm, &lo, &hi);

    if (lo == hi) {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(ip, Operand(lo));
      vmov(dst, ip, ip);
    } else if (scratch.is(no_reg)) {
      // No scratch register: transfer the halves one at a time through ip.
      mov(ip, Operand(lo));
      vmov(dst, VmovIndexLo, ip);
      // If the low halfwords match, movt only rewrites the top 16 bits.
      if (((lo & 0xffff) == (hi & 0xffff)) &&
          CpuFeatures::IsSupported(ARMv7)) {
        CpuFeatureScope scope(this, ARMv7);
        movt(ip, hi >> 16);
      } else {
        mov(ip, Operand(hi));
      }
      vmov(dst, VmovIndexHi, ip);
    } else {
      // Move the low and high parts of the double to a D register in one
      // instruction.
      mov(ip, Operand(lo));
      mov(scratch, Operand(hi));
      vmov(dst, ip, scratch);
    }
  }
}
2845
2846
void Assembler::vmov(const SwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Sd = Sm
  // Single-precision register-to-register copy.
  // Instruction details available in ARM DDI 0406B, A8-642.
  int sd, d, sm, m;
  dst.split_code(&sd, &d);
  src.split_code(&sm, &m);
  emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
}
2857
2858
void Assembler::vmov(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = Dm
  // Double-precision register-to-register copy.
  // Instruction details available in ARM DDI 0406C.b, A8-938.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
       vm);
}
2875
2876
void Assembler::vmov(const DwVfpRegister dst,
                     const VmovIndex index,
                     const Register src,
                     const Condition cond) {
  // Dd[index] = Rt
  // Moves a core register into one 32-bit half of a double register.
  // Instruction details available in ARM DDI 0406C.b, A8-940.
  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(index.index == 0 || index.index == 1);
  int vd, d;
  dst.split_code(&vd, &d);
  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
       d*B7 | B4);
}
2892
2893
void Assembler::vmov(const Register dst,
                     const VmovIndex index,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Rt = Dn[index]
  // Moves one 32-bit half of a double register into a core register.
  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  DCHECK(VfpRegisterIsAvailable(src));
  DCHECK(index.index == 0 || index.index == 1);
  int vn, n;
  src.split_code(&vn, &n);
  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
       0xB*B8 | n*B7 | B4);
}
2909
2910
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Moves two core registers into a double register in one instruction.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(!src1.is(pc) && !src2.is(pc));
  int vm, m;
  dst.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
2926
2927
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Moves a double register into two core registers in one instruction.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  DCHECK(VfpRegisterIsAvailable(src));
  DCHECK(!dst1.is(pc) && !dst2.is(pc));
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
2943
2944
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Moves a core register into a single-precision register.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  DCHECK(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2957
2958
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Moves a single-precision register into a core register.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  DCHECK(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2971
2972
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
// S32/U32: 32-bit signed/unsigned integer; F32/F64: single/double float.
enum VFPType { S32, U32, F32, F64 };
2976
2977
IsSignedVFPType(VFPType type)2978 static bool IsSignedVFPType(VFPType type) {
2979 switch (type) {
2980 case S32:
2981 return true;
2982 case U32:
2983 return false;
2984 default:
2985 UNREACHABLE();
2986 return false;
2987 }
2988 }
2989
2990
IsIntegerVFPType(VFPType type)2991 static bool IsIntegerVFPType(VFPType type) {
2992 switch (type) {
2993 case S32:
2994 case U32:
2995 return true;
2996 case F32:
2997 case F64:
2998 return false;
2999 default:
3000 UNREACHABLE();
3001 return false;
3002 }
3003 }
3004
3005
IsDoubleVFPType(VFPType type)3006 static bool IsDoubleVFPType(VFPType type) {
3007 switch (type) {
3008 case F32:
3009 return false;
3010 case F64:
3011 return true;
3012 default:
3013 UNREACHABLE();
3014 return false;
3015 }
3016 }
3017
3018
3019 // Split five bit reg_code based on size of reg_type.
3020 // 32-bit register codes are Vm:M
3021 // 64-bit register codes are M:Vm
3022 // where Vm is four bits, and M is a single bit.
SplitRegCode(VFPType reg_type,int reg_code,int * vm,int * m)3023 static void SplitRegCode(VFPType reg_type,
3024 int reg_code,
3025 int* vm,
3026 int* m) {
3027 DCHECK((reg_code >= 0) && (reg_code <= 31));
3028 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
3029 // 32 bit type.
3030 *m = reg_code & 0x1;
3031 *vm = reg_code >> 1;
3032 } else {
3033 // 64 bit type.
3034 *m = (reg_code & 0x10) >> 4;
3035 *vm = reg_code & 0x0F;
3036 }
3037 }
3038
3039
3040 // Encode vcvt.src_type.dst_type instruction.
// Encode vcvt.src_type.dst_type instruction.
// Returns the full instruction word; the caller emits it.
static Instr EncodeVCVT(const VFPType dst_type,
                        const int dst_code,
                        const VFPType src_type,
                        const int src_code,
                        VFPConversionMode mode,
                        const Condition cond) {
  DCHECK(src_type != dst_type);
  int D, Vd, M, Vm;
  SplitRegCode(src_type, src_code, &Vm, &M);
  SplitRegCode(dst_type, dst_code, &Vd, &D);

  if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
    // Conversion between IEEE floating point and 32-bit integer.
    // Instruction details available in ARM DDI 0406B, A8.6.295.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    // Exactly one side may be integral (int<->int is not a vcvt).
    DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));

    int sz, opc2, op;

    if (IsIntegerVFPType(dst_type)) {
      // Float -> int: opc2 selects signedness, op is the rounding mode.
      opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
      sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
      op = mode;
    } else {
      // Int -> float: op selects the signedness of the source.
      DCHECK(IsIntegerVFPType(src_type));
      opc2 = 0x0;
      sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
      op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
    }

    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
  } else {
    // Conversion between IEEE double and single precision.
    // Instruction details available in ARM DDI 0406B, A8.6.298.
    // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
    // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
    // sz describes the source size; the destination is the other width.
    int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
    return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
            Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
  }
}
3084
3085
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Dd = f64(s32 held in Sm); see EncodeVCVT for the bit layout.
  DCHECK(VfpRegisterIsAvailable(dst));
  emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
3093
3094
void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Sd = f32(s32 held in Sm); see EncodeVCVT for the bit layout.
  emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
3101
3102
void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Dd = f64(u32 held in Sm); see EncodeVCVT for the bit layout.
  DCHECK(VfpRegisterIsAvailable(dst));
  emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
3110
3111
void Assembler::vcvt_f32_u32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  // Sd = f32(u32 held in Sm); see EncodeVCVT for the bit layout.
  emit(EncodeVCVT(F32, dst.code(), U32, src.code(), mode, cond));
}
3116
3117
void Assembler::vcvt_s32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  // Sd = s32(f32 in Sm); |mode| selects the rounding behavior.
  emit(EncodeVCVT(S32, dst.code(), F32, src.code(), mode, cond));
}
3122
3123
void Assembler::vcvt_u32_f32(const SwVfpRegister dst, const SwVfpRegister src,
                             VFPConversionMode mode, const Condition cond) {
  // Sd = u32(f32 in Sm); |mode| selects the rounding behavior.
  emit(EncodeVCVT(U32, dst.code(), F32, src.code(), mode, cond));
}
3128
3129
void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Sd = s32(f64 in Dm); |mode| selects the rounding behavior.
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
3137
3138
void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Sd = u32(f64 in Dm); |mode| selects the rounding behavior.
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
3146
3147
void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
                             const SwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Dd = f64(f32 in Sm): widen single to double precision.
  DCHECK(VfpRegisterIsAvailable(dst));
  emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
3155
3156
void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
                             const DwVfpRegister src,
                             VFPConversionMode mode,
                             const Condition cond) {
  // Sd = f32(f64 in Dm): narrow double to single precision.
  DCHECK(VfpRegisterIsAvailable(src));
  emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
3164
3165
void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
                             int fraction_bits,
                             const Condition cond) {
  // Converts a fixed-point s32 value to f64 in place: the encoding has no
  // Vm field, so |dst| is both source and destination.
  // Instruction details available in ARM DDI 0406C.b, A8-874.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
  // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
  DCHECK(IsEnabled(VFPv3));
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(fraction_bits > 0 && fraction_bits <= 32);
  int vd, d;
  dst.split_code(&vd, &d);
  // The field encodes 32 - fraction_bits, split into imm4:i.
  int imm5 = 32 - fraction_bits;
  int i = imm5 & 1;
  int imm4 = (imm5 >> 1) & 0xf;
  emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
       vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
}
3183
3184
void Assembler::vneg(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = -Dm (double precision negation).
  // Instruction details available in ARM DDI 0406C.b, A8-968.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);

  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}
3201
3202
void Assembler::vneg(const SwVfpRegister dst, const SwVfpRegister src,
                     const Condition cond) {
  // Sd = -Sm (single precision negation).
  // Instruction details available in ARM DDI 0406C.b, A8-968.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
  // 101(11-9) | sz=0(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);

  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
       B6 | m * B5 | vm);
}
3216
3217
void Assembler::vabs(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Dd = |Dm| (double precision absolute value).
  // Instruction details available in ARM DDI 0406C.b, A8-524.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
       m*B5 | vm);
}
3233
3234
void Assembler::vabs(const SwVfpRegister dst, const SwVfpRegister src,
                     const Condition cond) {
  // Sd = |Sm| (single precision absolute value).
  // Instruction details available in ARM DDI 0406C.b, A8-524.
  // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
  // 101(11-9) | sz=0(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | vd * B12 | 0x5 * B9 | B7 | B6 |
       m * B5 | vm);
}
3247
3248
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-830.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}
3270
3271
void Assembler::vadd(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = vadd(Sn, Sm) single precision floating point addition.
  // Sd = D:Vd; Sm = M:Vm; Sn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-830.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | m * B5 | vm);
}
3288
3289
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-1086.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | B6 | m*B5 | vm);
}
3311
3312
void Assembler::vsub(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = vsub(Sn, Sm) single precision floating point subtraction.
  // Sd = D:Vd; Sm = M:Vm; Sn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-1086.
  // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x3 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | B6 | m * B5 | vm);
}
3329
3330
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm = M:Vm; Dn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-960.
  // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
       n*B7 | m*B5 | vm);
}
3352
3353
void Assembler::vmul(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = vmul(Sn, Sm) single precision floating point multiplication.
  // Sd = D:Vd; Sm = M:Vm; Sn = N:Vn.
  // Instruction details available in ARM DDI 0406C.b, A8-960.
  // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | 0x2 * B20 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | m * B5 | vm);
}
3370
3371
void Assembler::vmla(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = Dd + (Dn * Dm): double precision multiply-accumulate.
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}
3391
3392
void Assembler::vmla(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = Sd + (Sn * Sm): single precision multiply-accumulate.
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
       m * B5 | vm);
}
3407
3408
void Assembler::vmls(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = Dd - (Dn * Dm): double precision multiply-subtract.
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
       m*B5 | vm);
}
3428
3429
// Floating-point multiply subtract (VMLS), single-precision, conditional.
void Assembler::vmls(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-932.
  // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1C * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
       B6 | m * B5 | vm);
}
3444
3445
// VFP divide (VDIV), double-precision, conditional.
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406C.b, A8-882.
  // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
       vm);
}
3467
3468
// VFP divide (VDIV), single-precision, conditional.
void Assembler::vdiv(const SwVfpRegister dst, const SwVfpRegister src1,
                     const SwVfpRegister src2, const Condition cond) {
  // Sd = vdiv(Sn, Sm) single precision floating point division.
  // Sd = D:Vd; Sm=M:Vm; Sn=N:Vm.
  // Instruction details available in ARM DDI 0406C.b, A8-882.
  // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 | 0x5 * B9 | n * B7 |
       m * B5 | vm);
}
3485
3486
// VFP compare (VCMP) of two double-precision registers; the result is
// reported through the VFP status flags (read back via vmrs).
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(VfpRegisterIsAvailable(src2));
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
       m*B5 | vm);
}
3503
3504
// VFP compare (VCMP) of two single-precision registers.
void Assembler::vcmp(const SwVfpRegister src1, const SwVfpRegister src2,
                     const Condition cond) {
  // vcmp(Sd, Sm) single precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  src1.split_code(&vd, &d);
  int vm, m;
  src2.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x4 * B16 | vd * B12 |
       0x5 * B9 | B6 | m * B5 | vm);
}
3518
3519
// VFP compare-with-zero (VCMP Dd, #0.0), double-precision. The immediate
// form only supports exactly 0.0, which the DCHECK enforces.
void Assembler::vcmp(const DwVfpRegister src1,
                     const double src2,
                     const Condition cond) {
  // vcmp(Dd, #0.0) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
  DCHECK(VfpRegisterIsAvailable(src1));
  DCHECK(src2 == 0.0);
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
}
3533
3534
// VFP compare-with-zero (VCMP Sd, #0.0), single-precision.
void Assembler::vcmp(const SwVfpRegister src1, const float src2,
                     const Condition cond) {
  // vcmp(Sd, #0.0) single precision floating point comparison.
  // Instruction details available in ARM DDI 0406C.b, A8-864.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
  DCHECK(src2 == 0.0);  // Only an exact zero immediate is encodable.
  int vd, d;
  src1.split_code(&vd, &d);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x5 * B16 | vd * B12 |
       0x5 * B9 | B6);
}
3547
// VMAXNM (IEEE 754-2008 maxNum), double-precision. ARMv8-only and
// unconditional (kSpecialCondition).
void Assembler::vmaxnm(const DwVfpRegister dst, const DwVfpRegister src1,
                       const DwVfpRegister src2) {
  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);

  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | m * B5 | vm);
}
3563
// VMAXNM, single-precision. ARMv8-only and unconditional.
void Assembler::vmaxnm(const SwVfpRegister dst, const SwVfpRegister src1,
                       const SwVfpRegister src2) {
  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);

  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | m * B5 | vm);
}
3579
// VMINNM (IEEE 754-2008 minNum), double-precision. ARMv8-only; differs
// from vmaxnm only by bit 6.
void Assembler::vminnm(const DwVfpRegister dst, const DwVfpRegister src1,
                       const DwVfpRegister src2) {
  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);

  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | B8 | n * B7 | B6 | m * B5 | vm);
}
3595
// VMINNM, single-precision. ARMv8-only and unconditional.
void Assembler::vminnm(const SwVfpRegister dst, const SwVfpRegister src1,
                       const SwVfpRegister src2) {
  // kSpecialCondition(31-28) | 11101(27-23) | D(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vn, n;
  src1.split_code(&vn, &n);
  int vm, m;
  src2.split_code(&vm, &m);

  emit(kSpecialCondition | 0x1D * B23 | d * B22 | vn * B16 | vd * B12 |
       0x5 * B9 | n * B7 | B6 | m * B5 | vm);
}
3611
vsel(Condition cond,const DwVfpRegister dst,const DwVfpRegister src1,const DwVfpRegister src2)3612 void Assembler::vsel(Condition cond, const DwVfpRegister dst,
3613 const DwVfpRegister src1, const DwVfpRegister src2) {
3614 // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
3615 // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=1(8) | N(7) |
3616 // 0(6) | M(5) | 0(4) | Vm(3-0)
3617 DCHECK(IsEnabled(ARMv8));
3618 int vd, d;
3619 dst.split_code(&vd, &d);
3620 int vn, n;
3621 src1.split_code(&vn, &n);
3622 int vm, m;
3623 src2.split_code(&vm, &m);
3624 int sz = 1;
3625
3626 // VSEL has a special (restricted) condition encoding.
3627 // eq(0b0000)... -> 0b00
3628 // ge(0b1010)... -> 0b10
3629 // gt(0b1100)... -> 0b11
3630 // vs(0b0110)... -> 0b01
3631 // No other conditions are supported.
3632 int vsel_cond = (cond >> 30) & 0x3;
3633 if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
3634 // We can implement some other conditions by swapping the inputs.
3635 DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
3636 std::swap(vn, vm);
3637 std::swap(n, m);
3638 }
3639
3640 emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
3641 vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
3642 }
3643
vsel(Condition cond,const SwVfpRegister dst,const SwVfpRegister src1,const SwVfpRegister src2)3644 void Assembler::vsel(Condition cond, const SwVfpRegister dst,
3645 const SwVfpRegister src1, const SwVfpRegister src2) {
3646 // cond=kSpecialCondition(31-28) | 11100(27-23) | D(22) |
3647 // vsel_cond=XX(21-20) | Vn(19-16) | Vd(15-12) | 101(11-9) | sz=0(8) | N(7) |
3648 // 0(6) | M(5) | 0(4) | Vm(3-0)
3649 DCHECK(IsEnabled(ARMv8));
3650 int vd, d;
3651 dst.split_code(&vd, &d);
3652 int vn, n;
3653 src1.split_code(&vn, &n);
3654 int vm, m;
3655 src2.split_code(&vm, &m);
3656 int sz = 0;
3657
3658 // VSEL has a special (restricted) condition encoding.
3659 // eq(0b0000)... -> 0b00
3660 // ge(0b1010)... -> 0b10
3661 // gt(0b1100)... -> 0b11
3662 // vs(0b0110)... -> 0b01
3663 // No other conditions are supported.
3664 int vsel_cond = (cond >> 30) & 0x3;
3665 if ((cond != eq) && (cond != ge) && (cond != gt) && (cond != vs)) {
3666 // We can implement some other conditions by swapping the inputs.
3667 DCHECK((cond == ne) | (cond == lt) | (cond == le) | (cond == vc));
3668 std::swap(vn, vm);
3669 std::swap(n, m);
3670 }
3671
3672 emit(kSpecialCondition | 0x1C * B23 | d * B22 | vsel_cond * B20 | vn * B16 |
3673 vd * B12 | 0x5 * B9 | sz * B8 | n * B7 | m * B5 | vm);
3674 }
3675
// VFP square root (VSQRT), double-precision, conditional.
void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(VfpRegisterIsAvailable(dst));
  DCHECK(VfpRegisterIsAvailable(src));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}
3691
3692
// VFP square root (VSQRT), single-precision, conditional.
void Assembler::vsqrt(const SwVfpRegister dst, const SwVfpRegister src,
                      const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | B16 | vd * B12 | 0x5 * B9 |
       0x3 * B6 | m * B5 | vm);
}
3705
3706
// VMSR: moves the core register |dst| into the VFP system register
// (the 0001 field at bits 19-16 selects FPSCR per the ARM ARM).
void Assembler::vmsr(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE * B24 | 0xE * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
3713
3714
// VMRS: reads the VFP system register into the core register |dst|.
// Differs from vmsr only in the direction bit (0xF vs 0xE at bits 23-20).
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  emit(cond | 0xE * B24 | 0xF * B20 | B16 | dst.code() * B12 | 0xA * B8 | B4);
}
3721
3722
// VRINTA (round to integral, ties-to-away rounding mode RM=00),
// single-precision. ARMv8-only and unconditional.
void Assembler::vrinta(const SwVfpRegister dst, const SwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
       0x5 * B9 | B6 | m * B5 | vm);
}
3735
3736
// VRINTA (round to integral, RM=00), double-precision. ARMv8-only.
void Assembler::vrinta(const DwVfpRegister dst, const DwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=00(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | vd * B12 |
       0x5 * B9 | B8 | B6 | m * B5 | vm);
}
3749
3750
// VRINTN (round to integral, ties-to-even rounding mode RM=01),
// single-precision. ARMv8-only.
void Assembler::vrintn(const SwVfpRegister dst, const SwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
       vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
3763
3764
// VRINTN (round to integral, RM=01), double-precision. ARMv8-only.
void Assembler::vrintn(const DwVfpRegister dst, const DwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=01(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x1 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
3777
3778
// VRINTP (round to integral, toward +infinity, RM=10), single-precision.
// ARMv8-only.
void Assembler::vrintp(const SwVfpRegister dst, const SwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
       vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
3791
3792
// VRINTP (round toward +infinity, RM=10), double-precision. ARMv8-only.
void Assembler::vrintp(const DwVfpRegister dst, const DwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=10(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x2 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
3805
3806
// VRINTM (round to integral, toward -infinity, RM=11), single-precision.
// ARMv8-only.
void Assembler::vrintm(const SwVfpRegister dst, const SwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=0(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
       vd * B12 | 0x5 * B9 | B6 | m * B5 | vm);
}
3819
3820
// VRINTM (round toward -infinity, RM=11), double-precision. ARMv8-only.
void Assembler::vrintm(const DwVfpRegister dst, const DwVfpRegister src) {
  // cond=kSpecialCondition(31-28) | 11101(27-23)| D(22) | 11(21-20) |
  // 10(19-18) | RM=11(17-16) | Vd(15-12) | 101(11-9) | sz=1(8) | 01(7-6) |
  // M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(kSpecialCondition | 0x1D * B23 | d * B22 | 0x3 * B20 | B19 | 0x3 * B16 |
       vd * B12 | 0x5 * B9 | B8 | B6 | m * B5 | vm);
}
3833
3834
// VRINTZ (round to integral, toward zero), single-precision. Unlike the
// other vrint* emitters this form is conditional.
void Assembler::vrintz(const SwVfpRegister dst, const SwVfpRegister src,
                       const Condition cond) {
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
  // Vd(15-12) | 101(11-9) | sz=0(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
       0x5 * B9 | B7 | B6 | m * B5 | vm);
}
3847
3848
// VRINTZ (round toward zero), double-precision, conditional.
void Assembler::vrintz(const DwVfpRegister dst, const DwVfpRegister src,
                       const Condition cond) {
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 011(19-17) | 0(16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | op=1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
  DCHECK(IsEnabled(ARMv8));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D * B23 | d * B22 | 0x3 * B20 | 0x3 * B17 | vd * B12 |
       0x5 * B9 | B8 | B7 | B6 | m * B5 | vm);
}
3861
3862
3863 // Support for NEON.
3864
// NEON VLD1: loads the register list |dst| from the memory operand |src|.
void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}
3877
3878
// NEON VST1: stores the register list |src| to the memory operand |dst|.
// Same layout as vld1 but with bits 21-20 = 00 (store direction).
void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  DCHECK(IsEnabled(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}
3891
3892
// NEON VMOVL: widens each element of the D register |src| into the
// Q register |dst|; |dt| selects signedness (U bit) and element size.
void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
  DCHECK(IsEnabled(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
       (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}
3905
// Swaps the contents of two D registers: a single NEON VSWP when NEON is
// supported, otherwise three VMOVs through kScratchDoubleReg.
void Assembler::vswp(DwVfpRegister srcdst0, DwVfpRegister srcdst1) {
  DCHECK(VfpRegisterIsAvailable(srcdst0));
  DCHECK(VfpRegisterIsAvailable(srcdst1));
  // The non-NEON fallback clobbers kScratchDoubleReg, so neither operand
  // may alias it.
  DCHECK(!srcdst0.is(kScratchDoubleReg));
  DCHECK(!srcdst1.is(kScratchDoubleReg));

  if (srcdst0.is(srcdst1)) return;  // Swapping aliased registers emits nothing.

  if (CpuFeatures::IsSupported(NEON)) {
    // Instruction details available in ARM DDI 0406C.b, A8.8.418.
    // 1111(31-28) | 00111(27-23) | D(22) | 110010(21-16) |
    // Vd(15-12) | 000000(11-6) | M(5) | 0(4) | Vm(3-0)
    int vd, d;
    srcdst0.split_code(&vd, &d);
    int vm, m;
    srcdst1.split_code(&vm, &m);
    emit(0xFU * B28 | 7 * B23 | d * B22 | 0x32 * B16 | vd * B12 | m * B5 | vm);
  } else {
    vmov(kScratchDoubleReg, srcdst0);
    vmov(srcdst0, srcdst1);
    vmov(srcdst1, kScratchDoubleReg);
  }
}
3929
3930 // Pseudo instructions.
nop(int type)3931 void Assembler::nop(int type) {
3932 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes
3933 // some of the CPU's pipeline and has to issue. Older ARM chips simply used
3934 // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
3935 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
3936 // a type.
3937 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3938 emit(al | 13*B21 | type*B12 | type);
3939 }
3940
3941
IsMovT(Instr instr)3942 bool Assembler::IsMovT(Instr instr) {
3943 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3944 ((kNumRegisters-1)*B12) | // mask out register
3945 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3946 return instr == kMovtPattern;
3947 }
3948
3949
IsMovW(Instr instr)3950 bool Assembler::IsMovW(Instr instr) {
3951 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions
3952 ((kNumRegisters-1)*B12) | // mask out destination
3953 EncodeMovwImmediate(0xFFFF)); // mask out immediate value
3954 return instr == kMovwPattern;
3955 }
3956
3957
GetMovTPattern()3958 Instr Assembler::GetMovTPattern() { return kMovtPattern; }
3959
3960
GetMovWPattern()3961 Instr Assembler::GetMovWPattern() { return kMovwPattern; }
3962
3963
EncodeMovwImmediate(uint32_t immediate)3964 Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
3965 DCHECK(immediate < 0x10000);
3966 return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
3967 }
3968
3969
PatchMovwImmediate(Instr instruction,uint32_t immediate)3970 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
3971 instruction &= ~EncodeMovwImmediate(0xffff);
3972 return instruction | EncodeMovwImmediate(immediate);
3973 }
3974
3975
DecodeShiftImm(Instr instr)3976 int Assembler::DecodeShiftImm(Instr instr) {
3977 int rotate = Instruction::RotateValue(instr) * 2;
3978 int immed8 = Instruction::Immed8Value(instr);
3979 return base::bits::RotateRight32(immed8, rotate);
3980 }
3981
3982
PatchShiftImm(Instr instr,int immed)3983 Instr Assembler::PatchShiftImm(Instr instr, int immed) {
3984 uint32_t rotate_imm = 0;
3985 uint32_t immed_8 = 0;
3986 bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
3987 DCHECK(immed_fits);
3988 USE(immed_fits);
3989 return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
3990 }
3991
3992
IsNop(Instr instr,int type)3993 bool Assembler::IsNop(Instr instr, int type) {
3994 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop.
3995 // Check for mov rx, rx where x = type.
3996 return instr == (al | 13*B21 | type*B12 | type);
3997 }
3998
3999
IsMovImmed(Instr instr)4000 bool Assembler::IsMovImmed(Instr instr) {
4001 return (instr & kMovImmedMask) == kMovImmedPattern;
4002 }
4003
4004
IsOrrImmed(Instr instr)4005 bool Assembler::IsOrrImmed(Instr instr) {
4006 return (instr & kOrrImmedMask) == kOrrImmedPattern;
4007 }
4008
4009
4010 // static
ImmediateFitsAddrMode1Instruction(int32_t imm32)4011 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
4012 uint32_t dummy1;
4013 uint32_t dummy2;
4014 return fits_shifter(imm32, &dummy1, &dummy2, NULL);
4015 }
4016
4017
ImmediateFitsAddrMode2Instruction(int32_t imm32)4018 bool Assembler::ImmediateFitsAddrMode2Instruction(int32_t imm32) {
4019 return is_uint12(abs(imm32));
4020 }
4021
4022
4023 // Debugging.
// Debugging.
// Records a CONST_POOL reloc entry of |size| bytes at the current pc.
void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in the
  // code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}
4029
4030
// Grows the code buffer: doubles it below 1MB, then grows linearly by 1MB.
// Instructions occupy the front of the buffer and relocation info grows
// down from the back, so the two regions are moved independently.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc.origin = this;

  // Copy the data. Instructions keep their offset from the buffer start;
  // reloc info keeps its offset from the buffer end (hence the two deltas).
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  MemMove(desc.buffer, buffer_, desc.instr_size);
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}
4069
4070
db(uint8_t data)4071 void Assembler::db(uint8_t data) {
4072 // db is used to write raw data. The constant pool should be emitted or
4073 // blocked before using db.
4074 DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
4075 DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
4076 CheckBuffer();
4077 *reinterpret_cast<uint8_t*>(pc_) = data;
4078 pc_ += sizeof(uint8_t);
4079 }
4080
4081
dd(uint32_t data)4082 void Assembler::dd(uint32_t data) {
4083 // dd is used to write raw data. The constant pool should be emitted or
4084 // blocked before using dd.
4085 DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
4086 DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
4087 CheckBuffer();
4088 *reinterpret_cast<uint32_t*>(pc_) = data;
4089 pc_ += sizeof(uint32_t);
4090 }
4091
4092
dq(uint64_t value)4093 void Assembler::dq(uint64_t value) {
4094 // dq is used to write raw data. The constant pool should be emitted or
4095 // blocked before using dq.
4096 DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
4097 DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
4098 CheckBuffer();
4099 *reinterpret_cast<uint64_t*>(pc_) = value;
4100 pc_ += sizeof(uint64_t);
4101 }
4102
4103
// Writes the raw address of |stub|'s first instruction at the current pc.
void Assembler::emit_code_stub_address(Code* stub) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) =
      reinterpret_cast<uint32_t>(stub->instruction_start());
  pc_ += sizeof(uint32_t);
}
4110
4111
// Records a relocation entry for the instruction at the current pc.
// NONE modes are dropped, as are external references when neither the
// serializer nor debug code needs them.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    // For these entries the payload is the recorded AST id, which is
    // consumed (cleared) here.
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  RelocInfo rinfo(isolate(), pc_, rmode, data, NULL);
  reloc_info_writer.Write(&rinfo);
}
4127
4128
// Queues a pointer-sized constant for the constant pool, recorded against
// the instruction at |position|. Returns the pool access kind.
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
                                                          RelocInfo::Mode rmode,
                                                          intptr_t value) {
  DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::CONST_POOL &&
         rmode != RelocInfo::NONE64);
  // Entries may be deduplicated only when neither the serializer nor the
  // reloc mode requires each use to have its own slot.
  bool sharing_ok = RelocInfo::IsNone(rmode) ||
                    !(serializer_enabled() || rmode < RelocInfo::CELL);
  if (FLAG_enable_embedded_constant_pool) {
    return constant_pool_builder_.AddEntry(position, value, sharing_ok);
  } else {
    DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
    if (pending_32_bit_constants_.empty()) {
      // Track the first use so pool-distance limits can be checked later.
      first_const_pool_32_use_ = position;
    }
    ConstantPoolEntry entry(position, value, sharing_ok);
    pending_32_bit_constants_.push_back(entry);

    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
    return ConstantPoolEntry::REGULAR;
  }
}
4152
4153
// Queues a 64-bit floating-point constant for the constant pool, recorded
// against the instruction at |position|. Returns the pool access kind.
ConstantPoolEntry::Access Assembler::ConstantPoolAddEntry(int position,
                                                          double value) {
  if (FLAG_enable_embedded_constant_pool) {
    return constant_pool_builder_.AddEntry(position, value);
  } else {
    DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
    if (pending_64_bit_constants_.empty()) {
      // Track the first use so pool-distance limits can be checked later.
      first_const_pool_64_use_ = position;
    }
    ConstantPoolEntry entry(position, value);
    pending_64_bit_constants_.push_back(entry);

    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
    return ConstantPoolEntry::REGULAR;
  }
}
4172
4173
// Prevents constant-pool emission for the next |instructions| instructions,
// extending any existing block window rather than shrinking it.
void Assembler::BlockConstPoolFor(int instructions) {
  if (FLAG_enable_embedded_constant_pool) {
    // Should be a no-op if using an embedded constant pool.
    DCHECK(pending_32_bit_constants_.empty());
    DCHECK(pending_64_bit_constants_.empty());
    return;
  }

  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
    // Verify that deferring emission this far cannot push any pending entry
    // out of the pc-relative load range of its first use.
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
    DCHECK(pending_32_bit_constants_.empty() ||
           (start - first_const_pool_32_use_ +
                pending_64_bit_constants_.size() * kDoubleSize <
            kMaxDistToIntPool));
    DCHECK(pending_64_bit_constants_.empty() ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  // Don't bother re-checking for pool emission before the block expires.
  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
4201
4202
// Emits the pending constant pool inline into the instruction stream and
// patches every recorded load (an ldr/vldr with offset 0) to address its
// pool slot. Emission happens when |force_emit| is set, or when some pending
// entry is close to the limit of its pc-relative load range. |require_jump|
// means execution may fall through into this position, so a branch over the
// pool must be emitted first.
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  if (FLAG_enable_embedded_constant_pool) {
    // Should be a no-op if using an embedded constant pool: entries are
    // handled by the pool builder, never queued inline.
    DCHECK(pending_32_bit_constants_.empty());
    DCHECK(pending_64_bit_constants_.empty());
    return;
  }

  // Some short sequence of instruction mustn't be broken up by constant pool
  // emission, such sequences are protected by calls to BlockConstPoolFor and
  // BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (pending_32_bit_constants_.empty() && pending_64_bit_constants_.empty()) {
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (include the jump over the pool and the constant pool marker and
  // the gap to the relocation information).
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  // Estimated, not final: deduplication below may shrink the pool.
  int estimated_size_after_marker =
      pending_32_bit_constants_.size() * kPointerSize;
  bool has_int_values = !pending_32_bit_constants_.empty();
  bool has_fp_values = !pending_64_bit_constants_.empty();
  bool require_64_bit_align = false;
  if (has_fp_values) {
    // Double entries must be 8-byte aligned; a filler word may be needed
    // between the marker and the first double.
    require_64_bit_align =
        !IsAligned(reinterpret_cast<intptr_t>(pc_ + size_up_to_marker),
                   kDoubleAlignment);
    if (require_64_bit_align) {
      estimated_size_after_marker += kInstrSize;
    }
    estimated_size_after_marker +=
        pending_64_bit_constants_.size() * kDoubleSize;
  }
  int estimated_size = size_up_to_marker + estimated_size_after_marker;

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next
  //    time the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in-order so it's conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    DCHECK(has_fp_values || has_int_values);
    bool need_emit = false;
    if (has_fp_values) {
      // The 64-bit constants are always emitted before the 32-bit constants, so
      // we can ignore the effect of the 32-bit constants on estimated_size.
      int dist64 = pc_offset() + estimated_size -
                   pending_32_bit_constants_.size() * kPointerSize -
                   first_const_pool_64_use_;
      if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
          (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
        need_emit = true;
      }
    }
    if (has_int_values) {
      int dist32 = pc_offset() + estimated_size - first_const_pool_32_use_;
      if ((dist32 >= kMaxDistToIntPool - kCheckPoolInterval) ||
          (!require_jump && (dist32 >= kMaxDistToIntPool / 2))) {
        need_emit = true;
      }
    }
    if (!need_emit) return;
  }

  // Deduplicate constants: later entries with the same value are marked as
  // merged with the first occurrence and get no pool slot of their own.
  int size_after_marker = estimated_size_after_marker;
  for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
    ConstantPoolEntry& entry = pending_64_bit_constants_[i];
    DCHECK(!entry.is_merged());
    for (size_t j = 0; j < i; j++) {
      if (entry.value64() == pending_64_bit_constants_[j].value64()) {
        // Merge targets are always unmerged entries (j < i scans forward),
        // so merged-index chains are at most one level deep.
        DCHECK(!pending_64_bit_constants_[j].is_merged());
        entry.set_merged_index(j);
        size_after_marker -= kDoubleSize;
        break;
      }
    }
  }

  for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
    ConstantPoolEntry& entry = pending_32_bit_constants_[i];
    DCHECK(!entry.is_merged());
    // Some 32-bit entries (e.g. ones with relocation info) must keep their
    // own slot and may not be shared.
    if (!entry.sharing_ok()) continue;
    for (size_t j = 0; j < i; j++) {
      if (entry.value() == pending_32_bit_constants_[j].value()) {
        DCHECK(!pending_32_bit_constants_[j].is_merged());
        entry.set_merged_index(j);
        size_after_marker -= kPointerSize;
        break;
      }
    }
  }

  int size = size_up_to_marker + size_after_marker;

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    Label size_check;
    bind(&size_check);

    // Emit jump over constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down constant pool marker "Undefined instruction".
    // The data size helps disassembly know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      // Filler word so the doubles below are 8-byte aligned.
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // 32-bit entries.
    for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
      ConstantPoolEntry& entry = pending_64_bit_constants_[i];

      Instr instr = instr_at(entry.position());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
      DCHECK((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_offset() - entry.position() - kPcLoadDelta;
      DCHECK(is_uint10(delta));

      if (entry.is_merged()) {
        // Reuse the slot of the entry this one was merged with: take that
        // entry's (already patched) offset and adjust for the difference in
        // the two loads' positions.
        ConstantPoolEntry& merged =
            pending_64_bit_constants_[entry.merged_index()];
        DCHECK(entry.value64() == merged.value64());
        Instr merged_instr = instr_at(merged.position());
        DCHECK(IsVldrDPcImmediateOffset(merged_instr));
        delta = GetVldrDRegisterImmediateOffset(merged_instr);
        delta += merged.position() - entry.position();
      }
      instr_at_put(entry.position(),
                   SetVldrDRegisterImmediateOffset(instr, delta));
      if (!entry.is_merged()) {
        DCHECK(IsAligned(reinterpret_cast<intptr_t>(pc_), kDoubleAlignment));
        dq(entry.value64());
      }
    }

    // Emit 32-bit constant pool entries.
    for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
      ConstantPoolEntry& entry = pending_32_bit_constants_[i];
      Instr instr = instr_at(entry.position());

      // 64-bit loads shouldn't get here.
      DCHECK(!IsVldrDPcImmediateOffset(instr));
      DCHECK(!IsMovW(instr));
      DCHECK(IsLdrPcImmediateOffset(instr) &&
             GetLdrRegisterImmediateOffset(instr) == 0);

      int delta = pc_offset() - entry.position() - kPcLoadDelta;
      DCHECK(is_uint12(delta));
      // 0 is the smallest delta:
      //   ldr rd, [pc, #0]
      //   constant pool marker
      //   data

      if (entry.is_merged()) {
        DCHECK(entry.sharing_ok());
        // Point at the merged-with entry's slot, adjusted for position.
        ConstantPoolEntry& merged =
            pending_32_bit_constants_[entry.merged_index()];
        DCHECK(entry.value() == merged.value());
        Instr merged_instr = instr_at(merged.position());
        DCHECK(IsLdrPcImmediateOffset(merged_instr));
        delta = GetLdrRegisterImmediateOffset(merged_instr);
        delta += merged.position() - entry.position();
      }
      instr_at_put(entry.position(),
                   SetLdrRegisterImmediateOffset(instr, delta));
      if (!entry.is_merged()) {
        emit(entry.value());
      }
    }

    pending_32_bit_constants_.clear();
    pending_64_bit_constants_.clear();
    first_const_pool_32_use_ = -1;
    first_const_pool_64_use_ = -1;

    RecordComment("]");

    // The emitted pool must match the size announced in the marker above.
    DCHECK_EQ(size, SizeOfCodeGeneratedSince(&size_check));

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}
4422
4423
// Patches a previously emitted constant-pool access sequence at |pc_offset|
// (relative to the buffer start) so it refers to slot |offset| of the
// embedded constant pool. Only used with FLAG_enable_embedded_constant_pool.
// OVERFLOWED entries were materialized as an immediate-building sequence
// (movw/movt on ARMv7, mov + three orrs otherwise); REGULAR entries are a
// single pp-relative ldr (TAGGED) or vldr (DOUBLE).
void Assembler::PatchConstantPoolAccessInstruction(
    int pc_offset, int offset, ConstantPoolEntry::Access access,
    ConstantPoolEntry::Type type) {
  DCHECK(FLAG_enable_embedded_constant_pool);
  Address pc = buffer_ + pc_offset;

  // Patch vldr/ldr instruction with correct offset.
  Instr instr = instr_at(pc);
  if (access == ConstantPoolEntry::OVERFLOWED) {
    if (CpuFeatures::IsSupported(ARMv7)) {
      CpuFeatureScope scope(this, ARMv7);
      // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0].
      Instr next_instr = instr_at(pc + kInstrSize);
      DCHECK((IsMovW(instr) && Instruction::ImmedMovwMovtValue(instr) == 0));
      DCHECK((IsMovT(next_instr) &&
              Instruction::ImmedMovwMovtValue(next_instr) == 0));
      // movw takes the low 16 bits of the offset, movt the high 16 bits.
      instr_at_put(pc, PatchMovwImmediate(instr, offset & 0xffff));
      instr_at_put(pc + kInstrSize,
                   PatchMovwImmediate(next_instr, offset >> 16));
    } else {
      // Instructions to patch must be 'mov rd, [#0]' and 'orr rd, rd, [#0].
      Instr instr_2 = instr_at(pc + kInstrSize);
      Instr instr_3 = instr_at(pc + 2 * kInstrSize);
      Instr instr_4 = instr_at(pc + 3 * kInstrSize);
      DCHECK((IsMovImmed(instr) && Instruction::Immed8Value(instr) == 0));
      DCHECK((IsOrrImmed(instr_2) && Instruction::Immed8Value(instr_2) == 0) &&
             GetRn(instr_2).is(GetRd(instr_2)));
      DCHECK((IsOrrImmed(instr_3) && Instruction::Immed8Value(instr_3) == 0) &&
             GetRn(instr_3).is(GetRd(instr_3)));
      DCHECK((IsOrrImmed(instr_4) && Instruction::Immed8Value(instr_4) == 0) &&
             GetRn(instr_4).is(GetRd(instr_4)));
      // Build the 32-bit offset one byte per instruction: mov sets bits
      // [7:0], the orrs merge in bits [15:8], [23:16] and [31:24].
      instr_at_put(pc, PatchShiftImm(instr, (offset & kImm8Mask)));
      instr_at_put(pc + kInstrSize,
                   PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
      instr_at_put(pc + 2 * kInstrSize,
                   PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
      instr_at_put(pc + 3 * kInstrSize,
                   PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
    }
  } else if (type == ConstantPoolEntry::DOUBLE) {
    // Instruction to patch must be 'vldr rd, [pp, #0]'.
    DCHECK((IsVldrDPpImmediateOffset(instr) &&
            GetVldrDRegisterImmediateOffset(instr) == 0));
    // vldr encodes a 10-bit (word-scaled) immediate offset.
    DCHECK(is_uint10(offset));
    instr_at_put(pc, SetVldrDRegisterImmediateOffset(instr, offset));
  } else {
    // Instruction to patch must be 'ldr rd, [pp, #0]'.
    DCHECK((IsLdrPpImmediateOffset(instr) &&
            GetLdrRegisterImmediateOffset(instr) == 0));
    // ldr encodes a 12-bit immediate offset.
    DCHECK(is_uint12(offset));
    instr_at_put(pc, SetLdrRegisterImmediateOffset(instr, offset));
  }
}
4477
4478
4479 } // namespace internal
4480 } // namespace v8
4481
4482 #endif // V8_TARGET_ARCH_ARM
4483