1 // Copyright 2015, ARM Limited
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
15 //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 #ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_
28 #define VIXL_A64_MACRO_ASSEMBLER_A64_H_
29
30 #include <algorithm>
31 #include <limits>
32
33 #include "vixl/globals.h"
34 #include "vixl/a64/assembler-a64.h"
35 #include "vixl/a64/debugger-a64.h"
36 #include "vixl/a64/instrument-a64.h"
37 #include "vixl/a64/simulator-constants-a64.h"
38
39
// X-macro describing the single-register load/store macro-instructions.
// Each entry supplies: the macro name, the C++ reference type of the data
// register parameter, the parameter name, and the assembler opcode to use.
// The opcode expression may depend on the register (e.g. picking the W or X
// variant for sign-extending loads).
#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)
50
51
// X-macro describing the register-pair load/store macro-instructions.
// Entries supply: macro name, register reference type, both register
// parameter names, and the assembler opcode (possibly register-dependent).
#define LSPAIR_MACRO_LIST(V)                              \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))   \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2))  \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
56
57 namespace vixl {
58
59 // Forward declaration
60 class MacroAssembler;
61 class UseScratchRegisterScope;
62
// Base class for the pools (literal and veneer) maintained by the
// MacroAssembler. A pool tracks a buffer-offset "checkpoint" at which the
// MacroAssembler must check it, and a nesting counter that can temporarily
// block its emission.
class Pool {
 public:
  explicit Pool(MacroAssembler* masm)
      : checkpoint_(kNoCheckpointRequired), masm_(masm) {
    // Reset() re-initialises checkpoint_ and clears the block counter.
    Reset();
  }

  void Reset() {
    checkpoint_ = kNoCheckpointRequired;
    monitor_ = 0;
  }

  // Blocking nests: each Block() must be balanced by a Release().
  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }

  // Sentinel checkpoint meaning "no check needed" (largest possible offset).
  static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;

  void SetNextCheckpoint(ptrdiff_t checkpoint);
  ptrdiff_t checkpoint() const { return checkpoint_; }

  // Whether emitting the pool must also emit a branch jumping over it.
  enum EmitOption {
    kBranchRequired,
    kNoBranchRequired
  };

 protected:
  // Next buffer offset at which a check is required for this pool.
  ptrdiff_t checkpoint_;
  // Indicates whether the emission of this pool is blocked.
  int monitor_;
  // The MacroAssembler using this pool.
  MacroAssembler* masm_;
};
97
98
// Pool of data literals referenced from the instruction stream. Entries are
// gathered as literals are used and emitted together, preceded by a pool
// header (see Size()) and optionally a branch jumping over the data.
class LiteralPool : public Pool {
 public:
  explicit LiteralPool(MacroAssembler* masm);
  ~LiteralPool();
  void Reset();

  // Register a literal for emission with this pool.
  void AddEntry(RawLiteral* literal);
  bool IsEmpty() const { return entries_.empty(); }
  // Current pool size, including the pool header (see inline definition).
  size_t Size() const;
  // Worst-case emitted size, including a potential branch over the pool.
  size_t MaxSize() const;
  size_t OtherPoolsMaxSize() const;

  // Emit the pool now if generating 'amount' more bytes of code could push a
  // pending literal out of range.
  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  void Emit(EmitOption option = kNoBranchRequired);

  void SetNextRecommendedCheckpoint(ptrdiff_t offset);
  ptrdiff_t NextRecommendedCheckpoint();

  // Track the buffer offset of the first use of a pool entry.
  void UpdateFirstUse(ptrdiff_t use_position);

  // Record 'literal' so it is deleted when this pool is destroyed.
  void DeleteOnDestruction(RawLiteral* literal) {
    deleted_on_destruction_.push_back(literal);
  }

  // Recommended not exact since the pool can be blocked for short periods.
  static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;

 private:
  // Literals pending emission.
  std::vector<RawLiteral*> entries_;
  // Pool data size, excluding the header (Size() adds it).
  size_t size_;
  // Buffer offset of the first literal use; see UpdateFirstUse().
  ptrdiff_t first_use_;
  // The parent class `Pool` provides a `checkpoint_`, which is the buffer
  // offset before which a check *must* occur. This recommended checkpoint
  // indicates when we would like to start emitting the constant pool. The
  // MacroAssembler can, but does not have to, check the buffer when the
  // checkpoint is reached.
  ptrdiff_t recommended_checkpoint_;

  // Literals owned by the pool; deleted in the destructor.
  std::vector<RawLiteral*> deleted_on_destruction_;
};
139
140
// Total pool size in bytes: the accumulated literal data plus one
// instruction for the pool header.
inline size_t LiteralPool::Size() const {
  // Account for the pool header.
  return size_ + kInstructionSize;
}
145
146
// Worst-case emitted size: the pool itself plus one instruction for an
// optional branch jumping over the pool data.
inline size_t LiteralPool::MaxSize() const {
  // Account for the potential branch over the pool.
  return Size() + kInstructionSize;
}
151
152
// Recommend emitting the pool once the buffer has advanced the recommended
// range past the first literal use.
inline ptrdiff_t LiteralPool::NextRecommendedCheckpoint() {
  return first_use_ + kRecommendedLiteralPoolRange;
}
156
157
// Pool of veneers: branch islands emitted on behalf of range-limited forward
// branches (conditional, compare-and-branch, test-and-branch) whose targets
// may end up out of the branch's immediate range. Unresolved branches are
// tracked per branch type; the pool's checkpoint is the smallest
// max_reachable_pc_ among them.
class VeneerPool : public Pool {
 public:
  explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}

  void Reset();

  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }
  bool IsEmpty() const { return unresolved_branches_.empty(); }

  // Bookkeeping for one unresolved forward branch.
  class BranchInfo {
   public:
    BranchInfo()
        : max_reachable_pc_(0), pc_offset_(0),
          label_(NULL), branch_type_(UnknownBranchType) {}
    BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
        : pc_offset_(offset), label_(label), branch_type_(branch_type) {
      // The furthest pc this branch can reach with its (positive) immediate.
      max_reachable_pc_ =
          pc_offset_ + Instruction::ImmBranchForwardRange(branch_type_);
    }

    static bool IsValidComparison(const BranchInfo& branch_1,
                                  const BranchInfo& branch_2) {
      // BranchInfo are always compared against other objects with
      // the same branch type.
      if (branch_1.branch_type_ != branch_2.branch_type_) {
        return false;
      }
      // Since we should never have two branch infos with the same offsets, it
      // first looks like we should check that offsets are different. However
      // the operators may also be used to *search* for a branch info in the
      // set.
      bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
      return (!same_offsets ||
              ((branch_1.label_ == branch_2.label_) &&
               (branch_1.max_reachable_pc_ == branch_2.max_reachable_pc_)));
    }

    // We must provide comparison operators to work with InvalSet.
    bool operator==(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ == other.pc_offset_;
    }
    bool operator<(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ < other.pc_offset_;
    }
    bool operator<=(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ <= other.pc_offset_;
    }
    bool operator>(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ > other.pc_offset_;
    }

    // Maximum position reachable by the branch using a positive branch offset.
    ptrdiff_t max_reachable_pc_;
    // Offset of the branch in the code generation buffer.
    ptrdiff_t pc_offset_;
    // The label branched to.
    Label* label_;
    ImmBranchType branch_type_;
  };

  // Unconditional branches have unlimited (b) range and never need veneers;
  // unknown types are not tracked.
  bool BranchTypeUsesVeneers(ImmBranchType type) {
    return (type != UnknownBranchType) && (type != UncondBranchType);
  }

  void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
                                Label* label,
                                ImmBranchType branch_type);
  void DeleteUnresolvedBranchInfoForLabel(Label* label);

  bool ShouldEmitVeneer(int64_t max_reachable_pc, size_t amount);
  // Check against the most urgent deadline: the smallest max_reachable_pc_
  // over all unresolved branches.
  bool ShouldEmitVeneers(size_t amount) {
    return ShouldEmitVeneer(unresolved_branches_.FirstLimit(), amount);
  }

  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  void Emit(EmitOption option, size_t margin);

  // The code size generated for a veneer. Currently one branch instruction.
  // This is for code size checking purposes, and can be extended in the future
  // for example if we decide to add nops between the veneers.
  static const int kVeneerCodeSize = 1 * kInstructionSize;
  // The maximum size of code other than veneers that can be generated when
  // emitting a veneer pool. Currently there can be an additional branch to jump
  // over the pool.
  static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;

  void UpdateNextCheckPoint() {
    SetNextCheckpoint(NextCheckPoint());
  }

  int NumberOfPotentialVeneers() const {
    return static_cast<int>(unresolved_branches_.size());
  }

  // Worst case: one veneer per unresolved branch, plus the branch over the
  // pool.
  size_t MaxSize() const {
    return
        kPoolNonVeneerCodeSize + unresolved_branches_.size() * kVeneerCodeSize;
  }

  size_t OtherPoolsMaxSize() const;

  // InvalSet tuning parameters (see the InvalSet template for semantics).
  static const int kNPreallocatedInfos = 4;
  static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
  static const size_t kReclaimFrom = 128;
  static const size_t kReclaimFactor = 16;

 private:
  typedef InvalSet<BranchInfo,
                   kNPreallocatedInfos,
                   ptrdiff_t,
                   kInvalidOffset,
                   kReclaimFrom,
                   kReclaimFactor> BranchInfoTypedSetBase;
  typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;

  // Set of BranchInfo of a single branch type.
  class BranchInfoTypedSet : public BranchInfoTypedSetBase {
   public:
    BranchInfoTypedSet() : BranchInfoTypedSetBase() {}

    // Smallest max_reachable_pc_ in this set (the key), or kInvalidOffset if
    // the set is empty.
    ptrdiff_t FirstLimit() {
      if (empty()) {
        return kInvalidOffset;
      }
      return min_element_key();
    }
  };

  class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
   public:
    BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
    explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
        : BranchInfoTypedSetIterBase(typed_set) {}
  };

  // Unresolved branches, bucketed by branch type (conditional, compare,
  // test) so that each bucket's InvalSet stays keyed consistently.
  class BranchInfoSet {
   public:
    void insert(BranchInfo branch_info) {
      ImmBranchType type = branch_info.branch_type_;
      VIXL_ASSERT(IsValidBranchType(type));
      typed_set_[BranchIndexFromType(type)].insert(branch_info);
    }

    void erase(BranchInfo branch_info) {
      // Silently ignores untracked branch types.
      if (IsValidBranchType(branch_info.branch_type_)) {
        int index =
            BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
        typed_set_[index].erase(branch_info);
      }
    }

    size_t size() const {
      size_t res = 0;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res += typed_set_[i].size();
      }
      return res;
    }

    bool empty() const {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        if (!typed_set_[i].empty()) {
          return false;
        }
      }
      return true;
    }

    // Smallest limit over all buckets; kInvalidOffset when all are empty.
    ptrdiff_t FirstLimit() {
      ptrdiff_t res = kInvalidOffset;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res = std::min(res, typed_set_[i].FirstLimit());
      }
      return res;
    }

    void Reset() {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        typed_set_[i].clear();
      }
    }

    // Mapping between bucket index and branch type; keep the two switches
    // below in sync.
    static ImmBranchType BranchTypeFromIndex(int index) {
      switch (index) {
        case 0:
          return CondBranchType;
        case 1:
          return CompareBranchType;
        case 2:
          return TestBranchType;
        default:
          VIXL_UNREACHABLE();
          return UnknownBranchType;
      }
    }
    static int BranchIndexFromType(ImmBranchType branch_type) {
      switch (branch_type) {
        case CondBranchType:
          return 0;
        case CompareBranchType:
          return 1;
        case TestBranchType:
          return 2;
        default:
          VIXL_UNREACHABLE();
          return 0;
      }
    }

    bool IsValidBranchType(ImmBranchType branch_type) {
      return (branch_type != UnknownBranchType) &&
             (branch_type != UncondBranchType);
    }

   private:
    static const int kNumberOfTrackedBranchTypes = 3;
    BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];

    friend class VeneerPool;
    friend class BranchInfoSetIterator;
  };

  // Iterates over all buckets of a BranchInfoSet, exhausting one typed
  // sub-iterator before moving to the next.
  class BranchInfoSetIterator {
   public:
    explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
      // Placement-new: construct each sub-iterator over its bucket in the
      // default-constructed array.
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        new(&sub_iterator_[i])
            BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
      }
    }

    VeneerPool::BranchInfo* Current() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          return sub_iterator_[i].Current();
        }
      }
      VIXL_UNREACHABLE();
      return NULL;
    }

    void Advance() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Advance();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    bool Done() const {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) return false;
      }
      return true;
    }

    // Skip the remainder of the current bucket.
    void AdvanceToNextType() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Finish();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    // Unlike the methods above, this is a no-op when iteration is complete.
    void DeleteCurrentAndAdvance() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].DeleteCurrentAndAdvance();
          return;
        }
      }
    }

   private:
    BranchInfoSet* set_;
    BranchInfoTypedSetIterator
        sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
  };

  // The next mandatory check is the earliest branch deadline, or no check at
  // all when nothing is pending.
  ptrdiff_t NextCheckPoint() {
    if (unresolved_branches_.empty()) {
      return kNoCheckpointRequired;
    } else {
      return unresolved_branches_.FirstLimit();
    }
  }

  // Information about unresolved (forward) branches.
  BranchInfoSet unresolved_branches_;
};
459
460
// Required InvalSet template specialisations.
// A BranchInfo is keyed by the highest pc its branch can reach, so the set
// can cheaply report the most urgent veneer deadline via min_element_key().
template<>
inline ptrdiff_t InvalSet<VeneerPool::BranchInfo,
                          VeneerPool::kNPreallocatedInfos,
                          ptrdiff_t,
                          VeneerPool::kInvalidOffset,
                          VeneerPool::kReclaimFrom,
                          VeneerPool::kReclaimFactor>::Key(
    const VeneerPool::BranchInfo& branch_info) {
  return branch_info.max_reachable_pc_;
}
// Counterpart of Key() above: writing the key updates the branch's maximum
// reachable pc.
template<>
inline void InvalSet<VeneerPool::BranchInfo,
                     VeneerPool::kNPreallocatedInfos,
                     ptrdiff_t,
                     VeneerPool::kInvalidOffset,
                     VeneerPool::kReclaimFrom,
                     VeneerPool::kReclaimFactor>::SetKey(
    VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
  branch_info->max_reachable_pc_ = key;
}
482
483
// This scope has the following purposes:
//  * Acquire/Release the underlying assembler's code buffer.
//     * This is mandatory before emitting.
//  * Emit the literal or veneer pools if necessary before emitting the
//    macro-instruction.
//  * Ensure there is enough space to emit the macro-instruction.
class EmissionCheckScope {
 public:
  // 'size' is the number of bytes the scoped code is allowed to emit.
  EmissionCheckScope(MacroAssembler* masm, size_t size);
  ~EmissionCheckScope();

 protected:
  MacroAssembler* masm_;
#ifdef VIXL_DEBUG
  // Debug-only: where the scope started and how much space was requested.
  // NOTE(review): presumably checked in the destructor - implementation is
  // in the .cc file.
  Label start_;
  size_t size_;
#endif
};
502
503
504 // Helper for common Emission checks.
505 // The macro-instruction maps to a single instruction.
// Helper for common Emission checks.
// The macro-instruction maps to a single instruction.
class SingleEmissionCheckScope : public EmissionCheckScope {
 public:
  explicit SingleEmissionCheckScope(MacroAssembler* masm)
      : EmissionCheckScope(masm, kInstructionSize) {}
};
511
512
513 // The macro instruction is a "typical" macro-instruction. Typical macro-
514 // instruction only emit a few instructions, a few being defined as 8 here.
// The macro instruction is a "typical" macro-instruction. Typical macro-
// instruction only emit a few instructions, a few being defined as 8 here.
class MacroEmissionCheckScope : public EmissionCheckScope {
 public:
  explicit MacroEmissionCheckScope(MacroAssembler* masm)
      : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}

 private:
  // Upper bound assumed for a typical macro-instruction's expansion.
  static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
};
523
524
// Branch selector for the generic branch macro: either an architectural
// condition code, or a register/bit test (cbz/cbnz, tbz/tbnz).
enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always, never,
  // cbz and cbnz
  reg_zero, reg_not_zero,
  // tbz and tbnz
  reg_bit_clear, reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};
562
563
// Consumed by MacroAssembler::Mov(Register, Operand) via its 'discard_mode'
// parameter. NOTE(review): presumably controls whether a same-W-register
// move may be elided - confirm in macro-assembler-a64.cc.
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
565
566
567 class MacroAssembler : public Assembler {
568 public:
569 MacroAssembler(size_t capacity,
570 PositionIndependentCodeOption pic = PositionIndependentCode);
571 MacroAssembler(byte * buffer, size_t capacity,
572 PositionIndependentCodeOption pic = PositionIndependentCode);
573 ~MacroAssembler();
574
575 // Start generating code from the beginning of the buffer, discarding any code
576 // and data that has already been emitted into the buffer.
577 //
578 // In order to avoid any accidental transfer of state, Reset ASSERTs that the
579 // constant pool is not blocked.
580 void Reset();
581
582 // Finalize a code buffer of generated instructions. This function must be
583 // called before executing or copying code from the buffer.
584 void FinalizeCode();
585
586
587 // Constant generation helpers.
588 // These functions return the number of instructions required to move the
589 // immediate into the destination register. Also, if the masm pointer is
590 // non-null, it generates the code to do so.
591 // The two features are implemented using one function to avoid duplication of
592 // the logic.
593 // The function can be used to evaluate the cost of synthesizing an
594 // instruction using 'mov immediate' instructions. A user might prefer loading
595 // a constant using the literal pool instead of using multiple 'mov immediate'
596 // instructions.
597 static int MoveImmediateHelper(MacroAssembler* masm,
598 const Register &rd,
599 uint64_t imm);
600 static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
601 const Register& dst,
602 int64_t imm);
603
604
605 // Logical macros.
606 void And(const Register& rd,
607 const Register& rn,
608 const Operand& operand);
609 void Ands(const Register& rd,
610 const Register& rn,
611 const Operand& operand);
612 void Bic(const Register& rd,
613 const Register& rn,
614 const Operand& operand);
615 void Bics(const Register& rd,
616 const Register& rn,
617 const Operand& operand);
618 void Orr(const Register& rd,
619 const Register& rn,
620 const Operand& operand);
621 void Orn(const Register& rd,
622 const Register& rn,
623 const Operand& operand);
624 void Eor(const Register& rd,
625 const Register& rn,
626 const Operand& operand);
627 void Eon(const Register& rd,
628 const Register& rn,
629 const Operand& operand);
630 void Tst(const Register& rn, const Operand& operand);
631 void LogicalMacro(const Register& rd,
632 const Register& rn,
633 const Operand& operand,
634 LogicalOp op);
635
636 // Add and sub macros.
637 void Add(const Register& rd,
638 const Register& rn,
639 const Operand& operand,
640 FlagsUpdate S = LeaveFlags);
641 void Adds(const Register& rd,
642 const Register& rn,
643 const Operand& operand);
644 void Sub(const Register& rd,
645 const Register& rn,
646 const Operand& operand,
647 FlagsUpdate S = LeaveFlags);
648 void Subs(const Register& rd,
649 const Register& rn,
650 const Operand& operand);
651 void Cmn(const Register& rn, const Operand& operand);
652 void Cmp(const Register& rn, const Operand& operand);
653 void Neg(const Register& rd,
654 const Operand& operand);
655 void Negs(const Register& rd,
656 const Operand& operand);
657
658 void AddSubMacro(const Register& rd,
659 const Register& rn,
660 const Operand& operand,
661 FlagsUpdate S,
662 AddSubOp op);
663
664 // Add/sub with carry macros.
665 void Adc(const Register& rd,
666 const Register& rn,
667 const Operand& operand);
668 void Adcs(const Register& rd,
669 const Register& rn,
670 const Operand& operand);
671 void Sbc(const Register& rd,
672 const Register& rn,
673 const Operand& operand);
674 void Sbcs(const Register& rd,
675 const Register& rn,
676 const Operand& operand);
677 void Ngc(const Register& rd,
678 const Operand& operand);
679 void Ngcs(const Register& rd,
680 const Operand& operand);
681 void AddSubWithCarryMacro(const Register& rd,
682 const Register& rn,
683 const Operand& operand,
684 FlagsUpdate S,
685 AddSubWithCarryOp op);
686
687 // Move macros.
688 void Mov(const Register& rd, uint64_t imm);
689 void Mov(const Register& rd,
690 const Operand& operand,
691 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
Mvn(const Register & rd,uint64_t imm)692 void Mvn(const Register& rd, uint64_t imm) {
693 Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask));
694 }
695 void Mvn(const Register& rd, const Operand& operand);
696
697 // Try to move an immediate into the destination register in a single
698 // instruction. Returns true for success, and updates the contents of dst.
699 // Returns false, otherwise.
700 bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
701
702 // Move an immediate into register dst, and return an Operand object for
703 // use with a subsequent instruction that accepts a shift. The value moved
704 // into dst is not necessarily equal to imm; it may have had a shifting
705 // operation applied to it that will be subsequently undone by the shift
706 // applied in the Operand.
707 Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
708
709 // Synthesises the address represented by a MemOperand into a register.
710 void ComputeAddress(const Register& dst, const MemOperand& mem_op);
711
712 // Conditional macros.
713 void Ccmp(const Register& rn,
714 const Operand& operand,
715 StatusFlags nzcv,
716 Condition cond);
717 void Ccmn(const Register& rn,
718 const Operand& operand,
719 StatusFlags nzcv,
720 Condition cond);
721 void ConditionalCompareMacro(const Register& rn,
722 const Operand& operand,
723 StatusFlags nzcv,
724 Condition cond,
725 ConditionalCompareOp op);
726 void Csel(const Register& rd,
727 const Register& rn,
728 const Operand& operand,
729 Condition cond);
730
731 // Load/store macros.
732 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
733 void FN(const REGTYPE REG, const MemOperand& addr);
734 LS_MACRO_LIST(DECLARE_FUNCTION)
735 #undef DECLARE_FUNCTION
736
737 void LoadStoreMacro(const CPURegister& rt,
738 const MemOperand& addr,
739 LoadStoreOp op);
740
741 #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
742 void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
743 LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
744 #undef DECLARE_FUNCTION
745
746 void LoadStorePairMacro(const CPURegister& rt,
747 const CPURegister& rt2,
748 const MemOperand& addr,
749 LoadStorePairOp op);
750
751 void Prfm(PrefetchOperation op, const MemOperand& addr);
752
753 // Push or pop up to 4 registers of the same width to or from the stack,
754 // using the current stack pointer as set by SetStackPointer.
755 //
756 // If an argument register is 'NoReg', all further arguments are also assumed
757 // to be 'NoReg', and are thus not pushed or popped.
758 //
759 // Arguments are ordered such that "Push(a, b);" is functionally equivalent
760 // to "Push(a); Push(b);".
761 //
762 // It is valid to push the same register more than once, and there is no
763 // restriction on the order in which registers are specified.
764 //
765 // It is not valid to pop into the same register more than once in one
766 // operation, not even into the zero register.
767 //
768 // If the current stack pointer (as set by SetStackPointer) is sp, then it
769 // must be aligned to 16 bytes on entry and the total size of the specified
770 // registers must also be a multiple of 16 bytes.
771 //
772 // Even if the current stack pointer is not the system stack pointer (sp),
773 // Push (and derived methods) will still modify the system stack pointer in
774 // order to comply with ABI rules about accessing memory below the system
775 // stack pointer.
776 //
777 // Other than the registers passed into Pop, the stack pointer and (possibly)
778 // the system stack pointer, these methods do not modify any other registers.
779 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
780 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
781 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
782 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
783
784 // Alternative forms of Push and Pop, taking a RegList or CPURegList that
785 // specifies the registers that are to be pushed or popped. Higher-numbered
786 // registers are associated with higher memory addresses (as in the A32 push
787 // and pop instructions).
788 //
789 // (Push|Pop)SizeRegList allow you to specify the register size as a
790 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
791 // supported.
792 //
793 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
794 void PushCPURegList(CPURegList registers);
795 void PopCPURegList(CPURegList registers);
796
  // Convenience wrappers around Push/PopCPURegList for common register sizes
  // and types.
  void PushSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  void PopSizeRegList(RegList registers, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  void PushXRegList(RegList regs) {
    PushSizeRegList(regs, kXRegSize);
  }
  void PopXRegList(RegList regs) {
    PopSizeRegList(regs, kXRegSize);
  }
  void PushWRegList(RegList regs) {
    PushSizeRegList(regs, kWRegSize);
  }
  void PopWRegList(RegList regs) {
    PopSizeRegList(regs, kWRegSize);
  }
  // D and S registers are pushed/popped as V (FP/SIMD) registers.
  void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
  void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
829
830 // Push the specified register 'count' times.
831 void PushMultipleTimes(int count, Register src);
832
833 // Poke 'src' onto the stack. The offset is in bytes.
834 //
835 // If the current stack pointer (as set by SetStackPointer) is sp, then sp
836 // must be aligned to 16 bytes.
837 void Poke(const Register& src, const Operand& offset);
838
839 // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
840 //
841 // If the current stack pointer (as set by SetStackPointer) is sp, then sp
842 // must be aligned to 16 bytes.
843 void Peek(const Register& dst, const Operand& offset);
844
845 // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
846 // specifies the registers that are to be pushed or popped. Higher-numbered
847 // registers are associated with higher memory addresses.
848 //
849 // (Peek|Poke)SizeRegList allow you to specify the register size as a
850 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
851 // supported.
852 //
853 // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
  // Convenience wrappers: Peek/Poke register lists at a byte offset from the
  // current stack pointer, via Load/StoreCPURegList.
  void PeekCPURegList(CPURegList registers, int64_t offset) {
    LoadCPURegList(registers, MemOperand(StackPointer(), offset));
  }
  void PokeCPURegList(CPURegList registers, int64_t offset) {
    StoreCPURegList(registers, MemOperand(StackPointer(), offset));
  }

  void PeekSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PeekCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PokeSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PokeCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PeekXRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kXRegSize);
  }
  void PokeXRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kXRegSize);
  }
  void PeekWRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kWRegSize);
  }
  void PokeWRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kWRegSize);
  }
  // D and S registers are peeked/poked as V (FP/SIMD) registers.
  void PeekDRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PokeDRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PeekSRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
  void PokeSRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
893
894
895 // Claim or drop stack space without actually accessing memory.
896 //
897 // If the current stack pointer (as set by SetStackPointer) is sp, then it
898 // must be aligned to 16 bytes and the size claimed or dropped must be a
899 // multiple of 16 bytes.
900 void Claim(const Operand& size);
901 void Drop(const Operand& size);
902
903 // Preserve the callee-saved registers (as defined by AAPCS64).
904 //
905 // Higher-numbered registers are pushed before lower-numbered registers, and
906 // thus get higher addresses.
907 // Floating-point registers are pushed before general-purpose registers, and
908 // thus get higher addresses.
909 //
910 // This method must not be called unless StackPointer() is sp, and it is
911 // aligned to 16 bytes.
912 void PushCalleeSavedRegisters();
913
914 // Restore the callee-saved registers (as defined by AAPCS64).
915 //
916 // Higher-numbered registers are popped after lower-numbered registers, and
917 // thus come from higher addresses.
918 // Floating-point registers are popped after general-purpose registers, and
919 // thus come from higher addresses.
920 //
921 // This method must not be called unless StackPointer() is sp, and it is
922 // aligned to 16 bytes.
923 void PopCalleeSavedRegisters();
924
925 void LoadCPURegList(CPURegList registers, const MemOperand& src);
926 void StoreCPURegList(CPURegList registers, const MemOperand& dst);
927
928 // Remaining instructions are simple pass-through calls to the assembler.
  // Compute the pc-relative address of 'label' into rd (adr).
  void Adr(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adr(rd, label);
  }
  // Compute the pc-relative page address of 'label' into rd (adrp).
  void Adrp(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adrp(rd, label);
  }
  // Arithmetic shift right by an immediate amount.
  void Asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    asr(rd, rn, shift);
  }
  // Arithmetic shift right by a register-held amount (asrv).
  void Asr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    asrv(rd, rn, rm);
  }
956
957 // Branch type inversion relies on these relations.
958 VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
959 (reg_bit_clear == (reg_bit_set ^ 1)) &&
960 (always == (never ^ 1)));
961
InvertBranchType(BranchType type)962 BranchType InvertBranchType(BranchType type) {
963 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
964 return static_cast<BranchType>(
965 InvertCondition(static_cast<Condition>(type)));
966 } else {
967 return static_cast<BranchType>(type ^ 1);
968 }
969 }
970
971 void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
972
973 void B(Label* label);
974 void B(Label* label, Condition cond);
  // Argument-order convenience overload of B(label, cond).
  void B(Condition cond, Label* label) {
    B(label, cond);
  }
  // Bitfield move: copy bits <imms:immr> of rn into rd.
  void Bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfm(rd, rn, immr, imms);
  }
  // Bitfield insert: place the low 'width' bits of rn at 'lsb' in rd.
  void Bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfi(rd, rn, lsb, width);
  }
  // Bitfield extract and insert into the low bits of rd.
  void Bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfxil(rd, rn, lsb, width);
  }
1008 void Bind(Label* label);
1009 // Bind a label to a specified offset from the start of the buffer.
1010 void BindToOffset(Label* label, ptrdiff_t offset);
  // Branch with link to 'label'.
  void Bl(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bl(label);
  }
  // Branch with link to the address held in xn.
  void Blr(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    blr(xn);
  }
  // Branch to the address held in xn.
  void Br(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    br(xn);
  }
  // Software breakpoint with an immediate comment field.
  void Brk(int code = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brk(code);
  }
1033 void Cbnz(const Register& rt, Label* label);
1034 void Cbz(const Register& rt, Label* label);
  // Conditional increment: rd = cond ? rn + 1 : rn.
  void Cinc(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinc(rd, rn, cond);
  }
  // Conditional invert: rd = cond ? ~rn : rn.
  void Cinv(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinv(rd, rn, cond);
  }
  // Clear exclusive monitor.
  void Clrex() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    clrex();
  }
  // Count leading sign bits.
  void Cls(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cls(rd, rn);
  }
  // Count leading zero bits.
  void Clz(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    clz(rd, rn);
  }
  // Conditional negate: rd = cond ? -rn : rn.
  void Cneg(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cneg(rd, rn, cond);
  }
  // Conditional set: rd = cond ? 1 : 0.
  void Cset(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    cset(rd, cond);
  }
  // Conditional set mask: rd = cond ? -1 : 0.
  void Csetm(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    csetm(rd, cond);
  }
  // Conditional select increment: rd = cond ? rn : rm + 1.
  // 'al' and 'nv' are disallowed: their encodings are used by the cinc/cset
  // aliases and would not behave as a plain conditional select.
  void Csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinc(rd, rn, rm, cond);
  }
  // Conditional select invert: rd = cond ? rn : ~rm.
  void Csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinv(rd, rn, rm, cond);
  }
  // Conditional select negate: rd = cond ? rn : -rm.
  void Csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csneg(rd, rn, rm, cond);
  }
  // Data memory barrier.
  void Dmb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dmb(domain, type);
  }
  // Data synchronization barrier.
  void Dsb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dsb(domain, type);
  }
  // Extract: rd = bits of the rn:rm double-width value, starting at 'lsb'.
  void Extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    extr(rd, rn, rm, lsb);
  }
  // FP add: vd = vn + vm.
  void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fadd(vd, vn, vm);
  }
  // FP conditional compare; 'trap' selects the signalling (fccmpe) variant.
  void Fccmp(const VRegister& vn,
             const VRegister& vm,
             StatusFlags nzcv,
             Condition cond,
             FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    FPCCompareMacro(vn, vm, nzcv, cond, trap);
  }
  // Signalling FP conditional compare (traps on quiet NaNs too).
  void Fccmpe(const VRegister& vn,
              const VRegister& vm,
              StatusFlags nzcv,
              Condition cond) {
    Fccmp(vn, vm, nzcv, cond, EnableTrap);
  }
  // FP compare; 'trap' selects the signalling (fcmpe) variant.
  void Fcmp(const VRegister& vn, const VRegister& vm,
            FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    FPCompareMacro(vn, vm, trap);
  }
  void Fcmp(const VRegister& vn, double value,
            FPTrapFlags trap = DisableTrap);
  void Fcmpe(const VRegister& vn, double value);
  // Signalling FP compare against a register.
  void Fcmpe(const VRegister& vn, const VRegister& vm) {
    Fcmp(vn, vm, EnableTrap);
  }
  // FP conditional select: vd = cond ? vn : vm.
  void Fcsel(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    fcsel(vd, vn, vm, cond);
  }
  // FP convert between precisions (e.g. single <-> double).
  void Fcvt(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvt(vd, vn);
  }
  // FP convert to higher precision, lower half of the source vector.
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl(vd, vn);
  }
  // FP convert to higher precision, upper half of the source vector.
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl2(vd, vn);
  }
  // FP convert to lower precision, writing the lower half of vd.
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn(vd, vn);
  }
  // FP convert to lower precision, writing the upper half of vd.
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn2(vd, vn);
  }
  // FP convert to lower precision, rounding to odd.
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn(vd, vn);
  }
  // FP convert to lower precision, rounding to odd, upper half.
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn2(vd, vn);
  }
  // FP-to-integer conversions. The suffix encodes rounding mode and sign:
  // a = to nearest with ties away, m = toward minus infinity, n = to
  // nearest with ties even, p = toward plus infinity, z = toward zero;
  // s = signed result, u = unsigned result.
  void Fcvtas(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtas(rd, vn);
  }
  void Fcvtau(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtau(rd, vn);
  }
  void Fcvtms(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtms(rd, vn);
  }
  void Fcvtmu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtmu(rd, vn);
  }
  void Fcvtns(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtns(rd, vn);
  }
  void Fcvtnu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtnu(rd, vn);
  }
  void Fcvtps(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtps(rd, vn);
  }
  void Fcvtpu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtpu(rd, vn);
  }
  // 'fbits' selects a fixed-point result with that many fraction bits.
  void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzs(rd, vn, fbits);
  }
  void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzu(rd, vn, fbits);
  }
  // FP divide: vd = vn / vm.
  void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fdiv(vd, vn, vm);
  }
  // FP maximum.
  void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmax(vd, vn, vm);
  }
  // FP maximum number (NaN-propagation follows the maxNum rules).
  void Fmaxnm(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmaxnm(vd, vn, vm);
  }
  // FP minimum.
  void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmin(vd, vn, vm);
  }
  // FP minimum number (NaN-propagation follows the minNum rules).
  void Fminnm(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fminnm(vd, vn, vm);
  }
Fmov(VRegister vd,VRegister vn)1310 void Fmov(VRegister vd, VRegister vn) {
1311 VIXL_ASSERT(allow_macro_instructions_);
1312 SingleEmissionCheckScope guard(this);
1313 // Only emit an instruction if vd and vn are different, and they are both D
1314 // registers. fmov(s0, s0) is not a no-op because it clears the top word of
1315 // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
1316 // the top of q0, but VRegister does not currently support Q registers.
1317 if (!vd.Is(vn) || !vd.Is64Bits()) {
1318 fmov(vd, vn);
1319 }
1320 }
  // Move a general-purpose register into an FP/vector register.
  void Fmov(VRegister vd, Register rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(vd, rn);
  }
  // Move a general-purpose register into vector lane 'index'.
  void Fmov(const VRegister& vd, int index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmov(vd, index, rn);
  }
  // Move vector lane 'index' into a general-purpose register.
  void Fmov(const Register& rd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn, index);
  }
1337
1338 // Provide explicit double and float interfaces for FP immediate moves, rather
1339 // than relying on implicit C++ casts. This allows signalling NaNs to be
1340 // preserved when the immediate matches the format of vd. Most systems convert
1341 // signalling NaNs to quiet NaNs when converting between float and double.
1342 void Fmov(VRegister vd, double imm);
1343 void Fmov(VRegister vd, float imm);
1344 // Provide a template to allow other types to be converted automatically.
  // Generic immediate form: convert any arithmetic type through double and
  // defer to the double overload declared above.
  template<typename T>
  void Fmov(VRegister vd, T imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    Fmov(vd, static_cast<double>(imm));
  }
  // Move an FP/vector register into a general-purpose register.
  void Fmov(Register rd, VRegister vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn);
  }
  // FP multiply: vd = vn * vm.
  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmul(vd, vn, vm);
  }
  // FP multiply-negate: vd = -(vn * vm).
  void Fnmul(const VRegister& vd, const VRegister& vn,
             const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmul(vd, vn, vm);
  }
  // FP fused multiply-add: vd = va + (vn * vm).
  void Fmadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmadd(vd, vn, vm, va);
  }
  // FP fused multiply-subtract: vd = va - (vn * vm).
  void Fmsub(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmsub(vd, vn, vm, va);
  }
  // FP negated fused multiply-add: vd = -va - (vn * vm).
  void Fnmadd(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmadd(vd, vn, vm, va);
  }
  // FP negated fused multiply-subtract: vd = -va + (vn * vm).
  void Fnmsub(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmsub(vd, vn, vm, va);
  }
  // FP subtract: vd = vn - vm.
  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fsub(vd, vn, vm);
  }
  // System hint (nop, yield, wfe, wfi, sev, sevl, ...).
  void Hint(SystemHint code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hint(code);
  }
  // Halting debug breakpoint.
  void Hlt(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hlt(code);
  }
  // Instruction synchronization barrier.
  void Isb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    isb();
  }
  // Load-acquire register (word/doubleword).
  void Ldar(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldar(rt, src);
  }
  // Load-acquire byte.
  void Ldarb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarb(rt, src);
  }
  // Load-acquire halfword.
  void Ldarh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarh(rt, src);
  }
  // Load-acquire exclusive pair; rt and rt2 must not alias (the result
  // would be unpredictable).
  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldaxp(rt, rt2, src);
  }
  // Load-acquire exclusive register.
  void Ldaxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxr(rt, src);
  }
  // Load-acquire exclusive byte.
  void Ldaxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrb(rt, src);
  }
  // Load-acquire exclusive halfword.
  void Ldaxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrh(rt, src);
  }
  // Load pair of registers, non-temporal hint.
  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldnp(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of vt. Most systems
  // convert signalling NaNs to quiet NaNs when converting between float and
  // double.
  // The literals allocated with 'new' below are owned by the literal pool:
  // kDeletedOnPlacementByPool makes the pool delete them once placed.
  void Ldr(const VRegister& vt, double imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (vt.IsD()) {
      literal = new Literal<double>(imm,
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    } else {
      // Narrow to the register's format; signalling NaNs may be quietened.
      literal = new Literal<float>(static_cast<float>(imm),
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  void Ldr(const VRegister& vt, float imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (vt.IsS()) {
      literal = new Literal<float>(imm,
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    } else {
      // Widen to the register's format; signalling NaNs may be quietened.
      literal = new Literal<double>(static_cast<double>(imm),
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  // Load a 128-bit literal (two 64-bit halves) into a Q register.
  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(vt.IsQ());
    SingleEmissionCheckScope guard(this);
    ldr(vt, new Literal<uint64_t>(high64, low64,
                                  &literal_pool_,
                                  RawLiteral::kDeletedOnPlacementByPool));
  }
  // Load an integer literal into rt; the literal width follows the register
  // width. Literals are pool-owned (kDeletedOnPlacementByPool).
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (rt.Is64Bits()) {
      literal = new Literal<uint64_t>(imm,
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    } else {
      VIXL_ASSERT(rt.Is32Bits());
      // The value must be representable in 32 bits (as unsigned or as a
      // sign-extended signed value).
      VIXL_ASSERT(is_uint32(imm) || is_int32(imm));
      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(rt, literal);
  }
  // Load a 32-bit literal, sign-extended to 64 bits.
  void Ldrsw(const Register& rt, uint32_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    ldrsw(rt,
          new Literal<uint32_t>(imm,
                                &literal_pool_,
                                RawLiteral::kDeletedOnPlacementByPool));
  }
  // Load from a caller-managed literal.
  void Ldr(const CPURegister& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldr(rt, literal);
  }
  // Load sign-extended word from a caller-managed literal.
  void Ldrsw(const Register& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldrsw(rt, literal);
  }
  // Load exclusive pair; rt and rt2 must not alias.
  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldxp(rt, rt2, src);
  }
  // Load exclusive register.
  void Ldxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxr(rt, src);
  }
  // Load exclusive byte.
  void Ldxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrb(rt, src);
  }
  // Load exclusive halfword.
  void Ldxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrh(rt, src);
  }
  // Logical shift left by an immediate amount.
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsl(rd, rn, shift);
  }
  // Logical shift left by a register-held amount (lslv).
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lslv(rd, rn, rm);
  }
  // Logical shift right by an immediate amount.
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsr(rd, rn, shift);
  }
  // Logical shift right by a register-held amount (lsrv).
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lsrv(rd, rn, rm);
  }
  // Multiply-add: rd = ra + (rn * rm).
  void Madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    madd(rd, rn, rm, ra);
  }
  // Multiply-negate: rd = -(rn * rm).
  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mneg(rd, rn, rm);
  }
  // Register move (zero registers and the stack pointer are permitted here,
  // so no IsZero asserts).
  void Mov(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(rd, rn);
  }
  // Move immediate into a 16-bit slice of rd, keeping the other bits.
  // shift = -1 lets the assembler pick the slice.
  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    movk(rd, imm, shift);
  }
  // Read a system register into rt.
  void Mrs(const Register& rt, SystemRegister sysreg) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    mrs(rt, sysreg);
  }
  // Write rt into a system register.
  void Msr(SystemRegister sysreg, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    msr(sysreg, rt);
  }
  // Generic system instruction.
  void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    sys(op1, crn, crm, op2, rt);
  }
  // Data cache maintenance.
  void Dc(DataCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dc(op, rt);
  }
  // Instruction cache maintenance.
  void Ic(InstructionCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ic(op, rt);
  }
  // Multiply-subtract: rd = ra - (rn * rm).
  void Msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    msub(rd, rn, rm, ra);
  }
  // Multiply: rd = rn * rm.
  void Mul(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mul(rd, rn, rm);
  }
  // No-operation.
  void Nop() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    nop();
  }
Rbit(const Register & rd,const Register & rn)1675 void Rbit(const Register& rd, const Register& rn) {
1676 VIXL_ASSERT(allow_macro_instructions_);
1677 VIXL_ASSERT(!rd.IsZero());
1678 VIXL_ASSERT(!rn.IsZero());
1679 SingleEmissionCheckScope guard(this);
1680 rbit(rd, rn);
1681 }
1682 void Ret(const Register& xn = lr) {
1683 VIXL_ASSERT(allow_macro_instructions_);
1684 VIXL_ASSERT(!xn.IsZero());
1685 SingleEmissionCheckScope guard(this);
1686 ret(xn);
1687 }
Rev(const Register & rd,const Register & rn)1688 void Rev(const Register& rd, const Register& rn) {
1689 VIXL_ASSERT(allow_macro_instructions_);
1690 VIXL_ASSERT(!rd.IsZero());
1691 VIXL_ASSERT(!rn.IsZero());
1692 SingleEmissionCheckScope guard(this);
1693 rev(rd, rn);
1694 }
Rev16(const Register & rd,const Register & rn)1695 void Rev16(const Register& rd, const Register& rn) {
1696 VIXL_ASSERT(allow_macro_instructions_);
1697 VIXL_ASSERT(!rd.IsZero());
1698 VIXL_ASSERT(!rn.IsZero());
1699 SingleEmissionCheckScope guard(this);
1700 rev16(rd, rn);
1701 }
Rev32(const Register & rd,const Register & rn)1702 void Rev32(const Register& rd, const Register& rn) {
1703 VIXL_ASSERT(allow_macro_instructions_);
1704 VIXL_ASSERT(!rd.IsZero());
1705 VIXL_ASSERT(!rn.IsZero());
1706 SingleEmissionCheckScope guard(this);
1707 rev32(rd, rn);
1708 }
Ror(const Register & rd,const Register & rs,unsigned shift)1709 void Ror(const Register& rd, const Register& rs, unsigned shift) {
1710 VIXL_ASSERT(allow_macro_instructions_);
1711 VIXL_ASSERT(!rd.IsZero());
1712 VIXL_ASSERT(!rs.IsZero());
1713 SingleEmissionCheckScope guard(this);
1714 ror(rd, rs, shift);
1715 }
Ror(const Register & rd,const Register & rn,const Register & rm)1716 void Ror(const Register& rd, const Register& rn, const Register& rm) {
1717 VIXL_ASSERT(allow_macro_instructions_);
1718 VIXL_ASSERT(!rd.IsZero());
1719 VIXL_ASSERT(!rn.IsZero());
1720 VIXL_ASSERT(!rm.IsZero());
1721 SingleEmissionCheckScope guard(this);
1722 rorv(rd, rn, rm);
1723 }
Sbfiz(const Register & rd,const Register & rn,unsigned lsb,unsigned width)1724 void Sbfiz(const Register& rd,
1725 const Register& rn,
1726 unsigned lsb,
1727 unsigned width) {
1728 VIXL_ASSERT(allow_macro_instructions_);
1729 VIXL_ASSERT(!rd.IsZero());
1730 VIXL_ASSERT(!rn.IsZero());
1731 SingleEmissionCheckScope guard(this);
1732 sbfiz(rd, rn, lsb, width);
1733 }
Sbfm(const Register & rd,const Register & rn,unsigned immr,unsigned imms)1734 void Sbfm(const Register& rd,
1735 const Register& rn,
1736 unsigned immr,
1737 unsigned imms) {
1738 VIXL_ASSERT(allow_macro_instructions_);
1739 VIXL_ASSERT(!rd.IsZero());
1740 VIXL_ASSERT(!rn.IsZero());
1741 SingleEmissionCheckScope guard(this);
1742 sbfm(rd, rn, immr, imms);
1743 }
Sbfx(const Register & rd,const Register & rn,unsigned lsb,unsigned width)1744 void Sbfx(const Register& rd,
1745 const Register& rn,
1746 unsigned lsb,
1747 unsigned width) {
1748 VIXL_ASSERT(allow_macro_instructions_);
1749 VIXL_ASSERT(!rd.IsZero());
1750 VIXL_ASSERT(!rn.IsZero());
1751 SingleEmissionCheckScope guard(this);
1752 sbfx(rd, rn, lsb, width);
1753 }
1754 void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
1755 VIXL_ASSERT(allow_macro_instructions_);
1756 VIXL_ASSERT(!rn.IsZero());
1757 SingleEmissionCheckScope guard(this);
1758 scvtf(vd, rn, fbits);
1759 }
Sdiv(const Register & rd,const Register & rn,const Register & rm)1760 void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
1761 VIXL_ASSERT(allow_macro_instructions_);
1762 VIXL_ASSERT(!rd.IsZero());
1763 VIXL_ASSERT(!rn.IsZero());
1764 VIXL_ASSERT(!rm.IsZero());
1765 SingleEmissionCheckScope guard(this);
1766 sdiv(rd, rn, rm);
1767 }
Smaddl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)1768 void Smaddl(const Register& rd,
1769 const Register& rn,
1770 const Register& rm,
1771 const Register& ra) {
1772 VIXL_ASSERT(allow_macro_instructions_);
1773 VIXL_ASSERT(!rd.IsZero());
1774 VIXL_ASSERT(!rn.IsZero());
1775 VIXL_ASSERT(!rm.IsZero());
1776 VIXL_ASSERT(!ra.IsZero());
1777 SingleEmissionCheckScope guard(this);
1778 smaddl(rd, rn, rm, ra);
1779 }
Smsubl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)1780 void Smsubl(const Register& rd,
1781 const Register& rn,
1782 const Register& rm,
1783 const Register& ra) {
1784 VIXL_ASSERT(allow_macro_instructions_);
1785 VIXL_ASSERT(!rd.IsZero());
1786 VIXL_ASSERT(!rn.IsZero());
1787 VIXL_ASSERT(!rm.IsZero());
1788 VIXL_ASSERT(!ra.IsZero());
1789 SingleEmissionCheckScope guard(this);
1790 smsubl(rd, rn, rm, ra);
1791 }
Smull(const Register & rd,const Register & rn,const Register & rm)1792 void Smull(const Register& rd, const Register& rn, const Register& rm) {
1793 VIXL_ASSERT(allow_macro_instructions_);
1794 VIXL_ASSERT(!rd.IsZero());
1795 VIXL_ASSERT(!rn.IsZero());
1796 VIXL_ASSERT(!rm.IsZero());
1797 SingleEmissionCheckScope guard(this);
1798 smull(rd, rn, rm);
1799 }
Smulh(const Register & xd,const Register & xn,const Register & xm)1800 void Smulh(const Register& xd, const Register& xn, const Register& xm) {
1801 VIXL_ASSERT(allow_macro_instructions_);
1802 VIXL_ASSERT(!xd.IsZero());
1803 VIXL_ASSERT(!xn.IsZero());
1804 VIXL_ASSERT(!xm.IsZero());
1805 SingleEmissionCheckScope guard(this);
1806 smulh(xd, xn, xm);
1807 }
Stlr(const Register & rt,const MemOperand & dst)1808 void Stlr(const Register& rt, const MemOperand& dst) {
1809 VIXL_ASSERT(allow_macro_instructions_);
1810 SingleEmissionCheckScope guard(this);
1811 stlr(rt, dst);
1812 }
Stlrb(const Register & rt,const MemOperand & dst)1813 void Stlrb(const Register& rt, const MemOperand& dst) {
1814 VIXL_ASSERT(allow_macro_instructions_);
1815 SingleEmissionCheckScope guard(this);
1816 stlrb(rt, dst);
1817 }
Stlrh(const Register & rt,const MemOperand & dst)1818 void Stlrh(const Register& rt, const MemOperand& dst) {
1819 VIXL_ASSERT(allow_macro_instructions_);
1820 SingleEmissionCheckScope guard(this);
1821 stlrh(rt, dst);
1822 }
Stlxp(const Register & rs,const Register & rt,const Register & rt2,const MemOperand & dst)1823 void Stlxp(const Register& rs,
1824 const Register& rt,
1825 const Register& rt2,
1826 const MemOperand& dst) {
1827 VIXL_ASSERT(allow_macro_instructions_);
1828 VIXL_ASSERT(!rs.Aliases(dst.base()));
1829 VIXL_ASSERT(!rs.Aliases(rt));
1830 VIXL_ASSERT(!rs.Aliases(rt2));
1831 SingleEmissionCheckScope guard(this);
1832 stlxp(rs, rt, rt2, dst);
1833 }
Stlxr(const Register & rs,const Register & rt,const MemOperand & dst)1834 void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
1835 VIXL_ASSERT(allow_macro_instructions_);
1836 VIXL_ASSERT(!rs.Aliases(dst.base()));
1837 VIXL_ASSERT(!rs.Aliases(rt));
1838 SingleEmissionCheckScope guard(this);
1839 stlxr(rs, rt, dst);
1840 }
Stlxrb(const Register & rs,const Register & rt,const MemOperand & dst)1841 void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
1842 VIXL_ASSERT(allow_macro_instructions_);
1843 VIXL_ASSERT(!rs.Aliases(dst.base()));
1844 VIXL_ASSERT(!rs.Aliases(rt));
1845 SingleEmissionCheckScope guard(this);
1846 stlxrb(rs, rt, dst);
1847 }
Stlxrh(const Register & rs,const Register & rt,const MemOperand & dst)1848 void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
1849 VIXL_ASSERT(allow_macro_instructions_);
1850 VIXL_ASSERT(!rs.Aliases(dst.base()));
1851 VIXL_ASSERT(!rs.Aliases(rt));
1852 SingleEmissionCheckScope guard(this);
1853 stlxrh(rs, rt, dst);
1854 }
Stnp(const CPURegister & rt,const CPURegister & rt2,const MemOperand & dst)1855 void Stnp(const CPURegister& rt,
1856 const CPURegister& rt2,
1857 const MemOperand& dst) {
1858 VIXL_ASSERT(allow_macro_instructions_);
1859 SingleEmissionCheckScope guard(this);
1860 stnp(rt, rt2, dst);
1861 }
Stxp(const Register & rs,const Register & rt,const Register & rt2,const MemOperand & dst)1862 void Stxp(const Register& rs,
1863 const Register& rt,
1864 const Register& rt2,
1865 const MemOperand& dst) {
1866 VIXL_ASSERT(allow_macro_instructions_);
1867 VIXL_ASSERT(!rs.Aliases(dst.base()));
1868 VIXL_ASSERT(!rs.Aliases(rt));
1869 VIXL_ASSERT(!rs.Aliases(rt2));
1870 SingleEmissionCheckScope guard(this);
1871 stxp(rs, rt, rt2, dst);
1872 }
Stxr(const Register & rs,const Register & rt,const MemOperand & dst)1873 void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
1874 VIXL_ASSERT(allow_macro_instructions_);
1875 VIXL_ASSERT(!rs.Aliases(dst.base()));
1876 VIXL_ASSERT(!rs.Aliases(rt));
1877 SingleEmissionCheckScope guard(this);
1878 stxr(rs, rt, dst);
1879 }
Stxrb(const Register & rs,const Register & rt,const MemOperand & dst)1880 void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
1881 VIXL_ASSERT(allow_macro_instructions_);
1882 VIXL_ASSERT(!rs.Aliases(dst.base()));
1883 VIXL_ASSERT(!rs.Aliases(rt));
1884 SingleEmissionCheckScope guard(this);
1885 stxrb(rs, rt, dst);
1886 }
Stxrh(const Register & rs,const Register & rt,const MemOperand & dst)1887 void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
1888 VIXL_ASSERT(allow_macro_instructions_);
1889 VIXL_ASSERT(!rs.Aliases(dst.base()));
1890 VIXL_ASSERT(!rs.Aliases(rt));
1891 SingleEmissionCheckScope guard(this);
1892 stxrh(rs, rt, dst);
1893 }
Svc(int code)1894 void Svc(int code) {
1895 VIXL_ASSERT(allow_macro_instructions_);
1896 SingleEmissionCheckScope guard(this);
1897 svc(code);
1898 }
Sxtb(const Register & rd,const Register & rn)1899 void Sxtb(const Register& rd, const Register& rn) {
1900 VIXL_ASSERT(allow_macro_instructions_);
1901 VIXL_ASSERT(!rd.IsZero());
1902 VIXL_ASSERT(!rn.IsZero());
1903 SingleEmissionCheckScope guard(this);
1904 sxtb(rd, rn);
1905 }
Sxth(const Register & rd,const Register & rn)1906 void Sxth(const Register& rd, const Register& rn) {
1907 VIXL_ASSERT(allow_macro_instructions_);
1908 VIXL_ASSERT(!rd.IsZero());
1909 VIXL_ASSERT(!rn.IsZero());
1910 SingleEmissionCheckScope guard(this);
1911 sxth(rd, rn);
1912 }
Sxtw(const Register & rd,const Register & rn)1913 void Sxtw(const Register& rd, const Register& rn) {
1914 VIXL_ASSERT(allow_macro_instructions_);
1915 VIXL_ASSERT(!rd.IsZero());
1916 VIXL_ASSERT(!rn.IsZero());
1917 SingleEmissionCheckScope guard(this);
1918 sxtw(rd, rn);
1919 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vm)1920 void Tbl(const VRegister& vd,
1921 const VRegister& vn,
1922 const VRegister& vm) {
1923 VIXL_ASSERT(allow_macro_instructions_);
1924 SingleEmissionCheckScope guard(this);
1925 tbl(vd, vn, vm);
1926 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vm)1927 void Tbl(const VRegister& vd,
1928 const VRegister& vn,
1929 const VRegister& vn2,
1930 const VRegister& vm) {
1931 VIXL_ASSERT(allow_macro_instructions_);
1932 SingleEmissionCheckScope guard(this);
1933 tbl(vd, vn, vn2, vm);
1934 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vm)1935 void Tbl(const VRegister& vd,
1936 const VRegister& vn,
1937 const VRegister& vn2,
1938 const VRegister& vn3,
1939 const VRegister& vm) {
1940 VIXL_ASSERT(allow_macro_instructions_);
1941 SingleEmissionCheckScope guard(this);
1942 tbl(vd, vn, vn2, vn3, vm);
1943 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vn4,const VRegister & vm)1944 void Tbl(const VRegister& vd,
1945 const VRegister& vn,
1946 const VRegister& vn2,
1947 const VRegister& vn3,
1948 const VRegister& vn4,
1949 const VRegister& vm) {
1950 VIXL_ASSERT(allow_macro_instructions_);
1951 SingleEmissionCheckScope guard(this);
1952 tbl(vd, vn, vn2, vn3, vn4, vm);
1953 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vm)1954 void Tbx(const VRegister& vd,
1955 const VRegister& vn,
1956 const VRegister& vm) {
1957 VIXL_ASSERT(allow_macro_instructions_);
1958 SingleEmissionCheckScope guard(this);
1959 tbx(vd, vn, vm);
1960 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vm)1961 void Tbx(const VRegister& vd,
1962 const VRegister& vn,
1963 const VRegister& vn2,
1964 const VRegister& vm) {
1965 VIXL_ASSERT(allow_macro_instructions_);
1966 SingleEmissionCheckScope guard(this);
1967 tbx(vd, vn, vn2, vm);
1968 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vm)1969 void Tbx(const VRegister& vd,
1970 const VRegister& vn,
1971 const VRegister& vn2,
1972 const VRegister& vn3,
1973 const VRegister& vm) {
1974 VIXL_ASSERT(allow_macro_instructions_);
1975 SingleEmissionCheckScope guard(this);
1976 tbx(vd, vn, vn2, vn3, vm);
1977 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vn4,const VRegister & vm)1978 void Tbx(const VRegister& vd,
1979 const VRegister& vn,
1980 const VRegister& vn2,
1981 const VRegister& vn3,
1982 const VRegister& vn4,
1983 const VRegister& vm) {
1984 VIXL_ASSERT(allow_macro_instructions_);
1985 SingleEmissionCheckScope guard(this);
1986 tbx(vd, vn, vn2, vn3, vn4, vm);
1987 }
1988 void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
1989 void Tbz(const Register& rt, unsigned bit_pos, Label* label);
Ubfiz(const Register & rd,const Register & rn,unsigned lsb,unsigned width)1990 void Ubfiz(const Register& rd,
1991 const Register& rn,
1992 unsigned lsb,
1993 unsigned width) {
1994 VIXL_ASSERT(allow_macro_instructions_);
1995 VIXL_ASSERT(!rd.IsZero());
1996 VIXL_ASSERT(!rn.IsZero());
1997 SingleEmissionCheckScope guard(this);
1998 ubfiz(rd, rn, lsb, width);
1999 }
Ubfm(const Register & rd,const Register & rn,unsigned immr,unsigned imms)2000 void Ubfm(const Register& rd,
2001 const Register& rn,
2002 unsigned immr,
2003 unsigned imms) {
2004 VIXL_ASSERT(allow_macro_instructions_);
2005 VIXL_ASSERT(!rd.IsZero());
2006 VIXL_ASSERT(!rn.IsZero());
2007 SingleEmissionCheckScope guard(this);
2008 ubfm(rd, rn, immr, imms);
2009 }
Ubfx(const Register & rd,const Register & rn,unsigned lsb,unsigned width)2010 void Ubfx(const Register& rd,
2011 const Register& rn,
2012 unsigned lsb,
2013 unsigned width) {
2014 VIXL_ASSERT(allow_macro_instructions_);
2015 VIXL_ASSERT(!rd.IsZero());
2016 VIXL_ASSERT(!rn.IsZero());
2017 SingleEmissionCheckScope guard(this);
2018 ubfx(rd, rn, lsb, width);
2019 }
2020 void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
2021 VIXL_ASSERT(allow_macro_instructions_);
2022 VIXL_ASSERT(!rn.IsZero());
2023 SingleEmissionCheckScope guard(this);
2024 ucvtf(vd, rn, fbits);
2025 }
Udiv(const Register & rd,const Register & rn,const Register & rm)2026 void Udiv(const Register& rd, const Register& rn, const Register& rm) {
2027 VIXL_ASSERT(allow_macro_instructions_);
2028 VIXL_ASSERT(!rd.IsZero());
2029 VIXL_ASSERT(!rn.IsZero());
2030 VIXL_ASSERT(!rm.IsZero());
2031 SingleEmissionCheckScope guard(this);
2032 udiv(rd, rn, rm);
2033 }
Umaddl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)2034 void Umaddl(const Register& rd,
2035 const Register& rn,
2036 const Register& rm,
2037 const Register& ra) {
2038 VIXL_ASSERT(allow_macro_instructions_);
2039 VIXL_ASSERT(!rd.IsZero());
2040 VIXL_ASSERT(!rn.IsZero());
2041 VIXL_ASSERT(!rm.IsZero());
2042 VIXL_ASSERT(!ra.IsZero());
2043 SingleEmissionCheckScope guard(this);
2044 umaddl(rd, rn, rm, ra);
2045 }
Umull(const Register & rd,const Register & rn,const Register & rm)2046 void Umull(const Register& rd,
2047 const Register& rn,
2048 const Register& rm) {
2049 VIXL_ASSERT(allow_macro_instructions_);
2050 VIXL_ASSERT(!rd.IsZero());
2051 VIXL_ASSERT(!rn.IsZero());
2052 VIXL_ASSERT(!rm.IsZero());
2053 SingleEmissionCheckScope guard(this);
2054 umull(rd, rn, rm);
2055 }
Umulh(const Register & xd,const Register & xn,const Register & xm)2056 void Umulh(const Register& xd, const Register& xn, const Register& xm) {
2057 VIXL_ASSERT(allow_macro_instructions_);
2058 VIXL_ASSERT(!xd.IsZero());
2059 VIXL_ASSERT(!xn.IsZero());
2060 VIXL_ASSERT(!xm.IsZero());
2061 SingleEmissionCheckScope guard(this);
2062 umulh(xd, xn, xm);
2063 }
Umsubl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)2064 void Umsubl(const Register& rd,
2065 const Register& rn,
2066 const Register& rm,
2067 const Register& ra) {
2068 VIXL_ASSERT(allow_macro_instructions_);
2069 VIXL_ASSERT(!rd.IsZero());
2070 VIXL_ASSERT(!rn.IsZero());
2071 VIXL_ASSERT(!rm.IsZero());
2072 VIXL_ASSERT(!ra.IsZero());
2073 SingleEmissionCheckScope guard(this);
2074 umsubl(rd, rn, rm, ra);
2075 }
Unreachable()2076 void Unreachable() {
2077 VIXL_ASSERT(allow_macro_instructions_);
2078 SingleEmissionCheckScope guard(this);
2079 if (allow_simulator_instructions_) {
2080 hlt(kUnreachableOpcode);
2081 } else {
2082 // Branch to 0 to generate a segfault.
2083 // lr - kInstructionSize is the address of the offending instruction.
2084 blr(xzr);
2085 }
2086 }
Uxtb(const Register & rd,const Register & rn)2087 void Uxtb(const Register& rd, const Register& rn) {
2088 VIXL_ASSERT(allow_macro_instructions_);
2089 VIXL_ASSERT(!rd.IsZero());
2090 VIXL_ASSERT(!rn.IsZero());
2091 SingleEmissionCheckScope guard(this);
2092 uxtb(rd, rn);
2093 }
Uxth(const Register & rd,const Register & rn)2094 void Uxth(const Register& rd, const Register& rn) {
2095 VIXL_ASSERT(allow_macro_instructions_);
2096 VIXL_ASSERT(!rd.IsZero());
2097 VIXL_ASSERT(!rn.IsZero());
2098 SingleEmissionCheckScope guard(this);
2099 uxth(rd, rn);
2100 }
Uxtw(const Register & rd,const Register & rn)2101 void Uxtw(const Register& rd, const Register& rn) {
2102 VIXL_ASSERT(allow_macro_instructions_);
2103 VIXL_ASSERT(!rd.IsZero());
2104 VIXL_ASSERT(!rn.IsZero());
2105 SingleEmissionCheckScope guard(this);
2106 uxtw(rd, rn);
2107 }
2108
2109 // NEON 3 vector register instructions.
2110 #define NEON_3VREG_MACRO_LIST(V) \
2111 V(add, Add) \
2112 V(addhn, Addhn) \
2113 V(addhn2, Addhn2) \
2114 V(addp, Addp) \
2115 V(and_, And) \
2116 V(bic, Bic) \
2117 V(bif, Bif) \
2118 V(bit, Bit) \
2119 V(bsl, Bsl) \
2120 V(cmeq, Cmeq) \
2121 V(cmge, Cmge) \
2122 V(cmgt, Cmgt) \
2123 V(cmhi, Cmhi) \
2124 V(cmhs, Cmhs) \
2125 V(cmtst, Cmtst) \
2126 V(eor, Eor) \
2127 V(fabd, Fabd) \
2128 V(facge, Facge) \
2129 V(facgt, Facgt) \
2130 V(faddp, Faddp) \
2131 V(fcmeq, Fcmeq) \
2132 V(fcmge, Fcmge) \
2133 V(fcmgt, Fcmgt) \
2134 V(fmaxnmp, Fmaxnmp) \
2135 V(fmaxp, Fmaxp) \
2136 V(fminnmp, Fminnmp) \
2137 V(fminp, Fminp) \
2138 V(fmla, Fmla) \
2139 V(fmls, Fmls) \
2140 V(fmulx, Fmulx) \
2141 V(frecps, Frecps) \
2142 V(frsqrts, Frsqrts) \
2143 V(mla, Mla) \
2144 V(mls, Mls) \
2145 V(mul, Mul) \
2146 V(orn, Orn) \
2147 V(orr, Orr) \
2148 V(pmul, Pmul) \
2149 V(pmull, Pmull) \
2150 V(pmull2, Pmull2) \
2151 V(raddhn, Raddhn) \
2152 V(raddhn2, Raddhn2) \
2153 V(rsubhn, Rsubhn) \
2154 V(rsubhn2, Rsubhn2) \
2155 V(saba, Saba) \
2156 V(sabal, Sabal) \
2157 V(sabal2, Sabal2) \
2158 V(sabd, Sabd) \
2159 V(sabdl, Sabdl) \
2160 V(sabdl2, Sabdl2) \
2161 V(saddl, Saddl) \
2162 V(saddl2, Saddl2) \
2163 V(saddw, Saddw) \
2164 V(saddw2, Saddw2) \
2165 V(shadd, Shadd) \
2166 V(shsub, Shsub) \
2167 V(smax, Smax) \
2168 V(smaxp, Smaxp) \
2169 V(smin, Smin) \
2170 V(sminp, Sminp) \
2171 V(smlal, Smlal) \
2172 V(smlal2, Smlal2) \
2173 V(smlsl, Smlsl) \
2174 V(smlsl2, Smlsl2) \
2175 V(smull, Smull) \
2176 V(smull2, Smull2) \
2177 V(sqadd, Sqadd) \
2178 V(sqdmlal, Sqdmlal) \
2179 V(sqdmlal2, Sqdmlal2) \
2180 V(sqdmlsl, Sqdmlsl) \
2181 V(sqdmlsl2, Sqdmlsl2) \
2182 V(sqdmulh, Sqdmulh) \
2183 V(sqdmull, Sqdmull) \
2184 V(sqdmull2, Sqdmull2) \
2185 V(sqrdmulh, Sqrdmulh) \
2186 V(sqrshl, Sqrshl) \
2187 V(sqshl, Sqshl) \
2188 V(sqsub, Sqsub) \
2189 V(srhadd, Srhadd) \
2190 V(srshl, Srshl) \
2191 V(sshl, Sshl) \
2192 V(ssubl, Ssubl) \
2193 V(ssubl2, Ssubl2) \
2194 V(ssubw, Ssubw) \
2195 V(ssubw2, Ssubw2) \
2196 V(sub, Sub) \
2197 V(subhn, Subhn) \
2198 V(subhn2, Subhn2) \
2199 V(trn1, Trn1) \
2200 V(trn2, Trn2) \
2201 V(uaba, Uaba) \
2202 V(uabal, Uabal) \
2203 V(uabal2, Uabal2) \
2204 V(uabd, Uabd) \
2205 V(uabdl, Uabdl) \
2206 V(uabdl2, Uabdl2) \
2207 V(uaddl, Uaddl) \
2208 V(uaddl2, Uaddl2) \
2209 V(uaddw, Uaddw) \
2210 V(uaddw2, Uaddw2) \
2211 V(uhadd, Uhadd) \
2212 V(uhsub, Uhsub) \
2213 V(umax, Umax) \
2214 V(umaxp, Umaxp) \
2215 V(umin, Umin) \
2216 V(uminp, Uminp) \
2217 V(umlal, Umlal) \
2218 V(umlal2, Umlal2) \
2219 V(umlsl, Umlsl) \
2220 V(umlsl2, Umlsl2) \
2221 V(umull, Umull) \
2222 V(umull2, Umull2) \
2223 V(uqadd, Uqadd) \
2224 V(uqrshl, Uqrshl) \
2225 V(uqshl, Uqshl) \
2226 V(uqsub, Uqsub) \
2227 V(urhadd, Urhadd) \
2228 V(urshl, Urshl) \
2229 V(ushl, Ushl) \
2230 V(usubl, Usubl) \
2231 V(usubl2, Usubl2) \
2232 V(usubw, Usubw) \
2233 V(usubw2, Usubw2) \
2234 V(uzp1, Uzp1) \
2235 V(uzp2, Uzp2) \
2236 V(zip1, Zip1) \
2237 V(zip2, Zip2)
2238
// Generates one macro-assembler method per entry in NEON_3VREG_MACRO_LIST:
// each checks that macro instructions are allowed, then emits the
// corresponding three-vector-register assembler instruction.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const VRegister& vd,            \
            const VRegister& vn,            \
            const VRegister& vm) {          \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    ASM(vd, vn, vm);                        \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
  #undef DEFINE_MACRO_ASM_FUNC
2249
2250 // NEON 2 vector register instructions.
2251 #define NEON_2VREG_MACRO_LIST(V) \
2252 V(abs, Abs) \
2253 V(addp, Addp) \
2254 V(addv, Addv) \
2255 V(cls, Cls) \
2256 V(clz, Clz) \
2257 V(cnt, Cnt) \
2258 V(fabs, Fabs) \
2259 V(faddp, Faddp) \
2260 V(fcvtas, Fcvtas) \
2261 V(fcvtau, Fcvtau) \
2262 V(fcvtms, Fcvtms) \
2263 V(fcvtmu, Fcvtmu) \
2264 V(fcvtns, Fcvtns) \
2265 V(fcvtnu, Fcvtnu) \
2266 V(fcvtps, Fcvtps) \
2267 V(fcvtpu, Fcvtpu) \
2268 V(fmaxnmp, Fmaxnmp) \
2269 V(fmaxnmv, Fmaxnmv) \
2270 V(fmaxp, Fmaxp) \
2271 V(fmaxv, Fmaxv) \
2272 V(fminnmp, Fminnmp) \
2273 V(fminnmv, Fminnmv) \
2274 V(fminp, Fminp) \
2275 V(fminv, Fminv) \
2276 V(fneg, Fneg) \
2277 V(frecpe, Frecpe) \
2278 V(frecpx, Frecpx) \
2279 V(frinta, Frinta) \
2280 V(frinti, Frinti) \
2281 V(frintm, Frintm) \
2282 V(frintn, Frintn) \
2283 V(frintp, Frintp) \
2284 V(frintx, Frintx) \
2285 V(frintz, Frintz) \
2286 V(frsqrte, Frsqrte) \
2287 V(fsqrt, Fsqrt) \
2288 V(mov, Mov) \
2289 V(mvn, Mvn) \
2290 V(neg, Neg) \
2291 V(not_, Not) \
2292 V(rbit, Rbit) \
2293 V(rev16, Rev16) \
2294 V(rev32, Rev32) \
2295 V(rev64, Rev64) \
2296 V(sadalp, Sadalp) \
2297 V(saddlp, Saddlp) \
2298 V(saddlv, Saddlv) \
2299 V(smaxv, Smaxv) \
2300 V(sminv, Sminv) \
2301 V(sqabs, Sqabs) \
2302 V(sqneg, Sqneg) \
2303 V(sqxtn, Sqxtn) \
2304 V(sqxtn2, Sqxtn2) \
2305 V(sqxtun, Sqxtun) \
2306 V(sqxtun2, Sqxtun2) \
2307 V(suqadd, Suqadd) \
2308 V(sxtl, Sxtl) \
2309 V(sxtl2, Sxtl2) \
2310 V(uadalp, Uadalp) \
2311 V(uaddlp, Uaddlp) \
2312 V(uaddlv, Uaddlv) \
2313 V(umaxv, Umaxv) \
2314 V(uminv, Uminv) \
2315 V(uqxtn, Uqxtn) \
2316 V(uqxtn2, Uqxtn2) \
2317 V(urecpe, Urecpe) \
2318 V(ursqrte, Ursqrte) \
2319 V(usqadd, Usqadd) \
2320 V(uxtl, Uxtl) \
2321 V(uxtl2, Uxtl2) \
2322 V(xtn, Xtn) \
2323 V(xtn2, Xtn2)
2324
// Generates one macro-assembler method per entry in NEON_2VREG_MACRO_LIST:
// each checks that macro instructions are allowed, then emits the
// corresponding two-vector-register assembler instruction.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const VRegister& vd,            \
            const VRegister& vn) {          \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    ASM(vd, vn);                            \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
  #undef DEFINE_MACRO_ASM_FUNC
2334
2335 // NEON 2 vector register with immediate instructions.
// NEON 2 vector register with immediate instructions.
// FP compare-with-immediate forms: (assembler mnemonic, macro name).
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq)                      \
  V(fcmge, Fcmge)                      \
  V(fcmgt, Fcmgt)                      \
  V(fcmle, Fcmle)                      \
  V(fcmlt, Fcmlt)

// Generates one macro-assembler method per list entry; each emits the
// corresponding vector-compare-against-immediate assembler instruction.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const VRegister& vd,            \
            const VRegister& vn,            \
            double imm) {                   \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    ASM(vd, vn, imm);                       \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
  #undef DEFINE_MACRO_ASM_FUNC
2353
2354 // NEON by element instructions.
2355 #define NEON_BYELEMENT_MACRO_LIST(V) \
2356 V(fmul, Fmul) \
2357 V(fmla, Fmla) \
2358 V(fmls, Fmls) \
2359 V(fmulx, Fmulx) \
2360 V(mul, Mul) \
2361 V(mla, Mla) \
2362 V(mls, Mls) \
2363 V(sqdmulh, Sqdmulh) \
2364 V(sqrdmulh, Sqrdmulh) \
2365 V(sqdmull, Sqdmull) \
2366 V(sqdmull2, Sqdmull2) \
2367 V(sqdmlal, Sqdmlal) \
2368 V(sqdmlal2, Sqdmlal2) \
2369 V(sqdmlsl, Sqdmlsl) \
2370 V(sqdmlsl2, Sqdmlsl2) \
2371 V(smull, Smull) \
2372 V(smull2, Smull2) \
2373 V(smlal, Smlal) \
2374 V(smlal2, Smlal2) \
2375 V(smlsl, Smlsl) \
2376 V(smlsl2, Smlsl2) \
2377 V(umull, Umull) \
2378 V(umull2, Umull2) \
2379 V(umlal, Umlal) \
2380 V(umlal2, Umlal2) \
2381 V(umlsl, Umlsl) \
2382 V(umlsl2, Umlsl2)
2383
2384 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2385 void MASM(const VRegister& vd, \
2386 const VRegister& vn, \
2387 const VRegister& vm, \
2388 int vm_index \
2389 ) { \
2390 VIXL_ASSERT(allow_macro_instructions_); \
2391 SingleEmissionCheckScope guard(this); \
2392 ASM(vd, vn, vm, vm_index); \
2393 }
2394 NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2395 #undef DEFINE_MACRO_ASM_FUNC
2396
2397 #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
2398 V(rshrn, Rshrn) \
2399 V(rshrn2, Rshrn2) \
2400 V(shl, Shl) \
2401 V(shll, Shll) \
2402 V(shll2, Shll2) \
2403 V(shrn, Shrn) \
2404 V(shrn2, Shrn2) \
2405 V(sli, Sli) \
2406 V(sqrshrn, Sqrshrn) \
2407 V(sqrshrn2, Sqrshrn2) \
2408 V(sqrshrun, Sqrshrun) \
2409 V(sqrshrun2, Sqrshrun2) \
2410 V(sqshl, Sqshl) \
2411 V(sqshlu, Sqshlu) \
2412 V(sqshrn, Sqshrn) \
2413 V(sqshrn2, Sqshrn2) \
2414 V(sqshrun, Sqshrun) \
2415 V(sqshrun2, Sqshrun2) \
2416 V(sri, Sri) \
2417 V(srshr, Srshr) \
2418 V(srsra, Srsra) \
2419 V(sshll, Sshll) \
2420 V(sshll2, Sshll2) \
2421 V(sshr, Sshr) \
2422 V(ssra, Ssra) \
2423 V(uqrshrn, Uqrshrn) \
2424 V(uqrshrn2, Uqrshrn2) \
2425 V(uqshl, Uqshl) \
2426 V(uqshrn, Uqshrn) \
2427 V(uqshrn2, Uqshrn2) \
2428 V(urshr, Urshr) \
2429 V(ursra, Ursra) \
2430 V(ushll, Ushll) \
2431 V(ushll2, Ushll2) \
2432 V(ushr, Ushr) \
2433 V(usra, Usra) \
2434
2435 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2436 void MASM(const VRegister& vd, \
2437 const VRegister& vn, \
2438 int shift) { \
2439 VIXL_ASSERT(allow_macro_instructions_); \
2440 SingleEmissionCheckScope guard(this); \
2441 ASM(vd, vn, shift); \
2442 }
2443 NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2444 #undef DEFINE_MACRO_ASM_FUNC
2445
2446 void Bic(const VRegister& vd,
2447 const int imm8,
2448 const int left_shift = 0) {
2449 VIXL_ASSERT(allow_macro_instructions_);
2450 SingleEmissionCheckScope guard(this);
2451 bic(vd, imm8, left_shift);
2452 }
Cmeq(const VRegister & vd,const VRegister & vn,int imm)2453 void Cmeq(const VRegister& vd,
2454 const VRegister& vn,
2455 int imm) {
2456 VIXL_ASSERT(allow_macro_instructions_);
2457 SingleEmissionCheckScope guard(this);
2458 cmeq(vd, vn, imm);
2459 }
Cmge(const VRegister & vd,const VRegister & vn,int imm)2460 void Cmge(const VRegister& vd,
2461 const VRegister& vn,
2462 int imm) {
2463 VIXL_ASSERT(allow_macro_instructions_);
2464 SingleEmissionCheckScope guard(this);
2465 cmge(vd, vn, imm);
2466 }
Cmgt(const VRegister & vd,const VRegister & vn,int imm)2467 void Cmgt(const VRegister& vd,
2468 const VRegister& vn,
2469 int imm) {
2470 VIXL_ASSERT(allow_macro_instructions_);
2471 SingleEmissionCheckScope guard(this);
2472 cmgt(vd, vn, imm);
2473 }
Cmle(const VRegister & vd,const VRegister & vn,int imm)2474 void Cmle(const VRegister& vd,
2475 const VRegister& vn,
2476 int imm) {
2477 VIXL_ASSERT(allow_macro_instructions_);
2478 SingleEmissionCheckScope guard(this);
2479 cmle(vd, vn, imm);
2480 }
Cmlt(const VRegister & vd,const VRegister & vn,int imm)2481 void Cmlt(const VRegister& vd,
2482 const VRegister& vn,
2483 int imm) {
2484 VIXL_ASSERT(allow_macro_instructions_);
2485 SingleEmissionCheckScope guard(this);
2486 cmlt(vd, vn, imm);
2487 }
Dup(const VRegister & vd,const VRegister & vn,int index)2488 void Dup(const VRegister& vd,
2489 const VRegister& vn,
2490 int index) {
2491 VIXL_ASSERT(allow_macro_instructions_);
2492 SingleEmissionCheckScope guard(this);
2493 dup(vd, vn, index);
2494 }
Dup(const VRegister & vd,const Register & rn)2495 void Dup(const VRegister& vd,
2496 const Register& rn) {
2497 VIXL_ASSERT(allow_macro_instructions_);
2498 SingleEmissionCheckScope guard(this);
2499 dup(vd, rn);
2500 }
Ext(const VRegister & vd,const VRegister & vn,const VRegister & vm,int index)2501 void Ext(const VRegister& vd,
2502 const VRegister& vn,
2503 const VRegister& vm,
2504 int index) {
2505 VIXL_ASSERT(allow_macro_instructions_);
2506 SingleEmissionCheckScope guard(this);
2507 ext(vd, vn, vm, index);
2508 }
Ins(const VRegister & vd,int vd_index,const VRegister & vn,int vn_index)2509 void Ins(const VRegister& vd,
2510 int vd_index,
2511 const VRegister& vn,
2512 int vn_index) {
2513 VIXL_ASSERT(allow_macro_instructions_);
2514 SingleEmissionCheckScope guard(this);
2515 ins(vd, vd_index, vn, vn_index);
2516 }
Ins(const VRegister & vd,int vd_index,const Register & rn)2517 void Ins(const VRegister& vd,
2518 int vd_index,
2519 const Register& rn) {
2520 VIXL_ASSERT(allow_macro_instructions_);
2521 SingleEmissionCheckScope guard(this);
2522 ins(vd, vd_index, rn);
2523 }
Ld1(const VRegister & vt,const MemOperand & src)2524 void Ld1(const VRegister& vt,
2525 const MemOperand& src) {
2526 VIXL_ASSERT(allow_macro_instructions_);
2527 SingleEmissionCheckScope guard(this);
2528 ld1(vt, src);
2529 }
Ld1(const VRegister & vt,const VRegister & vt2,const MemOperand & src)2530 void Ld1(const VRegister& vt,
2531 const VRegister& vt2,
2532 const MemOperand& src) {
2533 VIXL_ASSERT(allow_macro_instructions_);
2534 SingleEmissionCheckScope guard(this);
2535 ld1(vt, vt2, src);
2536 }
Ld1(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const MemOperand & src)2537 void Ld1(const VRegister& vt,
2538 const VRegister& vt2,
2539 const VRegister& vt3,
2540 const MemOperand& src) {
2541 VIXL_ASSERT(allow_macro_instructions_);
2542 SingleEmissionCheckScope guard(this);
2543 ld1(vt, vt2, vt3, src);
2544 }
Ld1(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,const MemOperand & src)2545 void Ld1(const VRegister& vt,
2546 const VRegister& vt2,
2547 const VRegister& vt3,
2548 const VRegister& vt4,
2549 const MemOperand& src) {
2550 VIXL_ASSERT(allow_macro_instructions_);
2551 SingleEmissionCheckScope guard(this);
2552 ld1(vt, vt2, vt3, vt4, src);
2553 }
Ld1(const VRegister & vt,int lane,const MemOperand & src)2554 void Ld1(const VRegister& vt,
2555 int lane,
2556 const MemOperand& src) {
2557 VIXL_ASSERT(allow_macro_instructions_);
2558 SingleEmissionCheckScope guard(this);
2559 ld1(vt, lane, src);
2560 }
Ld1r(const VRegister & vt,const MemOperand & src)2561 void Ld1r(const VRegister& vt,
2562 const MemOperand& src) {
2563 VIXL_ASSERT(allow_macro_instructions_);
2564 SingleEmissionCheckScope guard(this);
2565 ld1r(vt, src);
2566 }
Ld2(const VRegister & vt,const VRegister & vt2,const MemOperand & src)2567 void Ld2(const VRegister& vt,
2568 const VRegister& vt2,
2569 const MemOperand& src) {
2570 VIXL_ASSERT(allow_macro_instructions_);
2571 SingleEmissionCheckScope guard(this);
2572 ld2(vt, vt2, src);
2573 }
Ld2(const VRegister & vt,const VRegister & vt2,int lane,const MemOperand & src)2574 void Ld2(const VRegister& vt,
2575 const VRegister& vt2,
2576 int lane,
2577 const MemOperand& src) {
2578 VIXL_ASSERT(allow_macro_instructions_);
2579 SingleEmissionCheckScope guard(this);
2580 ld2(vt, vt2, lane, src);
2581 }
Ld2r(const VRegister & vt,const VRegister & vt2,const MemOperand & src)2582 void Ld2r(const VRegister& vt,
2583 const VRegister& vt2,
2584 const MemOperand& src) {
2585 VIXL_ASSERT(allow_macro_instructions_);
2586 SingleEmissionCheckScope guard(this);
2587 ld2r(vt, vt2, src);
2588 }
Ld3(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const MemOperand & src)2589 void Ld3(const VRegister& vt,
2590 const VRegister& vt2,
2591 const VRegister& vt3,
2592 const MemOperand& src) {
2593 VIXL_ASSERT(allow_macro_instructions_);
2594 SingleEmissionCheckScope guard(this);
2595 ld3(vt, vt2, vt3, src);
2596 }
Ld3(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,int lane,const MemOperand & src)2597 void Ld3(const VRegister& vt,
2598 const VRegister& vt2,
2599 const VRegister& vt3,
2600 int lane,
2601 const MemOperand& src) {
2602 VIXL_ASSERT(allow_macro_instructions_);
2603 SingleEmissionCheckScope guard(this);
2604 ld3(vt, vt2, vt3, lane, src);
2605 }
Ld3r(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const MemOperand & src)2606 void Ld3r(const VRegister& vt,
2607 const VRegister& vt2,
2608 const VRegister& vt3,
2609 const MemOperand& src) {
2610 VIXL_ASSERT(allow_macro_instructions_);
2611 SingleEmissionCheckScope guard(this);
2612 ld3r(vt, vt2, vt3, src);
2613 }
Ld4(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,const MemOperand & src)2614 void Ld4(const VRegister& vt,
2615 const VRegister& vt2,
2616 const VRegister& vt3,
2617 const VRegister& vt4,
2618 const MemOperand& src) {
2619 VIXL_ASSERT(allow_macro_instructions_);
2620 SingleEmissionCheckScope guard(this);
2621 ld4(vt, vt2, vt3, vt4, src);
2622 }
Ld4(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,int lane,const MemOperand & src)2623 void Ld4(const VRegister& vt,
2624 const VRegister& vt2,
2625 const VRegister& vt3,
2626 const VRegister& vt4,
2627 int lane,
2628 const MemOperand& src) {
2629 VIXL_ASSERT(allow_macro_instructions_);
2630 SingleEmissionCheckScope guard(this);
2631 ld4(vt, vt2, vt3, vt4, lane, src);
2632 }
Ld4r(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,const MemOperand & src)2633 void Ld4r(const VRegister& vt,
2634 const VRegister& vt2,
2635 const VRegister& vt3,
2636 const VRegister& vt4,
2637 const MemOperand& src) {
2638 VIXL_ASSERT(allow_macro_instructions_);
2639 SingleEmissionCheckScope guard(this);
2640 ld4r(vt, vt2, vt3, vt4, src);
2641 }
Mov(const VRegister & vd,int vd_index,const VRegister & vn,int vn_index)2642 void Mov(const VRegister& vd,
2643 int vd_index,
2644 const VRegister& vn,
2645 int vn_index) {
2646 VIXL_ASSERT(allow_macro_instructions_);
2647 SingleEmissionCheckScope guard(this);
2648 mov(vd, vd_index, vn, vn_index);
2649 }
Mov(const VRegister & vd,const VRegister & vn,int index)2650 void Mov(const VRegister& vd,
2651 const VRegister& vn,
2652 int index) {
2653 VIXL_ASSERT(allow_macro_instructions_);
2654 SingleEmissionCheckScope guard(this);
2655 mov(vd, vn, index);
2656 }
Mov(const VRegister & vd,int vd_index,const Register & rn)2657 void Mov(const VRegister& vd,
2658 int vd_index,
2659 const Register& rn) {
2660 VIXL_ASSERT(allow_macro_instructions_);
2661 SingleEmissionCheckScope guard(this);
2662 mov(vd, vd_index, rn);
2663 }
Mov(const Register & rd,const VRegister & vn,int vn_index)2664 void Mov(const Register& rd,
2665 const VRegister& vn,
2666 int vn_index) {
2667 VIXL_ASSERT(allow_macro_instructions_);
2668 SingleEmissionCheckScope guard(this);
2669 mov(rd, vn, vn_index);
2670 }
2671 void Movi(const VRegister& vd,
2672 uint64_t imm,
2673 Shift shift = LSL,
2674 int shift_amount = 0);
2675 void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
2676 void Mvni(const VRegister& vd,
2677 const int imm8,
2678 Shift shift = LSL,
2679 const int shift_amount = 0) {
2680 VIXL_ASSERT(allow_macro_instructions_);
2681 SingleEmissionCheckScope guard(this);
2682 mvni(vd, imm8, shift, shift_amount);
2683 }
2684 void Orr(const VRegister& vd,
2685 const int imm8,
2686 const int left_shift = 0) {
2687 VIXL_ASSERT(allow_macro_instructions_);
2688 SingleEmissionCheckScope guard(this);
2689 orr(vd, imm8, left_shift);
2690 }
2691 void Scvtf(const VRegister& vd,
2692 const VRegister& vn,
2693 int fbits = 0) {
2694 VIXL_ASSERT(allow_macro_instructions_);
2695 SingleEmissionCheckScope guard(this);
2696 scvtf(vd, vn, fbits);
2697 }
2698 void Ucvtf(const VRegister& vd,
2699 const VRegister& vn,
2700 int fbits = 0) {
2701 VIXL_ASSERT(allow_macro_instructions_);
2702 SingleEmissionCheckScope guard(this);
2703 ucvtf(vd, vn, fbits);
2704 }
2705 void Fcvtzs(const VRegister& vd,
2706 const VRegister& vn,
2707 int fbits = 0) {
2708 VIXL_ASSERT(allow_macro_instructions_);
2709 SingleEmissionCheckScope guard(this);
2710 fcvtzs(vd, vn, fbits);
2711 }
2712 void Fcvtzu(const VRegister& vd,
2713 const VRegister& vn,
2714 int fbits = 0) {
2715 VIXL_ASSERT(allow_macro_instructions_);
2716 SingleEmissionCheckScope guard(this);
2717 fcvtzu(vd, vn, fbits);
2718 }
St1(const VRegister & vt,const MemOperand & dst)2719 void St1(const VRegister& vt,
2720 const MemOperand& dst) {
2721 VIXL_ASSERT(allow_macro_instructions_);
2722 SingleEmissionCheckScope guard(this);
2723 st1(vt, dst);
2724 }
St1(const VRegister & vt,const VRegister & vt2,const MemOperand & dst)2725 void St1(const VRegister& vt,
2726 const VRegister& vt2,
2727 const MemOperand& dst) {
2728 VIXL_ASSERT(allow_macro_instructions_);
2729 SingleEmissionCheckScope guard(this);
2730 st1(vt, vt2, dst);
2731 }
St1(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const MemOperand & dst)2732 void St1(const VRegister& vt,
2733 const VRegister& vt2,
2734 const VRegister& vt3,
2735 const MemOperand& dst) {
2736 VIXL_ASSERT(allow_macro_instructions_);
2737 SingleEmissionCheckScope guard(this);
2738 st1(vt, vt2, vt3, dst);
2739 }
St1(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,const MemOperand & dst)2740 void St1(const VRegister& vt,
2741 const VRegister& vt2,
2742 const VRegister& vt3,
2743 const VRegister& vt4,
2744 const MemOperand& dst) {
2745 VIXL_ASSERT(allow_macro_instructions_);
2746 SingleEmissionCheckScope guard(this);
2747 st1(vt, vt2, vt3, vt4, dst);
2748 }
St1(const VRegister & vt,int lane,const MemOperand & dst)2749 void St1(const VRegister& vt,
2750 int lane,
2751 const MemOperand& dst) {
2752 VIXL_ASSERT(allow_macro_instructions_);
2753 SingleEmissionCheckScope guard(this);
2754 st1(vt, lane, dst);
2755 }
St2(const VRegister & vt,const VRegister & vt2,const MemOperand & dst)2756 void St2(const VRegister& vt,
2757 const VRegister& vt2,
2758 const MemOperand& dst) {
2759 VIXL_ASSERT(allow_macro_instructions_);
2760 SingleEmissionCheckScope guard(this);
2761 st2(vt, vt2, dst);
2762 }
St3(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const MemOperand & dst)2763 void St3(const VRegister& vt,
2764 const VRegister& vt2,
2765 const VRegister& vt3,
2766 const MemOperand& dst) {
2767 VIXL_ASSERT(allow_macro_instructions_);
2768 SingleEmissionCheckScope guard(this);
2769 st3(vt, vt2, vt3, dst);
2770 }
St4(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,const MemOperand & dst)2771 void St4(const VRegister& vt,
2772 const VRegister& vt2,
2773 const VRegister& vt3,
2774 const VRegister& vt4,
2775 const MemOperand& dst) {
2776 VIXL_ASSERT(allow_macro_instructions_);
2777 SingleEmissionCheckScope guard(this);
2778 st4(vt, vt2, vt3, vt4, dst);
2779 }
St2(const VRegister & vt,const VRegister & vt2,int lane,const MemOperand & dst)2780 void St2(const VRegister& vt,
2781 const VRegister& vt2,
2782 int lane,
2783 const MemOperand& dst) {
2784 VIXL_ASSERT(allow_macro_instructions_);
2785 SingleEmissionCheckScope guard(this);
2786 st2(vt, vt2, lane, dst);
2787 }
St3(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,int lane,const MemOperand & dst)2788 void St3(const VRegister& vt,
2789 const VRegister& vt2,
2790 const VRegister& vt3,
2791 int lane,
2792 const MemOperand& dst) {
2793 VIXL_ASSERT(allow_macro_instructions_);
2794 SingleEmissionCheckScope guard(this);
2795 st3(vt, vt2, vt3, lane, dst);
2796 }
St4(const VRegister & vt,const VRegister & vt2,const VRegister & vt3,const VRegister & vt4,int lane,const MemOperand & dst)2797 void St4(const VRegister& vt,
2798 const VRegister& vt2,
2799 const VRegister& vt3,
2800 const VRegister& vt4,
2801 int lane,
2802 const MemOperand& dst) {
2803 VIXL_ASSERT(allow_macro_instructions_);
2804 SingleEmissionCheckScope guard(this);
2805 st4(vt, vt2, vt3, vt4, lane, dst);
2806 }
Smov(const Register & rd,const VRegister & vn,int vn_index)2807 void Smov(const Register& rd,
2808 const VRegister& vn,
2809 int vn_index) {
2810 VIXL_ASSERT(allow_macro_instructions_);
2811 SingleEmissionCheckScope guard(this);
2812 smov(rd, vn, vn_index);
2813 }
Umov(const Register & rd,const VRegister & vn,int vn_index)2814 void Umov(const Register& rd,
2815 const VRegister& vn,
2816 int vn_index) {
2817 VIXL_ASSERT(allow_macro_instructions_);
2818 SingleEmissionCheckScope guard(this);
2819 umov(rd, vn, vn_index);
2820 }
Crc32b(const Register & rd,const Register & rn,const Register & rm)2821 void Crc32b(const Register& rd,
2822 const Register& rn,
2823 const Register& rm) {
2824 VIXL_ASSERT(allow_macro_instructions_);
2825 SingleEmissionCheckScope guard(this);
2826 crc32b(rd, rn, rm);
2827 }
Crc32h(const Register & rd,const Register & rn,const Register & rm)2828 void Crc32h(const Register& rd,
2829 const Register& rn,
2830 const Register& rm) {
2831 VIXL_ASSERT(allow_macro_instructions_);
2832 SingleEmissionCheckScope guard(this);
2833 crc32h(rd, rn, rm);
2834 }
Crc32w(const Register & rd,const Register & rn,const Register & rm)2835 void Crc32w(const Register& rd,
2836 const Register& rn,
2837 const Register& rm) {
2838 VIXL_ASSERT(allow_macro_instructions_);
2839 SingleEmissionCheckScope guard(this);
2840 crc32w(rd, rn, rm);
2841 }
Crc32x(const Register & rd,const Register & rn,const Register & rm)2842 void Crc32x(const Register& rd,
2843 const Register& rn,
2844 const Register& rm) {
2845 VIXL_ASSERT(allow_macro_instructions_);
2846 SingleEmissionCheckScope guard(this);
2847 crc32x(rd, rn, rm);
2848 }
Crc32cb(const Register & rd,const Register & rn,const Register & rm)2849 void Crc32cb(const Register& rd,
2850 const Register& rn,
2851 const Register& rm) {
2852 VIXL_ASSERT(allow_macro_instructions_);
2853 SingleEmissionCheckScope guard(this);
2854 crc32cb(rd, rn, rm);
2855 }
Crc32ch(const Register & rd,const Register & rn,const Register & rm)2856 void Crc32ch(const Register& rd,
2857 const Register& rn,
2858 const Register& rm) {
2859 VIXL_ASSERT(allow_macro_instructions_);
2860 SingleEmissionCheckScope guard(this);
2861 crc32ch(rd, rn, rm);
2862 }
Crc32cw(const Register & rd,const Register & rn,const Register & rm)2863 void Crc32cw(const Register& rd,
2864 const Register& rn,
2865 const Register& rm) {
2866 VIXL_ASSERT(allow_macro_instructions_);
2867 SingleEmissionCheckScope guard(this);
2868 crc32cw(rd, rn, rm);
2869 }
Crc32cx(const Register & rd,const Register & rn,const Register & rm)2870 void Crc32cx(const Register& rd,
2871 const Register& rn,
2872 const Register& rm) {
2873 VIXL_ASSERT(allow_macro_instructions_);
2874 SingleEmissionCheckScope guard(this);
2875 crc32cx(rd, rn, rm);
2876 }
2877
  // Create a Literal<T> whose lifetime is owned by this MacroAssembler's
  // literal pool: it is allocated here and deleted when the pool is
  // destroyed (RawLiteral::kDeletedOnPoolDestruction), so callers must not
  // delete the returned pointer themselves.
  template<typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    return new Literal<T>(value,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }

  // As above, but for literals constructed from two values — presumably the
  // high and low 64-bit halves of a 128-bit literal (see the parameter
  // names); confirm against the Literal<T> constructor.
  template<typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
    return new Literal<T>(high64, low64,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }
2891
2892 // Push the system stack pointer (sp) down to allow the same to be done to
2893 // the current stack pointer (according to StackPointer()). This must be
2894 // called _before_ accessing the memory.
2895 //
2896 // This is necessary when pushing or otherwise adding things to the stack, to
2897 // satisfy the AAPCS64 constraint that the memory below the system stack
2898 // pointer is not accessed.
2899 //
2900 // This method asserts that StackPointer() is not sp, since the call does
2901 // not make sense in that context.
2902 //
2903 // TODO: This method can only accept values of 'space' that can be encoded in
2904 // one instruction. Refer to the implementation for details.
2905 void BumpSystemStackPointer(const Operand& space);
2906
#ifdef VIXL_DEBUG
  // Debug-only switch: when false, macro methods assert via
  // VIXL_ASSERT(allow_macro_instructions_). Toggled by
  // InstructionAccurateScope to forbid variable-length macro expansions.
  void SetAllowMacroInstructions(bool value) {
    allow_macro_instructions_ = value;
  }

  bool AllowMacroInstructions() const {
    return allow_macro_instructions_;
  }
#endif
2916
  // Control whether generated code is targeted at the VIXL simulator
  // (see allow_simulator_instructions_).
  void SetAllowSimulatorInstructions(bool value) {
    allow_simulator_instructions_ = value;
  }

  bool AllowSimulatorInstructions() const {
    return allow_simulator_instructions_;
  }
2924
  // Block or release emission of the literal and veneer pools. Blocking
  // prevents a pool from being emitted; the RAII Block*PoolScope helpers
  // below pair these calls automatically.
  void BlockLiteralPool() { literal_pool_.Block(); }
  void ReleaseLiteralPool() { literal_pool_.Release(); }
  bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
  void BlockVeneerPool() { veneer_pool_.Block(); }
  void ReleaseVeneerPool() { veneer_pool_.Release(); }
  bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }

  // Convenience wrappers operating on both pools at once.
  void BlockPools() {
    BlockLiteralPool();
    BlockVeneerPool();
  }

  void ReleasePools() {
    ReleaseLiteralPool();
    ReleaseVeneerPool();
  }
2941
  // Current size of the literal pool.
  size_t LiteralPoolSize() const {
    return literal_pool_.Size();
  }

  // Upper bound (in buffer bytes — used in checkpoint arithmetic by
  // EnsureEmitFor) on the space the literal pool could need if emitted.
  size_t LiteralPoolMaxSize() const {
    return literal_pool_.MaxSize();
  }

  // Upper bound on the space the veneer pool could need if emitted.
  size_t VeneerPoolMaxSize() const {
    return veneer_pool_.MaxSize();
  }

  // The number of unresolved branches that may require a veneer.
  int NumberOfPotentialVeneers() const {
    return veneer_pool_.NumberOfPotentialVeneers();
  }
2958
  // The earliest buffer offset at which either pool, or the end of the
  // buffer, requires attention.
  ptrdiff_t NextCheckPoint() {
    ptrdiff_t next_checkpoint_for_pools = std::min(literal_pool_.checkpoint(),
                                                   veneer_pool_.checkpoint());
    return std::min(next_checkpoint_for_pools, BufferEndOffset());
  }

  // Emit the literal pool (if it is not empty), then refresh the cached
  // checkpoints consulted by EnsureEmitFor().
  void EmitLiteralPool(LiteralPool::EmitOption option) {
    if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);

    checkpoint_ = NextCheckPoint();
    recommended_checkpoint_ = literal_pool_.NextRecommendedCheckpoint();
  }
2971
2972 void CheckEmitFor(size_t amount);
  // Ensure `amount` bytes of code can be emitted without hitting a cached
  // checkpoint mid-sequence. Calls CheckEmitFor() — which presumably emits
  // pools and/or grows the buffer; see its implementation — only when a
  // checkpoint could be crossed.
  void EnsureEmitFor(size_t amount) {
    ptrdiff_t offset = amount;
    // Conservative: budget for both pools being emitted in full before the
    // requested code.
    ptrdiff_t max_pools_size = literal_pool_.MaxSize() + veneer_pool_.MaxSize();
    ptrdiff_t cursor = CursorOffset();
    if ((cursor >= recommended_checkpoint_) ||
        ((cursor + offset + max_pools_size) >= checkpoint_)) {
      CheckEmitFor(amount);
    }
  }
2982
2983 // Set the current stack pointer, but don't generate any code.
  void SetStackPointer(const Register& stack_pointer) {
    // The stack pointer must not be in the scratch list: helpers may
    // allocate (and clobber) registers from TmpList().
    VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  const Register& StackPointer() const {
    return sp_;
  }

  // Scratch-register lists (general-purpose and FP/vector respectively),
  // used as the allocation pools for UseScratchRegisterScope.
  CPURegList* TmpList() { return &tmp_list_; }
  CPURegList* FPTmpList() { return &fptmp_list_; }
2996
2997 // Like printf, but print at run-time from generated code.
2998 //
2999 // The caller must ensure that arguments for floating-point placeholders
3000 // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
3001 // arguments for integer placeholders are Registers.
3002 //
3003 // At the moment it is only possible to print the value of sp if it is the
3004 // current stack pointer. Otherwise, the MacroAssembler will automatically
3005 // update sp on every push (using BumpSystemStackPointer), so determining its
3006 // value is difficult.
3007 //
3008 // Format placeholders that refer to more than one argument, or to a specific
3009 // argument, are not supported. This includes formats like "%1$d" or "%.*d".
3010 //
3011 // This function automatically preserves caller-saved registers so that
3012 // calling code can use Printf at any point without having to worry about
3013 // corruption. The preservation mechanism generates a lot of code. If this is
3014 // a problem, preserve the important registers manually and then call
3015 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
3016 // implicitly preserved.
3017 void Printf(const char * format,
3018 CPURegister arg0 = NoCPUReg,
3019 CPURegister arg1 = NoCPUReg,
3020 CPURegister arg2 = NoCPUReg,
3021 CPURegister arg3 = NoCPUReg);
3022
3023 // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
3024 //
3025 // The return code from the system printf call will be returned in x0.
3026 void PrintfNoPreserve(const char * format,
3027 const CPURegister& arg0 = NoCPUReg,
3028 const CPURegister& arg1 = NoCPUReg,
3029 const CPURegister& arg2 = NoCPUReg,
3030 const CPURegister& arg3 = NoCPUReg);
3031
3032 // Trace control when running the debug simulator.
3033 //
3034 // For example:
3035 //
3036 // __ Trace(LOG_REGS, TRACE_ENABLE);
3037 // Will add registers to the trace if it wasn't already the case.
3038 //
3039 // __ Trace(LOG_DISASM, TRACE_DISABLE);
3040 // Will stop logging disassembly. It has no effect if the disassembly wasn't
3041 // already being logged.
3042 void Trace(TraceParameters parameters, TraceCommand command);
3043
3044 // Log the requested data independently of what is being traced.
3045 //
3046 // For example:
3047 //
3048 // __ Log(LOG_FLAGS)
3049 // Will output the flags.
3050 void Log(TraceParameters parameters);
3051
3052 // Enable or disable instrumentation when an Instrument visitor is attached to
3053 // the simulator.
3054 void EnableInstrumentation();
3055 void DisableInstrumentation();
3056
3057 // Add a marker to the instrumentation data produced by an Instrument visitor.
3058 // The name is a two character string that will be attached to the marker in
3059 // the output data.
3060 void AnnotateInstrumentation(const char* marker_name);
3061
  // Expose the literal pool, e.g. for binding externally created literals
  // to it (see CreateLiteralDestroyedWithPool).
  LiteralPool* GetLiteralPool() {
    return &literal_pool_;
  }
3065
3066 private:
3067 // The actual Push and Pop implementations. These don't generate any code
3068 // other than that required for the push or pop. This allows
3069 // (Push|Pop)CPURegList to bundle together setup code for a large block of
3070 // registers.
3071 //
3072 // Note that size is per register, and is specified in bytes.
3073 void PushHelper(int count, int size,
3074 const CPURegister& src0, const CPURegister& src1,
3075 const CPURegister& src2, const CPURegister& src3);
3076 void PopHelper(int count, int size,
3077 const CPURegister& dst0, const CPURegister& dst1,
3078 const CPURegister& dst2, const CPURegister& dst3);
3079
3080 void Movi16bitHelper(const VRegister& vd, uint64_t imm);
3081 void Movi32bitHelper(const VRegister& vd, uint64_t imm);
3082 void Movi64bitHelper(const VRegister& vd, uint64_t imm);
3083
3084 // Perform necessary maintenance operations before a push or pop.
3085 //
3086 // Note that size is per register, and is specified in bytes.
3087 void PrepareForPush(int count, int size);
3088 void PrepareForPop(int count, int size);
3089
3090 // The actual implementation of load and store operations for CPURegList.
3091 enum LoadStoreCPURegListAction {
3092 kLoad,
3093 kStore
3094 };
3095 void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
3096 CPURegList registers,
3097 const MemOperand& mem);
3098 // Returns a MemOperand suitable for loading or storing a CPURegList at `dst`.
3099 // This helper may allocate registers from `scratch_scope` and generate code
3100 // to compute an intermediate address. The resulting MemOperand is only valid
3101 // as long as `scratch_scope` remains valid.
3102 MemOperand BaseMemOperandForLoadStoreCPURegList(
3103 const CPURegList& registers,
3104 const MemOperand& mem,
3105 UseScratchRegisterScope* scratch_scope);
3106
  // True if `label` cannot be reached from the current cursor position with
  // a direct branch of type `branch_type` (the PC offset does not fit the
  // branch's immediate field).
  bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
    return !Instruction::IsValidImmPCOffset(branch_type,
                                            label->location() - CursorOffset());
  }
3111
3112 #ifdef VIXL_DEBUG
3113 // Tell whether any of the macro instruction can be used. When false the
3114 // MacroAssembler will assert if a method which can emit a variable number
3115 // of instructions is called.
3116 bool allow_macro_instructions_;
3117 #endif
3118
3119 // Tell whether we should generate code that will run on the simulator or not.
3120 bool allow_simulator_instructions_;
3121
3122 // The register to use as a stack pointer for stack operations.
3123 Register sp_;
3124
3125 // Scratch registers available for use by the MacroAssembler.
3126 CPURegList tmp_list_;
3127 CPURegList fptmp_list_;
3128
3129 LiteralPool literal_pool_;
3130 VeneerPool veneer_pool_;
3131
3132 ptrdiff_t checkpoint_;
3133 ptrdiff_t recommended_checkpoint_;
3134
3135 friend class Pool;
3136 friend class LiteralPool;
3137 };
3138
3139
// Each pool reports the other pool's maximum size via the MacroAssembler.
// Defined here (out of line) because they need MacroAssembler's full
// definition.
inline size_t VeneerPool::OtherPoolsMaxSize() const {
  return masm_->LiteralPoolMaxSize();
}


inline size_t LiteralPool::OtherPoolsMaxSize() const {
  return masm_->VeneerPoolMaxSize();
}
3148
3149
// Record `offset` as the pool's next recommended checkpoint, and pull the
// MacroAssembler's cached recommendation earlier if this one precedes it.
inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
  masm_->recommended_checkpoint_ =
      std::min(masm_->recommended_checkpoint_, offset);
  recommended_checkpoint_ = offset;
}
3155
3156 // Use this scope when you need a one-to-one mapping between methods and
3157 // instructions. This scope prevents the MacroAssembler from being called and
3158 // literal pools from being emitted. It also asserts the number of instructions
3159 // emitted is what you specified when creating the scope.
class InstructionAccurateScope : public CodeBufferCheckScope {
 public:
  // `count` is the expected number of instructions; the base
  // CodeBufferCheckScope is sized as count * kInstructionSize bytes and
  // checks it according to `policy` (kExactSize by default).
  InstructionAccurateScope(MacroAssembler* masm,
                           int64_t count,
                           AssertPolicy policy = kExactSize)
      : CodeBufferCheckScope(masm,
                             (count * kInstructionSize),
                             kCheck,
                             policy) {
    VIXL_ASSERT(policy != kNoAssert);
#ifdef VIXL_DEBUG
    // Forbid macro instructions for the duration of the scope; the previous
    // setting is restored by the destructor.
    old_allow_macro_instructions_ = masm->AllowMacroInstructions();
    masm->SetAllowMacroInstructions(false);
#endif
  }

  ~InstructionAccurateScope() {
#ifdef VIXL_DEBUG
    // assm_ (inherited from CodeBufferCheckScope) is the MacroAssembler
    // passed to the constructor, so this cast is safe.
    MacroAssembler* masm = reinterpret_cast<MacroAssembler*>(assm_);
    masm->SetAllowMacroInstructions(old_allow_macro_instructions_);
#endif
  }

 private:
#ifdef VIXL_DEBUG
  // AllowMacroInstructions() state saved at construction.
  bool old_allow_macro_instructions_;
#endif
};
3188
3189
// RAII helper: blocks literal pool emission for the lifetime of the scope.
class BlockLiteralPoolScope {
 public:
  explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockLiteralPool();
  }

  ~BlockLiteralPoolScope() {
    masm_->ReleaseLiteralPool();
  }

 private:
  MacroAssembler* masm_;
};
3203
3204
// RAII helper: blocks veneer pool emission for the lifetime of the scope.
class BlockVeneerPoolScope {
 public:
  explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockVeneerPool();
  }

  ~BlockVeneerPoolScope() {
    masm_->ReleaseVeneerPool();
  }

 private:
  MacroAssembler* masm_;
};
3218
3219
// RAII helper: blocks both the literal and veneer pools for the lifetime of
// the scope.
class BlockPoolsScope {
 public:
  explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockPools();
  }

  ~BlockPoolsScope() {
    masm_->ReleasePools();
  }

 private:
  MacroAssembler* masm_;
};
3233
3234
3235 // This scope utility allows scratch registers to be managed safely. The
3236 // MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
3237 // registers. These registers can be allocated on demand, and will be returned
3238 // at the end of the scope.
3239 //
3240 // When the scope ends, the MacroAssembler's lists will be restored to their
3241 // original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  // This constructor implicitly calls the `Open` function to initialise the
  // scope, so it is ready to use immediately after it has been constructed.
  explicit UseScratchRegisterScope(MacroAssembler* masm);
  // This constructor allows deferred and optional initialisation of the scope.
  // The user is required to explicitly call the `Open` function before using
  // the scope.
  UseScratchRegisterScope();
  // This function performs the actual initialisation work.
  void Open(MacroAssembler* masm);

  // The destructor always implicitly calls the `Close` function.
  ~UseScratchRegisterScope();
  // This function performs the cleaning-up work. It must succeed even if the
  // scope has not been opened. It is safe to call multiple times.
  void Close();


  // Whether `reg` is currently available to be acquired as a scratch
  // register in this scope.
  bool IsAvailable(const CPURegister& reg) const;


  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends. The W/X (and S/D) variants select
  // the 32- or 64-bit view of the acquired register.
  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
  VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
  VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }


  // Acquire a scratch register with the same size as `reg`.
  Register AcquireSameSizeAs(const Register& reg);
  VRegister AcquireSameSizeAs(const VRegister& reg);


  // Explicitly release an acquired (or excluded) register, putting it back in
  // the appropriate temps list.
  void Release(const CPURegister& reg);


  // Make the specified registers available as scratch registers for the
  // duration of this scope.
  void Include(const CPURegList& list);
  void Include(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Include(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);


  // Make sure that the specified registers are not available in this scope.
  // This can be used to prevent helper functions from using sensitive
  // registers, for example.
  void Exclude(const CPURegList& list);
  void Exclude(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Exclude(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);
  void Exclude(const CPURegister& reg1,
               const CPURegister& reg2 = NoCPUReg,
               const CPURegister& reg3 = NoCPUReg,
               const CPURegister& reg4 = NoCPUReg);


  // Prevent any scratch registers from being used in this scope.
  void ExcludeAll();


 private:
  // Remove and return the next register from `available` (implementation
  // not visible here; presumably asserts if the list is empty).
  static CPURegister AcquireNextAvailable(CPURegList* available);

  static void ReleaseByCode(CPURegList* available, int code);

  static void ReleaseByRegList(CPURegList* available,
                               RegList regs);

  static void IncludeByRegList(CPURegList* available,
                               RegList exclude);

  static void ExcludeByRegList(CPURegList* available,
                               RegList exclude);

  // Available scratch registers.
  CPURegList* available_;     // kRegister
  CPURegList* availablefp_;   // kVRegister

  // The state of the available lists at the start of this scope, used by
  // Close() to restore them.
  RegList old_available_;     // kRegister
  RegList old_availablefp_;   // kVRegister
#ifdef VIXL_DEBUG
  bool initialised_;
#endif

  // Disallow copy constructor and operator=.
  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
};
3349
3350
3351 } // namespace vixl
3352
3353 #endif // VIXL_A64_MACRO_ASSEMBLER_A64_H_
3354