// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "instructions-aarch64.h"
#include "assembler-aarch64.h"

namespace vixl {
namespace aarch64 {

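// Repeat the low `width` bits of `value` across a register of `reg_size` bits.
// For example, RepeatBitsAcrossReg(32, 0x5, 4) returns 0x55555555.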
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q:
        return true;
      default:
        return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q:
        return true;
      default:
        return false;
    }
  }
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case, specifically where the constraints on imm_s are
// not met.
uint64_t Instruction::GetImmLogical() const {
  unsigned reg_size = GetSixtyFourBits() ? kXRegSize : kWRegSize;
  int32_t n = GetBitN();
  int32_t imm_s = GetImmSetBits();
  int32_t imm_r = GetImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //  N   imms    immr    size        S             R
  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  // (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.
  //
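  // For example, n = 0, imm_s = 0b110101 and imm_r = 0b000001 select an
  // 8-bit pattern: bits = 0b00111111, rotated right by one to 0b10011111
  // (0x9f) and repeated across an X register to give 0x9f9f9f9f9f9f9f9f.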

  if (n == 1) {
    if (imm_s == 0x3f) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}


uint32_t Instruction::GetImmNEONabcdefgh() const {
  return GetImmNEONabc() << 5 | GetImmNEONdefgh();
}


Float16 Instruction::Imm8ToFloat16(uint32_t imm8) {
  // Imm8: abcdefgh (8 bits)
  // Half: aBbb.cdef.gh00.0000 (16 bits)
  // where B is b ^ 1
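  // For example, imm8 = 0x70 encodes 0x3c00, which is 1.0 in half precision.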
  uint32_t bits = imm8;
  uint16_t bit7 = (bits >> 7) & 0x1;
  uint16_t bit6 = (bits >> 6) & 0x1;
  uint16_t bit5_to_0 = bits & 0x3f;
  uint16_t result = (bit7 << 15) | ((4 - bit6) << 12) | (bit5_to_0 << 6);
  return RawbitsToFloat16(result);
}


float Instruction::Imm8ToFP32(uint32_t imm8) {
  // Imm8: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
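  // For example, imm8 = 0x70 encodes 0x3f800000, which is 1.0f.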
  uint32_t bits = imm8;
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return RawbitsToFloat(result);
}


Float16 Instruction::GetImmFP16() const { return Imm8ToFloat16(GetImmFP()); }


float Instruction::GetImmFP32() const { return Imm8ToFP32(GetImmFP()); }


double Instruction::Imm8ToFP64(uint32_t imm8) {
  // Imm8: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
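  // For example, imm8 = 0x70 encodes 0x3ff0000000000000, which is 1.0.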
  uint32_t bits = imm8;
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return RawbitsToDouble(result);
}


double Instruction::GetImmFP64() const { return Imm8ToFP64(GetImmFP()); }


Float16 Instruction::GetImmNEONFP16() const {
  return Imm8ToFloat16(GetImmNEONabcdefgh());
}


float Instruction::GetImmNEONFP32() const {
  return Imm8ToFP32(GetImmNEONabcdefgh());
}


double Instruction::GetImmNEONFP64() const {
  return Imm8ToFP64(GetImmNEONabcdefgh());
}


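// Return the log2 of the access size in bytes; for example, 3 (an eight-byte
// access) for LDR_x, and 4 (a sixteen-byte access) for LDR_q.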
unsigned CalcLSDataSize(LoadStoreOp op) {
  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeInBytesLog2;
    }
  }
  return size;
}


unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
  switch (op) {
    case STP_q:
    case LDP_q:
      return kQRegSizeInBytesLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d:
      return kXRegSizeInBytesLog2;
    default:
      return kWRegSizeInBytesLog2;
  }
}


int Instruction::GetImmBranchRangeBitwidth(ImmBranchType branch_type) {
  switch (branch_type) {
    case UncondBranchType:
      return ImmUncondBranch_width;
    case CondBranchType:
      return ImmCondBranch_width;
    case CompareBranchType:
      return ImmCmpBranch_width;
    case TestBranchType:
      return ImmTestBranch_width;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


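// The maximum forward branch distance in bytes. For example, a conditional
// branch has a 19-bit offset field, so its forward range is
// (1 << 18) * kInstructionSize bytes, i.e. 1MB.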
int32_t Instruction::GetImmBranchForwardRange(ImmBranchType branch_type) {
  int32_t encoded_max = 1 << (GetImmBranchRangeBitwidth(branch_type) - 1);
  return encoded_max * kInstructionSize;
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int64_t offset) {
  return IsIntN(GetImmBranchRangeBitwidth(branch_type), offset);
}


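// For ADRP, the target is computed relative to the 4KB page containing the
// instruction; for example, an ADRP at 0x1234 with an immediate of 2 targets
// 0x1000 + (2 * 4096) = 0x3000.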
const Instruction* Instruction::GetImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = GetImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(GetBranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = GetImmBranch() * static_cast<int>(kInstructionSize);
  }
  return base + offset;
}


int Instruction::GetImmBranch() const {
  switch (GetBranchType()) {
    case CondBranchType:
      return GetImmCondBranch();
    case UncondBranchType:
      return GetImmUncondBranch();
    case CompareBranchType:
      return GetImmCmpBranch();
    case TestBranchType:
      return GetImmTestBranch();
    default:
      VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(const Instruction* target) {
  ptrdiff_t imm21;
  if ((Mask(PCRelAddressingMask) == ADR)) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


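// Note that (target - this) is a byte offset here: Instruction pointer
// arithmetic is byte-granular, hence the alignment check and the shift by
// kInstructionSizeLog2 below.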
void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
  switch (GetBranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default:
      VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


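// The load-literal offset is encoded in word (four-byte) units, so the source
// address must be word-aligned.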
void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


VectorFormat VectorFormatHalfWidth(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H:
      return kFormat8B;
    case kFormat4S:
      return kFormat4H;
    case kFormat2D:
      return kFormat2S;
    case kFormatH:
      return kFormatB;
    case kFormatS:
      return kFormatH;
    case kFormatD:
      return kFormatS;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleWidth(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B:
      return kFormat8H;
    case kFormat4H:
      return kFormat4S;
    case kFormat2S:
      return kFormat2D;
    case kFormatB:
      return kFormatH;
    case kFormatH:
      return kFormatS;
    case kFormatS:
      return kFormatD;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatFillQ(VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return kFormat2D;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}

VectorFormat VectorFormatHalfWidthDoubleLanes(VectorFormat vform) {
  switch (vform) {
    case kFormat4H:
      return kFormat8B;
    case kFormat8H:
      return kFormat16B;
    case kFormat2S:
      return kFormat4H;
    case kFormat4S:
      return kFormat8H;
    case kFormat1D:
      return kFormat2S;
    case kFormat2D:
      return kFormat4S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}

VectorFormat VectorFormatDoubleLanes(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B:
      return kFormat16B;
    case kFormat4H:
      return kFormat8H;
    case kFormat2S:
      return kFormat4S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfLanes(VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B:
      return kFormat8B;
    case kFormat8H:
      return kFormat4H;
    case kFormat4S:
      return kFormat2S;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromLaneSize(int laneSize) {
  switch (laneSize) {
    case 8:
      return kFormatB;
    case 16:
      return kFormatH;
    case 32:
      return kFormatS;
    case 64:
      return kFormatD;
    default:
      VIXL_UNREACHABLE();
      return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromFormat(VectorFormat vform) {
  return ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
}


unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
      return kBRegSize;
    case kFormatH:
      return kHRegSize;
    case kFormatS:
    case kFormat2H:
      return kSRegSize;
    case kFormatD:
      return kDRegSize;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D:
      return kDRegSize;
    default:
      return kQRegSize;
  }
}


unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}


unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 8;
    case kFormatH:
    case kFormat2H:
    case kFormat4H:
    case kFormat8H:
      return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 64;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}


int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 0;
    case kFormatH:
    case kFormat2H:
    case kFormat4H:
    case kFormat8H:
      return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 3;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int LaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormat16B:
      return 16;
    case kFormat8B:
    case kFormat8H:
      return 8;
    case kFormat4H:
    case kFormat4S:
      return 4;
    case kFormat2H:
    case kFormat2S:
    case kFormat2D:
      return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD:
      return 1;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


int MaxLaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B:
      return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H:
      return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S:
      return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D:
      return 2;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD:
      return false;
    default:
      return true;
  }
}


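// The largest signed value representable in a single lane: for example,
// INT64_MAX >> 56 == 127 for a format with eight-bit lanes.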
int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}


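// As above, but for the smallest value: for example, INT64_MIN >> 56 == -128
// for eight-bit lanes. This relies on arithmetic (sign-extending) right
// shifts of signed values, as mainstream compilers implement them.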
int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}


uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
}  // namespace aarch64
}  // namespace vixl