// Copyright 2015, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "vixl/a64/instructions-a64.h"
#include "vixl/a64/assembler-a64.h"

namespace vixl {

// Floating-point infinity values.
const float16 kFP16PositiveInfinity = 0x7c00;
const float16 kFP16NegativeInfinity = 0xfc00;
const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
const double kFP64PositiveInfinity =
    rawbits_to_double(UINT64_C(0x7ff0000000000000));
const double kFP64NegativeInfinity =
    rawbits_to_double(UINT64_C(0xfff0000000000000));


// The default NaN values (for FPCR.DN=1).
const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
const float16 kFP16DefaultNaN = 0x7e00;

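// Rotate 'value' right by 'rotate' bits within a field of 'width' bits. For
// example, RotateRight(0b0001, 1, 4) yields 0b1000: the low bit wraps around
// to the top of the 4-bit field.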
static uint64_t RotateRight(uint64_t value,
                            unsigned int rotate,
                            unsigned int width) {
  VIXL_ASSERT(width <= 64);
  rotate &= 63;
  return ((value & ((UINT64_C(1) << rotate) - 1)) <<
          (width - rotate)) | (value >> rotate);
}


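// Repeat the low 'width' bits of 'value' across a register of 'reg_size'
// bits. For example, RepeatBitsAcrossReg(32, 0x1, 8) yields 0x01010101.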
static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                    uint64_t value,
                                    unsigned width) {
  VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
              (width == 32));
  VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
  uint64_t result = value & ((UINT64_C(1) << width) - 1);
  for (unsigned i = width; i < reg_size; i *= 2) {
    result |= (result << i);
  }
  return result;
}


bool Instruction::IsLoad() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) != 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case LDRB_w:
      case LDRH_w:
      case LDR_w:
      case LDR_x:
      case LDRSB_w:
      case LDRSB_x:
      case LDRSH_w:
      case LDRSH_x:
      case LDRSW_x:
      case LDR_b:
      case LDR_h:
      case LDR_s:
      case LDR_d:
      case LDR_q: return true;
      default: return false;
    }
  }
}


bool Instruction::IsStore() const {
  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
    return false;
  }

  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
    return Mask(LoadStorePairLBit) == 0;
  } else {
    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
    switch (op) {
      case STRB_w:
      case STRH_w:
      case STR_w:
      case STR_x:
      case STR_b:
      case STR_h:
      case STR_s:
      case STR_d:
      case STR_q: return true;
      default: return false;
    }
  }
}


// Logical immediates can't encode zero, so a return value of zero is used to
// indicate a failure case: specifically, where the constraints on imm_s are
// not met.
uint64_t Instruction::ImmLogical() const {
  unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
  int64_t n = BitN();
  int64_t imm_s = ImmSetBits();
  int64_t imm_r = ImmRotate();

  // An integer is constructed from the n, imm_s and imm_r bits according to
  // the following table:
  //
  //    N   imms    immr    size        S             R
  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
  //    (s bits must not be all set)
  //
  // A pattern is constructed of size bits, where the least significant S+1
  // bits are set. The pattern is rotated right by R, and repeated across a
  // 32 or 64-bit value, depending on destination register width.

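  // For example, n=0, imm_s=0b111100, imm_r=0 selects a two-bit pattern 0b01,
  // which repeats to give 0x5555555555555555 for a 64-bit destination.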
  if (n == 1) {
    if (imm_s == 0x3f) {
      return 0;
    }
    uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
    return RotateRight(bits, imm_r, 64);
  } else {
    if ((imm_s >> 1) == 0x1f) {
      return 0;
    }
    for (int width = 0x20; width >= 0x2; width >>= 1) {
      if ((imm_s & width) == 0) {
        int mask = width - 1;
        if ((imm_s & mask) == mask) {
          return 0;
        }
        uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
        return RepeatBitsAcrossReg(reg_size,
                                   RotateRight(bits, imm_r & mask, width),
                                   width);
      }
    }
  }
  VIXL_UNREACHABLE();
  return 0;
}


uint32_t Instruction::ImmNEONabcdefgh() const {
  return (ImmNEONabc() << 5) | ImmNEONdefgh();
}


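// Decode the 8-bit floating-point immediate (as used by FMOV and some NEON
// instructions) into a float. For example, Imm8ToFP32(0x70) == 1.0f.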
float Instruction::Imm8ToFP32(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint32_t bit7 = (bits >> 7) & 0x1;
  uint32_t bit6 = (bits >> 6) & 0x1;
  uint32_t bit5_to_0 = bits & 0x3f;
  uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);

  return rawbits_to_float(result);
}


float Instruction::ImmFP32() const {
  return Imm8ToFP32(ImmFP());
}


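// As above, but for the 64-bit encoding. For example, Imm8ToFP64(0x70) == 1.0.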
double Instruction::Imm8ToFP64(uint32_t imm8) {
  //   Imm8: abcdefgh (8 bits)
  // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
  //         0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
  // where B is b ^ 1
  uint32_t bits = imm8;
  uint64_t bit7 = (bits >> 7) & 0x1;
  uint64_t bit6 = (bits >> 6) & 0x1;
  uint64_t bit5_to_0 = bits & 0x3f;
  uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);

  return rawbits_to_double(result);
}


double Instruction::ImmFP64() const {
  return Imm8ToFP64(ImmFP());
}


float Instruction::ImmNEONFP32() const {
  return Imm8ToFP32(ImmNEONabcdefgh());
}


double Instruction::ImmNEONFP64() const {
  return Imm8ToFP64(ImmNEONabcdefgh());
}


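// Return the log2 of the access size, in bytes, for a scalar or vector
// load/store operation.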
unsigned CalcLSDataSize(LoadStoreOp op) {
  VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
  unsigned size = static_cast<Instr>(op) >> LSSize_offset;
  if ((op & LSVector_mask) != 0) {
    // Vector register memory operations encode the access size in the "size"
    // and "opc" fields.
    if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
      size = kQRegSizeInBytesLog2;
    }
  }
  return size;
}


unsigned CalcLSPairDataSize(LoadStorePairOp op) {
  VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
  VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
  switch (op) {
    case STP_q:
    case LDP_q: return kQRegSizeInBytesLog2;
    case STP_x:
    case LDP_x:
    case STP_d:
    case LDP_d: return kXRegSizeInBytesLog2;
    default: return kWRegSizeInBytesLog2;
  }
}


int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
  switch (branch_type) {
    case UncondBranchType:
      return ImmUncondBranch_width;
    case CondBranchType:
      return ImmCondBranch_width;
    case CompareBranchType:
      return ImmCmpBranch_width;
    case TestBranchType:
      return ImmTestBranch_width;
    default:
      VIXL_UNREACHABLE();
      return 0;
  }
}


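// The forward branch range in bytes. For example, a conditional branch encodes
// a 19-bit signed offset in instructions, so it can reach forward by
// (1 << 18) * kInstructionSize bytes (1MB).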
int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
  int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
  return encoded_max * kInstructionSize;
}


bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
                                     int32_t offset) {
  return is_intn(ImmBranchRangeBitwidth(branch_type), offset);
}


const Instruction* Instruction::ImmPCOffsetTarget() const {
  const Instruction* base = this;
  ptrdiff_t offset;
  if (IsPCRelAddressing()) {
    // ADR and ADRP.
    offset = ImmPCRel();
    if (Mask(PCRelAddressingMask) == ADRP) {
      base = AlignDown(base, kPageSize);
      offset *= kPageSize;
    } else {
      VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
    }
  } else {
    // All PC-relative branches.
    VIXL_ASSERT(BranchType() != UnknownBranchType);
    // Relative branch offsets are instruction-size-aligned.
    offset = ImmBranch() << kInstructionSizeLog2;
  }
  return base + offset;
}


int Instruction::ImmBranch() const {
  switch (BranchType()) {
    case CondBranchType: return ImmCondBranch();
    case UncondBranchType: return ImmUncondBranch();
    case CompareBranchType: return ImmCmpBranch();
    case TestBranchType: return ImmTestBranch();
    default: VIXL_UNREACHABLE();
  }
  return 0;
}


void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
  if (IsPCRelAddressing()) {
    SetPCRelImmTarget(target);
  } else {
    SetBranchImmTarget(target);
  }
}


void Instruction::SetPCRelImmTarget(const Instruction* target) {
  int32_t imm21;
  if (Mask(PCRelAddressingMask) == ADR) {
    imm21 = target - this;
  } else {
    VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
    uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
    uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
    imm21 = target_page - this_page;
  }
  Instr imm = Assembler::ImmPCRelAddress(imm21);

  SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
}


void Instruction::SetBranchImmTarget(const Instruction* target) {
  VIXL_ASSERT(((target - this) & 3) == 0);
  Instr branch_imm = 0;
  uint32_t imm_mask = 0;
  int offset = (target - this) >> kInstructionSizeLog2;
  switch (BranchType()) {
    case CondBranchType: {
      branch_imm = Assembler::ImmCondBranch(offset);
      imm_mask = ImmCondBranch_mask;
      break;
    }
    case UncondBranchType: {
      branch_imm = Assembler::ImmUncondBranch(offset);
      imm_mask = ImmUncondBranch_mask;
      break;
    }
    case CompareBranchType: {
      branch_imm = Assembler::ImmCmpBranch(offset);
      imm_mask = ImmCmpBranch_mask;
      break;
    }
    case TestBranchType: {
      branch_imm = Assembler::ImmTestBranch(offset);
      imm_mask = ImmTestBranch_mask;
      break;
    }
    default: VIXL_UNREACHABLE();
  }
  SetInstructionBits(Mask(~imm_mask) | branch_imm);
}


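// Patch the offset of a load-literal instruction. The offset is encoded in
// 32-bit word units, hence the shift by kLiteralEntrySizeLog2.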
void Instruction::SetImmLLiteral(const Instruction* source) {
  VIXL_ASSERT(IsWordAligned(source));
  ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
  Instr imm = Assembler::ImmLLiteral(offset);
  Instr mask = ImmLLiteral_mask;

  SetInstructionBits(Mask(~mask) | imm);
}


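// Narrow each lane to half its width, keeping the lane count, e.g. 8H -> 8B.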
VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
              vform == kFormatH || vform == kFormatS || vform == kFormatD);
  switch (vform) {
    case kFormat8H: return kFormat8B;
    case kFormat4S: return kFormat4H;
    case kFormat2D: return kFormat2S;
    case kFormatH: return kFormatB;
    case kFormatS: return kFormatH;
    case kFormatD: return kFormatS;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


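// Widen each lane to double its width, keeping the lane count, e.g. 8B -> 8H.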
VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
              vform == kFormatB || vform == kFormatH || vform == kFormatS);
  switch (vform) {
    case kFormat8B: return kFormat8H;
    case kFormat4H: return kFormat4S;
    case kFormat2S: return kFormat2D;
    case kFormatB: return kFormatH;
    case kFormatH: return kFormatS;
    case kFormatS: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatFillQ(const VectorFormat vform) {
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return kFormat16B;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return kFormat8H;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return kFormat4S;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return kFormat2D;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


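// Halve the lane width and double the lane count, keeping the overall
// register size, e.g. 4S (4 x 32-bit) -> 8H (8 x 16-bit).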
VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
  switch (vform) {
    case kFormat4H: return kFormat8B;
    case kFormat8H: return kFormat16B;
    case kFormat2S: return kFormat4H;
    case kFormat4S: return kFormat8H;
    case kFormat1D: return kFormat2S;
    case kFormat2D: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
  switch (vform) {
    case kFormat8B: return kFormat16B;
    case kFormat4H: return kFormat8H;
    case kFormat2S: return kFormat4S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
  VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
  switch (vform) {
    case kFormat16B: return kFormat8B;
    case kFormat8H: return kFormat4H;
    case kFormat4S: return kFormat2S;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


VectorFormat ScalarFormatFromLaneSize(int laneSize) {
  switch (laneSize) {
    case 8: return kFormatB;
    case 16: return kFormatH;
    case 32: return kFormatS;
    case 64: return kFormatD;
    default: VIXL_UNREACHABLE(); return kFormatUndefined;
  }
}


unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB: return kBRegSize;
    case kFormatH: return kHRegSize;
    case kFormatS: return kSRegSize;
    case kFormatD: return kDRegSize;
    case kFormat8B:
    case kFormat4H:
    case kFormat2S:
    case kFormat1D: return kDRegSize;
    default: return kQRegSize;
  }
}


unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
  return RegisterSizeInBitsFromFormat(vform) / 8;
}


unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 8;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 16;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 32;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 64;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneSizeInBytesFromFormat(VectorFormat vform) {
  return LaneSizeInBitsFromFormat(vform) / 8;
}


int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 0;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 1;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 2;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 3;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int LaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormat16B: return 16;
    case kFormat8B:
    case kFormat8H: return 8;
    case kFormat4H:
    case kFormat4S: return 4;
    case kFormat2S:
    case kFormat2D: return 2;
    case kFormat1D:
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return 1;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


int MaxLaneCountFromFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormat8B:
    case kFormat16B: return 16;
    case kFormatH:
    case kFormat4H:
    case kFormat8H: return 8;
    case kFormatS:
    case kFormat2S:
    case kFormat4S: return 4;
    case kFormatD:
    case kFormat1D:
    case kFormat2D: return 2;
    default: VIXL_UNREACHABLE(); return 0;
  }
}


// Does 'vform' indicate a vector format or a scalar format?
bool IsVectorFormat(VectorFormat vform) {
  VIXL_ASSERT(vform != kFormatUndefined);
  switch (vform) {
    case kFormatB:
    case kFormatH:
    case kFormatS:
    case kFormatD: return false;
    default: return true;
  }
}


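// The extreme representable values for a lane of 'vform'. For example, an
// 8-bit lane gives MaxIntFromFormat == 127, MinIntFromFormat == -128 and
// MaxUintFromFormat == 255.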
int64_t MaxIntFromFormat(VectorFormat vform) {
  return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}


int64_t MinIntFromFormat(VectorFormat vform) {
  return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
}


uint64_t MaxUintFromFormat(VectorFormat vform) {
  return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
}
}  // namespace vixl