// Copyright 2014, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "test-utils-a64.h"

#include <cmath>

#include "test-runner.h"
#include "vixl/a64/macro-assembler-a64.h"
#include "vixl/a64/simulator-a64.h"
#include "vixl/a64/disasm-a64.h"
#include "vixl/a64/cpu-a64.h"

#define __ masm->

namespace vixl {


// This value is a signalling NaN as both a double and as a float (taking the
// least-significant word).
const double kFP64SignallingNaN =
    rawbits_to_double(UINT64_C(0x7ff000007f800001));
const float kFP32SignallingNaN = rawbits_to_float(0x7f800001);

// A similar value, but as a quiet NaN.
const double kFP64QuietNaN = rawbits_to_double(UINT64_C(0x7ff800007fc00001));
const float kFP32QuietNaN = rawbits_to_float(0x7fc00001);
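// In both encodings the exponent field is all ones and the fraction is
// non-zero, which makes the value a NaN; a clear top fraction bit marks it as
// signalling, a set one as quiet. The double encodings are chosen so that
// their low 32 bits are exactly the corresponding single-precision encodings
// (0x7f800001 and 0x7fc00001).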


bool Equal32(uint32_t expected, const RegisterDump*, uint32_t result) {
  if (result != expected) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
           expected, result);
  }

  return expected == result;
}


bool Equal64(uint64_t expected, const RegisterDump*, uint64_t result) {
  if (result != expected) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           expected, result);
  }

  return expected == result;
}


bool Equal128(vec128_t expected, const RegisterDump*, vec128_t result) {
  if ((result.h != expected.h) || (result.l != expected.l)) {
    printf("Expected 0x%016" PRIx64 "%016" PRIx64 "\t "
           "Found 0x%016" PRIx64 "%016" PRIx64 "\n",
           expected.h, expected.l, result.h, result.l);
  }

  return ((expected.h == result.h) && (expected.l == result.l));
}


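// EqualFP32 and EqualFP64 compare raw bit patterns rather than using ==, so
// that NaNs (which never compare equal to themselves) and the sign of zero are
// checked exactly.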
bool EqualFP32(float expected, const RegisterDump*, float result) {
  if (float_to_rawbits(expected) == float_to_rawbits(result)) {
    return true;
  } else {
    if (std::isnan(expected) || (expected == 0.0)) {
      printf("Expected 0x%08" PRIx32 "\t Found 0x%08" PRIx32 "\n",
             float_to_rawbits(expected), float_to_rawbits(result));
    } else {
      printf("Expected %.9f (0x%08" PRIx32 ")\t "
             "Found %.9f (0x%08" PRIx32 ")\n",
             expected, float_to_rawbits(expected),
             result, float_to_rawbits(result));
    }
    return false;
  }
}


bool EqualFP64(double expected, const RegisterDump*, double result) {
  if (double_to_rawbits(expected) == double_to_rawbits(result)) {
    return true;
  }

  if (std::isnan(expected) || (expected == 0.0)) {
    printf("Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
           double_to_rawbits(expected), double_to_rawbits(result));
  } else {
    printf("Expected %.17f (0x%016" PRIx64 ")\t "
           "Found %.17f (0x%016" PRIx64 ")\n",
           expected, double_to_rawbits(expected),
           result, double_to_rawbits(result));
  }
  return false;
}


bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
  VIXL_ASSERT(reg.Is32Bits());
  // Retrieve the corresponding X register so we can check that the upper part
  // was properly cleared.
  int64_t result_x = core->xreg(reg.code());
  if ((result_x & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 "\t Found 0x%016" PRIx64 "\n",
           expected, result_x);
    return false;
  }
  uint32_t result_w = core->wreg(reg.code());
  return Equal32(expected, core, result_w);
}


bool Equal64(uint64_t expected, const RegisterDump* core, const Register& reg) {
  VIXL_ASSERT(reg.Is64Bits());
  uint64_t result = core->xreg(reg.code());
  return Equal64(expected, core, result);
}


bool Equal128(uint64_t expected_h,
              uint64_t expected_l,
              const RegisterDump* core,
              const VRegister& vreg) {
  VIXL_ASSERT(vreg.Is128Bits());
  vec128_t expected = {expected_l, expected_h};
  vec128_t result = core->qreg(vreg.code());
  return Equal128(expected, core, result);
}


bool EqualFP32(float expected,
               const RegisterDump* core,
               const FPRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is32Bits());
  // Retrieve the corresponding D register so we can check that the upper part
  // was properly cleared.
  uint64_t result_64 = core->dreg_bits(fpreg.code());
  if ((result_64 & 0xffffffff00000000) != 0) {
    printf("Expected 0x%08" PRIx32 " (%f)\t Found 0x%016" PRIx64 "\n",
           float_to_rawbits(expected), expected, result_64);
    return false;
  }

  return EqualFP32(expected, core, core->sreg(fpreg.code()));
}


bool EqualFP64(double expected,
               const RegisterDump* core,
               const FPRegister& fpreg) {
  VIXL_ASSERT(fpreg.Is64Bits());
  return EqualFP64(expected, core, core->dreg(fpreg.code()));
}


bool Equal64(const Register& reg0,
             const RegisterDump* core,
             const Register& reg1) {
  VIXL_ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
  int64_t expected = core->xreg(reg0.code());
  int64_t result = core->xreg(reg1.code());
  return Equal64(expected, core, result);
}
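

// How these helpers are typically driven from a test (a sketch; the variable
// names and the surrounding test plumbing are illustrative, not defined here):
//
//   RegisterDump core;
//   // ... use a MacroAssembler to generate the code under test ...
//   core.Dump(&masm);   // emit code that snapshots the machine state
//   // ... run the generated code ...
//   bool ok = Equal64(0x0123456789abcdef, &core, x0);   // then check x0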


static char FlagN(uint32_t flags) {
  return (flags & NFlag) ? 'N' : 'n';
}


static char FlagZ(uint32_t flags) {
  return (flags & ZFlag) ? 'Z' : 'z';
}


static char FlagC(uint32_t flags) {
  return (flags & CFlag) ? 'C' : 'c';
}


static char FlagV(uint32_t flags) {
  return (flags & VFlag) ? 'V' : 'v';
}


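// A mismatch is reported using the helpers above: an upper-case letter means
// the corresponding flag was set, a lower-case letter means it was clear. For
// example, "nZCv" denotes Z and C set with N and V clear.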
bool EqualNzcv(uint32_t expected, uint32_t result) {
  VIXL_ASSERT((expected & ~NZCVFlag) == 0);
  VIXL_ASSERT((result & ~NZCVFlag) == 0);
  if (result != expected) {
    printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
        FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
        FlagN(result), FlagZ(result), FlagC(result), FlagV(result));
    return false;
  }

  return true;
}


bool EqualRegisters(const RegisterDump* a, const RegisterDump* b) {
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (a->xreg(i) != b->xreg(i)) {
      printf("x%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i, a->xreg(i), b->xreg(i));
      return false;
    }
  }

  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    uint64_t a_bits = a->dreg_bits(i);
    uint64_t b_bits = b->dreg_bits(i);
    if (a_bits != b_bits) {
      printf("d%d\t Expected 0x%016" PRIx64 "\t Found 0x%016" PRIx64 "\n",
             i, a_bits, b_bits);
      return false;
    }
  }

  return true;
}


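// For example (hypothetical values), the following would fill w[0..2] and
// x[0..2] with W and X views of the first three registers allowed by the mask
// (here x19, x20 and x21), and return the bits of the registers assigned:
//
//   Register w[3], x[3];
//   RegList assigned =
//       PopulateRegisterArray(w, x, NULL, kXRegSize, 3, 0x3ff80000);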
RegList PopulateRegisterArray(Register* w, Register* x, Register* r,
                              int reg_size, int reg_count, RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (r) {
        r[i] = Register(n, reg_size);
      }
      if (x) {
        x[i] = Register(n, kXRegSize);
      }
      if (w) {
        w[i] = Register(n, kWRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);

  return list;
}


RegList PopulateFPRegisterArray(FPRegister* s, FPRegister* d, FPRegister* v,
                                int reg_size, int reg_count, RegList allowed) {
  RegList list = 0;
  int i = 0;
  for (unsigned n = 0; (n < kNumberOfFPRegisters) && (i < reg_count); n++) {
    if (((UINT64_C(1) << n) & allowed) != 0) {
      // Only assign allowed registers.
      if (v) {
        v[i] = FPRegister(n, reg_size);
      }
      if (d) {
        d[i] = FPRegister(n, kDRegSize);
      }
      if (s) {
        s[i] = FPRegister(n, kSRegSize);
      }
      list |= (UINT64_C(1) << n);
      i++;
    }
  }
  // Check that we got enough registers.
  VIXL_ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);

  return list;
}


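// For example, Clobber(&masm, 0x00000000000000fc, value) writes 'value' into
// x2-x7: the literal is materialised once into x2 (the first register hit) and
// x3-x7 are then copied from x2.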
void Clobber(MacroAssembler* masm, RegList reg_list, uint64_t const value) {
  Register first = NoReg;
  for (unsigned i = 0; i < kNumberOfRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      Register xn(i, kXRegSize);
      // We should never write into sp here.
      VIXL_ASSERT(!xn.Is(sp));
      if (!xn.IsZero()) {
        if (!first.IsValid()) {
          // This is the first register we've hit, so construct the literal.
          __ Mov(xn, value);
          first = xn;
        } else {
          // We've already loaded the literal, so re-use the value already
          // loaded into the first register we hit.
          __ Mov(xn, first);
        }
      }
    }
  }
}


void ClobberFP(MacroAssembler* masm, RegList reg_list, double const value) {
  FPRegister first = NoFPReg;
  for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
    if (reg_list & (UINT64_C(1) << i)) {
      FPRegister dn(i, kDRegSize);
      if (!first.IsValid()) {
        // This is the first register we've hit, so construct the literal.
        __ Fmov(dn, value);
        first = dn;
      } else {
        // We've already loaded the literal, so re-use the value already loaded
        // into the first register we hit.
        __ Fmov(dn, first);
      }
    }
  }
}


void Clobber(MacroAssembler* masm, CPURegList reg_list) {
  if (reg_list.type() == CPURegister::kRegister) {
    // This will always clobber X registers.
    Clobber(masm, reg_list.list());
  } else if (reg_list.type() == CPURegister::kVRegister) {
    // This will always clobber D registers.
    ClobberFP(masm, reg_list.list());
  } else {
    VIXL_UNREACHABLE();
  }
}


void RegisterDump::Dump(MacroAssembler* masm) {
  VIXL_ASSERT(__ StackPointer().Is(sp));

  // Ensure that we don't unintentionally clobber any registers.
  UseScratchRegisterScope temps(masm);
  temps.ExcludeAll();

  // Preserve some temporary registers.
  Register dump_base = x0;
  Register dump = x1;
  Register tmp = x2;
  Register dump_base_w = dump_base.W();
  Register dump_w = dump.W();
  Register tmp_w = tmp.W();

  // Offsets into the dump_ structure.
  const int x_offset = offsetof(dump_t, x_);
  const int w_offset = offsetof(dump_t, w_);
  const int d_offset = offsetof(dump_t, d_);
  const int s_offset = offsetof(dump_t, s_);
  const int q_offset = offsetof(dump_t, q_);
  const int sp_offset = offsetof(dump_t, sp_);
  const int wsp_offset = offsetof(dump_t, wsp_);
  const int flags_offset = offsetof(dump_t, flags_);

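  // Save the scratch registers so that their original values can be written
  // into the dump later; xzr is pushed as well, which keeps the push at
  // 32 bytes so sp stays 16-byte aligned.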
  __ Push(xzr, dump_base, dump, tmp);

  // Load the address where we will dump the state.
  __ Mov(dump_base, reinterpret_cast<uintptr_t>(&dump_));

  // Dump the stack pointer (sp and wsp).
  // The stack pointer cannot be stored directly; it needs to be moved into
  // another register first. Also, we pushed four X registers, so we need to
  // compensate here.
  __ Add(tmp, sp, 4 * kXRegSizeInBytes);
  __ Str(tmp, MemOperand(dump_base, sp_offset));
  __ Add(tmp_w, wsp, 4 * kXRegSizeInBytes);
  __ Str(tmp_w, MemOperand(dump_base, wsp_offset));

  // Dump X registers.
  __ Add(dump, dump_base, x_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::XRegFromCode(i), Register::XRegFromCode(i + 1),
           MemOperand(dump, i * kXRegSizeInBytes));
  }

  // Dump W registers.
  __ Add(dump, dump_base, w_offset);
  for (unsigned i = 0; i < kNumberOfRegisters; i += 2) {
    __ Stp(Register::WRegFromCode(i), Register::WRegFromCode(i + 1),
           MemOperand(dump, i * kWRegSizeInBytes));
  }

  // Dump D registers.
  __ Add(dump, dump_base, d_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
    __ Stp(FPRegister::DRegFromCode(i), FPRegister::DRegFromCode(i + 1),
           MemOperand(dump, i * kDRegSizeInBytes));
  }

  // Dump S registers.
  __ Add(dump, dump_base, s_offset);
  for (unsigned i = 0; i < kNumberOfFPRegisters; i += 2) {
    __ Stp(FPRegister::SRegFromCode(i), FPRegister::SRegFromCode(i + 1),
           MemOperand(dump, i * kSRegSizeInBytes));
  }

  // Dump Q registers.
  __ Add(dump, dump_base, q_offset);
  for (unsigned i = 0; i < kNumberOfVRegisters; i += 2) {
    __ Stp(VRegister::QRegFromCode(i), VRegister::QRegFromCode(i + 1),
           MemOperand(dump, i * kQRegSizeInBytes));
  }

  // Dump the flags.
  __ Mrs(tmp, NZCV);
  __ Str(tmp, MemOperand(dump_base, flags_offset));

  // To dump the values that were in dump_base, dump and tmp, we need a new
  // scratch register. We can use any of the already dumped registers since we
  // can easily restore them.
  Register dump2_base = x10;
  Register dump2 = x11;
  VIXL_ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));

  // Don't lose the dump_ address.
  __ Mov(dump2_base, dump_base);

  __ Pop(tmp, dump, dump_base, xzr);

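  // dump_base, dump and tmp hold their original (on-entry) values again, so
  // overwrite the stale x0-x2 slots that the bulk dump loops recorded, in both
  // the W and X arrays.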
  __ Add(dump2, dump2_base, w_offset);
  __ Str(dump_base_w, MemOperand(dump2, dump_base.code() * kWRegSizeInBytes));
  __ Str(dump_w, MemOperand(dump2, dump.code() * kWRegSizeInBytes));
  __ Str(tmp_w, MemOperand(dump2, tmp.code() * kWRegSizeInBytes));

  __ Add(dump2, dump2_base, x_offset);
  __ Str(dump_base, MemOperand(dump2, dump_base.code() * kXRegSizeInBytes));
  __ Str(dump, MemOperand(dump2, dump.code() * kXRegSizeInBytes));
  __ Str(tmp, MemOperand(dump2, tmp.code() * kXRegSizeInBytes));

  // Finally, restore dump2_base and dump2.
  __ Ldr(dump2_base, MemOperand(dump2, dump2_base.code() * kXRegSizeInBytes));
  __ Ldr(dump2, MemOperand(dump2, dump2.code() * kXRegSizeInBytes));
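  // dump2_base and dump2 were not clobbered until after the X dump loop, so
  // these loads recover their on-entry values; every scratch register used by
  // Dump() is now back to the value it had on entry.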

  completed_ = true;
}

}  // namespace vixl