1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <cmath>
32 #include <limits>
33 
34 #include "src/v8.h"
35 
36 #include "src/arm64/decoder-arm64-inl.h"
37 #include "src/arm64/disasm-arm64.h"
38 #include "src/arm64/simulator-arm64.h"
39 #include "src/arm64/utils-arm64.h"
40 #include "src/macro-assembler.h"
41 #include "test/cctest/cctest.h"
42 #include "test/cctest/test-utils-arm64.h"
43 
44 using namespace v8::internal;
45 
46 // Test infrastructure.
47 //
48 // Tests are functions which accept no parameters and have no return values.
49 // The testing code should not perform an explicit return once completed. For
50 // example to test the mov immediate instruction a very simple test would be:
51 //
52 //   TEST(mov_x0_one) {
53 //     SETUP();
54 //
55 //     START();
56 //     __ mov(x0, Operand(1));
57 //     END();
58 //
59 //     RUN();
60 //
61 //     CHECK_EQUAL_64(1, x0);
62 //
63 //     TEARDOWN();
64 //   }
65 //
66 // Within a START ... END block all registers but sp can be modified. sp has to
67 // be explicitly saved/restored. The END() macro replaces the function return
68 // so it may appear multiple times in a test if the test has multiple exit
69 // points.
70 //
71 // Once the test has been run all integer and floating point registers as well
72 // as flags are accessible through a RegisterDump instance, see
73 // utils-arm64.cc for more info on RegisterDump.
74 //
75 // We provide some helper assert to handle common cases:
76 //
77 //   CHECK_EQUAL_32(int32_t, int_32t)
78 //   CHECK_EQUAL_FP32(float, float)
79 //   CHECK_EQUAL_32(int32_t, W register)
80 //   CHECK_EQUAL_FP32(float, S register)
81 //   CHECK_EQUAL_64(int64_t, int_64t)
82 //   CHECK_EQUAL_FP64(double, double)
83 //   CHECK_EQUAL_64(int64_t, X register)
84 //   CHECK_EQUAL_64(X register, X register)
85 //   CHECK_EQUAL_FP64(double, D register)
86 //
87 // e.g. CHECK_EQUAL_64(0.5, d30);
88 //
89 // If more advance computation is required before the assert then access the
90 // RegisterDump named core directly:
91 //
92 //   CHECK_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
93 
94 
#if 0  // TODO(all): enable.
// Dead code: an alternative VM-initialization path using a shared persistent
// context. Superseded by the INIT_V8() macro below, which delegates to
// CcTest::InitializeVM(). Kept under '#if 0' pending cleanup.
static v8::Persistent<v8::Context> env;

static void InitializeVM() {
  if (env.IsEmpty()) {
    env = v8::Context::New();
  }
}
#endif
104 
// All assembler mnemonics in the tests are routed through the MacroAssembler
// instance declared by SETUP_SIZE(), e.g. '__ Mov(x0, 1)' => 'masm.Mov(...)'.
#define __ masm.

// Default code-buffer size (bytes) for tests; use SETUP_SIZE() directly for
// tests that need to emit more code than this.
#define BUF_SIZE 8192
#define SETUP() SETUP_SIZE(BUF_SIZE)

// Initialize the V8 VM before generating any code.
#define INIT_V8()                                                              \
  CcTest::InitializeVM();                                                      \

#ifdef USE_SIMULATOR

// Run tests with the simulator.
// Declares, in the enclosing test scope: the isolate and handle scope, the
// code buffer 'buf', the MacroAssembler 'masm', a decoder + Simulator pair
// used by RUN(), and the RegisterDump 'core' used by the CHECK_EQUAL_*
// macros after the test has run.
#define SETUP_SIZE(buf_size)                    \
  Isolate* isolate = Isolate::Current();        \
  HandleScope scope(isolate);                   \
  DCHECK(isolate != NULL);                      \
  byte* buf = new byte[buf_size];               \
  MacroAssembler masm(isolate, buf, buf_size);  \
  Decoder<DispatchingDecoderVisitor>* decoder = \
      new Decoder<DispatchingDecoderVisitor>(); \
  Simulator simulator(decoder);                 \
  PrintDisassembler* pdis = NULL;               \
  RegisterDump core;

// NOTE(review): dead, commented-out tracing hook; 'Cctest' looks like a typo
// for 'CcTest', and 'decoder' is a pointer so '.' would need to be '->'.
// Confirm before re-enabling.
/*  if (Cctest::trace_sim()) {                                                 \
    pdis = new PrintDisassembler(stdout);                                      \
    decoder.PrependVisitor(pdis);                                              \
  }                                                                            \
  */

// Reset the assembler and simulator, so that instructions can be generated,
// but don't actually emit any code. This can be used by tests that need to
// emit instructions at the start of the buffer. Note that START_AFTER_RESET
// must be called before any callee-saved register is modified, and before an
// END is encountered.
//
// Most tests should call START, rather than call RESET directly.
#define RESET()                                                                \
  __ Reset();                                                                  \
  simulator.ResetState();

// Save callee-saved registers and emit a trace marker so simulator logs show
// where the test body begins.
#define START_AFTER_RESET()                                                    \
  __ SetStackPointer(csp);                                                     \
  __ PushCalleeSavedRegisters();                                               \
  __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);

#define START()                                                                \
  RESET();                                                                     \
  START_AFTER_RESET();

// Execute the generated code from the start of the buffer in the simulator.
#define RUN()                                                                  \
  simulator.RunFrom(reinterpret_cast<Instruction*>(buf))

// Dump all registers into 'core' (for the CHECK_EQUAL_* macros), restore
// callee-saved registers and return. May appear multiple times in one test.
#define END()                                                                  \
  __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL);                    \
  core.Dump(&masm);                                                            \
  __ PopCalleeSavedRegisters();                                                \
  __ Ret();                                                                    \
  __ GetCode(NULL);

// Release resources allocated by SETUP_SIZE(). Deleting a NULL 'pdis' is a
// no-op, so this is safe whether or not tracing was enabled.
#define TEARDOWN()                                                             \
  delete pdis;                                                                 \
  delete[] buf;

#else  // ifdef USE_SIMULATOR.
// Run the test on real hardware or models.
// Same scope declarations as the simulator variant, minus the decoder and
// Simulator (the generated code is executed directly).
#define SETUP_SIZE(buf_size)                                                   \
  Isolate* isolate = Isolate::Current();                                       \
  HandleScope scope(isolate);                                                  \
  DCHECK(isolate != NULL);                                                     \
  byte* buf = new byte[buf_size];                                              \
  MacroAssembler masm(isolate, buf, buf_size);                                 \
  RegisterDump core;

#define RESET()                                                                \
  __ Reset();                                                                  \
  /* Reset the machine state (like simulator.ResetState()). */                 \
  __ Msr(NZCV, xzr);                                                           \
  __ Msr(FPCR, xzr);


#define START_AFTER_RESET()                                                    \
  __ SetStackPointer(csp);                                                     \
  __ PushCalleeSavedRegisters();

#define START()                                                                \
  RESET();                                                                     \
  START_AFTER_RESET();

// Flush the instruction cache over the generated code, then call the buffer
// as a function. The memcpy avoids a cast between object and function
// pointers, which is undefined behaviour in C++.
#define RUN()                                                \
  CpuFeatures::FlushICache(buf, masm.SizeOfGeneratedCode()); \
  {                                                          \
    void (*test_function)(void);                             \
    memcpy(&test_function, &buf, sizeof(buf));               \
    test_function();                                         \
  }

#define END()                                                                  \
  core.Dump(&masm);                                                            \
  __ PopCalleeSavedRegisters();                                                \
  __ Ret();                                                                    \
  __ GetCode(NULL);

#define TEARDOWN()                                                             \
  delete[] buf;

#endif  // ifdef USE_SIMULATOR.
211 
// Assertion helpers. All of them compare an expected value against the state
// captured in the RegisterDump 'core' by END(); see test-utils-arm64.h for
// the Equal* implementations.
#define CHECK_EQUAL_NZCV(expected)                                            \
  CHECK(EqualNzcv(expected, core.flags_nzcv()))

#define CHECK_EQUAL_REGISTERS(expected)                                       \
  CHECK(EqualRegisters(&expected, &core))

#define CHECK_EQUAL_32(expected, result)                                      \
  CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))

#define CHECK_EQUAL_FP32(expected, result)                                    \
  CHECK(EqualFP32(expected, &core, result))

#define CHECK_EQUAL_64(expected, result)                                      \
  CHECK(Equal64(expected, &core, result))

#define CHECK_EQUAL_FP64(expected, result)                                    \
  CHECK(EqualFP64(expected, &core, result))

// Literal-pool size checks only make sense in debug builds, where the
// assembler tracks the pool; in release builds this is a no-op.
#ifdef DEBUG
#define DCHECK_LITERAL_POOL_SIZE(expected)                                     \
  CHECK((expected) == (__ LiteralPoolSize()))
#else
#define DCHECK_LITERAL_POOL_SIZE(expected)                                     \
  ((void) 0)
#endif
237 
238 
// Check that csp/wcsp can act as source and destination of add, extended-add
// and logical instructions, and that writes to wcsp zero-extend into csp.
// csp is saved in x29 and restored before END(), as the test infrastructure
// requires csp to be unchanged.
TEST(stack_ops) {
  INIT_V8();
  SETUP();

  START();
  // save csp.
  __ Mov(x29, csp);

  // Set the csp to a known value.
  __ Mov(x16, 0x1000);
  __ Mov(csp, x16);
  __ Mov(x0, csp);

  // Add immediate to the csp, and move the result to a normal register.
  __ Add(csp, csp, Operand(0x50));
  __ Mov(x1, csp);

  // Add extended to the csp, and move the result to a normal register.
  // SXTB of 0xfff is -1, so csp becomes 0x1050 - 1 = 0x104f (checked below).
  __ Mov(x17, 0xfff);
  __ Add(csp, csp, Operand(x17, SXTB));
  __ Mov(x2, csp);

  // Create an csp using a logical instruction, and move to normal register.
  __ Orr(csp, xzr, Operand(0x1fff));
  __ Mov(x3, csp);

  // Write wcsp using a logical instruction.
  __ Orr(wcsp, wzr, Operand(0xfffffff8L));
  __ Mov(x4, csp);

  // Write csp, and read back wcsp.
  __ Orr(csp, xzr, Operand(0xfffffff8L));
  __ Mov(w5, wcsp);

  //  restore csp.
  __ Mov(csp, x29);
  END();

  RUN();

  CHECK_EQUAL_64(0x1000, x0);
  CHECK_EQUAL_64(0x1050, x1);
  CHECK_EQUAL_64(0x104f, x2);
  CHECK_EQUAL_64(0x1fff, x3);
  CHECK_EQUAL_64(0xfffffff8, x4);
  CHECK_EQUAL_64(0xfffffff8, x5);

  TEARDOWN();
}
288 
289 
// Check Mvn (bitwise NOT) with immediate, shifted-register (LSL/LSR/ASR/ROR)
// and extended-register (UXT*/SXT*) second operands, in both W and X sizes.
// Expected values below are the precomputed complements of the shifted or
// extended operand.
TEST(mvn) {
  INIT_V8();
  SETUP();

  START();
  __ Mvn(w0, 0xfff);
  __ Mvn(x1, 0xfff);
  __ Mvn(w2, Operand(w0, LSL, 1));
  __ Mvn(x3, Operand(x1, LSL, 2));
  __ Mvn(w4, Operand(w0, LSR, 3));
  __ Mvn(x5, Operand(x1, LSR, 4));
  __ Mvn(w6, Operand(w0, ASR, 11));
  __ Mvn(x7, Operand(x1, ASR, 12));
  __ Mvn(w8, Operand(w0, ROR, 13));
  __ Mvn(x9, Operand(x1, ROR, 14));
  __ Mvn(w10, Operand(w2, UXTB));
  __ Mvn(x11, Operand(x2, SXTB, 1));
  __ Mvn(w12, Operand(w2, UXTH, 2));
  __ Mvn(x13, Operand(x2, SXTH, 3));
  __ Mvn(x14, Operand(w2, UXTW, 4));
  __ Mvn(x15, Operand(w2, SXTW, 4));
  END();

  RUN();

  CHECK_EQUAL_64(0xfffff000, x0);
  CHECK_EQUAL_64(0xfffffffffffff000UL, x1);
  CHECK_EQUAL_64(0x00001fff, x2);
  CHECK_EQUAL_64(0x0000000000003fffUL, x3);
  CHECK_EQUAL_64(0xe00001ff, x4);
  CHECK_EQUAL_64(0xf0000000000000ffUL, x5);
  CHECK_EQUAL_64(0x00000001, x6);
  CHECK_EQUAL_64(0x0, x7);
  CHECK_EQUAL_64(0x7ff80000, x8);
  CHECK_EQUAL_64(0x3ffc000000000000UL, x9);
  CHECK_EQUAL_64(0xffffff00, x10);
  CHECK_EQUAL_64(0x0000000000000001UL, x11);
  CHECK_EQUAL_64(0xffff8003, x12);
  CHECK_EQUAL_64(0xffffffffffff0007UL, x13);
  CHECK_EQUAL_64(0xfffffffffffe000fUL, x14);
  CHECK_EQUAL_64(0xfffffffffffe000fUL, x15);

  TEARDOWN();
}
334 
335 
// Check the Mov macro and the raw movz/movk/movn instructions:
//  - movz/movk/movn with shifted 16-bit immediates (x1-x3, pre-seeded with
//    all-ones so movk's merge behaviour is visible);
//  - register-to-register moves, including moves back to the same register
//    (no-op for X, truncation to 32 bits for W);
//  - Mov with shifted and extended register operands.
TEST(mov) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffL);
  __ Mov(x1, 0xffffffffffffffffL);
  __ Mov(x2, 0xffffffffffffffffL);
  __ Mov(x3, 0xffffffffffffffffL);

  __ Mov(x0, 0x0123456789abcdefL);

  __ movz(x1, 0xabcdL << 16);
  __ movk(x2, 0xabcdL << 32);
  __ movn(x3, 0xabcdL << 48);

  __ Mov(x4, 0x0123456789abcdefL);
  __ Mov(x5, x4);

  __ Mov(w6, -1);

  // Test that moves back to the same register have the desired effect. This
  // is a no-op for X registers, and a truncation for W registers.
  __ Mov(x7, 0x0123456789abcdefL);
  __ Mov(x7, x7);
  __ Mov(x8, 0x0123456789abcdefL);
  __ Mov(w8, w8);
  __ Mov(x9, 0x0123456789abcdefL);
  __ Mov(x9, Operand(x9));
  __ Mov(x10, 0x0123456789abcdefL);
  __ Mov(w10, Operand(w10));

  __ Mov(w11, 0xfff);
  __ Mov(x12, 0xfff);
  __ Mov(w13, Operand(w11, LSL, 1));
  __ Mov(x14, Operand(x12, LSL, 2));
  __ Mov(w15, Operand(w11, LSR, 3));
  __ Mov(x18, Operand(x12, LSR, 4));
  __ Mov(w19, Operand(w11, ASR, 11));
  __ Mov(x20, Operand(x12, ASR, 12));
  __ Mov(w21, Operand(w11, ROR, 13));
  __ Mov(x22, Operand(x12, ROR, 14));
  __ Mov(w23, Operand(w13, UXTB));
  __ Mov(x24, Operand(x13, SXTB, 1));
  __ Mov(w25, Operand(w13, UXTH, 2));
  __ Mov(x26, Operand(x13, SXTH, 3));
  __ Mov(x27, Operand(w13, UXTW, 4));
  END();

  RUN();

  CHECK_EQUAL_64(0x0123456789abcdefL, x0);
  CHECK_EQUAL_64(0x00000000abcd0000L, x1);
  CHECK_EQUAL_64(0xffffabcdffffffffL, x2);
  CHECK_EQUAL_64(0x5432ffffffffffffL, x3);
  CHECK_EQUAL_64(x4, x5);
  CHECK_EQUAL_32(-1, w6);
  CHECK_EQUAL_64(0x0123456789abcdefL, x7);
  CHECK_EQUAL_32(0x89abcdefL, w8);
  CHECK_EQUAL_64(0x0123456789abcdefL, x9);
  CHECK_EQUAL_32(0x89abcdefL, w10);
  CHECK_EQUAL_64(0x00000fff, x11);
  CHECK_EQUAL_64(0x0000000000000fffUL, x12);
  CHECK_EQUAL_64(0x00001ffe, x13);
  CHECK_EQUAL_64(0x0000000000003ffcUL, x14);
  CHECK_EQUAL_64(0x000001ff, x15);
  CHECK_EQUAL_64(0x00000000000000ffUL, x18);
  CHECK_EQUAL_64(0x00000001, x19);
  CHECK_EQUAL_64(0x0, x20);
  CHECK_EQUAL_64(0x7ff80000, x21);
  CHECK_EQUAL_64(0x3ffc000000000000UL, x22);
  CHECK_EQUAL_64(0x000000fe, x23);
  CHECK_EQUAL_64(0xfffffffffffffffcUL, x24);
  CHECK_EQUAL_64(0x00007ff8, x25);
  CHECK_EQUAL_64(0x000000000000fff0UL, x26);
  CHECK_EQUAL_64(0x000000000001ffe0UL, x27);

  TEARDOWN();
}
415 
416 
// Check Mov of 32-bit immediates into W registers, covering the patterns the
// macro assembler materializes differently: all-ones, half-inverted halves,
// zero, single-halfword values, arbitrary values, and sign-bit patterns.
// X-register checks confirm the upper 32 bits are zeroed by the W write.
TEST(mov_imm_w) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w0, 0xffffffffL);
  __ Mov(w1, 0xffff1234L);
  __ Mov(w2, 0x1234ffffL);
  __ Mov(w3, 0x00000000L);
  __ Mov(w4, 0x00001234L);
  __ Mov(w5, 0x12340000L);
  __ Mov(w6, 0x12345678L);
  __ Mov(w7, (int32_t)0x80000000);
  __ Mov(w8, (int32_t)0xffff0000);
  __ Mov(w9, kWMinInt);
  END();

  RUN();

  CHECK_EQUAL_64(0xffffffffL, x0);
  CHECK_EQUAL_64(0xffff1234L, x1);
  CHECK_EQUAL_64(0x1234ffffL, x2);
  CHECK_EQUAL_64(0x00000000L, x3);
  CHECK_EQUAL_64(0x00001234L, x4);
  CHECK_EQUAL_64(0x12340000L, x5);
  CHECK_EQUAL_64(0x12345678L, x6);
  CHECK_EQUAL_64(0x80000000L, x7);
  CHECK_EQUAL_64(0xffff0000L, x8);
  CHECK_EQUAL_32(kWMinInt, w9);

  TEARDOWN();
}
449 
450 
TEST(mov_imm_x)451 TEST(mov_imm_x) {
452   INIT_V8();
453   SETUP();
454 
455   START();
456   __ Mov(x0, 0xffffffffffffffffL);
457   __ Mov(x1, 0xffffffffffff1234L);
458   __ Mov(x2, 0xffffffff12345678L);
459   __ Mov(x3, 0xffff1234ffff5678L);
460   __ Mov(x4, 0x1234ffffffff5678L);
461   __ Mov(x5, 0x1234ffff5678ffffL);
462   __ Mov(x6, 0x12345678ffffffffL);
463   __ Mov(x7, 0x1234ffffffffffffL);
464   __ Mov(x8, 0x123456789abcffffL);
465   __ Mov(x9, 0x12345678ffff9abcL);
466   __ Mov(x10, 0x1234ffff56789abcL);
467   __ Mov(x11, 0xffff123456789abcL);
468   __ Mov(x12, 0x0000000000000000L);
469   __ Mov(x13, 0x0000000000001234L);
470   __ Mov(x14, 0x0000000012345678L);
471   __ Mov(x15, 0x0000123400005678L);
472   __ Mov(x18, 0x1234000000005678L);
473   __ Mov(x19, 0x1234000056780000L);
474   __ Mov(x20, 0x1234567800000000L);
475   __ Mov(x21, 0x1234000000000000L);
476   __ Mov(x22, 0x123456789abc0000L);
477   __ Mov(x23, 0x1234567800009abcL);
478   __ Mov(x24, 0x1234000056789abcL);
479   __ Mov(x25, 0x0000123456789abcL);
480   __ Mov(x26, 0x123456789abcdef0L);
481   __ Mov(x27, 0xffff000000000001L);
482   __ Mov(x28, 0x8000ffff00000000L);
483   END();
484 
485   RUN();
486 
487   CHECK_EQUAL_64(0xffffffffffff1234L, x1);
488   CHECK_EQUAL_64(0xffffffff12345678L, x2);
489   CHECK_EQUAL_64(0xffff1234ffff5678L, x3);
490   CHECK_EQUAL_64(0x1234ffffffff5678L, x4);
491   CHECK_EQUAL_64(0x1234ffff5678ffffL, x5);
492   CHECK_EQUAL_64(0x12345678ffffffffL, x6);
493   CHECK_EQUAL_64(0x1234ffffffffffffL, x7);
494   CHECK_EQUAL_64(0x123456789abcffffL, x8);
495   CHECK_EQUAL_64(0x12345678ffff9abcL, x9);
496   CHECK_EQUAL_64(0x1234ffff56789abcL, x10);
497   CHECK_EQUAL_64(0xffff123456789abcL, x11);
498   CHECK_EQUAL_64(0x0000000000000000L, x12);
499   CHECK_EQUAL_64(0x0000000000001234L, x13);
500   CHECK_EQUAL_64(0x0000000012345678L, x14);
501   CHECK_EQUAL_64(0x0000123400005678L, x15);
502   CHECK_EQUAL_64(0x1234000000005678L, x18);
503   CHECK_EQUAL_64(0x1234000056780000L, x19);
504   CHECK_EQUAL_64(0x1234567800000000L, x20);
505   CHECK_EQUAL_64(0x1234000000000000L, x21);
506   CHECK_EQUAL_64(0x123456789abc0000L, x22);
507   CHECK_EQUAL_64(0x1234567800009abcL, x23);
508   CHECK_EQUAL_64(0x1234000056789abcL, x24);
509   CHECK_EQUAL_64(0x0000123456789abcL, x25);
510   CHECK_EQUAL_64(0x123456789abcdef0L, x26);
511   CHECK_EQUAL_64(0xffff000000000001L, x27);
512   CHECK_EQUAL_64(0x8000ffff00000000L, x28);
513 
514   TEARDOWN();
515 }
516 
517 
// Check Orr with register, shifted-register (LSL/LSR/ASR/ROR) and bitmask
// immediate operands, in both W and X sizes.
TEST(orr) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xf0f0);
  __ Mov(x1, 0xf00000ff);

  __ Orr(x2, x0, Operand(x1));
  __ Orr(w3, w0, Operand(w1, LSL, 28));
  __ Orr(x4, x0, Operand(x1, LSL, 32));
  __ Orr(x5, x0, Operand(x1, LSR, 4));
  __ Orr(w6, w0, Operand(w1, ASR, 4));
  __ Orr(x7, x0, Operand(x1, ASR, 4));
  __ Orr(w8, w0, Operand(w1, ROR, 12));
  __ Orr(x9, x0, Operand(x1, ROR, 12));
  __ Orr(w10, w0, Operand(0xf));
  __ Orr(x11, x0, Operand(0xf0000000f0000000L));
  END();

  RUN();

  CHECK_EQUAL_64(0xf000f0ff, x2);
  CHECK_EQUAL_64(0xf000f0f0, x3);
  CHECK_EQUAL_64(0xf00000ff0000f0f0L, x4);
  CHECK_EQUAL_64(0x0f00f0ff, x5);
  CHECK_EQUAL_64(0xff00f0ff, x6);
  CHECK_EQUAL_64(0x0f00f0ff, x7);
  CHECK_EQUAL_64(0x0ffff0f0, x8);
  CHECK_EQUAL_64(0x0ff00000000ff0f0L, x9);
  CHECK_EQUAL_64(0xf0ff, x10);
  CHECK_EQUAL_64(0xf0000000f000f0f0L, x11);

  TEARDOWN();
}
553 
554 
// Check Orr with extended-register operands (UXTB/UXTH/UXTW/UXTX and their
// sign-extending counterparts, each with a left shift). x1's byte/halfword/
// word values have their top bit set so the SXT* variants visibly
// sign-extend into the upper bits.
TEST(orr_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000080008080UL);
  __ Orr(w6, w0, Operand(w1, UXTB));
  __ Orr(x7, x0, Operand(x1, UXTH, 1));
  __ Orr(w8, w0, Operand(w1, UXTW, 2));
  __ Orr(x9, x0, Operand(x1, UXTX, 3));
  __ Orr(w10, w0, Operand(w1, SXTB));
  __ Orr(x11, x0, Operand(x1, SXTH, 1));
  __ Orr(x12, x0, Operand(x1, SXTW, 2));
  __ Orr(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  CHECK_EQUAL_64(0x00000081, x6);
  CHECK_EQUAL_64(0x00010101, x7);
  CHECK_EQUAL_64(0x00020201, x8);
  CHECK_EQUAL_64(0x0000000400040401UL, x9);
  CHECK_EQUAL_64(0x00000000ffffff81UL, x10);
  CHECK_EQUAL_64(0xffffffffffff0101UL, x11);
  CHECK_EQUAL_64(0xfffffffe00020201UL, x12);
  CHECK_EQUAL_64(0x0000000400040401UL, x13);

  TEARDOWN();
}
585 
586 
// Check logical operations with immediates that cannot be encoded as bitmask
// immediates, forcing the macro assembler to materialize them in a scratch
// register first. Also checks Orr/Eor with kWMinInt (0x80000000).
TEST(bitwise_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);

  __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
  __ Orr(w11, w1, Operand(0x90abcdef));

  __ Orr(w12, w0, kWMinInt);
  __ Eor(w13, w0, kWMinInt);
  END();

  RUN();

  // The source registers must be unmodified.
  CHECK_EQUAL_64(0, x0);
  CHECK_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
  CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
  CHECK_EQUAL_64(0xf0fbfdffUL, x11);
  CHECK_EQUAL_32(kWMinInt, w12);
  CHECK_EQUAL_32(kWMinInt, w13);

  TEARDOWN();
}
613 
614 
// Check Orn (OR with complemented operand) with register, shifted-register
// and immediate forms; immediates are complemented, so e.g.
// Orn(w10, w0, 0xffff) == w0 | ~0xffff.
TEST(orn) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xf0f0);
  __ Mov(x1, 0xf00000ff);

  __ Orn(x2, x0, Operand(x1));
  __ Orn(w3, w0, Operand(w1, LSL, 4));
  __ Orn(x4, x0, Operand(x1, LSL, 4));
  __ Orn(x5, x0, Operand(x1, LSR, 1));
  __ Orn(w6, w0, Operand(w1, ASR, 1));
  __ Orn(x7, x0, Operand(x1, ASR, 1));
  __ Orn(w8, w0, Operand(w1, ROR, 16));
  __ Orn(x9, x0, Operand(x1, ROR, 16));
  __ Orn(w10, w0, Operand(0xffff));
  __ Orn(x11, x0, Operand(0xffff0000ffffL));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffffff0ffffff0L, x2);
  CHECK_EQUAL_64(0xfffff0ff, x3);
  CHECK_EQUAL_64(0xfffffff0fffff0ffL, x4);
  CHECK_EQUAL_64(0xffffffff87fffff0L, x5);
  CHECK_EQUAL_64(0x07fffff0, x6);
  CHECK_EQUAL_64(0xffffffff87fffff0L, x7);
  CHECK_EQUAL_64(0xff00ffff, x8);
  CHECK_EQUAL_64(0xff00ffffffffffffL, x9);
  CHECK_EQUAL_64(0xfffff0f0, x10);
  CHECK_EQUAL_64(0xffff0000fffff0f0L, x11);

  TEARDOWN();
}
650 
651 
// Check Orn with extended-register operands; the operand is extended and
// shifted first, then complemented before being ORed in.
TEST(orn_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x8000000080008081UL);
  __ Orn(w6, w0, Operand(w1, UXTB));
  __ Orn(x7, x0, Operand(x1, UXTH, 1));
  __ Orn(w8, w0, Operand(w1, UXTW, 2));
  __ Orn(x9, x0, Operand(x1, UXTX, 3));
  __ Orn(w10, w0, Operand(w1, SXTB));
  __ Orn(x11, x0, Operand(x1, SXTH, 1));
  __ Orn(x12, x0, Operand(x1, SXTW, 2));
  __ Orn(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffff7f, x6);
  CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
  CHECK_EQUAL_64(0xfffdfdfb, x8);
  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
  CHECK_EQUAL_64(0x0000007f, x10);
  CHECK_EQUAL_64(0x0000fefd, x11);
  CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);

  TEARDOWN();
}
682 
683 
// Check And with register, shifted-register and bitmask immediate operands.
// (Named 'and_' because 'and' is a reserved alternative token in C++.)
TEST(and_) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ And(x2, x0, Operand(x1));
  __ And(w3, w0, Operand(w1, LSL, 4));
  __ And(x4, x0, Operand(x1, LSL, 4));
  __ And(x5, x0, Operand(x1, LSR, 1));
  __ And(w6, w0, Operand(w1, ASR, 20));
  __ And(x7, x0, Operand(x1, ASR, 20));
  __ And(w8, w0, Operand(w1, ROR, 28));
  __ And(x9, x0, Operand(x1, ROR, 28));
  __ And(w10, w0, Operand(0xff00));
  __ And(x11, x0, Operand(0xff));
  END();

  RUN();

  CHECK_EQUAL_64(0x000000f0, x2);
  CHECK_EQUAL_64(0x00000ff0, x3);
  CHECK_EQUAL_64(0x00000ff0, x4);
  CHECK_EQUAL_64(0x00000070, x5);
  CHECK_EQUAL_64(0x0000ff00, x6);
  CHECK_EQUAL_64(0x00000f00, x7);
  CHECK_EQUAL_64(0x00000ff0, x8);
  CHECK_EQUAL_64(0x00000000, x9);
  CHECK_EQUAL_64(0x0000ff00, x10);
  CHECK_EQUAL_64(0x000000f0, x11);

  TEARDOWN();
}
719 
720 
// Check And with extended-register operands. x0 is all-ones, so each result
// equals the extended (and shifted) form of x1's low byte/halfword/word.
TEST(and_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffUL);
  __ Mov(x1, 0x8000000080008081UL);
  __ And(w6, w0, Operand(w1, UXTB));
  __ And(x7, x0, Operand(x1, UXTH, 1));
  __ And(w8, w0, Operand(w1, UXTW, 2));
  __ And(x9, x0, Operand(x1, UXTX, 3));
  __ And(w10, w0, Operand(w1, SXTB));
  __ And(x11, x0, Operand(x1, SXTH, 1));
  __ And(x12, x0, Operand(x1, SXTW, 2));
  __ And(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  CHECK_EQUAL_64(0x00000081, x6);
  CHECK_EQUAL_64(0x00010102, x7);
  CHECK_EQUAL_64(0x00020204, x8);
  CHECK_EQUAL_64(0x0000000400040408UL, x9);
  CHECK_EQUAL_64(0xffffff81, x10);
  CHECK_EQUAL_64(0xffffffffffff0102UL, x11);
  CHECK_EQUAL_64(0xfffffffe00020204UL, x12);
  CHECK_EQUAL_64(0x0000000400040408UL, x13);

  TEARDOWN();
}
751 
752 
// Check Ands (AND, setting flags). Each START/END/RUN section exercises one
// operand form and verifies both the result and the NZCV flags (N set when
// the result's top bit is set, Z set when the result is zero).
TEST(ands) {
  INIT_V8();
  SETUP();

  // Register operand; negative (top bit set) result.
  START();
  __ Mov(x1, 0xf00000ff);
  __ Ands(w0, w1, Operand(w1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);
  CHECK_EQUAL_64(0xf00000ff, x0);

  // Shifted-register operand; zero result.
  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);
  __ Ands(w0, w0, Operand(w1, LSR, 4));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZFlag);
  CHECK_EQUAL_64(0x00000000, x0);

  // ROR operand rotating the low bit into bit 63.
  START();
  __ Mov(x0, 0x8000000000000000L);
  __ Mov(x1, 0x00000001);
  __ Ands(x0, x0, Operand(x1, ROR, 1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);
  CHECK_EQUAL_64(0x8000000000000000L, x0);

  // Immediate operand; zero result.
  START();
  __ Mov(x0, 0xfff0);
  __ Ands(w0, w0, Operand(0xf));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZFlag);
  CHECK_EQUAL_64(0x00000000, x0);

  // Immediate operand; negative 32-bit result.
  START();
  __ Mov(x0, 0xff000000);
  __ Ands(w0, w0, Operand(0x80000000));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);
  CHECK_EQUAL_64(0x80000000, x0);

  TEARDOWN();
}
811 
812 
// Check Bic (bit clear: dst = src & ~operand) with register, shifted-register
// and immediate forms, plus a Bic into csp with an unencodable immediate.
TEST(bic) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Bic(x2, x0, Operand(x1));
  __ Bic(w3, w0, Operand(w1, LSL, 4));
  __ Bic(x4, x0, Operand(x1, LSL, 4));
  __ Bic(x5, x0, Operand(x1, LSR, 1));
  __ Bic(w6, w0, Operand(w1, ASR, 20));
  __ Bic(x7, x0, Operand(x1, ASR, 20));
  __ Bic(w8, w0, Operand(w1, ROR, 28));
  __ Bic(x9, x0, Operand(x1, ROR, 24));
  __ Bic(x10, x0, Operand(0x1f));
  __ Bic(x11, x0, Operand(0x100));

  // Test bic into csp when the constant cannot be encoded in the immediate
  // field.
  // Use x20 to preserve csp. We check for the result via x21 because the
  // test infrastructure requires that csp be restored to its original value.
  __ Mov(x20, csp);
  __ Mov(x0, 0xffffff);
  __ Bic(csp, x0, Operand(0xabcdef));
  __ Mov(x21, csp);
  __ Mov(csp, x20);
  END();

  RUN();

  CHECK_EQUAL_64(0x0000ff00, x2);
  CHECK_EQUAL_64(0x0000f000, x3);
  CHECK_EQUAL_64(0x0000f000, x4);
  CHECK_EQUAL_64(0x0000ff80, x5);
  CHECK_EQUAL_64(0x000000f0, x6);
  CHECK_EQUAL_64(0x0000f0f0, x7);
  CHECK_EQUAL_64(0x0000f000, x8);
  CHECK_EQUAL_64(0x0000ff00, x9);
  CHECK_EQUAL_64(0x0000ffe0, x10);
  CHECK_EQUAL_64(0x0000fef0, x11);

  // 0xffffff & ~0xabcdef == 0x543210.
  CHECK_EQUAL_64(0x543210, x21);

  TEARDOWN();
}
860 
861 
// Check Bic with extended-register operands: the operand is extended and
// shifted, then its complement is ANDed with the all-ones x0.
TEST(bic_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xffffffffffffffffUL);
  __ Mov(x1, 0x8000000080008081UL);
  __ Bic(w6, w0, Operand(w1, UXTB));
  __ Bic(x7, x0, Operand(x1, UXTH, 1));
  __ Bic(w8, w0, Operand(w1, UXTW, 2));
  __ Bic(x9, x0, Operand(x1, UXTX, 3));
  __ Bic(w10, w0, Operand(w1, SXTB));
  __ Bic(x11, x0, Operand(x1, SXTH, 1));
  __ Bic(x12, x0, Operand(x1, SXTW, 2));
  __ Bic(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffff7e, x6);
  CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
  CHECK_EQUAL_64(0xfffdfdfb, x8);
  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
  CHECK_EQUAL_64(0x0000007e, x10);
  CHECK_EQUAL_64(0x0000fefd, x11);
  CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);

  TEARDOWN();
}
892 
893 
// Check Bics (bit clear, setting flags). Each section verifies the result
// and the NZCV flags for a different operand form.
TEST(bics) {
  INIT_V8();
  SETUP();

  // x & ~x == 0: Z flag set.
  START();
  __ Mov(x1, 0xffff);
  __ Bics(w0, w1, Operand(w1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZFlag);
  CHECK_EQUAL_64(0x00000000, x0);

  // Clearing all but the top bit: N flag set.
  START();
  __ Mov(x0, 0xffffffff);
  __ Bics(w0, w0, Operand(w0, LSR, 1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);
  CHECK_EQUAL_64(0x80000000, x0);

  // ROR #1 of 1 is the top bit, so the clear produces zero.
  START();
  __ Mov(x0, 0x8000000000000000L);
  __ Mov(x1, 0x00000001);
  __ Bics(x0, x0, Operand(x1, ROR, 1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZFlag);
  CHECK_EQUAL_64(0x00000000, x0);

  // 64-bit immediate form; only the sign bit survives.
  START();
  __ Mov(x0, 0xffffffffffffffffL);
  __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);
  CHECK_EQUAL_64(0x8000000000000000L, x0);

  // 32-bit immediate form; all set bits are cleared.
  START();
  __ Mov(w0, 0xffff0000);
  __ Bics(w0, w0, Operand(0xfffffff0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZFlag);
  CHECK_EQUAL_64(0x00000000, x0);

  TEARDOWN();
}
951 
952 
// Check Eor (exclusive OR) with register, shifted-register and bitmask
// immediate operands, in both W and X sizes.
TEST(eor) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Eor(x2, x0, Operand(x1));
  __ Eor(w3, w0, Operand(w1, LSL, 4));
  __ Eor(x4, x0, Operand(x1, LSL, 4));
  __ Eor(x5, x0, Operand(x1, LSR, 1));
  __ Eor(w6, w0, Operand(w1, ASR, 20));
  __ Eor(x7, x0, Operand(x1, ASR, 20));
  __ Eor(w8, w0, Operand(w1, ROR, 28));
  __ Eor(x9, x0, Operand(x1, ROR, 28));
  __ Eor(w10, w0, Operand(0xff00ff00));
  __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
  END();

  RUN();

  CHECK_EQUAL_64(0xf000ff0f, x2);
  CHECK_EQUAL_64(0x0000f000, x3);
  CHECK_EQUAL_64(0x0000000f0000f000L, x4);
  CHECK_EQUAL_64(0x7800ff8f, x5);
  CHECK_EQUAL_64(0xffff00f0, x6);
  CHECK_EQUAL_64(0x0000f0f0, x7);
  CHECK_EQUAL_64(0x0000f00f, x8);
  CHECK_EQUAL_64(0x00000ff00000ffffL, x9);
  CHECK_EQUAL_64(0xff0000f0, x10);
  CHECK_EQUAL_64(0xff00ff00ff0000f0L, x11);

  TEARDOWN();
}
988 
989 
// Check Eor with extended-register operands; the 0x11...11 pattern in x0
// makes every flipped bit of the extended operand visible in the result.
TEST(eor_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x1111111111111111UL);
  __ Mov(x1, 0x8000000080008081UL);
  __ Eor(w6, w0, Operand(w1, UXTB));
  __ Eor(x7, x0, Operand(x1, UXTH, 1));
  __ Eor(w8, w0, Operand(w1, UXTW, 2));
  __ Eor(x9, x0, Operand(x1, UXTX, 3));
  __ Eor(w10, w0, Operand(w1, SXTB));
  __ Eor(x11, x0, Operand(x1, SXTH, 1));
  __ Eor(x12, x0, Operand(x1, SXTW, 2));
  __ Eor(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  CHECK_EQUAL_64(0x11111190, x6);
  CHECK_EQUAL_64(0x1111111111101013UL, x7);
  CHECK_EQUAL_64(0x11131315, x8);
  CHECK_EQUAL_64(0x1111111511151519UL, x9);
  CHECK_EQUAL_64(0xeeeeee90, x10);
  CHECK_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
  CHECK_EQUAL_64(0xeeeeeeef11131315UL, x12);
  CHECK_EQUAL_64(0x1111111511151519UL, x13);

  TEARDOWN();
}
1020 
1021 
// Test Eon (bitwise exclusive OR NOT: rd = rn ^ ~operand) with register,
// shifted-register and immediate operands, in W and X forms.
TEST(eon) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xfff0);
  __ Mov(x1, 0xf00000ff);

  __ Eon(x2, x0, Operand(x1));
  __ Eon(w3, w0, Operand(w1, LSL, 4));
  __ Eon(x4, x0, Operand(x1, LSL, 4));
  __ Eon(x5, x0, Operand(x1, LSR, 1));
  __ Eon(w6, w0, Operand(w1, ASR, 20));
  __ Eon(x7, x0, Operand(x1, ASR, 20));
  __ Eon(w8, w0, Operand(w1, ROR, 28));
  __ Eon(x9, x0, Operand(x1, ROR, 28));
  __ Eon(w10, w0, Operand(0x03c003c0));
  __ Eon(x11, x0, Operand(0x0000100000001000L));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffffff0fff00f0L, x2);
  CHECK_EQUAL_64(0xffff0fff, x3);
  CHECK_EQUAL_64(0xfffffff0ffff0fffL, x4);
  CHECK_EQUAL_64(0xffffffff87ff0070L, x5);
  CHECK_EQUAL_64(0x0000ff0f, x6);
  CHECK_EQUAL_64(0xffffffffffff0f0fL, x7);
  CHECK_EQUAL_64(0xffff0ff0, x8);
  CHECK_EQUAL_64(0xfffff00fffff0000L, x9);
  CHECK_EQUAL_64(0xfc3f03cf, x10);
  CHECK_EQUAL_64(0xffffefffffff100fL, x11);

  TEARDOWN();
}
1057 
1058 
// Test Eon with extended-register operands (zero- and sign-extending byte,
// half-word, word and doubleword forms, each with an optional shift).
TEST(eon_extend) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x1111111111111111UL);
  __ Mov(x1, 0x8000000080008081UL);
  __ Eon(w6, w0, Operand(w1, UXTB));
  __ Eon(x7, x0, Operand(x1, UXTH, 1));
  __ Eon(w8, w0, Operand(w1, UXTW, 2));
  __ Eon(x9, x0, Operand(x1, UXTX, 3));
  __ Eon(w10, w0, Operand(w1, SXTB));
  __ Eon(x11, x0, Operand(x1, SXTH, 1));
  __ Eon(x12, x0, Operand(x1, SXTW, 2));
  __ Eon(x13, x0, Operand(x1, SXTX, 3));
  END();

  RUN();

  CHECK_EQUAL_64(0xeeeeee6f, x6);
  CHECK_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
  CHECK_EQUAL_64(0xeeececea, x8);
  CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
  CHECK_EQUAL_64(0x1111116f, x10);
  CHECK_EQUAL_64(0x111111111111efecUL, x11);
  CHECK_EQUAL_64(0x11111110eeececeaUL, x12);
  CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);

  TEARDOWN();
}
1089 
1090 
// Test Mul, Smull (signed multiply long) and Mneg (multiply-negate) with
// boundary operands (0, 1, 0xffffffff, -1) in W and X forms.
TEST(mul) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);

  __ Mul(w0, w16, w16);
  __ Mul(w1, w16, w17);
  __ Mul(w2, w17, w18);
  __ Mul(w3, w18, w19);
  __ Mul(x4, x16, x16);
  __ Mul(x5, x17, x18);
  __ Mul(x6, x18, x19);
  __ Mul(x7, x19, x19);
  __ Smull(x8, w17, w18);
  __ Smull(x9, w18, w18);
  __ Smull(x10, w19, w19);
  __ Mneg(w11, w16, w16);
  __ Mneg(w12, w16, w17);
  __ Mneg(w13, w17, w18);
  __ Mneg(w14, w18, w19);
  __ Mneg(x20, x16, x16);
  __ Mneg(x21, x17, x18);
  __ Mneg(x22, x18, x19);
  __ Mneg(x23, x19, x19);
  END();

  RUN();

  CHECK_EQUAL_64(0, x0);
  CHECK_EQUAL_64(0, x1);
  CHECK_EQUAL_64(0xffffffff, x2);
  CHECK_EQUAL_64(1, x3);
  CHECK_EQUAL_64(0, x4);
  CHECK_EQUAL_64(0xffffffff, x5);
  CHECK_EQUAL_64(0xffffffff00000001UL, x6);
  CHECK_EQUAL_64(1, x7);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(1, x10);
  CHECK_EQUAL_64(0, x11);
  CHECK_EQUAL_64(0, x12);
  CHECK_EQUAL_64(1, x13);
  CHECK_EQUAL_64(0xffffffff, x14);
  CHECK_EQUAL_64(0, x20);
  CHECK_EQUAL_64(0xffffffff00000001UL, x21);
  CHECK_EQUAL_64(0xffffffff, x22);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x23);

  TEARDOWN();
}
1146 
1147 
SmullHelper(int64_t expected,int64_t a,int64_t b)1148 static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1149   SETUP();
1150   START();
1151   __ Mov(w0, a);
1152   __ Mov(w1, b);
1153   __ Smull(x2, w0, w1);
1154   END();
1155   RUN();
1156   CHECK_EQUAL_64(expected, x2);
1157   TEARDOWN();
1158 }
1159 
1160 
// Exercise Smull via SmullHelper: zero, unit, sign-combination and
// sign-extension boundary cases (0x80000000 must sign-extend to 64 bits).
TEST(smull) {
  INIT_V8();
  SmullHelper(0, 0, 0);
  SmullHelper(1, 1, 1);
  SmullHelper(-1, -1, 1);
  SmullHelper(1, -1, -1);
  SmullHelper(0xffffffff80000000, 0x80000000, 1);
  SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
}
1170 
1171 
TEST(madd)1172 TEST(madd) {
1173   INIT_V8();
1174   SETUP();
1175 
1176   START();
1177   __ Mov(x16, 0);
1178   __ Mov(x17, 1);
1179   __ Mov(x18, 0xffffffff);
1180   __ Mov(x19, 0xffffffffffffffffUL);
1181 
1182   __ Madd(w0, w16, w16, w16);
1183   __ Madd(w1, w16, w16, w17);
1184   __ Madd(w2, w16, w16, w18);
1185   __ Madd(w3, w16, w16, w19);
1186   __ Madd(w4, w16, w17, w17);
1187   __ Madd(w5, w17, w17, w18);
1188   __ Madd(w6, w17, w17, w19);
1189   __ Madd(w7, w17, w18, w16);
1190   __ Madd(w8, w17, w18, w18);
1191   __ Madd(w9, w18, w18, w17);
1192   __ Madd(w10, w18, w19, w18);
1193   __ Madd(w11, w19, w19, w19);
1194 
1195   __ Madd(x12, x16, x16, x16);
1196   __ Madd(x13, x16, x16, x17);
1197   __ Madd(x14, x16, x16, x18);
1198   __ Madd(x15, x16, x16, x19);
1199   __ Madd(x20, x16, x17, x17);
1200   __ Madd(x21, x17, x17, x18);
1201   __ Madd(x22, x17, x17, x19);
1202   __ Madd(x23, x17, x18, x16);
1203   __ Madd(x24, x17, x18, x18);
1204   __ Madd(x25, x18, x18, x17);
1205   __ Madd(x26, x18, x19, x18);
1206   __ Madd(x27, x19, x19, x19);
1207 
1208   END();
1209 
1210   RUN();
1211 
1212   CHECK_EQUAL_64(0, x0);
1213   CHECK_EQUAL_64(1, x1);
1214   CHECK_EQUAL_64(0xffffffff, x2);
1215   CHECK_EQUAL_64(0xffffffff, x3);
1216   CHECK_EQUAL_64(1, x4);
1217   CHECK_EQUAL_64(0, x5);
1218   CHECK_EQUAL_64(0, x6);
1219   CHECK_EQUAL_64(0xffffffff, x7);
1220   CHECK_EQUAL_64(0xfffffffe, x8);
1221   CHECK_EQUAL_64(2, x9);
1222   CHECK_EQUAL_64(0, x10);
1223   CHECK_EQUAL_64(0, x11);
1224 
1225   CHECK_EQUAL_64(0, x12);
1226   CHECK_EQUAL_64(1, x13);
1227   CHECK_EQUAL_64(0xffffffff, x14);
1228   CHECK_EQUAL_64(0xffffffffffffffff, x15);
1229   CHECK_EQUAL_64(1, x20);
1230   CHECK_EQUAL_64(0x100000000UL, x21);
1231   CHECK_EQUAL_64(0, x22);
1232   CHECK_EQUAL_64(0xffffffff, x23);
1233   CHECK_EQUAL_64(0x1fffffffe, x24);
1234   CHECK_EQUAL_64(0xfffffffe00000002UL, x25);
1235   CHECK_EQUAL_64(0, x26);
1236   CHECK_EQUAL_64(0, x27);
1237 
1238   TEARDOWN();
1239 }
1240 
1241 
TEST(msub)1242 TEST(msub) {
1243   INIT_V8();
1244   SETUP();
1245 
1246   START();
1247   __ Mov(x16, 0);
1248   __ Mov(x17, 1);
1249   __ Mov(x18, 0xffffffff);
1250   __ Mov(x19, 0xffffffffffffffffUL);
1251 
1252   __ Msub(w0, w16, w16, w16);
1253   __ Msub(w1, w16, w16, w17);
1254   __ Msub(w2, w16, w16, w18);
1255   __ Msub(w3, w16, w16, w19);
1256   __ Msub(w4, w16, w17, w17);
1257   __ Msub(w5, w17, w17, w18);
1258   __ Msub(w6, w17, w17, w19);
1259   __ Msub(w7, w17, w18, w16);
1260   __ Msub(w8, w17, w18, w18);
1261   __ Msub(w9, w18, w18, w17);
1262   __ Msub(w10, w18, w19, w18);
1263   __ Msub(w11, w19, w19, w19);
1264 
1265   __ Msub(x12, x16, x16, x16);
1266   __ Msub(x13, x16, x16, x17);
1267   __ Msub(x14, x16, x16, x18);
1268   __ Msub(x15, x16, x16, x19);
1269   __ Msub(x20, x16, x17, x17);
1270   __ Msub(x21, x17, x17, x18);
1271   __ Msub(x22, x17, x17, x19);
1272   __ Msub(x23, x17, x18, x16);
1273   __ Msub(x24, x17, x18, x18);
1274   __ Msub(x25, x18, x18, x17);
1275   __ Msub(x26, x18, x19, x18);
1276   __ Msub(x27, x19, x19, x19);
1277 
1278   END();
1279 
1280   RUN();
1281 
1282   CHECK_EQUAL_64(0, x0);
1283   CHECK_EQUAL_64(1, x1);
1284   CHECK_EQUAL_64(0xffffffff, x2);
1285   CHECK_EQUAL_64(0xffffffff, x3);
1286   CHECK_EQUAL_64(1, x4);
1287   CHECK_EQUAL_64(0xfffffffe, x5);
1288   CHECK_EQUAL_64(0xfffffffe, x6);
1289   CHECK_EQUAL_64(1, x7);
1290   CHECK_EQUAL_64(0, x8);
1291   CHECK_EQUAL_64(0, x9);
1292   CHECK_EQUAL_64(0xfffffffe, x10);
1293   CHECK_EQUAL_64(0xfffffffe, x11);
1294 
1295   CHECK_EQUAL_64(0, x12);
1296   CHECK_EQUAL_64(1, x13);
1297   CHECK_EQUAL_64(0xffffffff, x14);
1298   CHECK_EQUAL_64(0xffffffffffffffffUL, x15);
1299   CHECK_EQUAL_64(1, x20);
1300   CHECK_EQUAL_64(0xfffffffeUL, x21);
1301   CHECK_EQUAL_64(0xfffffffffffffffeUL, x22);
1302   CHECK_EQUAL_64(0xffffffff00000001UL, x23);
1303   CHECK_EQUAL_64(0, x24);
1304   CHECK_EQUAL_64(0x200000000UL, x25);
1305   CHECK_EQUAL_64(0x1fffffffeUL, x26);
1306   CHECK_EQUAL_64(0xfffffffffffffffeUL, x27);
1307 
1308   TEARDOWN();
1309 }
1310 
1311 
// Test Smulh (signed multiply high: upper 64 bits of a signed 64x64 -> 128
// multiply) with zero, small, large-magnitude and alternating-bit operands.
TEST(smulh) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x20, 0);
  __ Mov(x21, 1);
  __ Mov(x22, 0x0000000100000000L);
  __ Mov(x23, 0x12345678);
  __ Mov(x24, 0x0123456789abcdefL);
  __ Mov(x25, 0x0000000200000000L);
  __ Mov(x26, 0x8000000000000000UL);
  __ Mov(x27, 0xffffffffffffffffUL);
  __ Mov(x28, 0x5555555555555555UL);
  __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);

  __ Smulh(x0, x20, x24);
  __ Smulh(x1, x21, x24);
  __ Smulh(x2, x22, x23);
  __ Smulh(x3, x22, x24);
  __ Smulh(x4, x24, x25);
  __ Smulh(x5, x23, x27);
  __ Smulh(x6, x26, x26);
  __ Smulh(x7, x26, x27);
  __ Smulh(x8, x27, x27);
  __ Smulh(x9, x28, x28);
  __ Smulh(x10, x28, x29);
  __ Smulh(x11, x29, x29);
  END();

  RUN();

  CHECK_EQUAL_64(0, x0);
  CHECK_EQUAL_64(0, x1);
  CHECK_EQUAL_64(0, x2);
  CHECK_EQUAL_64(0x01234567, x3);
  CHECK_EQUAL_64(0x02468acf, x4);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x5);
  CHECK_EQUAL_64(0x4000000000000000UL, x6);
  CHECK_EQUAL_64(0, x7);
  CHECK_EQUAL_64(0, x8);
  CHECK_EQUAL_64(0x1c71c71c71c71c71UL, x9);
  CHECK_EQUAL_64(0xe38e38e38e38e38eUL, x10);
  CHECK_EQUAL_64(0x1c71c71c71c71c72UL, x11);

  TEARDOWN();
}
1359 
1360 
// Test Smaddl/Umaddl (signed/unsigned multiply-add long:
// xd = xa + wn * wm, with the 32-bit sources sign- or zero-extended).
TEST(smaddl_umaddl) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);
  __ Mov(x20, 4);
  __ Mov(x21, 0x200000000UL);

  __ Smaddl(x9, w17, w18, x20);
  __ Smaddl(x10, w18, w18, x20);
  __ Smaddl(x11, w19, w19, x20);
  __ Smaddl(x12, w19, w19, x21);
  __ Umaddl(x13, w17, w18, x20);
  __ Umaddl(x14, w18, w18, x20);
  __ Umaddl(x15, w19, w19, x20);
  __ Umaddl(x22, w19, w19, x21);
  END();

  RUN();

  CHECK_EQUAL_64(3, x9);
  CHECK_EQUAL_64(5, x10);
  CHECK_EQUAL_64(5, x11);
  CHECK_EQUAL_64(0x200000001UL, x12);
  CHECK_EQUAL_64(0x100000003UL, x13);
  CHECK_EQUAL_64(0xfffffffe00000005UL, x14);
  CHECK_EQUAL_64(0xfffffffe00000005UL, x15);
  CHECK_EQUAL_64(0x1, x22);
}
1395 
1396 
// Test Smsubl/Umsubl (signed/unsigned multiply-subtract long:
// xd = xa - wn * wm, with the 32-bit sources sign- or zero-extended).
TEST(smsubl_umsubl) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x17, 1);
  __ Mov(x18, 0xffffffff);
  __ Mov(x19, 0xffffffffffffffffUL);
  __ Mov(x20, 4);
  __ Mov(x21, 0x200000000UL);

  __ Smsubl(x9, w17, w18, x20);
  __ Smsubl(x10, w18, w18, x20);
  __ Smsubl(x11, w19, w19, x20);
  __ Smsubl(x12, w19, w19, x21);
  __ Umsubl(x13, w17, w18, x20);
  __ Umsubl(x14, w18, w18, x20);
  __ Umsubl(x15, w19, w19, x20);
  __ Umsubl(x22, w19, w19, x21);
  END();

  RUN();

  CHECK_EQUAL_64(5, x9);
  CHECK_EQUAL_64(3, x10);
  CHECK_EQUAL_64(3, x11);
  CHECK_EQUAL_64(0x1ffffffffUL, x12);
  CHECK_EQUAL_64(0xffffffff00000005UL, x13);
  CHECK_EQUAL_64(0x200000003UL, x14);
  CHECK_EQUAL_64(0x200000003UL, x15);
  CHECK_EQUAL_64(0x3ffffffffUL, x22);

  TEARDOWN();
}
1431 
1432 
TEST(div)1433 TEST(div) {
1434   INIT_V8();
1435   SETUP();
1436 
1437   START();
1438   __ Mov(x16, 1);
1439   __ Mov(x17, 0xffffffff);
1440   __ Mov(x18, 0xffffffffffffffffUL);
1441   __ Mov(x19, 0x80000000);
1442   __ Mov(x20, 0x8000000000000000UL);
1443   __ Mov(x21, 2);
1444 
1445   __ Udiv(w0, w16, w16);
1446   __ Udiv(w1, w17, w16);
1447   __ Sdiv(w2, w16, w16);
1448   __ Sdiv(w3, w16, w17);
1449   __ Sdiv(w4, w17, w18);
1450 
1451   __ Udiv(x5, x16, x16);
1452   __ Udiv(x6, x17, x18);
1453   __ Sdiv(x7, x16, x16);
1454   __ Sdiv(x8, x16, x17);
1455   __ Sdiv(x9, x17, x18);
1456 
1457   __ Udiv(w10, w19, w21);
1458   __ Sdiv(w11, w19, w21);
1459   __ Udiv(x12, x19, x21);
1460   __ Sdiv(x13, x19, x21);
1461   __ Udiv(x14, x20, x21);
1462   __ Sdiv(x15, x20, x21);
1463 
1464   __ Udiv(w22, w19, w17);
1465   __ Sdiv(w23, w19, w17);
1466   __ Udiv(x24, x20, x18);
1467   __ Sdiv(x25, x20, x18);
1468 
1469   __ Udiv(x26, x16, x21);
1470   __ Sdiv(x27, x16, x21);
1471   __ Udiv(x28, x18, x21);
1472   __ Sdiv(x29, x18, x21);
1473 
1474   __ Mov(x17, 0);
1475   __ Udiv(w18, w16, w17);
1476   __ Sdiv(w19, w16, w17);
1477   __ Udiv(x20, x16, x17);
1478   __ Sdiv(x21, x16, x17);
1479   END();
1480 
1481   RUN();
1482 
1483   CHECK_EQUAL_64(1, x0);
1484   CHECK_EQUAL_64(0xffffffff, x1);
1485   CHECK_EQUAL_64(1, x2);
1486   CHECK_EQUAL_64(0xffffffff, x3);
1487   CHECK_EQUAL_64(1, x4);
1488   CHECK_EQUAL_64(1, x5);
1489   CHECK_EQUAL_64(0, x6);
1490   CHECK_EQUAL_64(1, x7);
1491   CHECK_EQUAL_64(0, x8);
1492   CHECK_EQUAL_64(0xffffffff00000001UL, x9);
1493   CHECK_EQUAL_64(0x40000000, x10);
1494   CHECK_EQUAL_64(0xC0000000, x11);
1495   CHECK_EQUAL_64(0x40000000, x12);
1496   CHECK_EQUAL_64(0x40000000, x13);
1497   CHECK_EQUAL_64(0x4000000000000000UL, x14);
1498   CHECK_EQUAL_64(0xC000000000000000UL, x15);
1499   CHECK_EQUAL_64(0, x22);
1500   CHECK_EQUAL_64(0x80000000, x23);
1501   CHECK_EQUAL_64(0, x24);
1502   CHECK_EQUAL_64(0x8000000000000000UL, x25);
1503   CHECK_EQUAL_64(0, x26);
1504   CHECK_EQUAL_64(0, x27);
1505   CHECK_EQUAL_64(0x7fffffffffffffffUL, x28);
1506   CHECK_EQUAL_64(0, x29);
1507   CHECK_EQUAL_64(0, x18);
1508   CHECK_EQUAL_64(0, x19);
1509   CHECK_EQUAL_64(0, x20);
1510   CHECK_EQUAL_64(0, x21);
1511 
1512   TEARDOWN();
1513 }
1514 
1515 
// Test the bit/byte reversal instructions: Rbit (bit reverse), Rev16
// (byte-swap within each half-word), Rev32 (byte-swap within each word)
// and Rev (full byte reverse).
TEST(rbit_rev) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x24, 0xfedcba9876543210UL);
  __ Rbit(w0, w24);
  __ Rbit(x1, x24);
  __ Rev16(w2, w24);
  __ Rev16(x3, x24);
  __ Rev(w4, w24);
  __ Rev32(x5, x24);
  __ Rev(x6, x24);
  END();

  RUN();

  CHECK_EQUAL_64(0x084c2a6e, x0);
  CHECK_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
  CHECK_EQUAL_64(0x54761032, x2);
  CHECK_EQUAL_64(0xdcfe98ba54761032UL, x3);
  CHECK_EQUAL_64(0x10325476, x4);
  CHECK_EQUAL_64(0x98badcfe10325476UL, x5);
  CHECK_EQUAL_64(0x1032547698badcfeUL, x6);

  TEARDOWN();
}
1543 
1544 
// Test Clz (count leading zeros) and Cls (count leading sign bits, excluding
// the sign bit itself), including the all-zero input where Clz returns the
// full register width.
TEST(clz_cls) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x24, 0x0008000000800000UL);
  __ Mov(x25, 0xff800000fff80000UL);
  __ Mov(x26, 0);
  __ Clz(w0, w24);
  __ Clz(x1, x24);
  __ Clz(w2, w25);
  __ Clz(x3, x25);
  __ Clz(w4, w26);
  __ Clz(x5, x26);
  __ Cls(w6, w24);
  __ Cls(x7, x24);
  __ Cls(w8, w25);
  __ Cls(x9, x25);
  __ Cls(w10, w26);
  __ Cls(x11, x26);
  END();

  RUN();

  CHECK_EQUAL_64(8, x0);
  CHECK_EQUAL_64(12, x1);
  CHECK_EQUAL_64(0, x2);
  CHECK_EQUAL_64(0, x3);
  CHECK_EQUAL_64(32, x4);
  CHECK_EQUAL_64(64, x5);
  CHECK_EQUAL_64(7, x6);
  CHECK_EQUAL_64(11, x7);
  CHECK_EQUAL_64(12, x8);
  CHECK_EQUAL_64(8, x9);
  CHECK_EQUAL_64(31, x10);
  CHECK_EQUAL_64(63, x11);

  TEARDOWN();
}
1584 
1585 
// Test label binding and branching: multiple branches to one label, forward
// and backward branches, and Bl to a label past END (lr is saved/restored
// around the call so RUN's return path is intact).
TEST(label) {
  INIT_V8();
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(x0, 0x1);
  __ Mov(x1, 0x0);
  __ Mov(x22, lr);    // Save lr.

  __ B(&label_1);
  __ B(&label_1);
  __ B(&label_1);     // Multiple branches to the same label.
  __ Mov(x0, 0x0);
  __ Bind(&label_2);
  __ B(&label_3);     // Forward branch.
  __ Mov(x0, 0x0);
  __ Bind(&label_1);
  __ B(&label_2);     // Backward branch.
  __ Mov(x0, 0x0);
  __ Bind(&label_3);
  __ Bl(&label_4);
  END();

  __ Bind(&label_4);
  __ Mov(x1, 0x1);
  __ Mov(lr, x22);
  END();

  RUN();

  CHECK_EQUAL_64(0x1, x0);
  CHECK_EQUAL_64(0x1, x1);

  TEARDOWN();
}
1623 
1624 
// Test that a branch can be the very first instruction in the buffer (a
// boundary condition in the label-handling code); RESET/START_AFTER_RESET
// are used to bypass the prologue START would normally emit.
TEST(branch_at_start) {
  INIT_V8();
  SETUP();

  Label good, exit;

  // Test that branches can exist at the start of the buffer. (This is a
  // boundary condition in the label-handling code.) To achieve this, we have
  // to work around the code generated by START.
  RESET();
  __ B(&good);

  START_AFTER_RESET();
  __ Mov(x0, 0x0);
  END();

  __ Bind(&exit);
  START_AFTER_RESET();
  __ Mov(x0, 0x1);
  END();

  __ Bind(&good);
  __ B(&exit);
  END();

  RUN();

  CHECK_EQUAL_64(0x1, x0);
  TEARDOWN();
}
1655 
1656 
// Test Adr (PC-relative address computation): multiple forward references,
// multiple backward references, a self-reference, and Br through the
// computed addresses. x0 accumulates mismatches; x1 checks self-reference.
TEST(adr) {
  INIT_V8();
  SETUP();

  Label label_1, label_2, label_3, label_4;

  START();
  __ Mov(x0, 0x0);        // Set to non-zero to indicate failure.
  __ Adr(x1, &label_3);   // Set to zero to indicate success.

  __ Adr(x2, &label_1);   // Multiple forward references to the same label.
  __ Adr(x3, &label_1);
  __ Adr(x4, &label_1);

  __ Bind(&label_2);
  __ Eor(x5, x2, Operand(x3));  // Ensure that x2,x3 and x4 are identical.
  __ Eor(x6, x2, Operand(x4));
  __ Orr(x0, x0, Operand(x5));
  __ Orr(x0, x0, Operand(x6));
  __ Br(x2);  // label_1, label_3

  __ Bind(&label_3);
  __ Adr(x2, &label_3);   // Self-reference (offset 0).
  __ Eor(x1, x1, Operand(x2));
  __ Adr(x2, &label_4);   // Simple forward reference.
  __ Br(x2);  // label_4

  __ Bind(&label_1);
  __ Adr(x2, &label_3);   // Multiple reverse references to the same label.
  __ Adr(x3, &label_3);
  __ Adr(x4, &label_3);
  __ Adr(x5, &label_2);   // Simple reverse reference.
  __ Br(x5);  // label_2

  __ Bind(&label_4);
  END();

  RUN();

  CHECK_EQUAL_64(0x0, x0);
  CHECK_EQUAL_64(0x0, x1);

  TEARDOWN();
}
1701 
1702 
// Test the MacroAssembler's kAdrFar mode, which must reach labels beyond the
// +/-1MB immediate range of a plain adr. A nop sled longer than the range
// separates the near and far halves; x0 collects one bit per visited leg.
TEST(adr_far) {
  INIT_V8();

  int max_range = 1 << (Instruction::ImmPCRelRangeBitwidth - 1);
  SETUP_SIZE(max_range + 1000 * kInstructionSize);

  Label done, fail;
  Label test_near, near_forward, near_backward;
  Label test_far, far_forward, far_backward;

  START();
  __ Mov(x0, 0x0);

  __ Bind(&test_near);
  __ Adr(x10, &near_forward, MacroAssembler::kAdrFar);
  __ Br(x10);
  __ B(&fail);
  __ Bind(&near_backward);
  __ Orr(x0, x0, 1 << 1);
  __ B(&test_far);

  __ Bind(&near_forward);
  __ Orr(x0, x0, 1 << 0);
  __ Adr(x10, &near_backward, MacroAssembler::kAdrFar);
  __ Br(x10);

  __ Bind(&test_far);
  __ Adr(x10, &far_forward, MacroAssembler::kAdrFar);
  __ Br(x10);
  __ B(&fail);
  __ Bind(&far_backward);
  __ Orr(x0, x0, 1 << 3);
  __ B(&done);

  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
    if (i % 100 == 0) {
      // If we do land in this code, we do not want to execute so many nops
      // before reaching the end of test (especially if tracing is activated).
      __ b(&fail);
    } else {
      __ nop();
    }
  }


  __ Bind(&far_forward);
  __ Orr(x0, x0, 1 << 2);
  __ Adr(x10, &far_backward, MacroAssembler::kAdrFar);
  __ Br(x10);

  __ B(&done);
  __ Bind(&fail);
  __ Orr(x0, x0, 1 << 4);
  __ Bind(&done);

  END();

  RUN();

  // All four legs visited (bits 0-3), and the fail bit (bit 4) never set.
  CHECK_EQUAL_64(0xf, x0);

  TEARDOWN();
}
1766 
1767 
TEST(branch_cond)1768 TEST(branch_cond) {
1769   INIT_V8();
1770   SETUP();
1771 
1772   Label wrong;
1773 
1774   START();
1775   __ Mov(x0, 0x1);
1776   __ Mov(x1, 0x1);
1777   __ Mov(x2, 0x8000000000000000L);
1778 
1779   // For each 'cmp' instruction below, condition codes other than the ones
1780   // following it would branch.
1781 
1782   __ Cmp(x1, 0);
1783   __ B(&wrong, eq);
1784   __ B(&wrong, lo);
1785   __ B(&wrong, mi);
1786   __ B(&wrong, vs);
1787   __ B(&wrong, ls);
1788   __ B(&wrong, lt);
1789   __ B(&wrong, le);
1790   Label ok_1;
1791   __ B(&ok_1, ne);
1792   __ Mov(x0, 0x0);
1793   __ Bind(&ok_1);
1794 
1795   __ Cmp(x1, 1);
1796   __ B(&wrong, ne);
1797   __ B(&wrong, lo);
1798   __ B(&wrong, mi);
1799   __ B(&wrong, vs);
1800   __ B(&wrong, hi);
1801   __ B(&wrong, lt);
1802   __ B(&wrong, gt);
1803   Label ok_2;
1804   __ B(&ok_2, pl);
1805   __ Mov(x0, 0x0);
1806   __ Bind(&ok_2);
1807 
1808   __ Cmp(x1, 2);
1809   __ B(&wrong, eq);
1810   __ B(&wrong, hs);
1811   __ B(&wrong, pl);
1812   __ B(&wrong, vs);
1813   __ B(&wrong, hi);
1814   __ B(&wrong, ge);
1815   __ B(&wrong, gt);
1816   Label ok_3;
1817   __ B(&ok_3, vc);
1818   __ Mov(x0, 0x0);
1819   __ Bind(&ok_3);
1820 
1821   __ Cmp(x2, 1);
1822   __ B(&wrong, eq);
1823   __ B(&wrong, lo);
1824   __ B(&wrong, mi);
1825   __ B(&wrong, vc);
1826   __ B(&wrong, ls);
1827   __ B(&wrong, ge);
1828   __ B(&wrong, gt);
1829   Label ok_4;
1830   __ B(&ok_4, le);
1831   __ Mov(x0, 0x0);
1832   __ Bind(&ok_4);
1833 
1834   Label ok_5;
1835   __ b(&ok_5, al);
1836   __ Mov(x0, 0x0);
1837   __ Bind(&ok_5);
1838 
1839   Label ok_6;
1840   __ b(&ok_6, nv);
1841   __ Mov(x0, 0x0);
1842   __ Bind(&ok_6);
1843 
1844   END();
1845 
1846   __ Bind(&wrong);
1847   __ Mov(x0, 0x0);
1848   END();
1849 
1850   RUN();
1851 
1852   CHECK_EQUAL_64(0x1, x0);
1853 
1854   TEARDOWN();
1855 }
1856 
1857 
// Test register-indirect branches: Br (jump, lr untouched) and Blr (call,
// lr updated). lr is saved in x29 at the start and restored before END.
TEST(branch_to_reg) {
  INIT_V8();
  SETUP();

  // Test br.
  Label fn1, after_fn1;

  START();
  __ Mov(x29, lr);

  __ Mov(x1, 0);
  __ B(&after_fn1);

  __ Bind(&fn1);
  __ Mov(x0, lr);
  __ Mov(x1, 42);
  __ Br(x0);

  __ Bind(&after_fn1);
  __ Bl(&fn1);

  // Test blr.
  Label fn2, after_fn2;

  __ Mov(x2, 0);
  __ B(&after_fn2);

  __ Bind(&fn2);
  __ Mov(x0, lr);
  __ Mov(x2, 84);
  __ Blr(x0);

  __ Bind(&after_fn2);
  __ Bl(&fn2);
  __ Mov(x3, lr);

  __ Mov(lr, x29);
  END();

  RUN();

  // Blr(x0) must set lr to the instruction following it: x3 (lr after the
  // second Bl) plus one instruction.
  CHECK_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
  CHECK_EQUAL_64(42, x1);
  CHECK_EQUAL_64(84, x2);

  TEARDOWN();
}
1905 
1906 
TEST(compare_branch)1907 TEST(compare_branch) {
1908   INIT_V8();
1909   SETUP();
1910 
1911   START();
1912   __ Mov(x0, 0);
1913   __ Mov(x1, 0);
1914   __ Mov(x2, 0);
1915   __ Mov(x3, 0);
1916   __ Mov(x4, 0);
1917   __ Mov(x5, 0);
1918   __ Mov(x16, 0);
1919   __ Mov(x17, 42);
1920 
1921   Label zt, zt_end;
1922   __ Cbz(w16, &zt);
1923   __ B(&zt_end);
1924   __ Bind(&zt);
1925   __ Mov(x0, 1);
1926   __ Bind(&zt_end);
1927 
1928   Label zf, zf_end;
1929   __ Cbz(x17, &zf);
1930   __ B(&zf_end);
1931   __ Bind(&zf);
1932   __ Mov(x1, 1);
1933   __ Bind(&zf_end);
1934 
1935   Label nzt, nzt_end;
1936   __ Cbnz(w17, &nzt);
1937   __ B(&nzt_end);
1938   __ Bind(&nzt);
1939   __ Mov(x2, 1);
1940   __ Bind(&nzt_end);
1941 
1942   Label nzf, nzf_end;
1943   __ Cbnz(x16, &nzf);
1944   __ B(&nzf_end);
1945   __ Bind(&nzf);
1946   __ Mov(x3, 1);
1947   __ Bind(&nzf_end);
1948 
1949   __ Mov(x18, 0xffffffff00000000UL);
1950 
1951   Label a, a_end;
1952   __ Cbz(w18, &a);
1953   __ B(&a_end);
1954   __ Bind(&a);
1955   __ Mov(x4, 1);
1956   __ Bind(&a_end);
1957 
1958   Label b, b_end;
1959   __ Cbnz(w18, &b);
1960   __ B(&b_end);
1961   __ Bind(&b);
1962   __ Mov(x5, 1);
1963   __ Bind(&b_end);
1964 
1965   END();
1966 
1967   RUN();
1968 
1969   CHECK_EQUAL_64(1, x0);
1970   CHECK_EQUAL_64(0, x1);
1971   CHECK_EQUAL_64(1, x2);
1972   CHECK_EQUAL_64(0, x3);
1973   CHECK_EQUAL_64(1, x4);
1974   CHECK_EQUAL_64(0, x5);
1975 
1976   TEARDOWN();
1977 }
1978 
1979 
// Test Tbz/Tbnz (test single bit and branch) against the alternating-bit
// pattern 0xaaaa... — even bit positions are clear, odd positions are set.
TEST(test_branch) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);
  __ Mov(x3, 0);
  __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);

  Label bz, bz_end;
  __ Tbz(w16, 0, &bz);          // Bit 0 clear: taken.
  __ B(&bz_end);
  __ Bind(&bz);
  __ Mov(x0, 1);
  __ Bind(&bz_end);

  Label bo, bo_end;
  __ Tbz(x16, 63, &bo);         // Bit 63 set: not taken.
  __ B(&bo_end);
  __ Bind(&bo);
  __ Mov(x1, 1);
  __ Bind(&bo_end);

  Label nbz, nbz_end;
  __ Tbnz(x16, 61, &nbz);       // Bit 61 set: taken.
  __ B(&nbz_end);
  __ Bind(&nbz);
  __ Mov(x2, 1);
  __ Bind(&nbz_end);

  Label nbo, nbo_end;
  __ Tbnz(w16, 2, &nbo);        // Bit 2 clear: not taken.
  __ B(&nbo_end);
  __ Bind(&nbo);
  __ Mov(x3, 1);
  __ Bind(&nbo_end);
  END();

  RUN();

  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(0, x1);
  CHECK_EQUAL_64(1, x2);
  CHECK_EQUAL_64(0, x3);

  TEARDOWN();
}
2029 
2030 
TEST(far_branch_backward)2031 TEST(far_branch_backward) {
2032   INIT_V8();
2033 
2034   // Test that the MacroAssembler correctly resolves backward branches to labels
2035   // that are outside the immediate range of branch instructions.
2036   int max_range =
2037     std::max(Instruction::ImmBranchRange(TestBranchType),
2038              std::max(Instruction::ImmBranchRange(CompareBranchType),
2039                       Instruction::ImmBranchRange(CondBranchType)));
2040 
2041   SETUP_SIZE(max_range + 1000 * kInstructionSize);
2042 
2043   START();
2044 
2045   Label done, fail;
2046   Label test_tbz, test_cbz, test_bcond;
2047   Label success_tbz, success_cbz, success_bcond;
2048 
2049   __ Mov(x0, 0);
2050   __ Mov(x1, 1);
2051   __ Mov(x10, 0);
2052 
2053   __ B(&test_tbz);
2054   __ Bind(&success_tbz);
2055   __ Orr(x0, x0, 1 << 0);
2056   __ B(&test_cbz);
2057   __ Bind(&success_cbz);
2058   __ Orr(x0, x0, 1 << 1);
2059   __ B(&test_bcond);
2060   __ Bind(&success_bcond);
2061   __ Orr(x0, x0, 1 << 2);
2062 
2063   __ B(&done);
2064 
2065   // Generate enough code to overflow the immediate range of the three types of
2066   // branches below.
2067   for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2068     if (i % 100 == 0) {
2069       // If we do land in this code, we do not want to execute so many nops
2070       // before reaching the end of test (especially if tracing is activated).
2071       __ B(&fail);
2072     } else {
2073       __ Nop();
2074     }
2075   }
2076   __ B(&fail);
2077 
2078   __ Bind(&test_tbz);
2079   __ Tbz(x10, 7, &success_tbz);
2080   __ Bind(&test_cbz);
2081   __ Cbz(x10, &success_cbz);
2082   __ Bind(&test_bcond);
2083   __ Cmp(x10, 0);
2084   __ B(eq, &success_bcond);
2085 
2086   // For each out-of-range branch instructions, at least two instructions should
2087   // have been generated.
2088   CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
2089 
2090   __ Bind(&fail);
2091   __ Mov(x1, 0);
2092   __ Bind(&done);
2093 
2094   END();
2095 
2096   RUN();
2097 
2098   CHECK_EQUAL_64(0x7, x0);
2099   CHECK_EQUAL_64(0x1, x1);
2100 
2101   TEARDOWN();
2102 }
2103 
2104 
TEST(far_branch_simple_veneer)2105 TEST(far_branch_simple_veneer) {
2106   INIT_V8();
2107 
2108   // Test that the MacroAssembler correctly emits veneers for forward branches
2109   // to labels that are outside the immediate range of branch instructions.
2110   int max_range =
2111     std::max(Instruction::ImmBranchRange(TestBranchType),
2112              std::max(Instruction::ImmBranchRange(CompareBranchType),
2113                       Instruction::ImmBranchRange(CondBranchType)));
2114 
2115   SETUP_SIZE(max_range + 1000 * kInstructionSize);
2116 
2117   START();
2118 
2119   Label done, fail;
2120   Label test_tbz, test_cbz, test_bcond;
2121   Label success_tbz, success_cbz, success_bcond;
2122 
2123   __ Mov(x0, 0);
2124   __ Mov(x1, 1);
2125   __ Mov(x10, 0);
2126 
2127   __ Bind(&test_tbz);
2128   __ Tbz(x10, 7, &success_tbz);
2129   __ Bind(&test_cbz);
2130   __ Cbz(x10, &success_cbz);
2131   __ Bind(&test_bcond);
2132   __ Cmp(x10, 0);
2133   __ B(eq, &success_bcond);
2134 
2135   // Generate enough code to overflow the immediate range of the three types of
2136   // branches below.
2137   for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2138     if (i % 100 == 0) {
2139       // If we do land in this code, we do not want to execute so many nops
2140       // before reaching the end of test (especially if tracing is activated).
2141       // Also, the branches give the MacroAssembler the opportunity to emit the
2142       // veneers.
2143       __ B(&fail);
2144     } else {
2145       __ Nop();
2146     }
2147   }
2148   __ B(&fail);
2149 
2150   __ Bind(&success_tbz);
2151   __ Orr(x0, x0, 1 << 0);
2152   __ B(&test_cbz);
2153   __ Bind(&success_cbz);
2154   __ Orr(x0, x0, 1 << 1);
2155   __ B(&test_bcond);
2156   __ Bind(&success_bcond);
2157   __ Orr(x0, x0, 1 << 2);
2158 
2159   __ B(&done);
2160   __ Bind(&fail);
2161   __ Mov(x1, 0);
2162   __ Bind(&done);
2163 
2164   END();
2165 
2166   RUN();
2167 
2168   CHECK_EQUAL_64(0x7, x0);
2169   CHECK_EQUAL_64(0x1, x1);
2170 
2171   TEARDOWN();
2172 }
2173 
2174 
TEST(far_branch_veneer_link_chain) {
  INIT_V8();

  // Test that the MacroAssembler correctly emits veneers for forward branches
  // that target out-of-range labels and are part of multiple instructions
  // jumping to that label.
  //
  // We test the three situations with the different types of instruction:
  // (1)- When the branch is at the start of the chain with tbz.
  // (2)- When the branch is in the middle of the chain with cbz.
  // (3)- When the branch is at the end of the chain with bcond.
  // Use the widest of the three branch ranges so that the padding emitted
  // below pushes all three branch types out of range of their targets.
  int max_range =
    std::max(Instruction::ImmBranchRange(TestBranchType),
             std::max(Instruction::ImmBranchRange(CompareBranchType),
                      Instruction::ImmBranchRange(CondBranchType)));

  SETUP_SIZE(max_range + 1000 * kInstructionSize);

  START();

  Label skip, fail, done;
  Label test_tbz, test_cbz, test_bcond;
  Label success_tbz, success_cbz, success_bcond;

  // x0 accumulates one bit per branch type that reaches its success block;
  // x1 is cleared if any failure path is taken.
  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x10, 0);

  __ B(&skip);
  // Branches at the start of the chain for situations (2) and (3).
  __ B(&success_cbz);
  __ B(&success_bcond);
  __ Nop();
  __ B(&success_bcond);
  __ B(&success_cbz);
  __ Bind(&skip);

  // The branches under test. x10 is zero, so the tbz on bit 7, the cbz, and
  // the b.eq are all expected to be taken.
  __ Bind(&test_tbz);
  __ Tbz(x10, 7, &success_tbz);
  __ Bind(&test_cbz);
  __ Cbz(x10, &success_cbz);
  __ Bind(&test_bcond);
  __ Cmp(x10, 0);
  __ B(eq, &success_bcond);

  // Reuse the 'skip' label to build a second set of chain links.
  skip.Unuse();
  __ B(&skip);
  // Branches at the end of the chain for situations (1) and (2).
  __ B(&success_cbz);
  __ B(&success_tbz);
  __ Nop();
  __ B(&success_tbz);
  __ B(&success_cbz);
  __ Bind(&skip);

  // Generate enough code to overflow the immediate range of the three types of
  // branches below.
  for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
    if (i % 100 == 0) {
      // If we do land in this code, we do not want to execute so many nops
      // before reaching the end of test (especially if tracing is activated).
      // Also, the branches give the MacroAssembler the opportunity to emit the
      // veneers.
      __ B(&fail);
    } else {
      __ Nop();
    }
  }
  __ B(&fail);

  // Each success block records its bit in x0 and chains on to exercise the
  // next branch type.
  __ Bind(&success_tbz);
  __ Orr(x0, x0, 1 << 0);
  __ B(&test_cbz);
  __ Bind(&success_cbz);
  __ Orr(x0, x0, 1 << 1);
  __ B(&test_bcond);
  __ Bind(&success_bcond);
  __ Orr(x0, x0, 1 << 2);

  __ B(&done);
  __ Bind(&fail);
  __ Mov(x1, 0);
  __ Bind(&done);

  END();

  RUN();

  // All three success bits set, and no failure path executed.
  CHECK_EQUAL_64(0x7, x0);
  CHECK_EQUAL_64(0x1, x1);

  TEARDOWN();
}
2268 
2269 
TEST(far_branch_veneer_broken_link_chain) {
  INIT_V8();

  // Check that the MacroAssembler correctly handles the situation when removing
  // a branch from the link chain of a label and the two links on each side of
  // the removed branch cannot be linked together (out of range).
  //
  // We test with tbz because it has a small range.
  int max_range = Instruction::ImmBranchRange(TestBranchType);
  // Spacing between links: more than half the tbz range, so adjacent links
  // are in range of each other but links two steps apart are not.
  int inter_range = max_range / 2 + max_range / 10;

  SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);

  START();

  Label skip, fail, done;
  Label test_1, test_2, test_3;
  Label far_target;

  __ Mov(x0, 0);  // Indicates the origin of the branch.
  __ Mov(x1, 1);
  __ Mov(x10, 0);

  // First instruction in the label chain.
  __ Bind(&test_1);
  __ Mov(x0, 1);
  __ B(&far_target);

  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
    if (i % 100 == 0) {
      // Do not allow generating veneers. They should not be needed.
      // (Lower-case 'b' emits the raw branch without veneer bookkeeping.)
      __ b(&fail);
    } else {
      __ Nop();
    }
  }

  // Will need a veneer to point to reach the target.
  __ Bind(&test_2);
  __ Mov(x0, 2);
  __ Tbz(x10, 7, &far_target);

  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
    if (i % 100 == 0) {
      // Do not allow generating veneers. They should not be needed.
      __ b(&fail);
    } else {
      __ Nop();
    }
  }

  // Does not need a veneer to reach the target, but the initial branch
  // instruction is out of range.
  __ Bind(&test_3);
  __ Mov(x0, 3);
  __ Tbz(x10, 7, &far_target);

  for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
    if (i % 100 == 0) {
      // Allow generating veneers.
      __ B(&fail);
    } else {
      __ Nop();
    }
  }

  __ B(&fail);

  // Loop back through each test site in turn; x0 tells which branch brought
  // control here.
  __ Bind(&far_target);
  __ Cmp(x0, 1);
  __ B(eq, &test_2);
  __ Cmp(x0, 2);
  __ B(eq, &test_3);

  __ B(&done);
  __ Bind(&fail);
  __ Mov(x1, 0);
  __ Bind(&done);

  END();

  RUN();

  // x0 == 3: all three branches reached the target in order; x1 == 1: the
  // fail path never ran.
  CHECK_EQUAL_64(0x3, x0);
  CHECK_EQUAL_64(0x1, x1);

  TEARDOWN();
}
2358 
2359 
TEST(branch_type) {
  INIT_V8();

  SETUP();

  Label fail, done;

  START();
  // x0 is the failure flag: it stays 0 unless some branch misbehaves.
  __ Mov(x0, 0x0);
  __ Mov(x10, 0x7);
  __ Mov(x11, 0x0);

  // Test non taken branches.
  __ Cmp(x10, 0x7);
  __ B(&fail, ne);                     // Condition false: x10 == 7.
  __ B(&fail, never);                  // 'never' must not branch.
  __ B(&fail, reg_zero, x10);          // x10 is non-zero.
  __ B(&fail, reg_not_zero, x11);      // x11 is zero.
  __ B(&fail, reg_bit_clear, x10, 0);  // Bit 0 of 0x7 is set.
  __ B(&fail, reg_bit_set, x10, 3);    // Bit 3 of 0x7 is clear.

  // Test taken branches.
  Label l1, l2, l3, l4, l5;
  __ Cmp(x10, 0x7);
  __ B(&l1, eq);
  __ B(&fail);
  __ Bind(&l1);
  __ B(&l2, always);
  __ B(&fail);
  __ Bind(&l2);
  __ B(&l3, reg_not_zero, x10);
  __ B(&fail);
  __ Bind(&l3);
  __ B(&l4, reg_bit_clear, x10, 15);
  __ B(&fail);
  __ Bind(&l4);
  __ B(&l5, reg_bit_set, x10, 1);
  __ B(&fail);
  __ Bind(&l5);

  __ B(&done);

  __ Bind(&fail);
  __ Mov(x0, 0x1);

  __ Bind(&done);

  END();

  RUN();

  // No branch fell through to the fail block.
  CHECK_EQUAL_64(0x0, x0);

  TEARDOWN();
}
2415 
2416 
TEST(ldr_str_offset) {
  INIT_V8();
  SETUP();

  // Immediate-offset loads and stores of byte, half-word, word and
  // double-word values; the base registers must not be modified.
  uint64_t src_data[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst_data[5] = {0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x17, src_addr);
  __ Mov(x18, dst_addr);
  // Word accesses.
  __ Ldr(w0, MemOperand(x17));
  __ Str(w0, MemOperand(x18));
  __ Ldr(w1, MemOperand(x17, 4));
  __ Str(w1, MemOperand(x18, 12));
  // Double-word access.
  __ Ldr(x2, MemOperand(x17, 8));
  __ Str(x2, MemOperand(x18, 16));
  // Byte access.
  __ Ldrb(w3, MemOperand(x17, 1));
  __ Strb(w3, MemOperand(x18, 25));
  // Half-word access.
  __ Ldrh(w4, MemOperand(x17, 2));
  __ Strh(w4, MemOperand(x18, 33));
  END();

  RUN();

  CHECK_EQUAL_64(0x76543210, x0);
  CHECK_EQUAL_64(0x76543210, dst_data[0]);
  CHECK_EQUAL_64(0xfedcba98, x1);
  CHECK_EQUAL_64(0xfedcba9800000000UL, dst_data[1]);
  CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
  CHECK_EQUAL_64(0x0123456789abcdefUL, dst_data[2]);
  CHECK_EQUAL_64(0x32, x3);
  CHECK_EQUAL_64(0x3200, dst_data[3]);
  CHECK_EQUAL_64(0x7654, x4);
  CHECK_EQUAL_64(0x765400, dst_data[4]);
  // Plain-offset addressing performs no write-back.
  CHECK_EQUAL_64(src_addr, x17);
  CHECK_EQUAL_64(dst_addr, x18);

  TEARDOWN();
}
2458 
2459 
TEST(ldr_str_wide) {
  INIT_V8();
  SETUP();

  // Loads and stores with offsets near the top of the scaled-immediate range,
  // and large pre-/post-index adjustments.
  uint32_t src_data[8192];
  uint32_t dst_data[8192];
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);
  memset(src_data, 0xaa, 8192 * sizeof(src_data[0]));
  memset(dst_data, 0xaa, 8192 * sizeof(dst_data[0]));
  // Place distinct markers at the elements the code below accesses.
  src_data[0] = 0;
  src_data[6144] = 6144;
  src_data[8191] = 8191;

  START();
  __ Mov(x22, src_addr);
  __ Mov(x23, dst_addr);
  __ Mov(x24, src_addr);
  __ Mov(x25, dst_addr);
  __ Mov(x26, src_addr);
  __ Mov(x27, dst_addr);

  // Maximal plain offset, then wide post-index and pre-index updates.
  __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src_data[0])));
  __ Str(w0, MemOperand(x23, 8191 * sizeof(dst_data[0])));
  __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src_data[0]), PostIndex));
  __ Str(w1, MemOperand(x25, 4096 * sizeof(dst_data[0]), PostIndex));
  __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src_data[0]), PreIndex));
  __ Str(w2, MemOperand(x27, 6144 * sizeof(dst_data[0]), PreIndex));
  END();

  RUN();

  CHECK_EQUAL_32(8191, w0);
  CHECK_EQUAL_32(8191, dst_data[8191]);
  CHECK_EQUAL_64(src_addr, x22);
  CHECK_EQUAL_64(dst_addr, x23);
  CHECK_EQUAL_32(0, w1);
  CHECK_EQUAL_32(0, dst_data[0]);
  CHECK_EQUAL_64(src_addr + 4096 * sizeof(src_data[0]), x24);
  CHECK_EQUAL_64(dst_addr + 4096 * sizeof(dst_data[0]), x25);
  CHECK_EQUAL_32(6144, w2);
  CHECK_EQUAL_32(6144, dst_data[6144]);
  CHECK_EQUAL_64(src_addr + 6144 * sizeof(src_data[0]), x26);
  CHECK_EQUAL_64(dst_addr + 6144 * sizeof(dst_data[0]), x27);

  TEARDOWN();
}
2507 
2508 
TEST(ldr_str_preindex) {
  INIT_V8();
  SETUP();

  // Pre-indexed loads and stores: the offset is applied to the base register
  // before the access, and the updated base is written back.
  uint64_t src_data[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst_data[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x17, src_addr);
  __ Mov(x18, dst_addr);
  __ Mov(x19, src_addr);
  __ Mov(x20, dst_addr);
  __ Mov(x21, src_addr + 16);
  __ Mov(x22, dst_addr + 40);
  __ Mov(x23, src_addr);
  __ Mov(x24, dst_addr);
  __ Mov(x25, src_addr);
  __ Mov(x26, dst_addr);
  // Word, double-word, negative-offset, byte and half-word accesses.
  __ Ldr(w0, MemOperand(x17, 4, PreIndex));
  __ Str(w0, MemOperand(x18, 12, PreIndex));
  __ Ldr(x1, MemOperand(x19, 8, PreIndex));
  __ Str(x1, MemOperand(x20, 16, PreIndex));
  __ Ldr(w2, MemOperand(x21, -4, PreIndex));
  __ Str(w2, MemOperand(x22, -4, PreIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
  __ Strb(w3, MemOperand(x24, 25, PreIndex));
  __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
  __ Strh(w4, MemOperand(x26, 41, PreIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0xfedcba98, x0);
  CHECK_EQUAL_64(0xfedcba9800000000UL, dst_data[1]);
  CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
  CHECK_EQUAL_64(0x0123456789abcdefUL, dst_data[2]);
  CHECK_EQUAL_64(0x01234567, x2);
  CHECK_EQUAL_64(0x0123456700000000UL, dst_data[4]);
  CHECK_EQUAL_64(0x32, x3);
  CHECK_EQUAL_64(0x3200, dst_data[3]);
  CHECK_EQUAL_64(0x9876, x4);
  CHECK_EQUAL_64(0x987600, dst_data[5]);
  // Write-back: each base now points at the location just accessed.
  CHECK_EQUAL_64(src_addr + 4, x17);
  CHECK_EQUAL_64(dst_addr + 12, x18);
  CHECK_EQUAL_64(src_addr + 8, x19);
  CHECK_EQUAL_64(dst_addr + 16, x20);
  CHECK_EQUAL_64(src_addr + 12, x21);
  CHECK_EQUAL_64(dst_addr + 36, x22);
  CHECK_EQUAL_64(src_addr + 1, x23);
  CHECK_EQUAL_64(dst_addr + 25, x24);
  CHECK_EQUAL_64(src_addr + 3, x25);
  CHECK_EQUAL_64(dst_addr + 41, x26);

  TEARDOWN();
}
2566 
2567 
TEST(ldr_str_postindex) {
  INIT_V8();
  SETUP();

  // Post-indexed loads and stores: the access uses the base as-is, then the
  // offset (possibly negative) is added to the base and written back.
  uint64_t src_data[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
  uint64_t dst_data[6] = {0, 0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  // Bases start at the element to be accessed.
  __ Mov(x17, src_addr + 4);
  __ Mov(x18, dst_addr + 12);
  __ Mov(x19, src_addr + 8);
  __ Mov(x20, dst_addr + 16);
  __ Mov(x21, src_addr + 8);
  __ Mov(x22, dst_addr + 32);
  __ Mov(x23, src_addr + 1);
  __ Mov(x24, dst_addr + 25);
  __ Mov(x25, src_addr + 3);
  __ Mov(x26, dst_addr + 41);
  __ Ldr(w0, MemOperand(x17, 4, PostIndex));
  __ Str(w0, MemOperand(x18, 12, PostIndex));
  __ Ldr(x1, MemOperand(x19, 8, PostIndex));
  __ Str(x1, MemOperand(x20, 16, PostIndex));
  __ Ldr(x2, MemOperand(x21, -8, PostIndex));
  __ Str(x2, MemOperand(x22, -32, PostIndex));
  __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
  __ Strb(w3, MemOperand(x24, 5, PostIndex));
  __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
  __ Strh(w4, MemOperand(x26, -41, PostIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0xfedcba98, x0);
  CHECK_EQUAL_64(0xfedcba9800000000UL, dst_data[1]);
  CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
  CHECK_EQUAL_64(0x0123456789abcdefUL, dst_data[2]);
  CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
  CHECK_EQUAL_64(0x0123456789abcdefUL, dst_data[4]);
  CHECK_EQUAL_64(0x32, x3);
  CHECK_EQUAL_64(0x3200, dst_data[3]);
  CHECK_EQUAL_64(0x9876, x4);
  CHECK_EQUAL_64(0x987600, dst_data[5]);
  // Write-back: each base has moved by its post-index offset.
  CHECK_EQUAL_64(src_addr + 8, x17);
  CHECK_EQUAL_64(dst_addr + 24, x18);
  CHECK_EQUAL_64(src_addr + 16, x19);
  CHECK_EQUAL_64(dst_addr + 32, x20);
  CHECK_EQUAL_64(src_addr, x21);
  CHECK_EQUAL_64(dst_addr, x22);
  CHECK_EQUAL_64(src_addr + 2, x23);
  CHECK_EQUAL_64(dst_addr + 30, x24);
  CHECK_EQUAL_64(src_addr, x25);
  CHECK_EQUAL_64(dst_addr, x26);

  TEARDOWN();
}
2625 
2626 
TEST(load_signed) {
  INIT_V8();
  SETUP();

  // Sign-extending loads. The first word holds values with the sign bit set
  // at every width; the second holds the corresponding positive values.
  uint32_t src_data[2] = {0x80008080, 0x7fff7f7f};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);

  START();
  __ Mov(x24, src_addr);
  // Byte, sign-extended into a W register.
  __ Ldrsb(w0, MemOperand(x24));
  __ Ldrsb(w1, MemOperand(x24, 4));
  // Half-word, sign-extended into a W register.
  __ Ldrsh(w2, MemOperand(x24));
  __ Ldrsh(w3, MemOperand(x24, 4));
  // Byte, sign-extended into an X register.
  __ Ldrsb(x4, MemOperand(x24));
  __ Ldrsb(x5, MemOperand(x24, 4));
  // Half-word, sign-extended into an X register.
  __ Ldrsh(x6, MemOperand(x24));
  __ Ldrsh(x7, MemOperand(x24, 4));
  // Word, sign-extended into an X register.
  __ Ldrsw(x8, MemOperand(x24));
  __ Ldrsw(x9, MemOperand(x24, 4));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffff80, x0);
  CHECK_EQUAL_64(0x0000007f, x1);
  CHECK_EQUAL_64(0xffff8080, x2);
  CHECK_EQUAL_64(0x00007f7f, x3);
  CHECK_EQUAL_64(0xffffffffffffff80UL, x4);
  CHECK_EQUAL_64(0x000000000000007fUL, x5);
  CHECK_EQUAL_64(0xffffffffffff8080UL, x6);
  CHECK_EQUAL_64(0x0000000000007f7fUL, x7);
  CHECK_EQUAL_64(0xffffffff80008080UL, x8);
  CHECK_EQUAL_64(0x000000007fff7f7fUL, x9);

  TEARDOWN();
}
2663 
2664 
TEST(load_store_regoffset) {
  INIT_V8();
  SETUP();

  // Register-offset addressing, including sign-extended 32-bit offsets
  // (SXTW) with and without shifts.
  uint32_t src_data[3] = {1, 2, 3};
  uint32_t dst_data[4] = {0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x16, src_addr);
  __ Mov(x17, dst_addr);
  __ Mov(x18, src_addr + 3 * sizeof(src_data[0]));
  __ Mov(x19, dst_addr + 3 * sizeof(dst_data[0]));
  __ Mov(x20, dst_addr + 4 * sizeof(dst_data[0]));
  // Offsets used below.
  __ Mov(x24, 0);
  __ Mov(x25, 4);
  __ Mov(x26, -4);
  __ Mov(x27, 0xfffffffc);  // 32-bit -4.
  __ Mov(x28, 0xfffffffe);  // 32-bit -2.
  __ Mov(x29, 0xffffffff);  // 32-bit -1.

  __ Ldr(w0, MemOperand(x16, x24));
  __ Ldr(x1, MemOperand(x16, x25));
  __ Ldr(w2, MemOperand(x18, x26));
  __ Ldr(w3, MemOperand(x18, x27, SXTW));
  __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
  __ Str(w0, MemOperand(x17, x24));
  __ Str(x1, MemOperand(x17, x25));
  __ Str(w2, MemOperand(x20, x29, SXTW, 2));
  END();

  RUN();

  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(0x0000000300000002UL, x1);
  CHECK_EQUAL_64(3, x2);
  CHECK_EQUAL_64(3, x3);
  CHECK_EQUAL_64(2, x4);
  CHECK_EQUAL_32(1, dst_data[0]);
  CHECK_EQUAL_32(2, dst_data[1]);
  CHECK_EQUAL_32(3, dst_data[2]);
  CHECK_EQUAL_32(3, dst_data[3]);

  TEARDOWN();
}
2711 
2712 
TEST(load_store_float) {
  INIT_V8();
  SETUP();

  // Single-precision FP loads and stores with plain-offset, post-index and
  // pre-index addressing.
  float src_data[3] = {1.0, 2.0, 3.0};
  float dst_data[3] = {0.0, 0.0, 0.0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x17, src_addr);
  __ Mov(x18, dst_addr);
  __ Mov(x19, src_addr);
  __ Mov(x20, dst_addr);
  __ Mov(x21, src_addr);
  __ Mov(x22, dst_addr);
  __ Ldr(s0, MemOperand(x17, sizeof(src_data[0])));
  __ Str(s0, MemOperand(x18, sizeof(dst_data[0]), PostIndex));
  __ Ldr(s1, MemOperand(x19, sizeof(src_data[0]), PostIndex));
  __ Str(s1, MemOperand(x20, 2 * sizeof(dst_data[0]), PreIndex));
  __ Ldr(s2, MemOperand(x21, 2 * sizeof(src_data[0]), PreIndex));
  __ Str(s2, MemOperand(x22, sizeof(dst_data[0])));
  END();

  RUN();

  CHECK_EQUAL_FP32(2.0, s0);
  CHECK_EQUAL_FP32(2.0, dst_data[0]);
  CHECK_EQUAL_FP32(1.0, s1);
  CHECK_EQUAL_FP32(1.0, dst_data[2]);
  CHECK_EQUAL_FP32(3.0, s2);
  CHECK_EQUAL_FP32(3.0, dst_data[1]);
  // Only the indexed forms write back to their base registers.
  CHECK_EQUAL_64(src_addr, x17);
  CHECK_EQUAL_64(dst_addr + sizeof(dst_data[0]), x18);
  CHECK_EQUAL_64(src_addr + sizeof(src_data[0]), x19);
  CHECK_EQUAL_64(dst_addr + 2 * sizeof(dst_data[0]), x20);
  CHECK_EQUAL_64(src_addr + 2 * sizeof(src_data[0]), x21);
  CHECK_EQUAL_64(dst_addr, x22);

  TEARDOWN();
}
2754 
2755 
TEST(load_store_double) {
  INIT_V8();
  SETUP();

  // Double-precision FP loads and stores with plain-offset, post-index and
  // pre-index addressing.
  double src_data[3] = {1.0, 2.0, 3.0};
  double dst_data[3] = {0.0, 0.0, 0.0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x17, src_addr);
  __ Mov(x18, dst_addr);
  __ Mov(x19, src_addr);
  __ Mov(x20, dst_addr);
  __ Mov(x21, src_addr);
  __ Mov(x22, dst_addr);
  __ Ldr(d0, MemOperand(x17, sizeof(src_data[0])));
  __ Str(d0, MemOperand(x18, sizeof(dst_data[0]), PostIndex));
  __ Ldr(d1, MemOperand(x19, sizeof(src_data[0]), PostIndex));
  __ Str(d1, MemOperand(x20, 2 * sizeof(dst_data[0]), PreIndex));
  __ Ldr(d2, MemOperand(x21, 2 * sizeof(src_data[0]), PreIndex));
  __ Str(d2, MemOperand(x22, sizeof(dst_data[0])));
  END();

  RUN();

  CHECK_EQUAL_FP64(2.0, d0);
  CHECK_EQUAL_FP64(2.0, dst_data[0]);
  CHECK_EQUAL_FP64(1.0, d1);
  CHECK_EQUAL_FP64(1.0, dst_data[2]);
  CHECK_EQUAL_FP64(3.0, d2);
  CHECK_EQUAL_FP64(3.0, dst_data[1]);
  // Only the indexed forms write back to their base registers.
  CHECK_EQUAL_64(src_addr, x17);
  CHECK_EQUAL_64(dst_addr + sizeof(dst_data[0]), x18);
  CHECK_EQUAL_64(src_addr + sizeof(src_data[0]), x19);
  CHECK_EQUAL_64(dst_addr + 2 * sizeof(dst_data[0]), x20);
  CHECK_EQUAL_64(src_addr + 2 * sizeof(src_data[0]), x21);
  CHECK_EQUAL_64(dst_addr, x22);

  TEARDOWN();
}
2797 
2798 
TEST(ldp_stp_float) {
  INIT_V8();
  SETUP();

  // Single-precision load/store pair with post-index (load) and pre-index
  // (store) write-back, storing the pair in swapped order.
  float src_data[2] = {1.0, 2.0};
  float dst_data[3] = {0.0, 0.0, 0.0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x16, src_addr);
  __ Mov(x17, dst_addr);
  __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src_data[0]), PostIndex));
  __ Stp(s0, s31, MemOperand(x17, sizeof(dst_data[1]), PreIndex));
  END();

  RUN();

  CHECK_EQUAL_FP32(1.0, s31);
  CHECK_EQUAL_FP32(2.0, s0);
  CHECK_EQUAL_FP32(0.0, dst_data[0]);  // Skipped by the pre-index offset.
  CHECK_EQUAL_FP32(2.0, dst_data[1]);
  CHECK_EQUAL_FP32(1.0, dst_data[2]);
  CHECK_EQUAL_64(src_addr + 2 * sizeof(src_data[0]), x16);
  CHECK_EQUAL_64(dst_addr + sizeof(dst_data[1]), x17);

  TEARDOWN();
}
2827 
2828 
TEST(ldp_stp_double) {
  INIT_V8();
  SETUP();

  // Double-precision load/store pair with post-index (load) and pre-index
  // (store) write-back, storing the pair in swapped order.
  double src_data[2] = {1.0, 2.0};
  double dst_data[3] = {0.0, 0.0, 0.0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x16, src_addr);
  __ Mov(x17, dst_addr);
  __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src_data[0]), PostIndex));
  __ Stp(d0, d31, MemOperand(x17, sizeof(dst_data[1]), PreIndex));
  END();

  RUN();

  CHECK_EQUAL_FP64(1.0, d31);
  CHECK_EQUAL_FP64(2.0, d0);
  CHECK_EQUAL_FP64(0.0, dst_data[0]);  // Skipped by the pre-index offset.
  CHECK_EQUAL_FP64(2.0, dst_data[1]);
  CHECK_EQUAL_FP64(1.0, dst_data[2]);
  CHECK_EQUAL_64(src_addr + 2 * sizeof(src_data[0]), x16);
  CHECK_EQUAL_64(dst_addr + sizeof(dst_data[1]), x17);

  TEARDOWN();
}
2857 
2858 
TEST(ldp_stp_offset) {
  INIT_V8();
  SETUP();

  // Load/store pair with small positive and negative immediate offsets, for
  // both W and X register pairs.
  uint64_t src_data[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                          0xffeeddccbbaa9988UL};
  uint64_t dst_data[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x16, src_addr);
  __ Mov(x17, dst_addr);
  __ Mov(x18, src_addr + 24);
  __ Mov(x19, dst_addr + 56);
  __ Ldp(w0, w1, MemOperand(x16));
  __ Ldp(w2, w3, MemOperand(x16, 4));
  __ Ldp(x4, x5, MemOperand(x16, 8));
  __ Ldp(w6, w7, MemOperand(x18, -12));
  __ Ldp(x8, x9, MemOperand(x18, -16));
  __ Stp(w0, w1, MemOperand(x17));
  __ Stp(w2, w3, MemOperand(x17, 8));
  __ Stp(x4, x5, MemOperand(x17, 16));
  __ Stp(w6, w7, MemOperand(x19, -24));
  __ Stp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  CHECK_EQUAL_64(0x44556677, x0);
  CHECK_EQUAL_64(0x00112233, x1);
  CHECK_EQUAL_64(0x0011223344556677UL, dst_data[0]);
  CHECK_EQUAL_64(0x00112233, x2);
  CHECK_EQUAL_64(0xccddeeff, x3);
  CHECK_EQUAL_64(0xccddeeff00112233UL, dst_data[1]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[2]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[3]);
  CHECK_EQUAL_64(0x8899aabb, x6);
  CHECK_EQUAL_64(0xbbaa9988, x7);
  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst_data[4]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[5]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[6]);
  // Plain-offset addressing performs no write-back.
  CHECK_EQUAL_64(src_addr, x16);
  CHECK_EQUAL_64(dst_addr, x17);
  CHECK_EQUAL_64(src_addr + 24, x18);
  CHECK_EQUAL_64(dst_addr + 56, x19);

  TEARDOWN();
}
2912 
2913 
TEST(ldp_stp_offset_wide) {
  INIT_V8();
  SETUP();

  // Same accesses as ldp_stp_offset, but with the bases displaced so that the
  // effective offsets exceed the ldp/stp immediate range.
  uint64_t src_data[3] = {0x0011223344556677, 0x8899aabbccddeeff,
                          0xffeeddccbbaa9988};
  uint64_t dst_data[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);
  // Move base too far from the array to force multiple instructions
  // to be emitted.
  const int64_t base_delta = 1024;

  START();
  __ Mov(x20, src_addr - base_delta);
  __ Mov(x21, dst_addr - base_delta);
  __ Mov(x18, src_addr + base_delta + 24);
  __ Mov(x19, dst_addr + base_delta + 56);
  __ Ldp(w0, w1, MemOperand(x20, base_delta));
  __ Ldp(w2, w3, MemOperand(x20, base_delta + 4));
  __ Ldp(x4, x5, MemOperand(x20, base_delta + 8));
  __ Ldp(w6, w7, MemOperand(x18, -12 - base_delta));
  __ Ldp(x8, x9, MemOperand(x18, -16 - base_delta));
  __ Stp(w0, w1, MemOperand(x21, base_delta));
  __ Stp(w2, w3, MemOperand(x21, base_delta + 8));
  __ Stp(x4, x5, MemOperand(x21, base_delta + 16));
  __ Stp(w6, w7, MemOperand(x19, -24 - base_delta));
  __ Stp(x8, x9, MemOperand(x19, -16 - base_delta));
  END();

  RUN();

  CHECK_EQUAL_64(0x44556677, x0);
  CHECK_EQUAL_64(0x00112233, x1);
  CHECK_EQUAL_64(0x0011223344556677UL, dst_data[0]);
  CHECK_EQUAL_64(0x00112233, x2);
  CHECK_EQUAL_64(0xccddeeff, x3);
  CHECK_EQUAL_64(0xccddeeff00112233UL, dst_data[1]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[2]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[3]);
  CHECK_EQUAL_64(0x8899aabb, x6);
  CHECK_EQUAL_64(0xbbaa9988, x7);
  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst_data[4]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[5]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[6]);
  // The base registers are not written back.
  CHECK_EQUAL_64(src_addr - base_delta, x20);
  CHECK_EQUAL_64(dst_addr - base_delta, x21);
  CHECK_EQUAL_64(src_addr + base_delta + 24, x18);
  CHECK_EQUAL_64(dst_addr + base_delta + 56, x19);

  TEARDOWN();
}
2970 
2971 
TEST(ldnp_stnp_offset) {
  INIT_V8();
  SETUP();

  // Non-temporal load/store pair; mirrors the ldp_stp_offset test using
  // Ldnp/Stnp, which only support plain immediate offsets.
  uint64_t src_data[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                          0xffeeddccbbaa9988UL};
  uint64_t dst_data[7] = {0, 0, 0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x16, src_addr);
  __ Mov(x17, dst_addr);
  __ Mov(x18, src_addr + 24);
  __ Mov(x19, dst_addr + 56);
  __ Ldnp(w0, w1, MemOperand(x16));
  __ Ldnp(w2, w3, MemOperand(x16, 4));
  __ Ldnp(x4, x5, MemOperand(x16, 8));
  __ Ldnp(w6, w7, MemOperand(x18, -12));
  __ Ldnp(x8, x9, MemOperand(x18, -16));
  __ Stnp(w0, w1, MemOperand(x17));
  __ Stnp(w2, w3, MemOperand(x17, 8));
  __ Stnp(x4, x5, MemOperand(x17, 16));
  __ Stnp(w6, w7, MemOperand(x19, -24));
  __ Stnp(x8, x9, MemOperand(x19, -16));
  END();

  RUN();

  CHECK_EQUAL_64(0x44556677, x0);
  CHECK_EQUAL_64(0x00112233, x1);
  CHECK_EQUAL_64(0x0011223344556677UL, dst_data[0]);
  CHECK_EQUAL_64(0x00112233, x2);
  CHECK_EQUAL_64(0xccddeeff, x3);
  CHECK_EQUAL_64(0xccddeeff00112233UL, dst_data[1]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[2]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[3]);
  CHECK_EQUAL_64(0x8899aabb, x6);
  CHECK_EQUAL_64(0xbbaa9988, x7);
  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst_data[4]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[5]);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[6]);
  // No write-back with plain offsets.
  CHECK_EQUAL_64(src_addr, x16);
  CHECK_EQUAL_64(dst_addr, x17);
  CHECK_EQUAL_64(src_addr + 24, x18);
  CHECK_EQUAL_64(dst_addr + 56, x19);

  TEARDOWN();
}
3025 
3026 
TEST(ldp_stp_preindex) {
  INIT_V8();
  SETUP();

  // Load/store pair with pre-index write-back. After each pre-indexed access
  // the updated base is copied into a spare register (x19-x22) so that the
  // write-back value can be verified as well as the transferred data.
  uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                     0xffeeddccbbaa9988UL};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x16, src_base);
  __ Mov(x17, dst_base);
  __ Mov(x18, dst_base + 16);
  // W-register pairs, offsets +4 then -4 so each base returns to its start.
  __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
  __ Mov(x19, x16);  // Snapshot the written-back base.
  __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
  __ Mov(x20, x17);  // Snapshot the written-back base.
  __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
  // X-register pairs, offsets +8 then -8.
  __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
  __ Mov(x21, x16);  // Snapshot the written-back base.
  __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
  __ Mov(x22, x18);  // Snapshot the written-back base.
  __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0x00112233, x0);
  CHECK_EQUAL_64(0xccddeeff, x1);
  CHECK_EQUAL_64(0x44556677, x2);
  CHECK_EQUAL_64(0x00112233, x3);
  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  CHECK_EQUAL_64(0x0011223344556677UL, x6);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
  // Final base values, plus the intermediate write-back snapshots.
  CHECK_EQUAL_64(src_base, x16);
  CHECK_EQUAL_64(dst_base, x17);
  CHECK_EQUAL_64(dst_base + 16, x18);
  CHECK_EQUAL_64(src_base + 4, x19);
  CHECK_EQUAL_64(dst_base + 4, x20);
  CHECK_EQUAL_64(src_base + 8, x21);
  CHECK_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}
3080 
3081 
TEST(ldp_stp_preindex_wide) {
  INIT_V8();
  SETUP();

  // Same accesses as ldp_stp_preindex, but with each base displaced by
  // base_offset so the pre-index immediate is out of the encodable range.
  // The bases are re-materialized before each access because the large
  // pre-index is resolved with extra arithmetic on the base register.
  uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
                     0xffeeddccbbaa9988};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
  // Move base too far from the array to force multiple instructions
  // to be emitted.
  const int64_t base_offset = 1024;

  START();
  __ Mov(x24, src_base - base_offset);
  __ Mov(x25, dst_base + base_offset);
  __ Mov(x18, dst_base + base_offset + 16);
  __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
  __ Mov(x19, x24);  // Snapshot the written-back base.
  __ Mov(x24, src_base - base_offset + 4);  // Reset for the next access.
  __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PreIndex));
  __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PreIndex));
  __ Mov(x20, x25);  // Snapshot the written-back base.
  __ Mov(x25, dst_base + base_offset + 4);  // Reset for the next access.
  __ Mov(x24, src_base - base_offset);
  __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PreIndex));
  __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PreIndex));
  __ Mov(x21, x24);  // Snapshot the written-back base.
  __ Mov(x24, src_base - base_offset + 8);  // Reset for the next access.
  __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
  __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
  __ Mov(x22, x18);  // Snapshot the written-back base.
  __ Mov(x18, dst_base + base_offset + 16 + 8);  // Reset for the next access.
  __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0x00112233, x0);
  CHECK_EQUAL_64(0xccddeeff, x1);
  CHECK_EQUAL_64(0x44556677, x2);
  CHECK_EQUAL_64(0x00112233, x3);
  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
  CHECK_EQUAL_64(0x0011223344556677UL, x6);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
  // Final base values, plus the intermediate write-back snapshots.
  CHECK_EQUAL_64(src_base, x24);
  CHECK_EQUAL_64(dst_base, x25);
  CHECK_EQUAL_64(dst_base + 16, x18);
  CHECK_EQUAL_64(src_base + 4, x19);
  CHECK_EQUAL_64(dst_base + 4, x20);
  CHECK_EQUAL_64(src_base + 8, x21);
  CHECK_EQUAL_64(dst_base + 24, x22);

  TEARDOWN();
}
3143 
3144 
TEST(ldp_stp_postindex) {
  INIT_V8();
  SETUP();

  // Load/store pair with post-index write-back. After each access the
  // written-back base is copied into a spare register (x19-x22) so both the
  // data and the base update can be verified.
  uint64_t src_data[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
                          0xffeeddccbbaa9988UL, 0x7766554433221100UL};
  uint64_t dst_data[5] = {0, 0, 0, 0, 0};
  uintptr_t src_addr = reinterpret_cast<uintptr_t>(src_data);
  uintptr_t dst_addr = reinterpret_cast<uintptr_t>(dst_data);

  START();
  __ Mov(x16, src_addr);
  __ Mov(x17, dst_addr);
  __ Mov(x18, dst_addr + 16);
  // W-register pairs, post-offsets +4 then -4 so the base returns home.
  __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
  __ Mov(x19, x16);  // Snapshot the written-back base.
  __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
  __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
  __ Mov(x20, x17);  // Snapshot the written-back base.
  __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
  // X-register pairs, post-offsets +8 then -8.
  __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
  __ Mov(x21, x16);  // Snapshot the written-back base.
  __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
  __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
  __ Mov(x22, x18);  // Snapshot the written-back base.
  __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0x44556677, x0);
  CHECK_EQUAL_64(0x00112233, x1);
  CHECK_EQUAL_64(0x00112233, x2);
  CHECK_EQUAL_64(0xccddeeff, x3);
  CHECK_EQUAL_64(0x4455667700112233UL, dst_data[0]);
  CHECK_EQUAL_64(0x0000000000112233UL, dst_data[1]);
  CHECK_EQUAL_64(0x0011223344556677UL, x4);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst_data[2]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst_data[3]);
  CHECK_EQUAL_64(0x0011223344556677UL, dst_data[4]);
  // Final base values, plus the intermediate write-back snapshots.
  CHECK_EQUAL_64(src_addr, x16);
  CHECK_EQUAL_64(dst_addr, x17);
  CHECK_EQUAL_64(dst_addr + 16, x18);
  CHECK_EQUAL_64(src_addr + 4, x19);
  CHECK_EQUAL_64(dst_addr + 4, x20);
  CHECK_EQUAL_64(src_addr + 8, x21);
  CHECK_EQUAL_64(dst_addr + 24, x22);

  TEARDOWN();
}
3198 
3199 
// As ldp_stp_postindex, but the post-index offsets include base_offset,
// which is too large for a single LDP/STP post-index immediate, so the
// macro assembler must emit extra instructions to apply it.
TEST(ldp_stp_postindex_wide) {
  INIT_V8();
  SETUP();

  uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff, 0xffeeddccbbaa9988,
                     0x7766554433221100};
  uint64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
  // Move base too far from the array to force multiple instructions
  // to be emitted.
  const int64_t base_offset = 1024;

  START();
  __ Mov(x24, src_base);
  __ Mov(x25, dst_base);
  __ Mov(x18, dst_base + 16);
  __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
  __ Mov(x19, x24);  // Snapshot written-back base: src_base + base_offset + 4.
  // Re-centre the base before each wide-offset access.
  __ Sub(x24, x24, base_offset);
  __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PostIndex));
  __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PostIndex));
  __ Mov(x20, x25);  // Snapshot written-back base: dst_base + 4 - base_offset... captured before the next Add below.
  __ Sub(x24, x24, base_offset);
  __ Add(x25, x25, base_offset);
  __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PostIndex));
  __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PostIndex));
  __ Mov(x21, x24);  // Snapshot written-back base.
  __ Sub(x24, x24, base_offset);
  __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
  __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
  __ Mov(x22, x18);  // Snapshot written-back base.
  __ Add(x18, x18, base_offset);
  __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
  END();

  RUN();

  CHECK_EQUAL_64(0x44556677, x0);
  CHECK_EQUAL_64(0x00112233, x1);
  CHECK_EQUAL_64(0x00112233, x2);
  CHECK_EQUAL_64(0xccddeeff, x3);
  CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
  CHECK_EQUAL_64(0x0011223344556677UL, x4);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
  CHECK_EQUAL_64(src_base + base_offset, x24);
  CHECK_EQUAL_64(dst_base - base_offset, x25);
  CHECK_EQUAL_64(dst_base - base_offset + 16, x18);
  CHECK_EQUAL_64(src_base + base_offset + 4, x19);
  CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
  CHECK_EQUAL_64(src_base + base_offset + 8, x21);
  CHECK_EQUAL_64(dst_base - base_offset + 24, x22);

  TEARDOWN();
}
3261 
3262 
// Test LDPSW: load a pair of 32-bit words and sign-extend each to 64 bits.
// 0x80000000 (negative as int32) must extend to 0xffffffff80000000;
// 0x7fffffff (positive) must zero-fill the high half.
TEST(ldp_sign_extend) {
  INIT_V8();
  SETUP();

  uint32_t src[2] = {0x80000000, 0x7fffffff};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);

  START();
  __ Mov(x24, src_base);
  __ Ldpsw(x0, x1, MemOperand(x24));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffffff80000000UL, x0);
  CHECK_EQUAL_64(0x000000007fffffffUL, x1);

  TEARDOWN();
}
3282 
3283 
// Test loads/stores with immediate offsets that are not multiples of the
// access size (1, 2, 3, 9, -9, -5, ...), which cannot use the scaled
// LDR/STR encodings and so exercise the unscaled LDUR/STUR forms.
TEST(ldur_stur) {
  INIT_V8();
  SETUP();

  int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
  int64_t dst[5] = {0, 0, 0, 0, 0};
  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);

  START();
  __ Mov(x17, src_base);
  __ Mov(x18, dst_base);
  __ Mov(x19, src_base + 16);
  __ Mov(x20, dst_base + 32);
  __ Mov(x21, dst_base + 40);
  __ Ldr(w0, MemOperand(x17, 1));    // Unaligned word load.
  __ Str(w0, MemOperand(x18, 2));
  __ Ldr(x1, MemOperand(x17, 3));    // Unaligned doubleword load.
  __ Str(x1, MemOperand(x18, 9));
  __ Ldr(w2, MemOperand(x19, -9));   // Negative unscaled offsets.
  __ Str(w2, MemOperand(x20, -5));
  __ Ldrb(w3, MemOperand(x19, -1));
  __ Strb(w3, MemOperand(x21, -1));
  END();

  RUN();

  CHECK_EQUAL_64(0x6789abcd, x0);
  CHECK_EQUAL_64(0x6789abcd0000L, dst[0]);
  CHECK_EQUAL_64(0xabcdef0123456789L, x1);
  CHECK_EQUAL_64(0xcdef012345678900L, dst[1]);
  CHECK_EQUAL_64(0x000000ab, dst[2]);
  CHECK_EQUAL_64(0xabcdef01, x2);
  CHECK_EQUAL_64(0x00abcdef01000000L, dst[3]);
  CHECK_EQUAL_64(0x00000001, x3);
  CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
  CHECK_EQUAL_64(src_base, x17);
  CHECK_EQUAL_64(dst_base, x18);
  CHECK_EQUAL_64(src_base + 16, x19);
  CHECK_EQUAL_64(dst_base + 32, x20);

  TEARDOWN();
}
3327 
3328 
#if 0  // TODO(all) enable.
// TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
// NOTE: This whole region is compiled out. It contains literal-pool tests
// (Ldr from a pc-relative literal) that predate the RelocInfo-based literal
// handling and need adapting before they can be re-enabled.
TEST(ldr_literal) {
  INIT_V8();
  SETUP();

  START();
  __ Ldr(x2, 0x1234567890abcdefUL);
  __ Ldr(w3, 0xfedcba09);
  __ Ldr(d13, 1.234);
  __ Ldr(s25, 2.5);
  END();

  RUN();

  CHECK_EQUAL_64(0x1234567890abcdefUL, x2);
  CHECK_EQUAL_64(0xfedcba09, x3);
  CHECK_EQUAL_FP64(1.234, d13);
  CHECK_EQUAL_FP32(2.5, s25);

  TEARDOWN();
}


// Emit four literal loads, pad with NOPs until the pool is 'range_' bytes
// away, then check whether CheckLiteralPool dumps the pool as expected.
static void LdrLiteralRangeHelper(ptrdiff_t range_,
                                  LiteralPoolEmitOption option,
                                  bool expect_dump) {
  DCHECK(range_ > 0);
  SETUP_SIZE(range_ + 1024);

  Label label_1, label_2;

  size_t range = static_cast<size_t>(range_);
  size_t code_size = 0;
  size_t pool_guard_size;

  if (option == NoJumpRequired) {
    // Space for an explicit branch.
    pool_guard_size = sizeof(Instr);
  } else {
    pool_guard_size = 0;
  }

  START();
  // Force a pool dump so the pool starts off empty.
  __ EmitLiteralPool(JumpRequired);
  DCHECK_LITERAL_POOL_SIZE(0);

  __ Ldr(x0, 0x1234567890abcdefUL);
  __ Ldr(w1, 0xfedcba09);
  __ Ldr(d0, 1.234);
  __ Ldr(s1, 2.5);
  DCHECK_LITERAL_POOL_SIZE(4);

  code_size += 4 * sizeof(Instr);

  // Check that the requested range (allowing space for a branch over the pool)
  // can be handled by this test.
  DCHECK((code_size + pool_guard_size) <= range);

  // Emit NOPs up to 'range', leaving space for the pool guard.
  while ((code_size + pool_guard_size) < range) {
    __ Nop();
    code_size += sizeof(Instr);
  }

  // Emit the guard sequence before the literal pool.
  if (option == NoJumpRequired) {
    __ B(&label_1);
    code_size += sizeof(Instr);
  }

  DCHECK(code_size == range);
  DCHECK_LITERAL_POOL_SIZE(4);

  // Possibly generate a literal pool.
  __ CheckLiteralPool(option);
  __ Bind(&label_1);
  if (expect_dump) {
    DCHECK_LITERAL_POOL_SIZE(0);
  } else {
    DCHECK_LITERAL_POOL_SIZE(4);
  }

  // Force a pool flush to check that a second pool functions correctly.
  __ EmitLiteralPool(JumpRequired);
  DCHECK_LITERAL_POOL_SIZE(0);

  // These loads should be after the pool (and will require a new one).
  __ Ldr(x4, 0x34567890abcdef12UL);
  __ Ldr(w5, 0xdcba09fe);
  __ Ldr(d4, 123.4);
  __ Ldr(s5, 250.0);
  DCHECK_LITERAL_POOL_SIZE(4);
  END();

  RUN();

  // Check that the literals loaded correctly.
  CHECK_EQUAL_64(0x1234567890abcdefUL, x0);
  CHECK_EQUAL_64(0xfedcba09, x1);
  CHECK_EQUAL_FP64(1.234, d0);
  CHECK_EQUAL_FP32(2.5, s1);
  CHECK_EQUAL_64(0x34567890abcdef12UL, x4);
  CHECK_EQUAL_64(0xdcba09fe, x5);
  CHECK_EQUAL_FP64(123.4, d4);
  CHECK_EQUAL_FP32(250.0, s5);

  TEARDOWN();
}


TEST(ldr_literal_range_1) {
  INIT_V8();
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
                        NoJumpRequired,
                        true);
}


TEST(ldr_literal_range_2) {
  INIT_V8();
  LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
                        NoJumpRequired,
                        false);
}


TEST(ldr_literal_range_3) {
  INIT_V8();
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
                        JumpRequired,
                        true);
}


TEST(ldr_literal_range_4) {
  INIT_V8();
  LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_5) {
  INIT_V8();
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
                        JumpRequired,
                        false);
}


TEST(ldr_literal_range_6) {
  INIT_V8();
  LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
                        JumpRequired,
                        false);
}
#endif
3488 
// Test Add/Sub with encodable immediates: plain 12-bit values and 12-bit
// values shifted left by 12 (e.g. 0x122000, 0xabc << 12), in both 64-bit
// and 32-bit forms, including wrap-around at the integer boundaries.
TEST(add_sub_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1111);
  __ Mov(x2, 0xffffffffffffffffL);
  __ Mov(x3, 0x8000000000000000L);

  __ Add(x10, x0, Operand(0x123));
  __ Add(x11, x1, Operand(0x122000));
  __ Add(x12, x0, Operand(0xabc << 12));
  __ Add(x13, x2, Operand(1));  // Wraps: UINT64_MAX + 1 == 0.

  __ Add(w14, w0, Operand(0x123));
  __ Add(w15, w1, Operand(0x122000));
  __ Add(w16, w0, Operand(0xabc << 12));
  __ Add(w17, w2, Operand(1));

  __ Sub(x20, x0, Operand(0x1));
  __ Sub(x21, x1, Operand(0x111));
  __ Sub(x22, x1, Operand(0x1 << 12));
  __ Sub(x23, x3, Operand(1));  // INT64_MIN - 1 wraps to INT64_MAX.

  __ Sub(w24, w0, Operand(0x1));
  __ Sub(w25, w1, Operand(0x111));
  __ Sub(w26, w1, Operand(0x1 << 12));
  __ Sub(w27, w3, Operand(1));
  END();

  RUN();

  CHECK_EQUAL_64(0x123, x10);
  CHECK_EQUAL_64(0x123111, x11);
  CHECK_EQUAL_64(0xabc000, x12);
  CHECK_EQUAL_64(0x0, x13);

  CHECK_EQUAL_32(0x123, w14);
  CHECK_EQUAL_32(0x123111, w15);
  CHECK_EQUAL_32(0xabc000, w16);
  CHECK_EQUAL_32(0x0, w17);

  CHECK_EQUAL_64(0xffffffffffffffffL, x20);
  CHECK_EQUAL_64(0x1000, x21);
  CHECK_EQUAL_64(0x111, x22);
  CHECK_EQUAL_64(0x7fffffffffffffffL, x23);

  CHECK_EQUAL_32(0xffffffff, w24);
  CHECK_EQUAL_32(0x1000, w25);
  CHECK_EQUAL_32(0x111, w26);
  CHECK_EQUAL_32(0xffffffff, w27);

  TEARDOWN();
}
3544 
3545 
// Test Add/Sub with immediates that cannot be encoded in the add/sub
// immediate field (e.g. full 64-bit constants, 0xffffffff), so the macro
// assembler must materialize them in a scratch register first.
TEST(add_sub_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0x0);
  __ Mov(x1, 0x1);

  __ Add(x10, x0, Operand(0x1234567890abcdefUL));
  __ Add(x11, x1, Operand(0xffffffff));

  __ Add(w12, w0, Operand(0x12345678));
  __ Add(w13, w1, Operand(0xffffffff));  // 32-bit wrap: 1 + 0xffffffff == 0.

  __ Add(w18, w0, Operand(kWMinInt));
  __ Sub(w19, w0, Operand(kWMinInt));    // 0 - INT32_MIN wraps back to INT32_MIN.

  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
  __ Sub(w21, w0, Operand(0x12345678));
  END();

  RUN();

  CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
  CHECK_EQUAL_64(0x100000000UL, x11);

  CHECK_EQUAL_32(0x12345678, w12);
  CHECK_EQUAL_64(0x0, x13);

  CHECK_EQUAL_32(kWMinInt, w18);
  CHECK_EQUAL_32(kWMinInt, w19);

  CHECK_EQUAL_64(-0x1234567890abcdefUL, x20);
  CHECK_EQUAL_32(-0x12345678, w21);

  TEARDOWN();
}
3583 
3584 
// Test Add/Sub with shifted-register operands: LSL, LSR, ASR and ROR
// applied to the second operand, in both 64-bit and 32-bit forms.
TEST(add_sub_shifted) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);
  __ Mov(x3, 0xffffffffffffffffL);

  __ Add(x10, x1, Operand(x2));
  __ Add(x11, x0, Operand(x1, LSL, 8));
  __ Add(x12, x0, Operand(x1, LSR, 8));
  __ Add(x13, x0, Operand(x1, ASR, 8));  // x1 is positive: same as LSR here.
  __ Add(x14, x0, Operand(x2, ASR, 8));  // x2 is negative: sign bits shift in.
  __ Add(w15, w0, Operand(w1, ASR, 8));
  __ Add(w18, w3, Operand(w1, ROR, 8));
  __ Add(x19, x3, Operand(x1, ROR, 8));

  __ Sub(x20, x3, Operand(x2));
  __ Sub(x21, x3, Operand(x1, LSL, 8));
  __ Sub(x22, x3, Operand(x1, LSR, 8));
  __ Sub(x23, x3, Operand(x1, ASR, 8));
  __ Sub(x24, x3, Operand(x2, ASR, 8));
  __ Sub(w25, w3, Operand(w1, ASR, 8));
  __ Sub(w26, w3, Operand(w1, ROR, 8));
  __ Sub(x27, x3, Operand(x1, ROR, 8));
  END();

  RUN();

  CHECK_EQUAL_64(0xffffffffffffffffL, x10);
  CHECK_EQUAL_64(0x23456789abcdef00L, x11);
  CHECK_EQUAL_64(0x000123456789abcdL, x12);
  CHECK_EQUAL_64(0x000123456789abcdL, x13);
  CHECK_EQUAL_64(0xfffedcba98765432L, x14);
  CHECK_EQUAL_64(0xff89abcd, x15);
  CHECK_EQUAL_64(0xef89abcc, x18);
  CHECK_EQUAL_64(0xef0123456789abccL, x19);

  CHECK_EQUAL_64(0x0123456789abcdefL, x20);
  CHECK_EQUAL_64(0xdcba9876543210ffL, x21);
  CHECK_EQUAL_64(0xfffedcba98765432L, x22);
  CHECK_EQUAL_64(0xfffedcba98765432L, x23);
  CHECK_EQUAL_64(0x000123456789abcdL, x24);
  CHECK_EQUAL_64(0x00765432, x25);
  CHECK_EQUAL_64(0x10765432, x26);
  CHECK_EQUAL_64(0x10fedcba98765432L, x27);

  TEARDOWN();
}
3636 
3637 
// Test Add/Sub with extended-register operands: UXTB/UXTH/UXTW/UXTX and
// SXTB/SXTH/SXTW extensions with left shifts of 0-4, in both 64-bit and
// 32-bit forms. x1 has positive low bytes, x2 negative ones, so both
// zero- and sign-extension paths are covered.
TEST(add_sub_extended) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x0123456789abcdefL);
  __ Mov(x2, 0xfedcba9876543210L);
  __ Mov(w3, 0x80);

  __ Add(x10, x0, Operand(x1, UXTB, 0));
  __ Add(x11, x0, Operand(x1, UXTB, 1));
  __ Add(x12, x0, Operand(x1, UXTH, 2));
  __ Add(x13, x0, Operand(x1, UXTW, 4));

  __ Add(x14, x0, Operand(x1, SXTB, 0));
  __ Add(x15, x0, Operand(x1, SXTB, 1));
  __ Add(x16, x0, Operand(x1, SXTH, 2));
  __ Add(x17, x0, Operand(x1, SXTW, 3));
  __ Add(x18, x0, Operand(x2, SXTB, 0));
  __ Add(x19, x0, Operand(x2, SXTB, 1));
  __ Add(x20, x0, Operand(x2, SXTH, 2));
  __ Add(x21, x0, Operand(x2, SXTW, 3));

  __ Add(x22, x1, Operand(x2, SXTB, 1));
  __ Sub(x23, x1, Operand(x2, SXTB, 1));

  __ Add(w24, w1, Operand(w2, UXTB, 2));
  __ Add(w25, w0, Operand(w1, SXTB, 0));
  __ Add(w26, w0, Operand(w1, SXTB, 1));
  __ Add(w27, w2, Operand(w1, SXTW, 3));

  // SXTW into a W destination truncates; into an X destination it
  // sign-extends through the high half.
  __ Add(w28, w0, Operand(w1, SXTW, 3));
  __ Add(x29, x0, Operand(w1, SXTW, 3));

  __ Sub(x30, x0, Operand(w3, SXTB, 1));  // 0 - (-0x80 << 1) == 256.
  END();

  RUN();

  CHECK_EQUAL_64(0xefL, x10);
  CHECK_EQUAL_64(0x1deL, x11);
  CHECK_EQUAL_64(0x337bcL, x12);
  CHECK_EQUAL_64(0x89abcdef0L, x13);

  CHECK_EQUAL_64(0xffffffffffffffefL, x14);
  CHECK_EQUAL_64(0xffffffffffffffdeL, x15);
  CHECK_EQUAL_64(0xffffffffffff37bcL, x16);
  CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x17);
  CHECK_EQUAL_64(0x10L, x18);
  CHECK_EQUAL_64(0x20L, x19);
  CHECK_EQUAL_64(0xc840L, x20);
  CHECK_EQUAL_64(0x3b2a19080L, x21);

  CHECK_EQUAL_64(0x0123456789abce0fL, x22);
  CHECK_EQUAL_64(0x0123456789abcdcfL, x23);

  CHECK_EQUAL_32(0x89abce2f, w24);
  CHECK_EQUAL_32(0xffffffef, w25);
  CHECK_EQUAL_32(0xffffffde, w26);
  CHECK_EQUAL_32(0xc3b2a188, w27);

  CHECK_EQUAL_32(0x4d5e6f78, w28);
  CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x29);

  CHECK_EQUAL_64(256, x30);

  TEARDOWN();
}
3707 
3708 
// Test Add/Sub with negative immediates. A negative immediate cannot be
// encoded directly, so the macro assembler is expected to flip the
// operation (Add with -n becomes Sub with n, and vice versa).
TEST(add_sub_negative) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 4687);
  __ Mov(x2, 0x1122334455667788);
  __ Mov(w3, 0x11223344);
  __ Mov(w4, 400000);

  __ Add(x10, x0, -42);
  __ Add(x11, x1, -687);
  __ Add(x12, x2, -0x88);

  __ Sub(x13, x0, -600);
  __ Sub(x14, x1, -313);
  __ Sub(x15, x2, -0x555);

  __ Add(w19, w3, -0x344);
  __ Add(w20, w4, -2000);

  __ Sub(w21, w3, -0xbc);
  __ Sub(w22, w4, -2000);
  END();

  RUN();

  CHECK_EQUAL_64(-42, x10);
  CHECK_EQUAL_64(4000, x11);
  CHECK_EQUAL_64(0x1122334455667700, x12);

  CHECK_EQUAL_64(600, x13);
  CHECK_EQUAL_64(5000, x14);
  CHECK_EQUAL_64(0x1122334455667cdd, x15);

  CHECK_EQUAL_32(0x11223000, w19);
  CHECK_EQUAL_32(398000, w20);

  CHECK_EQUAL_32(0x11223400, w21);
  CHECK_EQUAL_32(402000, w22);

  TEARDOWN();
}
3753 
3754 
// Test that adding/subtracting zero on X registers generates no code
// (the operation is a no-op), while the W-register forms still emit an
// instruction — checked via SizeOfCodeGeneratedSince.
TEST(add_sub_zero) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0);
  __ Mov(x2, 0);

  Label blob1;
  __ Bind(&blob1);
  __ Add(x0, x0, 0);
  __ Sub(x1, x1, 0);
  __ Sub(x2, x2, xzr);
  // X-register +/- zero must be elided entirely.
  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&blob1));

  Label blob2;
  __ Bind(&blob2);
  __ Add(w3, w3, 0);
  // W-register form still emits code.
  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob2));

  Label blob3;
  __ Bind(&blob3);
  __ Sub(w3, w3, wzr);
  CHECK_NE(0, __ SizeOfCodeGeneratedSince(&blob3));

  END();

  RUN();

  CHECK_EQUAL_64(0, x0);
  CHECK_EQUAL_64(0, x1);
  CHECK_EQUAL_64(0, x2);

  TEARDOWN();
}
3791 
3792 
// Test that Claim/Drop (and their SMI variants) of zero bytes — whether
// via a zero count, xzr, or a zero unit size — emit no code at all,
// checked via SizeOfCodeGeneratedSince.
TEST(claim_drop_zero) {
  INIT_V8();
  SETUP();

  START();

  Label start;
  __ Bind(&start);
  __ Claim(0);
  __ Drop(0);
  __ Claim(xzr, 8);
  __ Drop(xzr, 8);
  __ Claim(xzr, 0);
  __ Drop(xzr, 0);
  __ Claim(x7, 0);
  __ Drop(x7, 0);
  __ ClaimBySMI(xzr, 8);
  __ DropBySMI(xzr, 8);
  __ ClaimBySMI(xzr, 0);
  __ DropBySMI(xzr, 0);
  CHECK_EQ(0, __ SizeOfCodeGeneratedSince(&start));

  END();

  RUN();

  TEARDOWN();
}
3821 
3822 
// Test Neg with all three operand forms: immediate, shifted register
// (LSL/LSR/ASR) and extended register (UXTB/SXTB/UXTH/SXTH/UXTW/SXTW),
// in both 64-bit and 32-bit variants.
TEST(neg) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0xf123456789abcdefL);

  // Immediate.
  __ Neg(x1, 0x123);
  __ Neg(w2, 0x123);

  // Shifted.
  __ Neg(x3, Operand(x0, LSL, 1));
  __ Neg(w4, Operand(w0, LSL, 2));
  __ Neg(x5, Operand(x0, LSR, 3));
  __ Neg(w6, Operand(w0, LSR, 4));
  __ Neg(x7, Operand(x0, ASR, 5));
  __ Neg(w8, Operand(w0, ASR, 6));

  // Extended.
  __ Neg(w9, Operand(w0, UXTB));
  __ Neg(x10, Operand(x0, SXTB, 1));
  __ Neg(w11, Operand(w0, UXTH, 2));
  __ Neg(x12, Operand(x0, SXTH, 3));
  __ Neg(w13, Operand(w0, UXTW, 4));
  __ Neg(x14, Operand(x0, SXTW, 4));
  END();

  RUN();

  CHECK_EQUAL_64(0xfffffffffffffeddUL, x1);
  CHECK_EQUAL_64(0xfffffedd, x2);
  CHECK_EQUAL_64(0x1db97530eca86422UL, x3);
  CHECK_EQUAL_64(0xd950c844, x4);
  CHECK_EQUAL_64(0xe1db97530eca8643UL, x5);
  CHECK_EQUAL_64(0xf7654322, x6);
  CHECK_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
  CHECK_EQUAL_64(0x01d950c9, x8);
  CHECK_EQUAL_64(0xffffff11, x9);
  CHECK_EQUAL_64(0x0000000000000022UL, x10);
  CHECK_EQUAL_64(0xfffcc844, x11);
  CHECK_EQUAL_64(0x0000000000019088UL, x12);
  CHECK_EQUAL_64(0x65432110, x13);
  CHECK_EQUAL_64(0x0000000765432110UL, x14);

  TEARDOWN();
}
3870 
3871 
TEST(adc_sbc_shift)3872 TEST(adc_sbc_shift) {
3873   INIT_V8();
3874   SETUP();
3875 
3876   START();
3877   __ Mov(x0, 0);
3878   __ Mov(x1, 1);
3879   __ Mov(x2, 0x0123456789abcdefL);
3880   __ Mov(x3, 0xfedcba9876543210L);
3881   __ Mov(x4, 0xffffffffffffffffL);
3882 
3883   // Clear the C flag.
3884   __ Adds(x0, x0, Operand(0));
3885 
3886   __ Adc(x5, x2, Operand(x3));
3887   __ Adc(x6, x0, Operand(x1, LSL, 60));
3888   __ Sbc(x7, x4, Operand(x3, LSR, 4));
3889   __ Adc(x8, x2, Operand(x3, ASR, 4));
3890   __ Adc(x9, x2, Operand(x3, ROR, 8));
3891 
3892   __ Adc(w10, w2, Operand(w3));
3893   __ Adc(w11, w0, Operand(w1, LSL, 30));
3894   __ Sbc(w12, w4, Operand(w3, LSR, 4));
3895   __ Adc(w13, w2, Operand(w3, ASR, 4));
3896   __ Adc(w14, w2, Operand(w3, ROR, 8));
3897 
3898   // Set the C flag.
3899   __ Cmp(w0, Operand(w0));
3900 
3901   __ Adc(x18, x2, Operand(x3));
3902   __ Adc(x19, x0, Operand(x1, LSL, 60));
3903   __ Sbc(x20, x4, Operand(x3, LSR, 4));
3904   __ Adc(x21, x2, Operand(x3, ASR, 4));
3905   __ Adc(x22, x2, Operand(x3, ROR, 8));
3906 
3907   __ Adc(w23, w2, Operand(w3));
3908   __ Adc(w24, w0, Operand(w1, LSL, 30));
3909   __ Sbc(w25, w4, Operand(w3, LSR, 4));
3910   __ Adc(w26, w2, Operand(w3, ASR, 4));
3911   __ Adc(w27, w2, Operand(w3, ROR, 8));
3912   END();
3913 
3914   RUN();
3915 
3916   CHECK_EQUAL_64(0xffffffffffffffffL, x5);
3917   CHECK_EQUAL_64(1L << 60, x6);
3918   CHECK_EQUAL_64(0xf0123456789abcddL, x7);
3919   CHECK_EQUAL_64(0x0111111111111110L, x8);
3920   CHECK_EQUAL_64(0x1222222222222221L, x9);
3921 
3922   CHECK_EQUAL_32(0xffffffff, w10);
3923   CHECK_EQUAL_32(1 << 30, w11);
3924   CHECK_EQUAL_32(0xf89abcdd, w12);
3925   CHECK_EQUAL_32(0x91111110, w13);
3926   CHECK_EQUAL_32(0x9a222221, w14);
3927 
3928   CHECK_EQUAL_64(0xffffffffffffffffL + 1, x18);
3929   CHECK_EQUAL_64((1L << 60) + 1, x19);
3930   CHECK_EQUAL_64(0xf0123456789abcddL + 1, x20);
3931   CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
3932   CHECK_EQUAL_64(0x1222222222222221L + 1, x22);
3933 
3934   CHECK_EQUAL_32(0xffffffff + 1, w23);
3935   CHECK_EQUAL_32((1 << 30) + 1, w24);
3936   CHECK_EQUAL_32(0xf89abcdd + 1, w25);
3937   CHECK_EQUAL_32(0x91111110 + 1, w26);
3938   CHECK_EQUAL_32(0x9a222221 + 1, w27);
3939 
3940   // Check that adc correctly sets the condition flags.
3941   START();
3942   __ Mov(x0, 1);
3943   __ Mov(x1, 0xffffffffffffffffL);
3944   // Clear the C flag.
3945   __ Adds(x0, x0, Operand(0));
3946   __ Adcs(x10, x0, Operand(x1));
3947   END();
3948 
3949   RUN();
3950 
3951   CHECK_EQUAL_NZCV(ZCFlag);
3952   CHECK_EQUAL_64(0, x10);
3953 
3954   START();
3955   __ Mov(x0, 1);
3956   __ Mov(x1, 0x8000000000000000L);
3957   // Clear the C flag.
3958   __ Adds(x0, x0, Operand(0));
3959   __ Adcs(x10, x0, Operand(x1, ASR, 63));
3960   END();
3961 
3962   RUN();
3963 
3964   CHECK_EQUAL_NZCV(ZCFlag);
3965   CHECK_EQUAL_64(0, x10);
3966 
3967   START();
3968   __ Mov(x0, 0x10);
3969   __ Mov(x1, 0x07ffffffffffffffL);
3970   // Clear the C flag.
3971   __ Adds(x0, x0, Operand(0));
3972   __ Adcs(x10, x0, Operand(x1, LSL, 4));
3973   END();
3974 
3975   RUN();
3976 
3977   CHECK_EQUAL_NZCV(NVFlag);
3978   CHECK_EQUAL_64(0x8000000000000000L, x10);
3979 
3980   // Check that sbc correctly sets the condition flags.
3981   START();
3982   __ Mov(x0, 0);
3983   __ Mov(x1, 0xffffffffffffffffL);
3984   // Clear the C flag.
3985   __ Adds(x0, x0, Operand(0));
3986   __ Sbcs(x10, x0, Operand(x1));
3987   END();
3988 
3989   RUN();
3990 
3991   CHECK_EQUAL_NZCV(ZFlag);
3992   CHECK_EQUAL_64(0, x10);
3993 
3994   START();
3995   __ Mov(x0, 1);
3996   __ Mov(x1, 0xffffffffffffffffL);
3997   // Clear the C flag.
3998   __ Adds(x0, x0, Operand(0));
3999   __ Sbcs(x10, x0, Operand(x1, LSR, 1));
4000   END();
4001 
4002   RUN();
4003 
4004   CHECK_EQUAL_NZCV(NFlag);
4005   CHECK_EQUAL_64(0x8000000000000001L, x10);
4006 
4007   START();
4008   __ Mov(x0, 0);
4009   // Clear the C flag.
4010   __ Adds(x0, x0, Operand(0));
4011   __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
4012   END();
4013 
4014   RUN();
4015 
4016   CHECK_EQUAL_NZCV(ZFlag);
4017   CHECK_EQUAL_64(0, x10);
4018 
4019   START()
4020   __ Mov(w0, 0x7fffffff);
4021   // Clear the C flag.
4022   __ Adds(x0, x0, Operand(0));
4023   __ Ngcs(w10, w0);
4024   END();
4025 
4026   RUN();
4027 
4028   CHECK_EQUAL_NZCV(NFlag);
4029   CHECK_EQUAL_64(0x80000000, x10);
4030 
4031   START();
4032   // Clear the C flag.
4033   __ Adds(x0, x0, Operand(0));
4034   __ Ngcs(x10, 0x7fffffffffffffffL);
4035   END();
4036 
4037   RUN();
4038 
4039   CHECK_EQUAL_NZCV(NFlag);
4040   CHECK_EQUAL_64(0x8000000000000000L, x10);
4041 
4042   START()
4043   __ Mov(x0, 0);
4044   // Set the C flag.
4045   __ Cmp(x0, Operand(x0));
4046   __ Sbcs(x10, x0, Operand(1));
4047   END();
4048 
4049   RUN();
4050 
4051   CHECK_EQUAL_NZCV(NFlag);
4052   CHECK_EQUAL_64(0xffffffffffffffffL, x10);
4053 
4054   START()
4055   __ Mov(x0, 0);
4056   // Set the C flag.
4057   __ Cmp(x0, Operand(x0));
4058   __ Ngcs(x10, 0x7fffffffffffffffL);
4059   END();
4060 
4061   RUN();
4062 
4063   CHECK_EQUAL_NZCV(NFlag);
4064   CHECK_EQUAL_64(0x8000000000000001L, x10);
4065 
4066   TEARDOWN();
4067 }
4068 
4069 
// Test Adc/Sbc with extended-register operands (UXTB/SXTH/UXTW/UXTX),
// first with the C flag clear and then set (carry-set results are one
// larger), plus NZCV checks for carry and overflow cases.
TEST(adc_sbc_extend) {
  INIT_V8();
  SETUP();

  START();
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Mov(x0, 0);
  __ Mov(x1, 1);
  __ Mov(x2, 0x0123456789abcdefL);

  __ Adc(x10, x1, Operand(w2, UXTB, 1));
  __ Adc(x11, x1, Operand(x2, SXTH, 2));
  __ Sbc(x12, x1, Operand(w2, UXTW, 4));
  __ Adc(x13, x1, Operand(x2, UXTX, 4));

  __ Adc(w14, w1, Operand(w2, UXTB, 1));
  __ Adc(w15, w1, Operand(w2, SXTH, 2));
  __ Adc(w9, w1, Operand(w2, UXTW, 4));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x20, x1, Operand(w2, UXTB, 1));
  __ Adc(x21, x1, Operand(x2, SXTH, 2));
  __ Sbc(x22, x1, Operand(w2, UXTW, 4));
  __ Adc(x23, x1, Operand(x2, UXTX, 4));

  __ Adc(w24, w1, Operand(w2, UXTB, 1));
  __ Adc(w25, w1, Operand(w2, SXTH, 2));
  __ Adc(w26, w1, Operand(w2, UXTW, 4));
  END();

  RUN();

  CHECK_EQUAL_64(0x1df, x10);
  CHECK_EQUAL_64(0xffffffffffff37bdL, x11);
  CHECK_EQUAL_64(0xfffffff765432110L, x12);
  CHECK_EQUAL_64(0x123456789abcdef1L, x13);

  CHECK_EQUAL_32(0x1df, w14);
  CHECK_EQUAL_32(0xffff37bd, w15);
  CHECK_EQUAL_32(0x9abcdef1, w9);

  // With C set, every result is one more than its C-clear counterpart.
  CHECK_EQUAL_64(0x1df + 1, x20);
  CHECK_EQUAL_64(0xffffffffffff37bdL + 1, x21);
  CHECK_EQUAL_64(0xfffffff765432110L + 1, x22);
  CHECK_EQUAL_64(0x123456789abcdef1L + 1, x23);

  CHECK_EQUAL_32(0x1df + 1, w24);
  CHECK_EQUAL_32(0xffff37bd + 1, w25);
  CHECK_EQUAL_32(0x9abcdef1 + 1, w26);

  // Check that adc correctly sets the condition flags.
  START();
  __ Mov(x0, 0xff);
  __ Mov(x1, 0xffffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, SXTX, 1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(CFlag);

  START();
  __ Mov(x0, 0x7fffffffffffffffL);
  __ Mov(x1, 1);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(x1, UXTB, 2));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(x0, 0x7fffffffffffffffL);
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Adcs(x10, x0, Operand(1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NVFlag);

  TEARDOWN();
}
4162 
4163 
// Test Adc/Sbc/Ngc with immediates too wide to encode directly, so the
// macro assembler must materialize them first; run with the C flag both
// clear and set (carry-set results are one larger).
TEST(adc_sbc_wide_imm) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);

  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));

  __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
  __ Adc(w8, w0, Operand(0xffffffff));
  __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
  __ Sbc(w10, w0, Operand(0xffffffff));
  __ Ngc(x11, Operand(0xffffffff00000000UL));
  __ Ngc(w12, Operand(0xffff0000));

  // Set the C flag.
  __ Cmp(w0, Operand(w0));

  __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
  __ Adc(w19, w0, Operand(0xffffffff));
  __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
  __ Sbc(w21, w0, Operand(0xffffffff));
  __ Ngc(x22, Operand(0xffffffff00000000UL));
  __ Ngc(w23, Operand(0xffff0000));
  END();

  RUN();

  CHECK_EQUAL_64(0x1234567890abcdefUL, x7);
  CHECK_EQUAL_64(0xffffffff, x8);
  CHECK_EQUAL_64(0xedcba9876f543210UL, x9);
  CHECK_EQUAL_64(0, x10);
  CHECK_EQUAL_64(0xffffffff, x11);
  CHECK_EQUAL_64(0xffff, x12);

  // With C set, every result is one more than its C-clear counterpart.
  CHECK_EQUAL_64(0x1234567890abcdefUL + 1, x18);
  CHECK_EQUAL_64(0, x19);
  CHECK_EQUAL_64(0xedcba9876f543211UL, x20);
  CHECK_EQUAL_64(1, x21);
  CHECK_EQUAL_64(0x100000000UL, x22);
  CHECK_EQUAL_64(0x10000, x23);

  TEARDOWN();
}
4210 
4211 
// Test NZCV flag setting by Neg/Ngc/Ngcs and the flag-only comparisons
// Cmp/Cmn, in both 32- and 64-bit forms, covering zero, negative, carry
// and overflow outcomes.
TEST(flags) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x1111111111111111L);
  __ Neg(x10, Operand(x0));
  __ Neg(x11, Operand(x1));
  __ Neg(w12, Operand(w1));
  // Clear the C flag.
  __ Adds(x0, x0, Operand(0));
  __ Ngc(x13, Operand(x0));  // 0 - 0 - !C == -1 when C is clear.
  // Set the C flag.
  __ Cmp(x0, Operand(x0));
  __ Ngc(w14, Operand(w0));  // 0 - 0 - !C == 0 when C is set.
  END();

  RUN();

  CHECK_EQUAL_64(0, x10);
  CHECK_EQUAL_64(-0x1111111111111111L, x11);
  CHECK_EQUAL_32(-0x11111111, w12);
  CHECK_EQUAL_64(-1L, x13);
  CHECK_EQUAL_32(0, w14);

  // Cmp of equal values: Z and C set.
  START();
  __ Mov(x0, 0);
  __ Cmp(x0, Operand(x0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 0);
  __ Cmp(w0, Operand(w0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZCFlag);

  // Cmp where the result is negative: N set.
  START();
  __ Mov(x0, 0);
  __ Mov(x1, 0x1111111111111111L);
  __ Cmp(x0, Operand(x1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x11111111);
  __ Cmp(w0, Operand(w1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);

  // Cmp of positive value against zero: C set (no borrow).
  START();
  __ Mov(x1, 0x1111111111111111L);
  __ Cmp(x1, Operand(0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(CFlag);

  START();
  __ Mov(w1, 0x11111111);
  __ Cmp(w1, Operand(0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(CFlag);

  // Cmn that overflows past the most positive value: N and V set.
  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0x7fffffffffffffffL);
  __ Cmn(x1, Operand(x0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NVFlag);

  START();
  __ Mov(w0, 1);
  __ Mov(w1, 0x7fffffff);
  __ Cmn(w1, Operand(w0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NVFlag);

  // Cmn where -1 + 1 wraps to zero: Z and C set.
  START();
  __ Mov(x0, 1);
  __ Mov(x1, 0xffffffffffffffffL);
  __ Cmn(x1, Operand(x0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZCFlag);

  START();
  __ Mov(w0, 1);
  __ Mov(w1, 0xffffffff);
  __ Cmn(w1, Operand(w0));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZCFlag);

  // Ngcs with C clear: result is negative.
  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  // Clear the C flag.
  __ Adds(w0, w0, Operand(0));
  __ Ngcs(w0, Operand(w1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(NFlag);

  // Ngcs of zero with C set: result is zero.
  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0);
  // Set the C flag.
  __ Cmp(w0, Operand(w0));
  __ Ngcs(w0, Operand(w1));
  END();

  RUN();

  CHECK_EQUAL_NZCV(ZCFlag);

  TEARDOWN();
}
4360 
4361 
TEST(cmp_shift)4362 TEST(cmp_shift) {
  // Check Cmp with every shifted-register operand form (LSL, LSR, ASR, ROR)
  // in both 32-bit and 64-bit widths. Each input pair below is chosen so that
  // the shifted second operand equals the first operand, so every comparison
  // must produce Z and C set (equal operands, no borrow). The flags are read
  // back through Mrs(NZCV) into x0..x7 for checking after the run.
4363   INIT_V8();
4364   SETUP();
4365 
4366   START();
4367   __ Mov(x18, 0xf0000000);
4368   __ Mov(x19, 0xf000000010000000UL);
4369   __ Mov(x20, 0xf0000000f0000000UL);
4370   __ Mov(x21, 0x7800000078000000UL);
4371   __ Mov(x22, 0x3c0000003c000000UL);
4372   __ Mov(x23, 0x8000000780000000UL);
4373   __ Mov(x24, 0x0000000f00000000UL);
4374   __ Mov(x25, 0x00000003c0000000UL);
4375   __ Mov(x26, 0x8000000780000000UL);
4376   __ Mov(x27, 0xc0000003);
4377 
  // w20 == w21 << 1 (0xf0000000 == 0x78000000 << 1).
4378   __ Cmp(w20, Operand(w21, LSL, 1));
4379   __ Mrs(x0, NZCV);
4380 
4381   __ Cmp(x20, Operand(x22, LSL, 2));
4382   __ Mrs(x1, NZCV);
4383 
4384   __ Cmp(w19, Operand(w23, LSR, 3));
4385   __ Mrs(x2, NZCV);
4386 
4387   __ Cmp(x18, Operand(x24, LSR, 4));
4388   __ Mrs(x3, NZCV);
4389 
4390   __ Cmp(w20, Operand(w25, ASR, 2));
4391   __ Mrs(x4, NZCV);
4392 
4393   __ Cmp(x20, Operand(x26, ASR, 3));
4394   __ Mrs(x5, NZCV);
4395 
4396   __ Cmp(w27, Operand(w22, ROR, 28));
4397   __ Mrs(x6, NZCV);
4398 
4399   __ Cmp(x20, Operand(x21, ROR, 31));
4400   __ Mrs(x7, NZCV);
4401   END();
4402 
4403   RUN();
4404 
  // All comparisons were of equal values, so each must yield Z and C.
4405   CHECK_EQUAL_32(ZCFlag, w0);
4406   CHECK_EQUAL_32(ZCFlag, w1);
4407   CHECK_EQUAL_32(ZCFlag, w2);
4408   CHECK_EQUAL_32(ZCFlag, w3);
4409   CHECK_EQUAL_32(ZCFlag, w4);
4410   CHECK_EQUAL_32(ZCFlag, w5);
4411   CHECK_EQUAL_32(ZCFlag, w6);
4412   CHECK_EQUAL_32(ZCFlag, w7);
4413 
4414   TEARDOWN();
4415 }
4416 
4417 
TEST(cmp_extend)4418 TEST(cmp_extend) {
  // Check Cmp with extended-register operands (SXTB, UXTB, SXTH, UXTH, SXTW,
  // with and without an extra left shift). The expected NZCV values below are
  // hand-computed for each operand pair: equal pairs give ZC, while pairs
  // where the extension makes the second operand smaller give NC.
4419   INIT_V8();
4420   SETUP();
4421 
4422   START();
4423   __ Mov(w20, 0x2);
4424   __ Mov(w21, 0x1);
4425   __ Mov(x22, 0xffffffffffffffffUL);
4426   __ Mov(x23, 0xff);
4427   __ Mov(x24, 0xfffffffffffffffeUL);
4428   __ Mov(x25, 0xffff);
4429   __ Mov(x26, 0xffffffff);
4430 
4431   __ Cmp(w20, Operand(w21, LSL, 1));
4432   __ Mrs(x0, NZCV);
4433 
  // SXTB of 0xff is -1, which equals x22.
4434   __ Cmp(x22, Operand(x23, SXTB, 0));
4435   __ Mrs(x1, NZCV);
4436 
4437   __ Cmp(x24, Operand(x23, SXTB, 1));
4438   __ Mrs(x2, NZCV);
4439 
4440   __ Cmp(x24, Operand(x23, UXTB, 1));
4441   __ Mrs(x3, NZCV);
4442 
4443   __ Cmp(w22, Operand(w25, UXTH));
4444   __ Mrs(x4, NZCV);
4445 
4446   __ Cmp(x22, Operand(x25, SXTH));
4447   __ Mrs(x5, NZCV);
4448 
4449   __ Cmp(x22, Operand(x26, UXTW));
4450   __ Mrs(x6, NZCV);
4451 
4452   __ Cmp(x24, Operand(x26, SXTW, 1));
4453   __ Mrs(x7, NZCV);
4454   END();
4455 
4456   RUN();
4457 
4458   CHECK_EQUAL_32(ZCFlag, w0);
4459   CHECK_EQUAL_32(ZCFlag, w1);
4460   CHECK_EQUAL_32(ZCFlag, w2);
4461   CHECK_EQUAL_32(NCFlag, w3);
4462   CHECK_EQUAL_32(NCFlag, w4);
4463   CHECK_EQUAL_32(ZCFlag, w5);
4464   CHECK_EQUAL_32(NCFlag, w6);
4465   CHECK_EQUAL_32(ZCFlag, w7);
4466 
4467   TEARDOWN();
4468 }
4469 
4470 
TEST(ccmp)4471 TEST(ccmp) {
  // Check conditional compare: Ccmp/Ccmn either perform the comparison (when
  // the condition holds) or load the supplied immediate flag set (when it
  // does not). The preceding Cmp(w16, w16) sets ZC so that eq/ne conditions
  // can be steered deterministically.
4472   INIT_V8();
4473   SETUP();
4474 
4475   START();
4476   __ Mov(w16, 0);
4477   __ Mov(w17, 1);
  // eq holds -> the compare 0 < 1 is performed -> N set.
4478   __ Cmp(w16, w16);
4479   __ Ccmp(w16, w17, NCFlag, eq);
4480   __ Mrs(x0, NZCV);
4481 
  // ne fails -> the immediate NCFlag is loaded instead.
4482   __ Cmp(w16, w16);
4483   __ Ccmp(w16, w17, NCFlag, ne);
4484   __ Mrs(x1, NZCV);
4485 
4486   __ Cmp(x16, x16);
4487   __ Ccmn(x16, 2, NZCVFlag, eq);
4488   __ Mrs(x2, NZCV);
4489 
4490   __ Cmp(x16, x16);
4491   __ Ccmn(x16, 2, NZCVFlag, ne);
4492   __ Mrs(x3, NZCV);
4493 
  // The raw (lower-case) instruction is used for the always-true conditions
  // al and nv — presumably the Ccmp macro disallows them; verify in the
  // macro-assembler. Both perform the compare, so x16 == x16 gives ZC.
4494   __ ccmp(x16, x16, NZCVFlag, al);
4495   __ Mrs(x4, NZCV);
4496 
4497   __ ccmp(x16, x16, NZCVFlag, nv);
4498   __ Mrs(x5, NZCV);
4499 
4500   END();
4501 
4502   RUN();
4503 
4504   CHECK_EQUAL_32(NFlag, w0);
4505   CHECK_EQUAL_32(NCFlag, w1);
4506   CHECK_EQUAL_32(NoFlag, w2);
4507   CHECK_EQUAL_32(NZCVFlag, w3);
4508   CHECK_EQUAL_32(ZCFlag, w4);
4509   CHECK_EQUAL_32(ZCFlag, w5);
4510 
4511   TEARDOWN();
4512 }
4513 
4514 
TEST(ccmp_wide_imm)4515 TEST(ccmp_wide_imm) {
  // Check Ccmp with immediates too wide for the instruction's 5-bit immediate
  // field — presumably the macro-assembler materializes them in a scratch
  // register first; verify against the Ccmp macro implementation. The leading
  // Cmp(w20, w20) sets Z so that the eq condition holds and the compare is
  // actually performed.
4516   INIT_V8();
4517   SETUP();
4518 
4519   START();
4520   __ Mov(w20, 0);
4521 
  // 0 - 0x12345678 is negative -> N.
4522   __ Cmp(w20, Operand(w20));
4523   __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
4524   __ Mrs(x0, NZCV);
4525 
  // 0 - (-1) = 1 in 64 bits -> no flags set.
4526   __ Cmp(w20, Operand(w20));
4527   __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
4528   __ Mrs(x1, NZCV);
4529   END();
4530 
4531   RUN();
4532 
4533   CHECK_EQUAL_32(NFlag, w0);
4534   CHECK_EQUAL_32(NoFlag, w1);
4535 
4536   TEARDOWN();
4537 }
4538 
4539 
TEST(ccmp_shift_extend)4540 TEST(ccmp_shift_extend) {
  // Check Ccmp with shifted-register and extended-register operands. Each
  // case primes the flags with Cmp(w20, w20) (sets ZC) so that eq holds and
  // the conditional compare is performed; the final case uses ne, which
  // fails, so the immediate NZCVFlag is loaded instead.
4541   INIT_V8();
4542   SETUP();
4543 
4544   START();
4545   __ Mov(w20, 0x2);
4546   __ Mov(w21, 0x1);
4547   __ Mov(x22, 0xffffffffffffffffUL);
4548   __ Mov(x23, 0xff);
4549   __ Mov(x24, 0xfffffffffffffffeUL);
4550 
4551   __ Cmp(w20, Operand(w20));
4552   __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
4553   __ Mrs(x0, NZCV);
4554 
4555   __ Cmp(w20, Operand(w20));
4556   __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
4557   __ Mrs(x1, NZCV);
4558 
4559   __ Cmp(w20, Operand(w20));
4560   __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
4561   __ Mrs(x2, NZCV);
4562 
4563   __ Cmp(w20, Operand(w20));
4564   __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
4565   __ Mrs(x3, NZCV);
4566 
  // Condition fails -> immediate flag set (NZCV) is loaded verbatim.
4567   __ Cmp(w20, Operand(w20));
4568   __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
4569   __ Mrs(x4, NZCV);
4570   END();
4571 
4572   RUN();
4573 
4574   CHECK_EQUAL_32(ZCFlag, w0);
4575   CHECK_EQUAL_32(ZCFlag, w1);
4576   CHECK_EQUAL_32(ZCFlag, w2);
4577   CHECK_EQUAL_32(NCFlag, w3);
4578   CHECK_EQUAL_32(NZCVFlag, w4);
4579 
4580   TEARDOWN();
4581 }
4582 
4583 
TEST(csel)4584 TEST(csel) {
  // Check conditional select (Csel) and its relatives and aliases: Csinc,
  // Csinv, Csneg, Cset, Csetm, Cinc, Cinv, Cneg, plus the CzeroX/CmovX
  // helpers. The first Cmp makes eq true (w16 == 0); the second Cmp makes
  // the "greater" conditions false (x16 == 0, compared against 1).
4585   INIT_V8();
4586   SETUP();
4587 
4588   START();
4589   __ Mov(x16, 0);
4590   __ Mov(x24, 0x0000000f0000000fUL);
4591   __ Mov(x25, 0x0000001f0000001fUL);
4592   __ Mov(x26, 0);
4593   __ Mov(x27, 0);
4594 
4595   __ Cmp(w16, 0);
4596   __ Csel(w0, w24, w25, eq);
4597   __ Csel(w1, w24, w25, ne);
4598   __ Csinc(w2, w24, w25, mi);
4599   __ Csinc(w3, w24, w25, pl);
4600 
  // Raw (lower-case) csel is used for the always-true conditions al/nv —
  // presumably the Csel macro disallows them; verify in the macro-assembler.
4601   __ csel(w13, w24, w25, al);
4602   __ csel(x14, x24, x25, nv);
4603 
4604   __ Cmp(x16, 1);
4605   __ Csinv(x4, x24, x25, gt);
4606   __ Csinv(x5, x24, x25, le);
4607   __ Csneg(x6, x24, x25, hs);
4608   __ Csneg(x7, x24, x25, lo);
4609 
4610   __ Cset(w8, ne);
4611   __ Csetm(w9, ne);
4612   __ Cinc(x10, x25, ne);
4613   __ Cinv(x11, x24, ne);
4614   __ Cneg(x12, x24, ne);
4615 
4616   __ csel(w15, w24, w25, al);
4617   __ csel(x18, x24, x25, nv);
4618 
  // CzeroX clears the register when the condition holds; CmovX moves only
  // when the condition holds.
4619   __ CzeroX(x24, ne);
4620   __ CzeroX(x25, eq);
4621 
4622   __ CmovX(x26, x25, ne);
4623   __ CmovX(x27, x25, eq);
4624   END();
4625 
4626   RUN();
4627 
4628   CHECK_EQUAL_64(0x0000000f, x0);
4629   CHECK_EQUAL_64(0x0000001f, x1);
4630   CHECK_EQUAL_64(0x00000020, x2);
4631   CHECK_EQUAL_64(0x0000000f, x3);
4632   CHECK_EQUAL_64(0xffffffe0ffffffe0UL, x4);
4633   CHECK_EQUAL_64(0x0000000f0000000fUL, x5);
4634   CHECK_EQUAL_64(0xffffffe0ffffffe1UL, x6);
4635   CHECK_EQUAL_64(0x0000000f0000000fUL, x7);
4636   CHECK_EQUAL_64(0x00000001, x8);
4637   CHECK_EQUAL_64(0xffffffff, x9);
4638   CHECK_EQUAL_64(0x0000001f00000020UL, x10);
4639   CHECK_EQUAL_64(0xfffffff0fffffff0UL, x11);
4640   CHECK_EQUAL_64(0xfffffff0fffffff1UL, x12);
4641   CHECK_EQUAL_64(0x0000000f, x13);
4642   CHECK_EQUAL_64(0x0000000f0000000fUL, x14);
4643   CHECK_EQUAL_64(0x0000000f, x15);
4644   CHECK_EQUAL_64(0x0000000f0000000fUL, x18);
4645   CHECK_EQUAL_64(0, x24);
4646   CHECK_EQUAL_64(0x0000001f0000001fUL, x25);
4647   CHECK_EQUAL_64(0x0000001f0000001fUL, x26);
4648   CHECK_EQUAL_64(0, x27);
4649 
4650   TEARDOWN();
4651 }
4652 
4653 
TEST(csel_imm)4654 TEST(csel_imm) {
  // Check Csel with immediate "false" operands. The small immediates
  // (-2..2) exercise the macro-assembler's special encodings — presumably 0
  // maps to csel zr, 1 to csinc zr and -1 to csinv zr, with other values
  // materialized in a register; verify against the Csel macro. The ne
  // condition is false (x18 == 0), so the immediate operand is selected; the
  // final case in each width uses eq (true) to select the register instead.
4655   INIT_V8();
4656   SETUP();
4657 
4658   START();
4659   __ Mov(x18, 0);
4660   __ Mov(x19, 0x80000000);
4661   __ Mov(x20, 0x8000000000000000UL);
4662 
4663   __ Cmp(x18, Operand(0));
4664   __ Csel(w0, w19, -2, ne);
4665   __ Csel(w1, w19, -1, ne);
4666   __ Csel(w2, w19, 0, ne);
4667   __ Csel(w3, w19, 1, ne);
4668   __ Csel(w4, w19, 2, ne);
4669   __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
4670   __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
4671   __ Csel(w7, w19, 3, eq);
4672 
4673   __ Csel(x8, x20, -2, ne);
4674   __ Csel(x9, x20, -1, ne);
4675   __ Csel(x10, x20, 0, ne);
4676   __ Csel(x11, x20, 1, ne);
4677   __ Csel(x12, x20, 2, ne);
4678   __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
4679   __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
4680   __ Csel(x15, x20, 3, eq);
4681 
4682   END();
4683 
4684   RUN();
4685 
4686   CHECK_EQUAL_32(-2, w0);
4687   CHECK_EQUAL_32(-1, w1);
4688   CHECK_EQUAL_32(0, w2);
4689   CHECK_EQUAL_32(1, w3);
4690   CHECK_EQUAL_32(2, w4);
4691   CHECK_EQUAL_32(-1, w5);
4692   CHECK_EQUAL_32(0x40000000, w6);
4693   CHECK_EQUAL_32(0x80000000, w7);
4694 
4695   CHECK_EQUAL_64(-2, x8);
4696   CHECK_EQUAL_64(-1, x9);
4697   CHECK_EQUAL_64(0, x10);
4698   CHECK_EQUAL_64(1, x11);
4699   CHECK_EQUAL_64(2, x12);
4700   CHECK_EQUAL_64(-1, x13);
4701   CHECK_EQUAL_64(0x4000000000000000UL, x14);
4702   CHECK_EQUAL_64(0x8000000000000000UL, x15);
4703 
4704   TEARDOWN();
4705 }
4706 
4707 
TEST(lslv)4708 TEST(lslv) {
  // Check variable logical-shift-left (lslv / the Lsl macro with a register
  // shift amount) for a range of shift amounts in both widths, including one
  // amount (33) that exceeds the 32-bit register width.
4709   INIT_V8();
4710   SETUP();
4711 
4712   uint64_t value = 0x0123456789abcdefUL;
4713   int shift[] = {1, 3, 5, 9, 17, 33};
4714 
4715   START();
4716   __ Mov(x0, value);
4717   __ Mov(w1, shift[0]);
4718   __ Mov(w2, shift[1]);
4719   __ Mov(w3, shift[2]);
4720   __ Mov(w4, shift[3]);
4721   __ Mov(w5, shift[4]);
4722   __ Mov(w6, shift[5]);
4723 
  // Shift by xzr (zero) via the raw instruction: x0 must be unchanged
  // (checked below).
4724   __ lslv(x0, x0, xzr);
4725 
4726   __ Lsl(x16, x0, x1);
4727   __ Lsl(x17, x0, x2);
4728   __ Lsl(x18, x0, x3);
4729   __ Lsl(x19, x0, x4);
4730   __ Lsl(x20, x0, x5);
4731   __ Lsl(x21, x0, x6);
4732 
4733   __ Lsl(w22, w0, w1);
4734   __ Lsl(w23, w0, w2);
4735   __ Lsl(w24, w0, w3);
4736   __ Lsl(w25, w0, w4);
4737   __ Lsl(w26, w0, w5);
4738   __ Lsl(w27, w0, w6);
4739   END();
4740 
4741   RUN();
4742 
  // Shift amounts are taken modulo the register width (& 63 / & 31).
4743   CHECK_EQUAL_64(value, x0);
4744   CHECK_EQUAL_64(value << (shift[0] & 63), x16);
4745   CHECK_EQUAL_64(value << (shift[1] & 63), x17);
4746   CHECK_EQUAL_64(value << (shift[2] & 63), x18);
4747   CHECK_EQUAL_64(value << (shift[3] & 63), x19);
4748   CHECK_EQUAL_64(value << (shift[4] & 63), x20);
4749   CHECK_EQUAL_64(value << (shift[5] & 63), x21);
4750   CHECK_EQUAL_32(value << (shift[0] & 31), w22);
4751   CHECK_EQUAL_32(value << (shift[1] & 31), w23);
4752   CHECK_EQUAL_32(value << (shift[2] & 31), w24);
4753   CHECK_EQUAL_32(value << (shift[3] & 31), w25);
4754   CHECK_EQUAL_32(value << (shift[4] & 31), w26);
4755   CHECK_EQUAL_32(value << (shift[5] & 31), w27);
4756 
4757   TEARDOWN();
4758 }
4759 
4760 
TEST(lsrv)4761 TEST(lsrv) {
  // Check variable logical-shift-right (lsrv / the Lsr macro with a register
  // shift amount), mirroring the lslv test: shift amounts wrap modulo the
  // register width, and shifting by xzr leaves the value unchanged.
4762   INIT_V8();
4763   SETUP();
4764 
4765   uint64_t value = 0x0123456789abcdefUL;
4766   int shift[] = {1, 3, 5, 9, 17, 33};
4767 
4768   START();
4769   __ Mov(x0, value);
4770   __ Mov(w1, shift[0]);
4771   __ Mov(w2, shift[1]);
4772   __ Mov(w3, shift[2]);
4773   __ Mov(w4, shift[3]);
4774   __ Mov(w5, shift[4]);
4775   __ Mov(w6, shift[5]);
4776 
4777   __ lsrv(x0, x0, xzr);
4778 
4779   __ Lsr(x16, x0, x1);
4780   __ Lsr(x17, x0, x2);
4781   __ Lsr(x18, x0, x3);
4782   __ Lsr(x19, x0, x4);
4783   __ Lsr(x20, x0, x5);
4784   __ Lsr(x21, x0, x6);
4785 
4786   __ Lsr(w22, w0, w1);
4787   __ Lsr(w23, w0, w2);
4788   __ Lsr(w24, w0, w3);
4789   __ Lsr(w25, w0, w4);
4790   __ Lsr(w26, w0, w5);
4791   __ Lsr(w27, w0, w6);
4792   END();
4793 
4794   RUN();
4795 
4796   CHECK_EQUAL_64(value, x0);
4797   CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
4798   CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
4799   CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
4800   CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
4801   CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
4802   CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
4803 
  // The 32-bit forms only see the low word of the input.
4804   value &= 0xffffffffUL;
4805   CHECK_EQUAL_32(value >> (shift[0] & 31), w22);
4806   CHECK_EQUAL_32(value >> (shift[1] & 31), w23);
4807   CHECK_EQUAL_32(value >> (shift[2] & 31), w24);
4808   CHECK_EQUAL_32(value >> (shift[3] & 31), w25);
4809   CHECK_EQUAL_32(value >> (shift[4] & 31), w26);
4810   CHECK_EQUAL_32(value >> (shift[5] & 31), w27);
4811 
4812   TEARDOWN();
4813 }
4814 
4815 
TEST(asrv)4816 TEST(asrv) {
  // Check variable arithmetic-shift-right (asrv / the Asr macro with a
  // register shift amount). The input is negative (top bit set) so the
  // sign-extension behaviour of the arithmetic shift is exercised; the
  // expected values rely on signed >> in C++ shifting in sign bits.
4817   INIT_V8();
4818   SETUP();
4819 
4820   int64_t value = 0xfedcba98fedcba98UL;
4821   int shift[] = {1, 3, 5, 9, 17, 33};
4822 
4823   START();
4824   __ Mov(x0, value);
4825   __ Mov(w1, shift[0]);
4826   __ Mov(w2, shift[1]);
4827   __ Mov(w3, shift[2]);
4828   __ Mov(w4, shift[3]);
4829   __ Mov(w5, shift[4]);
4830   __ Mov(w6, shift[5]);
4831 
4832   __ asrv(x0, x0, xzr);
4833 
4834   __ Asr(x16, x0, x1);
4835   __ Asr(x17, x0, x2);
4836   __ Asr(x18, x0, x3);
4837   __ Asr(x19, x0, x4);
4838   __ Asr(x20, x0, x5);
4839   __ Asr(x21, x0, x6);
4840 
4841   __ Asr(w22, w0, w1);
4842   __ Asr(w23, w0, w2);
4843   __ Asr(w24, w0, w3);
4844   __ Asr(w25, w0, w4);
4845   __ Asr(w26, w0, w5);
4846   __ Asr(w27, w0, w6);
4847   END();
4848 
4849   RUN();
4850 
4851   CHECK_EQUAL_64(value, x0);
4852   CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
4853   CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
4854   CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
4855   CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
4856   CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
4857   CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
4858 
  // The 32-bit results are computed from the (signed) low word of the input.
4859   int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
4860   CHECK_EQUAL_32(value32 >> (shift[0] & 31), w22);
4861   CHECK_EQUAL_32(value32 >> (shift[1] & 31), w23);
4862   CHECK_EQUAL_32(value32 >> (shift[2] & 31), w24);
4863   CHECK_EQUAL_32(value32 >> (shift[3] & 31), w25);
4864   CHECK_EQUAL_32(value32 >> (shift[4] & 31), w26);
4865   CHECK_EQUAL_32(value32 >> (shift[5] & 31), w27);
4866 
4867   TEARDOWN();
4868 }
4869 
4870 
TEST(rorv)4871 TEST(rorv) {
  // Check variable rotate-right (rorv / the Ror macro with a register rotate
  // amount). The expected values are the input hand-rotated by each amount;
  // note w22 and w27 match because rotating a 32-bit value by 4 and by 36
  // (36 & 31 == 4) is the same rotation.
4872   INIT_V8();
4873   SETUP();
4874 
4875   uint64_t value = 0x0123456789abcdefUL;
4876   int shift[] = {4, 8, 12, 16, 24, 36};
4877 
4878   START();
4879   __ Mov(x0, value);
4880   __ Mov(w1, shift[0]);
4881   __ Mov(w2, shift[1]);
4882   __ Mov(w3, shift[2]);
4883   __ Mov(w4, shift[3]);
4884   __ Mov(w5, shift[4]);
4885   __ Mov(w6, shift[5]);
4886 
  // Rotate by xzr (zero) via the raw instruction: x0 must be unchanged.
4887   __ rorv(x0, x0, xzr);
4888 
4889   __ Ror(x16, x0, x1);
4890   __ Ror(x17, x0, x2);
4891   __ Ror(x18, x0, x3);
4892   __ Ror(x19, x0, x4);
4893   __ Ror(x20, x0, x5);
4894   __ Ror(x21, x0, x6);
4895 
4896   __ Ror(w22, w0, w1);
4897   __ Ror(w23, w0, w2);
4898   __ Ror(w24, w0, w3);
4899   __ Ror(w25, w0, w4);
4900   __ Ror(w26, w0, w5);
4901   __ Ror(w27, w0, w6);
4902   END();
4903 
4904   RUN();
4905 
4906   CHECK_EQUAL_64(value, x0);
4907   CHECK_EQUAL_64(0xf0123456789abcdeUL, x16);
4908   CHECK_EQUAL_64(0xef0123456789abcdUL, x17);
4909   CHECK_EQUAL_64(0xdef0123456789abcUL, x18);
4910   CHECK_EQUAL_64(0xcdef0123456789abUL, x19);
4911   CHECK_EQUAL_64(0xabcdef0123456789UL, x20);
4912   CHECK_EQUAL_64(0x789abcdef0123456UL, x21);
4913   CHECK_EQUAL_32(0xf89abcde, w22);
4914   CHECK_EQUAL_32(0xef89abcd, w23);
4915   CHECK_EQUAL_32(0xdef89abc, w24);
4916   CHECK_EQUAL_32(0xcdef89ab, w25);
4917   CHECK_EQUAL_32(0xabcdef89, w26);
4918   CHECK_EQUAL_32(0xf89abcde, w27);
4919 
4920   TEARDOWN();
4921 }
4922 
4923 
TEST(bfm)4924 TEST(bfm) {
  // Check the bitfield-move instruction (bfm), which inserts a rotated
  // bitfield from the source into the destination while preserving the other
  // destination bits, plus its Bfi (insert) and Bfxil (extract-and-insert-
  // low) aliases. Destinations are pre-filled with 0x88.. so preserved bits
  // are visible in the expected values.
4925   INIT_V8();
4926   SETUP();
4927 
4928   START();
4929   __ Mov(x1, 0x0123456789abcdefL);
4930 
4931   __ Mov(x10, 0x8888888888888888L);
4932   __ Mov(x11, 0x8888888888888888L);
4933   __ Mov(x12, 0x8888888888888888L);
4934   __ Mov(x13, 0x8888888888888888L);
4935   __ Mov(w20, 0x88888888);
4936   __ Mov(w21, 0x88888888);
4937 
  // Raw bfm: arguments are immr (rotate) and imms (field end bit).
4938   __ bfm(x10, x1, 16, 31);
4939   __ bfm(x11, x1, 32, 15);
4940 
4941   __ bfm(w20, w1, 16, 23);
4942   __ bfm(w21, w1, 24, 15);
4943 
4944   // Aliases.
4945   __ Bfi(x12, x1, 16, 8);
4946   __ Bfxil(x13, x1, 16, 8);
4947   END();
4948 
4949   RUN();
4950 
4951 
4952   CHECK_EQUAL_64(0x88888888888889abL, x10);
4953   CHECK_EQUAL_64(0x8888cdef88888888L, x11);
4954 
4955   CHECK_EQUAL_32(0x888888ab, w20);
4956   CHECK_EQUAL_32(0x88cdef88, w21);
4957 
4958   CHECK_EQUAL_64(0x8888888888ef8888L, x12);
4959   CHECK_EQUAL_64(0x88888888888888abL, x13);
4960 
4961   TEARDOWN();
4962 }
4963 
4964 
TEST(sbfm)4965 TEST(sbfm) {
  // Check the signed bitfield-move instruction (sbfm), which extracts a
  // bitfield and sign-extends it into the destination, plus its aliases:
  // Asr (immediate), Sbfiz, Sbfx, Sxtb, Sxth and Sxtw. Two inputs are used
  // so both positive and negative extracted fields are covered.
4966   INIT_V8();
4967   SETUP();
4968 
4969   START();
4970   __ Mov(x1, 0x0123456789abcdefL);
4971   __ Mov(x2, 0xfedcba9876543210L);
4972 
  // Raw sbfm: arguments are immr (rotate) and imms (field end bit).
4973   __ sbfm(x10, x1, 16, 31);
4974   __ sbfm(x11, x1, 32, 15);
4975   __ sbfm(x12, x1, 32, 47);
4976   __ sbfm(x13, x1, 48, 35);
4977 
4978   __ sbfm(w14, w1, 16, 23);
4979   __ sbfm(w15, w1, 24, 15);
4980   __ sbfm(w16, w2, 16, 23);
4981   __ sbfm(w17, w2, 24, 15);
4982 
4983   // Aliases.
4984   __ Asr(x18, x1, 32);
4985   __ Asr(x19, x2, 32);
4986   __ Sbfiz(x20, x1, 8, 16);
4987   __ Sbfiz(x21, x2, 8, 16);
4988   __ Sbfx(x22, x1, 8, 16);
4989   __ Sbfx(x23, x2, 8, 16);
4990   __ Sxtb(x24, w1);
4991   __ Sxtb(x25, x2);
4992   __ Sxth(x26, w1);
4993   __ Sxth(x27, x2);
4994   __ Sxtw(x28, w1);
4995   __ Sxtw(x29, x2);
4996   END();
4997 
4998   RUN();
4999 
5000 
5001   CHECK_EQUAL_64(0xffffffffffff89abL, x10);
5002   CHECK_EQUAL_64(0xffffcdef00000000L, x11);
5003   CHECK_EQUAL_64(0x4567L, x12);
5004   CHECK_EQUAL_64(0x789abcdef0000L, x13);
5005 
5006   CHECK_EQUAL_32(0xffffffab, w14);
5007   CHECK_EQUAL_32(0xffcdef00, w15);
5008   CHECK_EQUAL_32(0x54, w16);
5009   CHECK_EQUAL_32(0x00321000, w17);
5010 
5011   CHECK_EQUAL_64(0x01234567L, x18);
5012   CHECK_EQUAL_64(0xfffffffffedcba98L, x19);
5013   CHECK_EQUAL_64(0xffffffffffcdef00L, x20);
5014   CHECK_EQUAL_64(0x321000L, x21);
5015   CHECK_EQUAL_64(0xffffffffffffabcdL, x22);
5016   CHECK_EQUAL_64(0x5432L, x23);
5017   CHECK_EQUAL_64(0xffffffffffffffefL, x24);
5018   CHECK_EQUAL_64(0x10, x25);
5019   CHECK_EQUAL_64(0xffffffffffffcdefL, x26);
5020   CHECK_EQUAL_64(0x3210, x27);
5021   CHECK_EQUAL_64(0xffffffff89abcdefL, x28);
5022   CHECK_EQUAL_64(0x76543210, x29);
5023 
5024   TEARDOWN();
5025 }
5026 
5027 
TEST(ubfm)5028 TEST(ubfm) {
  // Check the unsigned bitfield-move instruction (ubfm), which extracts a
  // bitfield and zero-extends it into the destination, plus its aliases:
  // Lsl/Lsr (immediate), Ubfiz, Ubfx, Uxtb, Uxth and Uxtw. x10/x11 are
  // pre-filled with 0x88.. to show that ubfm clears the remaining bits
  // (unlike bfm, which preserves them).
5029   INIT_V8();
5030   SETUP();
5031 
5032   START();
5033   __ Mov(x1, 0x0123456789abcdefL);
5034   __ Mov(x2, 0xfedcba9876543210L);
5035 
5036   __ Mov(x10, 0x8888888888888888L);
5037   __ Mov(x11, 0x8888888888888888L);
5038 
  // Raw ubfm: arguments are immr (rotate) and imms (field end bit).
5039   __ ubfm(x10, x1, 16, 31);
5040   __ ubfm(x11, x1, 32, 15);
5041   __ ubfm(x12, x1, 32, 47);
5042   __ ubfm(x13, x1, 48, 35);
5043 
5044   __ ubfm(w25, w1, 16, 23);
5045   __ ubfm(w26, w1, 24, 15);
5046   __ ubfm(w27, w2, 16, 23);
5047   __ ubfm(w28, w2, 24, 15);
5048 
5049   // Aliases
5050   __ Lsl(x15, x1, 63);
5051   __ Lsl(x16, x1, 0);
5052   __ Lsr(x17, x1, 32);
5053   __ Ubfiz(x18, x1, 8, 16);
5054   __ Ubfx(x19, x1, 8, 16);
5055   __ Uxtb(x20, x1);
5056   __ Uxth(x21, x1);
5057   __ Uxtw(x22, x1);
5058   END();
5059 
5060   RUN();
5061 
5062   CHECK_EQUAL_64(0x00000000000089abL, x10);
5063   CHECK_EQUAL_64(0x0000cdef00000000L, x11);
5064   CHECK_EQUAL_64(0x4567L, x12);
5065   CHECK_EQUAL_64(0x789abcdef0000L, x13);
5066 
5067   CHECK_EQUAL_32(0x000000ab, w25);
5068   CHECK_EQUAL_32(0x00cdef00, w26);
5069   CHECK_EQUAL_32(0x54, w27);
5070   CHECK_EQUAL_32(0x00321000, w28);
5071 
5072   CHECK_EQUAL_64(0x8000000000000000L, x15);
5073   CHECK_EQUAL_64(0x0123456789abcdefL, x16);
5074   CHECK_EQUAL_64(0x01234567L, x17);
5075   CHECK_EQUAL_64(0xcdef00L, x18);
5076   CHECK_EQUAL_64(0xabcdL, x19);
5077   CHECK_EQUAL_64(0xefL, x20);
5078   CHECK_EQUAL_64(0xcdefL, x21);
5079   CHECK_EQUAL_64(0x89abcdefL, x22);
5080 
5081   TEARDOWN();
5082 }
5083 
5084 
TEST(extr)5085 TEST(extr) {
  // Check the extract instruction (Extr), which extracts a register-width
  // result from the concatenation of two registers at a given bit position,
  // plus the Ror alias (Extr with both sources equal). Expected values are
  // hand-computed from the two input patterns.
5086   INIT_V8();
5087   SETUP();
5088 
5089   START();
5090   __ Mov(x1, 0x0123456789abcdefL);
5091   __ Mov(x2, 0xfedcba9876543210L);
5092 
  // Extr with lsb == 0 simply returns the second source register.
5093   __ Extr(w10, w1, w2, 0);
5094   __ Extr(x11, x1, x2, 0);
5095   __ Extr(w12, w1, w2, 1);
5096   __ Extr(x13, x2, x1, 2);
5097 
  // Ror by an immediate is an alias of Extr(rd, rn, rn, shift).
5098   __ Ror(w20, w1, 0);
5099   __ Ror(x21, x1, 0);
5100   __ Ror(w22, w2, 17);
5101   __ Ror(w23, w1, 31);
5102   __ Ror(x24, x2, 1);
5103   __ Ror(x25, x1, 63);
5104   END();
5105 
5106   RUN();
5107 
5108   CHECK_EQUAL_64(0x76543210, x10);
5109   CHECK_EQUAL_64(0xfedcba9876543210L, x11);
5110   CHECK_EQUAL_64(0xbb2a1908, x12);
5111   CHECK_EQUAL_64(0x0048d159e26af37bUL, x13);
5112   CHECK_EQUAL_64(0x89abcdef, x20);
5113   CHECK_EQUAL_64(0x0123456789abcdefL, x21);
5114   CHECK_EQUAL_64(0x19083b2a, x22);
5115   CHECK_EQUAL_64(0x13579bdf, x23);
5116   CHECK_EQUAL_64(0x7f6e5d4c3b2a1908UL, x24);
5117   CHECK_EQUAL_64(0x02468acf13579bdeUL, x25);
5118 
5119   TEARDOWN();
5120 }
5121 
5122 
TEST(fmov_imm)5123 TEST(fmov_imm) {
  // Check Fmov with immediate operands in both single and double precision,
  // covering values that fit the FP8 immediate encoding (1.0, -13.0), values
  // that do not (255.0, 12.34567), zero, and the infinities.
5124   INIT_V8();
5125   SETUP();
5126 
5127   START();
5128   __ Fmov(s11, 1.0);
5129   __ Fmov(d22, -13.0);
5130   __ Fmov(s1, 255.0);
5131   __ Fmov(d2, 12.34567);
5132   __ Fmov(s3, 0.0);
5133   __ Fmov(d4, 0.0);
5134   __ Fmov(s5, kFP32PositiveInfinity);
5135   __ Fmov(d6, kFP64NegativeInfinity);
5136   END();
5137 
5138   RUN();
5139 
5140   CHECK_EQUAL_FP32(1.0, s11);
5141   CHECK_EQUAL_FP64(-13.0, d22);
5142   CHECK_EQUAL_FP32(255.0, s1);
5143   CHECK_EQUAL_FP64(12.34567, d2);
5144   CHECK_EQUAL_FP32(0.0, s3);
5145   CHECK_EQUAL_FP64(0.0, d4);
5146   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
5147   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d6);
5148 
5149   TEARDOWN();
5150 }
5151 
5152 
TEST(fmov_reg)5153 TEST(fmov_reg) {
  // Check register-to-register Fmov in all directions: FP to integer
  // (bit-exact transfer), integer to FP, FP to FP of the same size, and a
  // 32-bit FP move of the low half of a 64-bit pattern (s6 <- s6 keeps only
  // the low 32 bits of the d6 payload, as checked below).
5154   INIT_V8();
5155   SETUP();
5156 
5157   START();
5158   __ Fmov(s20, 1.0);
5159   __ Fmov(w10, s20);
5160   __ Fmov(s30, w10);
5161   __ Fmov(s5, s20);
5162   __ Fmov(d1, -13.0);
5163   __ Fmov(x1, d1);
5164   __ Fmov(d2, x1);
5165   __ Fmov(d4, d1);
5166   __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
5167   __ Fmov(s6, s6);
5168   END();
5169 
5170   RUN();
5171 
5172   CHECK_EQUAL_32(float_to_rawbits(1.0), w10);
5173   CHECK_EQUAL_FP32(1.0, s30);
5174   CHECK_EQUAL_FP32(1.0, s5);
5175   CHECK_EQUAL_64(double_to_rawbits(-13.0), x1);
5176   CHECK_EQUAL_FP64(-13.0, d2);
5177   CHECK_EQUAL_FP64(-13.0, d4);
5178   CHECK_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
5179 
5180   TEARDOWN();
5181 }
5182 
5183 
TEST(fadd)5184 TEST(fadd) {
  // Check Fadd in single and double precision: ordinary sums, adding -0.0
  // and infinities, and the invalid case inf + (-inf), which must produce
  // the default NaN in both operand orders.
5185   INIT_V8();
5186   SETUP();
5187 
5188   START();
5189   __ Fmov(s14, -0.0f);
5190   __ Fmov(s15, kFP32PositiveInfinity);
5191   __ Fmov(s16, kFP32NegativeInfinity);
5192   __ Fmov(s17, 3.25f);
5193   __ Fmov(s18, 1.0f);
5194   __ Fmov(s19, 0.0f);
5195 
5196   __ Fmov(d26, -0.0);
5197   __ Fmov(d27, kFP64PositiveInfinity);
5198   __ Fmov(d28, kFP64NegativeInfinity);
5199   __ Fmov(d29, 0.0);
5200   __ Fmov(d30, -2.0);
5201   __ Fmov(d31, 2.25);
5202 
5203   __ Fadd(s0, s17, s18);
5204   __ Fadd(s1, s18, s19);
5205   __ Fadd(s2, s14, s18);
5206   __ Fadd(s3, s15, s18);
5207   __ Fadd(s4, s16, s18);
  // (+inf) + (-inf) is invalid -> default NaN, in both orders.
5208   __ Fadd(s5, s15, s16);
5209   __ Fadd(s6, s16, s15);
5210 
5211   __ Fadd(d7, d30, d31);
5212   __ Fadd(d8, d29, d31);
5213   __ Fadd(d9, d26, d31);
5214   __ Fadd(d10, d27, d31);
5215   __ Fadd(d11, d28, d31);
5216   __ Fadd(d12, d27, d28);
5217   __ Fadd(d13, d28, d27);
5218   END();
5219 
5220   RUN();
5221 
5222   CHECK_EQUAL_FP32(4.25, s0);
5223   CHECK_EQUAL_FP32(1.0, s1);
5224   CHECK_EQUAL_FP32(1.0, s2);
5225   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s3);
5226   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s4);
5227   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5228   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5229   CHECK_EQUAL_FP64(0.25, d7);
5230   CHECK_EQUAL_FP64(2.25, d8);
5231   CHECK_EQUAL_FP64(2.25, d9);
5232   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d10);
5233   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d11);
5234   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5235   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5236 
5237   TEARDOWN();
5238 }
5239 
5240 
TEST(fsub)5241 TEST(fsub) {
  // Check Fsub in single and double precision: ordinary differences,
  // subtracting from -0.0, subtracting infinities, and the invalid case
  // inf - inf (same sign), which must produce the default NaN.
5242   INIT_V8();
5243   SETUP();
5244 
5245   START();
5246   __ Fmov(s14, -0.0f);
5247   __ Fmov(s15, kFP32PositiveInfinity);
5248   __ Fmov(s16, kFP32NegativeInfinity);
5249   __ Fmov(s17, 3.25f);
5250   __ Fmov(s18, 1.0f);
5251   __ Fmov(s19, 0.0f);
5252 
5253   __ Fmov(d26, -0.0);
5254   __ Fmov(d27, kFP64PositiveInfinity);
5255   __ Fmov(d28, kFP64NegativeInfinity);
5256   __ Fmov(d29, 0.0);
5257   __ Fmov(d30, -2.0);
5258   __ Fmov(d31, 2.25);
5259 
5260   __ Fsub(s0, s17, s18);
5261   __ Fsub(s1, s18, s19);
5262   __ Fsub(s2, s14, s18);
5263   __ Fsub(s3, s18, s15);
5264   __ Fsub(s4, s18, s16);
  // inf - inf (same-signed) is invalid -> default NaN.
5265   __ Fsub(s5, s15, s15);
5266   __ Fsub(s6, s16, s16);
5267 
5268   __ Fsub(d7, d30, d31);
5269   __ Fsub(d8, d29, d31);
5270   __ Fsub(d9, d26, d31);
5271   __ Fsub(d10, d31, d27);
5272   __ Fsub(d11, d31, d28);
5273   __ Fsub(d12, d27, d27);
5274   __ Fsub(d13, d28, d28);
5275   END();
5276 
5277   RUN();
5278 
5279   CHECK_EQUAL_FP32(2.25, s0);
5280   CHECK_EQUAL_FP32(1.0, s1);
5281   CHECK_EQUAL_FP32(-1.0, s2);
5282   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s3);
5283   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s4);
5284   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5285   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5286   CHECK_EQUAL_FP64(-4.25, d7);
5287   CHECK_EQUAL_FP64(-2.25, d8);
5288   CHECK_EQUAL_FP64(-2.25, d9);
5289   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
5290   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
5291   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5292   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5293 
5294   TEARDOWN();
5295 }
5296 
5297 
TEST(fmul)5298 TEST(fmul) {
  // Check Fmul in single and double precision: ordinary products, signed
  // zeroes, infinity times a finite value, and the invalid case
  // infinity * zero, which must produce the default NaN in both orders.
5299   INIT_V8();
5300   SETUP();
5301 
5302   START();
5303   __ Fmov(s14, -0.0f);
5304   __ Fmov(s15, kFP32PositiveInfinity);
5305   __ Fmov(s16, kFP32NegativeInfinity);
5306   __ Fmov(s17, 3.25f);
5307   __ Fmov(s18, 2.0f);
5308   __ Fmov(s19, 0.0f);
5309   __ Fmov(s20, -2.0f);
5310 
5311   __ Fmov(d26, -0.0);
5312   __ Fmov(d27, kFP64PositiveInfinity);
5313   __ Fmov(d28, kFP64NegativeInfinity);
5314   __ Fmov(d29, 0.0);
5315   __ Fmov(d30, -2.0);
5316   __ Fmov(d31, 2.25);
5317 
5318   __ Fmul(s0, s17, s18);
5319   __ Fmul(s1, s18, s19);
  // (-0.0) * (-0.0) = +0.0.
5320   __ Fmul(s2, s14, s14);
5321   __ Fmul(s3, s15, s20);
5322   __ Fmul(s4, s16, s20);
  // inf * 0 is invalid -> default NaN, in both orders.
5323   __ Fmul(s5, s15, s19);
5324   __ Fmul(s6, s19, s16);
5325 
5326   __ Fmul(d7, d30, d31);
5327   __ Fmul(d8, d29, d31);
5328   __ Fmul(d9, d26, d26);
5329   __ Fmul(d10, d27, d30);
5330   __ Fmul(d11, d28, d30);
5331   __ Fmul(d12, d27, d29);
5332   __ Fmul(d13, d29, d28);
5333   END();
5334 
5335   RUN();
5336 
5337   CHECK_EQUAL_FP32(6.5, s0);
5338   CHECK_EQUAL_FP32(0.0, s1);
5339   CHECK_EQUAL_FP32(0.0, s2);
5340   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s3);
5341   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s4);
5342   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5343   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5344   CHECK_EQUAL_FP64(-4.5, d7);
5345   CHECK_EQUAL_FP64(0.0, d8);
5346   CHECK_EQUAL_FP64(0.0, d9);
5347   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
5348   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
5349   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5350   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5351 
5352   TEARDOWN();
5353 }
5354 
5355 
FmaddFmsubHelper(double n,double m,double a,double fmadd,double fmsub,double fnmadd,double fnmsub)5356 static void FmaddFmsubHelper(double n, double m, double a,
5357                              double fmadd, double fmsub,
5358                              double fnmadd, double fnmsub) {
  // Run all four double-precision fused multiply-add variants on the inputs
  // (n, m, a) and check each result against the caller-supplied expectation:
  //   Fmadd:  a + (n * m)     Fmsub:  a - (n * m)
  //   Fnmadd: -a - (n * m)    Fnmsub: -a + (n * m)
  // (Exact semantics per the A64 instruction definitions; the caller
  // hand-computes the expected values.)
5359   SETUP();
5360   START();
5361 
5362   __ Fmov(d0, n);
5363   __ Fmov(d1, m);
5364   __ Fmov(d2, a);
5365   __ Fmadd(d28, d0, d1, d2);
5366   __ Fmsub(d29, d0, d1, d2);
5367   __ Fnmadd(d30, d0, d1, d2);
5368   __ Fnmsub(d31, d0, d1, d2);
5369 
5370   END();
5371   RUN();
5372 
5373   CHECK_EQUAL_FP64(fmadd, d28);
5374   CHECK_EQUAL_FP64(fmsub, d29);
5375   CHECK_EQUAL_FP64(fnmadd, d30);
5376   CHECK_EQUAL_FP64(fnmsub, d31);
5377 
5378   TEARDOWN();
5379 }
5380 
5381 
TEST(fmadd_fmsub_double)5382 TEST(fmadd_fmsub_double) {
  // Check the double-precision fused multiply-add family via
  // FmaddFmsubHelper: basic arithmetic, the signs of exactly-zero results,
  // and NaN generation for the invalid combinations involving infinities.
5383   INIT_V8();
5384 
5385   // It's hard to check the result of fused operations because the only way to
5386   // calculate the result is using fma, which is what the simulator uses anyway.
5387   // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5388 
5389   // Basic operation.
5390   FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
5391   FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
5392 
5393   // Check the sign of exact zeroes.
5394   //               n     m     a     fmadd  fmsub  fnmadd fnmsub
5395   FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
5396   FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
5397   FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
5398   FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
5399   FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
5400   FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
5401   FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
5402   FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
5403 
5404   // Check NaN generation.
  // inf * 0 in the product is invalid for every variant.
5405   FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
5406                    kFP64DefaultNaN, kFP64DefaultNaN,
5407                    kFP64DefaultNaN, kFP64DefaultNaN);
5408   FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
5409                    kFP64DefaultNaN, kFP64DefaultNaN,
5410                    kFP64DefaultNaN, kFP64DefaultNaN);
5411   FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
5412                    kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
5413                    kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
5414                    kFP64NegativeInfinity,   // -inf + (-inf * 1) = -inf
5415                    kFP64DefaultNaN);        // -inf + ( inf * 1) = NaN
5416   FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
5417                    kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
5418                    kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
5419                    kFP64DefaultNaN,         // -inf + ( inf * 1) = NaN
5420                    kFP64NegativeInfinity);  // -inf + (-inf * 1) = -inf
5421 }
5422 
5423 
FmaddFmsubHelper(float n,float m,float a,float fmadd,float fmsub,float fnmadd,float fnmsub)5424 static void FmaddFmsubHelper(float n, float m, float a,
5425                              float fmadd, float fmsub,
5426                              float fnmadd, float fnmsub) {
  // Single-precision overload of FmaddFmsubHelper: runs Fmadd/Fmsub/
  // Fnmadd/Fnmsub on (n, m, a) and checks each result against the
  // caller-supplied expectation. See the double-precision overload above for
  // the operation definitions.
5427   SETUP();
5428   START();
5429 
5430   __ Fmov(s0, n);
5431   __ Fmov(s1, m);
5432   __ Fmov(s2, a);
5433   __ Fmadd(s28, s0, s1, s2);
5434   __ Fmsub(s29, s0, s1, s2);
5435   __ Fnmadd(s30, s0, s1, s2);
5436   __ Fnmsub(s31, s0, s1, s2);
5437 
5438   END();
5439   RUN();
5440 
5441   CHECK_EQUAL_FP32(fmadd, s28);
5442   CHECK_EQUAL_FP32(fmsub, s29);
5443   CHECK_EQUAL_FP32(fnmadd, s30);
5444   CHECK_EQUAL_FP32(fnmsub, s31);
5445 
5446   TEARDOWN();
5447 }
5448 
5449 
TEST(fmadd_fmsub_float)5450 TEST(fmadd_fmsub_float) {
  // Single-precision counterpart of fmadd_fmsub_double: checks the fused
  // multiply-add family via FmaddFmsubHelper — basic arithmetic, the signs
  // of exactly-zero results, and NaN generation for invalid combinations
  // involving infinities.
5451   INIT_V8();
5452   // It's hard to check the result of fused operations because the only way to
5453   // calculate the result is using fma, which is what the simulator uses anyway.
5454   // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5455 
5456   // Basic operation.
5457   FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
5458   FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
5459 
5460   // Check the sign of exact zeroes.
5461   //               n      m      a      fmadd  fmsub  fnmadd fnmsub
5462   FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5463   FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5464   FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5465   FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5466   FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5467   FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5468   FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5469   FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5470 
5471   // Check NaN generation.
  // inf * 0 in the product is invalid for every variant.
5472   FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
5473                    kFP32DefaultNaN, kFP32DefaultNaN,
5474                    kFP32DefaultNaN, kFP32DefaultNaN);
5475   FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
5476                    kFP32DefaultNaN, kFP32DefaultNaN,
5477                    kFP32DefaultNaN, kFP32DefaultNaN);
5478   FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
5479                    kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
5480                    kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
5481                    kFP32NegativeInfinity,   // -inf + (-inf * 1) = -inf
5482                    kFP32DefaultNaN);        // -inf + ( inf * 1) = NaN
5483   FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
5484                    kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
5485                    kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
5486                    kFP32DefaultNaN,         // -inf + ( inf * 1) = NaN
5487                    kFP32NegativeInfinity);  // -inf + (-inf * 1) = -inf
5488 }
5489 
5490 
// Double-precision fused multiply-add NaN propagation: signalling NaNs are
// quietened, quiet NaNs pass through, and the operand priority (n over m,
// the multiply result over a) matches ARMv8 behaviour.
TEST(fmadd_fmsub_double_nans) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  // Distinct payloads identify which input propagated to the result.
  double s1 = rawbits_to_double(0x7ff5555511111111);
  double s2 = rawbits_to_double(0x7ff5555522222222);
  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
  double q1 = rawbits_to_double(0x7ffaaaaa11111111);
  double q2 = rawbits_to_double(0x7ffaaaaa22222222);
  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
  DCHECK(IsSignallingNaN(s1));
  DCHECK(IsSignallingNaN(s2));
  DCHECK(IsSignallingNaN(sa));
  DCHECK(IsQuietNaN(q1));
  DCHECK(IsQuietNaN(q2));
  DCHECK(IsQuietNaN(qa));

  // The input NaNs after passing through ProcessNaN: signalling NaNs gain the
  // quiet bit; quiet NaNs are unchanged.
  double s1_proc = rawbits_to_double(0x7ffd555511111111);
  double s2_proc = rawbits_to_double(0x7ffd555522222222);
  double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
  double q1_proc = q1;
  double q2_proc = q2;
  double qa_proc = qa;
  DCHECK(IsQuietNaN(s1_proc));
  DCHECK(IsQuietNaN(s2_proc));
  DCHECK(IsQuietNaN(sa_proc));
  DCHECK(IsQuietNaN(q1_proc));
  DCHECK(IsQuietNaN(q2_proc));
  DCHECK(IsQuietNaN(qa_proc));

  // Negated NaNs as it would be done on ARMv8 hardware: the effective
  // negation of n (for fmsub/fnmadd) or a (for fnmadd/fnmsub) flips the
  // sign bit of a propagated NaN.
  double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
  double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
  double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
  double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
  DCHECK(IsQuietNaN(s1_proc_neg));
  DCHECK(IsQuietNaN(sa_proc_neg));
  DCHECK(IsQuietNaN(q1_proc_neg));
  DCHECK(IsQuietNaN(qa_proc_neg));

  // Quiet NaNs are propagated.
  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);

  // Signalling NaNs are propagated, and made quiet.
  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // Signalling NaNs take precedence over quiet NaNs.
  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
  FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
  FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
                   kFP64DefaultNaN, kFP64DefaultNaN,
                   kFP64DefaultNaN, kFP64DefaultNaN);
}
5572 
5573 
// Single-precision fused multiply-add NaN propagation; mirrors
// fmadd_fmsub_double_nans with 32-bit NaN encodings.
TEST(fmadd_fmsub_float_nans) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  // Distinct payloads identify which input propagated to the result.
  float s1 = rawbits_to_float(0x7f951111);
  float s2 = rawbits_to_float(0x7f952222);
  float sa = rawbits_to_float(0x7f95aaaa);
  float q1 = rawbits_to_float(0x7fea1111);
  float q2 = rawbits_to_float(0x7fea2222);
  float qa = rawbits_to_float(0x7feaaaaa);
  DCHECK(IsSignallingNaN(s1));
  DCHECK(IsSignallingNaN(s2));
  DCHECK(IsSignallingNaN(sa));
  DCHECK(IsQuietNaN(q1));
  DCHECK(IsQuietNaN(q2));
  DCHECK(IsQuietNaN(qa));

  // The input NaNs after passing through ProcessNaN: signalling NaNs gain the
  // quiet bit; quiet NaNs are unchanged.
  float s1_proc = rawbits_to_float(0x7fd51111);
  float s2_proc = rawbits_to_float(0x7fd52222);
  float sa_proc = rawbits_to_float(0x7fd5aaaa);
  float q1_proc = q1;
  float q2_proc = q2;
  float qa_proc = qa;
  DCHECK(IsQuietNaN(s1_proc));
  DCHECK(IsQuietNaN(s2_proc));
  DCHECK(IsQuietNaN(sa_proc));
  DCHECK(IsQuietNaN(q1_proc));
  DCHECK(IsQuietNaN(q2_proc));
  DCHECK(IsQuietNaN(qa_proc));

  // Negated NaNs as it would be done on ARMv8 hardware: the effective
  // negation of an operand flips the sign bit of a propagated NaN.
  float s1_proc_neg = rawbits_to_float(0xffd51111);
  float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
  float q1_proc_neg = rawbits_to_float(0xffea1111);
  float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
  DCHECK(IsQuietNaN(s1_proc_neg));
  DCHECK(IsQuietNaN(sa_proc_neg));
  DCHECK(IsQuietNaN(q1_proc_neg));
  DCHECK(IsQuietNaN(qa_proc_neg));

  // Quiet NaNs are propagated.
  FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
  FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
  FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
  FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);

  // Signalling NaNs are propagated, and made quiet.
  FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // Signalling NaNs take precedence over quiet NaNs.
  FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
  FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
  FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
  FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);

  // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
  FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
  FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
                   kFP32DefaultNaN, kFP32DefaultNaN,
                   kFP32DefaultNaN, kFP32DefaultNaN);
}
5655 
5656 
// Fdiv (single and double precision): ordinary quotients, signed zeroes,
// division by infinity, and the invalid cases inf/inf and 0/0 which must
// produce the default NaN.
TEST(fdiv) {
  INIT_V8();
  SETUP();

  START();
  // Single-precision inputs.
  __ Fmov(s14, -0.0f);
  __ Fmov(s15, kFP32PositiveInfinity);
  __ Fmov(s16, kFP32NegativeInfinity);
  __ Fmov(s17, 3.25f);
  __ Fmov(s18, 2.0f);
  __ Fmov(s19, 2.0f);
  __ Fmov(s20, -2.0f);

  // Double-precision inputs.
  __ Fmov(d26, -0.0);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0.0);
  __ Fmov(d30, -2.0);
  __ Fmov(d31, 2.25);

  __ Fdiv(s0, s17, s18);   // 3.25 / 2
  __ Fdiv(s1, s18, s19);   // 2 / 2
  __ Fdiv(s2, s14, s18);   // -0 / 2
  __ Fdiv(s3, s18, s15);   // 2 / inf
  __ Fdiv(s4, s18, s16);   // 2 / -inf
  __ Fdiv(s5, s15, s16);   // inf / -inf (invalid)
  __ Fdiv(s6, s14, s14);   // -0 / -0 (invalid)

  __ Fdiv(d7, d31, d30);   // 2.25 / -2
  __ Fdiv(d8, d29, d31);   // 0 / 2.25
  __ Fdiv(d9, d26, d31);   // -0 / 2.25
  __ Fdiv(d10, d31, d27);  // 2.25 / inf
  __ Fdiv(d11, d31, d28);  // 2.25 / -inf
  __ Fdiv(d12, d28, d27);  // -inf / inf (invalid)
  __ Fdiv(d13, d29, d29);  // 0 / 0 (invalid)
  END();

  RUN();

  CHECK_EQUAL_FP32(1.625f, s0);
  CHECK_EQUAL_FP32(1.0f, s1);
  CHECK_EQUAL_FP32(-0.0f, s2);
  CHECK_EQUAL_FP32(0.0f, s3);
  CHECK_EQUAL_FP32(-0.0f, s4);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
  CHECK_EQUAL_FP64(-1.125, d7);
  CHECK_EQUAL_FP64(0.0, d8);
  CHECK_EQUAL_FP64(-0.0, d9);
  CHECK_EQUAL_FP64(0.0, d10);
  CHECK_EQUAL_FP64(-0.0, d11);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}
5713 
5714 
MinMaxHelper(float n,float m,bool min,float quiet_nan_substitute=0.0)5715 static float MinMaxHelper(float n,
5716                           float m,
5717                           bool min,
5718                           float quiet_nan_substitute = 0.0) {
5719   uint32_t raw_n = float_to_rawbits(n);
5720   uint32_t raw_m = float_to_rawbits(m);
5721 
5722   if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
5723     // n is signalling NaN.
5724     return rawbits_to_float(raw_n | kSQuietNanMask);
5725   } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
5726     // m is signalling NaN.
5727     return rawbits_to_float(raw_m | kSQuietNanMask);
5728   } else if (quiet_nan_substitute == 0.0) {
5729     if (std::isnan(n)) {
5730       // n is quiet NaN.
5731       return n;
5732     } else if (std::isnan(m)) {
5733       // m is quiet NaN.
5734       return m;
5735     }
5736   } else {
5737     // Substitute n or m if one is quiet, but not both.
5738     if (std::isnan(n) && !std::isnan(m)) {
5739       // n is quiet NaN: replace with substitute.
5740       n = quiet_nan_substitute;
5741     } else if (!std::isnan(n) && std::isnan(m)) {
5742       // m is quiet NaN: replace with substitute.
5743       m = quiet_nan_substitute;
5744     }
5745   }
5746 
5747   if ((n == 0.0) && (m == 0.0) &&
5748       (copysign(1.0, n) != copysign(1.0, m))) {
5749     return min ? -0.0 : 0.0;
5750   }
5751 
5752   return min ? fminf(n, m) : fmaxf(n, m);
5753 }
5754 
5755 
MinMaxHelper(double n,double m,bool min,double quiet_nan_substitute=0.0)5756 static double MinMaxHelper(double n,
5757                            double m,
5758                            bool min,
5759                            double quiet_nan_substitute = 0.0) {
5760   uint64_t raw_n = double_to_rawbits(n);
5761   uint64_t raw_m = double_to_rawbits(m);
5762 
5763   if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
5764     // n is signalling NaN.
5765     return rawbits_to_double(raw_n | kDQuietNanMask);
5766   } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
5767     // m is signalling NaN.
5768     return rawbits_to_double(raw_m | kDQuietNanMask);
5769   } else if (quiet_nan_substitute == 0.0) {
5770     if (std::isnan(n)) {
5771       // n is quiet NaN.
5772       return n;
5773     } else if (std::isnan(m)) {
5774       // m is quiet NaN.
5775       return m;
5776     }
5777   } else {
5778     // Substitute n or m if one is quiet, but not both.
5779     if (std::isnan(n) && !std::isnan(m)) {
5780       // n is quiet NaN: replace with substitute.
5781       n = quiet_nan_substitute;
5782     } else if (!std::isnan(n) && std::isnan(m)) {
5783       // m is quiet NaN: replace with substitute.
5784       m = quiet_nan_substitute;
5785     }
5786   }
5787 
5788   if ((n == 0.0) && (m == 0.0) &&
5789       (copysign(1.0, n) != copysign(1.0, m))) {
5790     return min ? -0.0 : 0.0;
5791   }
5792 
5793   return min ? fmin(n, m) : fmax(n, m);
5794 }
5795 
5796 
// Runs Fmin, Fmax, Fminnm and Fmaxnm on the double-precision pair (n, m)
// and checks each result bit-exactly against the given expectations.
static void FminFmaxDoubleHelper(double n, double m, double min, double max,
                                 double minnm, double maxnm) {
  SETUP();

  START();
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmin(d28, d0, d1);
  __ Fmax(d29, d0, d1);
  __ Fminnm(d30, d0, d1);
  __ Fmaxnm(d31, d0, d1);
  END();

  RUN();

  // Bit-exact comparisons, so NaN payloads and zero signs are checked too.
  CHECK_EQUAL_FP64(min, d28);
  CHECK_EQUAL_FP64(max, d29);
  CHECK_EQUAL_FP64(minnm, d30);
  CHECK_EQUAL_FP64(maxnm, d31);

  TEARDOWN();
}
5819 
5820 
// Double-precision Fmin/Fmax/Fminnm/Fmaxnm: hand-picked NaN cases, then an
// exhaustive cross-product of interesting inputs checked against the
// MinMaxHelper reference model.
TEST(fmax_fmin_d) {
  INIT_V8();
  // Use non-standard NaNs to check that the payload bits are preserved.
  double snan = rawbits_to_double(0x7ff5555512345678);
  double qnan = rawbits_to_double(0x7ffaaaaa87654321);

  // Expected outputs: the signalling NaN with its quiet bit set, and the
  // quiet NaN unchanged.
  double snan_processed = rawbits_to_double(0x7ffd555512345678);
  double qnan_processed = qnan;

  DCHECK(IsSignallingNaN(snan));
  DCHECK(IsQuietNaN(qnan));
  DCHECK(IsQuietNaN(snan_processed));
  DCHECK(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
  FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
  FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity,
                       kFP64NegativeInfinity, kFP64PositiveInfinity);
  // Signalling NaNs propagate (quietened) through all four instructions.
  FminFmaxDoubleHelper(snan, 0,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(0, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  // Quiet NaNs propagate through Fmin/Fmax but are ignored by
  // Fminnm/Fmaxnm when the other operand is a number.
  FminFmaxDoubleHelper(qnan, 0,
                       qnan_processed, qnan_processed,
                       0, 0);
  FminFmaxDoubleHelper(0, qnan,
                       qnan_processed, qnan_processed,
                       0, 0);
  // Signalling NaNs take precedence over quiet NaNs.
  FminFmaxDoubleHelper(qnan, snan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);
  FminFmaxDoubleHelper(snan, qnan,
                       snan_processed, snan_processed,
                       snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
                      -DBL_MAX, -DBL_MIN, -1.0, -0.0,
                      kFP64PositiveInfinity, kFP64NegativeInfinity,
                      kFP64QuietNaN, kFP64SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    double n = inputs[in];
    for (int im = 0; im < count; im++) {
      double m = inputs[im];
      // Fminnm substitutes +inf for a lone quiet NaN (so the other operand
      // wins the min); Fmaxnm substitutes -inf likewise.
      FminFmaxDoubleHelper(n, m,
                           MinMaxHelper(n, m, true),
                           MinMaxHelper(n, m, false),
                           MinMaxHelper(n, m, true, kFP64PositiveInfinity),
                           MinMaxHelper(n, m, false, kFP64NegativeInfinity));
    }
  }
}
5880 
5881 
// Runs Fmin, Fmax, Fminnm and Fmaxnm on the single-precision pair (n, m)
// and checks each result bit-exactly against the given expectations.
static void FminFmaxFloatHelper(float n, float m, float min, float max,
                                float minnm, float maxnm) {
  SETUP();

  START();
  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmin(s28, s0, s1);
  __ Fmax(s29, s0, s1);
  __ Fminnm(s30, s0, s1);
  __ Fmaxnm(s31, s0, s1);
  END();

  RUN();

  // Bit-exact comparisons, so NaN payloads and zero signs are checked too.
  CHECK_EQUAL_FP32(min, s28);
  CHECK_EQUAL_FP32(max, s29);
  CHECK_EQUAL_FP32(minnm, s30);
  CHECK_EQUAL_FP32(maxnm, s31);

  TEARDOWN();
}
5904 
5905 
// Single-precision Fmin/Fmax/Fminnm/Fmaxnm; mirrors fmax_fmin_d with 32-bit
// encodings and the MinMaxHelper float reference model.
TEST(fmax_fmin_s) {
  INIT_V8();
  // Use non-standard NaNs to check that the payload bits are preserved.
  float snan = rawbits_to_float(0x7f951234);
  float qnan = rawbits_to_float(0x7fea8765);

  // Expected outputs: the signalling NaN with its quiet bit set, and the
  // quiet NaN unchanged.
  float snan_processed = rawbits_to_float(0x7fd51234);
  float qnan_processed = qnan;

  DCHECK(IsSignallingNaN(snan));
  DCHECK(IsQuietNaN(qnan));
  DCHECK(IsQuietNaN(snan_processed));
  DCHECK(IsQuietNaN(qnan_processed));

  // Bootstrap tests.
  FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
  FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
  FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity,
                      kFP32NegativeInfinity, kFP32PositiveInfinity);
  // Signalling NaNs propagate (quietened) through all four instructions.
  FminFmaxFloatHelper(snan, 0,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(0, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  // Quiet NaNs propagate through Fmin/Fmax but are ignored by
  // Fminnm/Fmaxnm when the other operand is a number.
  FminFmaxFloatHelper(qnan, 0,
                      qnan_processed, qnan_processed,
                      0, 0);
  FminFmaxFloatHelper(0, qnan,
                      qnan_processed, qnan_processed,
                      0, 0);
  // Signalling NaNs take precedence over quiet NaNs.
  FminFmaxFloatHelper(qnan, snan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);
  FminFmaxFloatHelper(snan, qnan,
                      snan_processed, snan_processed,
                      snan_processed, snan_processed);

  // Iterate over all combinations of inputs.
  float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
                     -FLT_MAX, -FLT_MIN, -1.0, -0.0,
                     kFP32PositiveInfinity, kFP32NegativeInfinity,
                     kFP32QuietNaN, kFP32SignallingNaN };

  const int count = sizeof(inputs) / sizeof(inputs[0]);

  for (int in = 0; in < count; in++) {
    float n = inputs[in];
    for (int im = 0; im < count; im++) {
      float m = inputs[im];
      // Fminnm substitutes +inf for a lone quiet NaN (so the other operand
      // wins the min); Fmaxnm substitutes -inf likewise.
      FminFmaxFloatHelper(n, m,
                          MinMaxHelper(n, m, true),
                          MinMaxHelper(n, m, false),
                          MinMaxHelper(n, m, true, kFP32PositiveInfinity),
                          MinMaxHelper(n, m, false, kFP32NegativeInfinity));
    }
  }
}
5965 
5966 
// Fccmp (conditional FP compare): when the condition holds, NZCV reflects the
// FP comparison; when it fails, NZCV is set to the immediate flags. The
// preceding Cmp(x20, 0) with x20 == 0 makes eq/ge/le/ls hold and
// ne/lt/gt/hi fail.
TEST(fccmp) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 0.5);
  __ Fmov(d18, -0.5);
  __ Fmov(d19, -1.0);
  __ Mov(x20, 0);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, NoFlag, eq);   // eq holds: compare 0.0 with 0.0.
  __ Mrs(x0, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s16, VFlag, ne);    // ne fails: NZCV := VFlag.
  __ Mrs(x1, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CFlag, ge);    // ge holds: compare 0.0 with 0.5.
  __ Mrs(x2, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(s16, s17, CVFlag, lt);   // lt fails: NZCV := CVFlag.
  __ Mrs(x3, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZFlag, le);    // le holds: compare -0.5 with -0.5.
  __ Mrs(x4, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d18, ZVFlag, gt);   // gt fails: NZCV := ZVFlag.
  __ Mrs(x5, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, ZCVFlag, ls);  // ls holds: compare -0.5 with -1.0.
  __ Mrs(x6, NZCV);

  __ Cmp(x20, 0);
  __ Fccmp(d18, d19, NFlag, hi);    // hi fails: NZCV := NFlag.
  __ Mrs(x7, NZCV);

  // Raw assembler form for the al/nv conditions — NOTE(review): presumably
  // the Fccmp macro disallows these conditions; confirm against the macro
  // assembler. Both always execute the comparison.
  __ fccmp(s16, s16, NFlag, al);
  __ Mrs(x8, NZCV);

  __ fccmp(d18, d18, NFlag, nv);
  __ Mrs(x9, NZCV);

  END();

  RUN();

  CHECK_EQUAL_32(ZCFlag, w0);
  CHECK_EQUAL_32(VFlag, w1);
  CHECK_EQUAL_32(NFlag, w2);
  CHECK_EQUAL_32(CVFlag, w3);
  CHECK_EQUAL_32(ZCFlag, w4);
  CHECK_EQUAL_32(ZVFlag, w5);
  CHECK_EQUAL_32(CFlag, w6);
  CHECK_EQUAL_32(NFlag, w7);
  CHECK_EQUAL_32(ZCFlag, w8);
  CHECK_EQUAL_32(ZCFlag, w9);

  TEARDOWN();
}
6033 
6034 
// Fcmp (FP compare) for both precisions: equal, less, greater, NaN operand
// (unordered => CVFlag), and comparison against immediates. Also exercises
// the macro's use of a floating-point scratch register for non-zero
// immediate operands.
TEST(fcmp) {
  INIT_V8();
  SETUP();

  START();

  // Some of these tests require a floating-point scratch register assigned to
  // the macro assembler, but most do not.
  {
    // We're going to mess around with the available scratch registers in this
    // test. A UseScratchRegisterScope will make sure that they are restored to
    // the default values once we're finished.
    UseScratchRegisterScope temps(&masm);
    masm.FPTmpList()->set_list(0);

    __ Fmov(s8, 0.0);
    __ Fmov(s9, 0.5);
    __ Mov(w18, 0x7f800001);  // Single precision NaN.
    __ Fmov(s18, w18);

    __ Fcmp(s8, s8);          // 0.0 == 0.0
    __ Mrs(x0, NZCV);
    __ Fcmp(s8, s9);          // 0.0 < 0.5
    __ Mrs(x1, NZCV);
    __ Fcmp(s9, s8);          // 0.5 > 0.0
    __ Mrs(x2, NZCV);
    __ Fcmp(s8, s18);         // NaN operand: unordered.
    __ Mrs(x3, NZCV);
    __ Fcmp(s18, s18);        // NaN vs itself: still unordered.
    __ Mrs(x4, NZCV);
    __ Fcmp(s8, 0.0);         // Zero immediate needs no scratch register.
    __ Mrs(x5, NZCV);
    // A non-zero immediate must be materialized, so temporarily provide d0
    // as an FP scratch register.
    masm.FPTmpList()->set_list(d0.Bit());
    __ Fcmp(s8, 255.0);
    masm.FPTmpList()->set_list(0);
    __ Mrs(x6, NZCV);

    __ Fmov(d19, 0.0);
    __ Fmov(d20, 0.5);
    __ Mov(x21, 0x7ff0000000000001UL);   // Double precision NaN.
    __ Fmov(d21, x21);

    __ Fcmp(d19, d19);        // 0.0 == 0.0
    __ Mrs(x10, NZCV);
    __ Fcmp(d19, d20);        // 0.0 < 0.5
    __ Mrs(x11, NZCV);
    __ Fcmp(d20, d19);        // 0.5 > 0.0
    __ Mrs(x12, NZCV);
    __ Fcmp(d19, d21);        // NaN operand: unordered.
    __ Mrs(x13, NZCV);
    __ Fcmp(d21, d21);        // NaN vs itself: still unordered.
    __ Mrs(x14, NZCV);
    __ Fcmp(d19, 0.0);        // Zero immediate needs no scratch register.
    __ Mrs(x15, NZCV);
    // Non-zero immediate again requires an FP scratch register.
    masm.FPTmpList()->set_list(d0.Bit());
    __ Fcmp(d19, 12.3456);
    masm.FPTmpList()->set_list(0);
    __ Mrs(x16, NZCV);
  }

  END();

  RUN();

  CHECK_EQUAL_32(ZCFlag, w0);
  CHECK_EQUAL_32(NFlag, w1);
  CHECK_EQUAL_32(CFlag, w2);
  CHECK_EQUAL_32(CVFlag, w3);   // Unordered comparison result.
  CHECK_EQUAL_32(CVFlag, w4);
  CHECK_EQUAL_32(ZCFlag, w5);
  CHECK_EQUAL_32(NFlag, w6);
  CHECK_EQUAL_32(ZCFlag, w10);
  CHECK_EQUAL_32(NFlag, w11);
  CHECK_EQUAL_32(CFlag, w12);
  CHECK_EQUAL_32(CVFlag, w13);  // Unordered comparison result.
  CHECK_EQUAL_32(CVFlag, w14);
  CHECK_EQUAL_32(ZCFlag, w15);
  CHECK_EQUAL_32(NFlag, w16);

  TEARDOWN();
}
6116 
6117 
// Fcsel (conditional FP select): picks the first operand when the condition
// holds, the second otherwise. After Cmp(x16, 0) with x16 == 0, eq holds and
// ne fails.
TEST(fcsel) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(x16, 0);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 2.0);
  __ Fmov(d18, 3.0);
  __ Fmov(d19, 4.0);

  __ Cmp(x16, 0);
  __ Fcsel(s0, s16, s17, eq);   // eq holds: selects s16 (1.0).
  __ Fcsel(s1, s16, s17, ne);   // ne fails: selects s17 (2.0).
  __ Fcsel(d2, d18, d19, eq);   // eq holds: selects d18 (3.0).
  __ Fcsel(d3, d18, d19, ne);   // ne fails: selects d19 (4.0).
  // Raw assembler form for the al/nv conditions — NOTE(review): presumably
  // the Fcsel macro disallows these conditions; confirm against the macro
  // assembler. Both always select the first operand.
  __ fcsel(s4, s16, s17, al);
  __ fcsel(d5, d18, d19, nv);
  END();

  RUN();

  CHECK_EQUAL_FP32(1.0, s0);
  CHECK_EQUAL_FP32(2.0, s1);
  CHECK_EQUAL_FP64(3.0, d2);
  CHECK_EQUAL_FP64(4.0, d3);
  CHECK_EQUAL_FP32(1.0, s4);
  CHECK_EQUAL_FP64(3.0, d5);

  TEARDOWN();
}
6149 
6150 
// Fneg: sign-bit flip for finite values, zeroes and infinities, in both
// precisions. Each value is negated twice to check the round trip.
TEST(fneg) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 0.0);
  __ Fmov(s18, kFP32PositiveInfinity);
  __ Fmov(d19, 1.0);
  __ Fmov(d20, 0.0);
  __ Fmov(d21, kFP64PositiveInfinity);

  __ Fneg(s0, s16);   // -(1.0)
  __ Fneg(s1, s0);    // -(-1.0)
  __ Fneg(s2, s17);   // -(0.0)
  __ Fneg(s3, s2);    // -(-0.0)
  __ Fneg(s4, s18);   // -(+inf)
  __ Fneg(s5, s4);    // -(-inf)
  __ Fneg(d6, d19);
  __ Fneg(d7, d6);
  __ Fneg(d8, d20);
  __ Fneg(d9, d8);
  __ Fneg(d10, d21);
  __ Fneg(d11, d10);
  END();

  RUN();

  CHECK_EQUAL_FP32(-1.0, s0);
  CHECK_EQUAL_FP32(1.0, s1);
  CHECK_EQUAL_FP32(-0.0, s2);
  CHECK_EQUAL_FP32(0.0, s3);
  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s4);
  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
  CHECK_EQUAL_FP64(-1.0, d6);
  CHECK_EQUAL_FP64(1.0, d7);
  CHECK_EQUAL_FP64(-0.0, d8);
  CHECK_EQUAL_FP64(0.0, d9);
  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);

  TEARDOWN();
}
6194 
6195 
// Fabs: sign-bit clear for negative finite values, -0.0 and -inf, in both
// precisions, plus idempotence on an already-positive result.
TEST(fabs) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, -1.0);
  __ Fmov(s17, -0.0);
  __ Fmov(s18, kFP32NegativeInfinity);
  __ Fmov(d19, -1.0);
  __ Fmov(d20, -0.0);
  __ Fmov(d21, kFP64NegativeInfinity);

  __ Fabs(s0, s16);   // |-1.0|
  __ Fabs(s1, s0);    // |1.0| — already positive.
  __ Fabs(s2, s17);   // |-0.0|
  __ Fabs(s3, s18);   // |-inf|
  __ Fabs(d4, d19);
  __ Fabs(d5, d4);
  __ Fabs(d6, d20);
  __ Fabs(d7, d21);
  END();

  RUN();

  CHECK_EQUAL_FP32(1.0, s0);
  CHECK_EQUAL_FP32(1.0, s1);
  CHECK_EQUAL_FP32(0.0, s2);
  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s3);
  CHECK_EQUAL_FP64(1.0, d4);
  CHECK_EQUAL_FP64(1.0, d5);
  CHECK_EQUAL_FP64(0.0, d6);
  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);

  TEARDOWN();
}
6231 
6232 
// Fsqrt: exact squares, signed zero (sqrt(-0) == -0), +inf, and the invalid
// case sqrt(negative) which must produce the default NaN, in both precisions.
TEST(fsqrt) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 0.0);
  __ Fmov(s17, 1.0);
  __ Fmov(s18, 0.25);
  __ Fmov(s19, 65536.0);
  __ Fmov(s20, -0.0);
  __ Fmov(s21, kFP32PositiveInfinity);
  __ Fmov(s22, -1.0);
  __ Fmov(d23, 0.0);
  __ Fmov(d24, 1.0);
  __ Fmov(d25, 0.25);
  __ Fmov(d26, 4294967296.0);
  __ Fmov(d27, -0.0);
  __ Fmov(d28, kFP64PositiveInfinity);
  __ Fmov(d29, -1.0);

  __ Fsqrt(s0, s16);   // sqrt(0.0)
  __ Fsqrt(s1, s17);   // sqrt(1.0)
  __ Fsqrt(s2, s18);   // sqrt(0.25)
  __ Fsqrt(s3, s19);   // sqrt(65536.0)
  __ Fsqrt(s4, s20);   // sqrt(-0.0) == -0.0
  __ Fsqrt(s5, s21);   // sqrt(+inf)
  __ Fsqrt(s6, s22);   // sqrt(-1.0): invalid.
  __ Fsqrt(d7, d23);
  __ Fsqrt(d8, d24);
  __ Fsqrt(d9, d25);
  __ Fsqrt(d10, d26);  // sqrt(2^32) == 2^16
  __ Fsqrt(d11, d27);
  __ Fsqrt(d12, d28);
  __ Fsqrt(d13, d29);
  END();

  RUN();

  CHECK_EQUAL_FP32(0.0, s0);
  CHECK_EQUAL_FP32(1.0, s1);
  CHECK_EQUAL_FP32(0.5, s2);
  CHECK_EQUAL_FP32(256.0, s3);
  CHECK_EQUAL_FP32(-0.0, s4);
  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
  CHECK_EQUAL_FP64(0.0, d7);
  CHECK_EQUAL_FP64(1.0, d8);
  CHECK_EQUAL_FP64(0.5, d9);
  CHECK_EQUAL_FP64(65536.0, d10);
  CHECK_EQUAL_FP64(-0.0, d11);
  // Fixed: compare the double result against the FP64 infinity constant
  // (was kFP32PositiveInfinity — value-equivalent after conversion, but
  // inconsistent with the FP64 constants used for all other d registers).
  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d12);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);

  TEARDOWN();
}
6288 
6289 
// Frinta: round to nearest integral, ties away from zero (2.5 -> 3.0,
// -2.5 -> -3.0). Covers infinities and signed zeroes; the sign of -0.2's
// result (-0.0) checks that rounding preserves the sign of small negatives.
TEST(frinta) {
  INIT_V8();
  SETUP();

  START();
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, -0.2);

  __ Frinta(s0, s16);
  __ Frinta(s1, s17);
  __ Frinta(s2, s18);
  __ Frinta(s3, s19);
  __ Frinta(s4, s20);
  __ Frinta(s5, s21);
  __ Frinta(s6, s22);
  __ Frinta(s7, s23);
  __ Frinta(s8, s24);
  __ Frinta(s9, s25);
  __ Frinta(s10, s26);
  __ Frinta(s11, s27);

  // NOTE(review): d23/d24 are loaded with the kFP32 infinity constants;
  // presumably Fmov converts them to the identical double infinities (the
  // checks below expect kFP64 infinities) — confirm, or switch to the kFP64
  // constants for consistency.
  __ Fmov(d16, 1.0);
  __ Fmov(d17, 1.1);
  __ Fmov(d18, 1.5);
  __ Fmov(d19, 1.9);
  __ Fmov(d20, 2.5);
  __ Fmov(d21, -1.5);
  __ Fmov(d22, -2.5);
  __ Fmov(d23, kFP32PositiveInfinity);
  __ Fmov(d24, kFP32NegativeInfinity);
  __ Fmov(d25, 0.0);
  __ Fmov(d26, -0.0);
  __ Fmov(d27, -0.2);

  // d16-d19 are reused as destinations after their values were consumed.
  __ Frinta(d12, d16);
  __ Frinta(d13, d17);
  __ Frinta(d14, d18);
  __ Frinta(d15, d19);
  __ Frinta(d16, d20);
  __ Frinta(d17, d21);
  __ Frinta(d18, d22);
  __ Frinta(d19, d23);
  __ Frinta(d20, d24);
  __ Frinta(d21, d25);
  __ Frinta(d22, d26);
  __ Frinta(d23, d27);
  END();

  RUN();

  CHECK_EQUAL_FP32(1.0, s0);
  CHECK_EQUAL_FP32(1.0, s1);
  CHECK_EQUAL_FP32(2.0, s2);   // 1.5 ties away from zero.
  CHECK_EQUAL_FP32(2.0, s3);
  CHECK_EQUAL_FP32(3.0, s4);   // 2.5 ties away from zero.
  CHECK_EQUAL_FP32(-2.0, s5);
  CHECK_EQUAL_FP32(-3.0, s6);
  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
  CHECK_EQUAL_FP32(0.0, s9);
  CHECK_EQUAL_FP32(-0.0, s10);
  CHECK_EQUAL_FP32(-0.0, s11);  // -0.2 rounds to -0.0, keeping the sign.
  CHECK_EQUAL_FP64(1.0, d12);
  CHECK_EQUAL_FP64(1.0, d13);
  CHECK_EQUAL_FP64(2.0, d14);
  CHECK_EQUAL_FP64(2.0, d15);
  CHECK_EQUAL_FP64(3.0, d16);
  CHECK_EQUAL_FP64(-2.0, d17);
  CHECK_EQUAL_FP64(-3.0, d18);
  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
  CHECK_EQUAL_FP64(0.0, d21);
  CHECK_EQUAL_FP64(-0.0, d22);
  CHECK_EQUAL_FP64(-0.0, d23);

  TEARDOWN();
}
6377 
6378 
TEST(frintm)6379 TEST(frintm) {
6380   INIT_V8();
6381   SETUP();
6382 
6383   START();
6384   __ Fmov(s16, 1.0);
6385   __ Fmov(s17, 1.1);
6386   __ Fmov(s18, 1.5);
6387   __ Fmov(s19, 1.9);
6388   __ Fmov(s20, 2.5);
6389   __ Fmov(s21, -1.5);
6390   __ Fmov(s22, -2.5);
6391   __ Fmov(s23, kFP32PositiveInfinity);
6392   __ Fmov(s24, kFP32NegativeInfinity);
6393   __ Fmov(s25, 0.0);
6394   __ Fmov(s26, -0.0);
6395   __ Fmov(s27, -0.2);
6396 
6397   __ Frintm(s0, s16);
6398   __ Frintm(s1, s17);
6399   __ Frintm(s2, s18);
6400   __ Frintm(s3, s19);
6401   __ Frintm(s4, s20);
6402   __ Frintm(s5, s21);
6403   __ Frintm(s6, s22);
6404   __ Frintm(s7, s23);
6405   __ Frintm(s8, s24);
6406   __ Frintm(s9, s25);
6407   __ Frintm(s10, s26);
6408   __ Frintm(s11, s27);
6409 
6410   __ Fmov(d16, 1.0);
6411   __ Fmov(d17, 1.1);
6412   __ Fmov(d18, 1.5);
6413   __ Fmov(d19, 1.9);
6414   __ Fmov(d20, 2.5);
6415   __ Fmov(d21, -1.5);
6416   __ Fmov(d22, -2.5);
6417   __ Fmov(d23, kFP32PositiveInfinity);
6418   __ Fmov(d24, kFP32NegativeInfinity);
6419   __ Fmov(d25, 0.0);
6420   __ Fmov(d26, -0.0);
6421   __ Fmov(d27, -0.2);
6422 
6423   __ Frintm(d12, d16);
6424   __ Frintm(d13, d17);
6425   __ Frintm(d14, d18);
6426   __ Frintm(d15, d19);
6427   __ Frintm(d16, d20);
6428   __ Frintm(d17, d21);
6429   __ Frintm(d18, d22);
6430   __ Frintm(d19, d23);
6431   __ Frintm(d20, d24);
6432   __ Frintm(d21, d25);
6433   __ Frintm(d22, d26);
6434   __ Frintm(d23, d27);
6435   END();
6436 
6437   RUN();
6438 
6439   CHECK_EQUAL_FP32(1.0, s0);
6440   CHECK_EQUAL_FP32(1.0, s1);
6441   CHECK_EQUAL_FP32(1.0, s2);
6442   CHECK_EQUAL_FP32(1.0, s3);
6443   CHECK_EQUAL_FP32(2.0, s4);
6444   CHECK_EQUAL_FP32(-2.0, s5);
6445   CHECK_EQUAL_FP32(-3.0, s6);
6446   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6447   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6448   CHECK_EQUAL_FP32(0.0, s9);
6449   CHECK_EQUAL_FP32(-0.0, s10);
6450   CHECK_EQUAL_FP32(-1.0, s11);
6451   CHECK_EQUAL_FP64(1.0, d12);
6452   CHECK_EQUAL_FP64(1.0, d13);
6453   CHECK_EQUAL_FP64(1.0, d14);
6454   CHECK_EQUAL_FP64(1.0, d15);
6455   CHECK_EQUAL_FP64(2.0, d16);
6456   CHECK_EQUAL_FP64(-2.0, d17);
6457   CHECK_EQUAL_FP64(-3.0, d18);
6458   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
6459   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
6460   CHECK_EQUAL_FP64(0.0, d21);
6461   CHECK_EQUAL_FP64(-0.0, d22);
6462   CHECK_EQUAL_FP64(-1.0, d23);
6463 
6464   TEARDOWN();
6465 }
6466 
6467 
TEST(frintn)6468 TEST(frintn) {
6469   INIT_V8();
6470   SETUP();
6471 
6472   START();
6473   __ Fmov(s16, 1.0);
6474   __ Fmov(s17, 1.1);
6475   __ Fmov(s18, 1.5);
6476   __ Fmov(s19, 1.9);
6477   __ Fmov(s20, 2.5);
6478   __ Fmov(s21, -1.5);
6479   __ Fmov(s22, -2.5);
6480   __ Fmov(s23, kFP32PositiveInfinity);
6481   __ Fmov(s24, kFP32NegativeInfinity);
6482   __ Fmov(s25, 0.0);
6483   __ Fmov(s26, -0.0);
6484   __ Fmov(s27, -0.2);
6485 
6486   __ Frintn(s0, s16);
6487   __ Frintn(s1, s17);
6488   __ Frintn(s2, s18);
6489   __ Frintn(s3, s19);
6490   __ Frintn(s4, s20);
6491   __ Frintn(s5, s21);
6492   __ Frintn(s6, s22);
6493   __ Frintn(s7, s23);
6494   __ Frintn(s8, s24);
6495   __ Frintn(s9, s25);
6496   __ Frintn(s10, s26);
6497   __ Frintn(s11, s27);
6498 
6499   __ Fmov(d16, 1.0);
6500   __ Fmov(d17, 1.1);
6501   __ Fmov(d18, 1.5);
6502   __ Fmov(d19, 1.9);
6503   __ Fmov(d20, 2.5);
6504   __ Fmov(d21, -1.5);
6505   __ Fmov(d22, -2.5);
6506   __ Fmov(d23, kFP32PositiveInfinity);
6507   __ Fmov(d24, kFP32NegativeInfinity);
6508   __ Fmov(d25, 0.0);
6509   __ Fmov(d26, -0.0);
6510   __ Fmov(d27, -0.2);
6511 
6512   __ Frintn(d12, d16);
6513   __ Frintn(d13, d17);
6514   __ Frintn(d14, d18);
6515   __ Frintn(d15, d19);
6516   __ Frintn(d16, d20);
6517   __ Frintn(d17, d21);
6518   __ Frintn(d18, d22);
6519   __ Frintn(d19, d23);
6520   __ Frintn(d20, d24);
6521   __ Frintn(d21, d25);
6522   __ Frintn(d22, d26);
6523   __ Frintn(d23, d27);
6524   END();
6525 
6526   RUN();
6527 
6528   CHECK_EQUAL_FP32(1.0, s0);
6529   CHECK_EQUAL_FP32(1.0, s1);
6530   CHECK_EQUAL_FP32(2.0, s2);
6531   CHECK_EQUAL_FP32(2.0, s3);
6532   CHECK_EQUAL_FP32(2.0, s4);
6533   CHECK_EQUAL_FP32(-2.0, s5);
6534   CHECK_EQUAL_FP32(-2.0, s6);
6535   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6536   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6537   CHECK_EQUAL_FP32(0.0, s9);
6538   CHECK_EQUAL_FP32(-0.0, s10);
6539   CHECK_EQUAL_FP32(-0.0, s11);
6540   CHECK_EQUAL_FP64(1.0, d12);
6541   CHECK_EQUAL_FP64(1.0, d13);
6542   CHECK_EQUAL_FP64(2.0, d14);
6543   CHECK_EQUAL_FP64(2.0, d15);
6544   CHECK_EQUAL_FP64(2.0, d16);
6545   CHECK_EQUAL_FP64(-2.0, d17);
6546   CHECK_EQUAL_FP64(-2.0, d18);
6547   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
6548   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
6549   CHECK_EQUAL_FP64(0.0, d21);
6550   CHECK_EQUAL_FP64(-0.0, d22);
6551   CHECK_EQUAL_FP64(-0.0, d23);
6552 
6553   TEARDOWN();
6554 }
6555 
6556 
TEST(frintz)6557 TEST(frintz) {
6558   INIT_V8();
6559   SETUP();
6560 
6561   START();
6562   __ Fmov(s16, 1.0);
6563   __ Fmov(s17, 1.1);
6564   __ Fmov(s18, 1.5);
6565   __ Fmov(s19, 1.9);
6566   __ Fmov(s20, 2.5);
6567   __ Fmov(s21, -1.5);
6568   __ Fmov(s22, -2.5);
6569   __ Fmov(s23, kFP32PositiveInfinity);
6570   __ Fmov(s24, kFP32NegativeInfinity);
6571   __ Fmov(s25, 0.0);
6572   __ Fmov(s26, -0.0);
6573 
6574   __ Frintz(s0, s16);
6575   __ Frintz(s1, s17);
6576   __ Frintz(s2, s18);
6577   __ Frintz(s3, s19);
6578   __ Frintz(s4, s20);
6579   __ Frintz(s5, s21);
6580   __ Frintz(s6, s22);
6581   __ Frintz(s7, s23);
6582   __ Frintz(s8, s24);
6583   __ Frintz(s9, s25);
6584   __ Frintz(s10, s26);
6585 
6586   __ Fmov(d16, 1.0);
6587   __ Fmov(d17, 1.1);
6588   __ Fmov(d18, 1.5);
6589   __ Fmov(d19, 1.9);
6590   __ Fmov(d20, 2.5);
6591   __ Fmov(d21, -1.5);
6592   __ Fmov(d22, -2.5);
6593   __ Fmov(d23, kFP32PositiveInfinity);
6594   __ Fmov(d24, kFP32NegativeInfinity);
6595   __ Fmov(d25, 0.0);
6596   __ Fmov(d26, -0.0);
6597 
6598   __ Frintz(d11, d16);
6599   __ Frintz(d12, d17);
6600   __ Frintz(d13, d18);
6601   __ Frintz(d14, d19);
6602   __ Frintz(d15, d20);
6603   __ Frintz(d16, d21);
6604   __ Frintz(d17, d22);
6605   __ Frintz(d18, d23);
6606   __ Frintz(d19, d24);
6607   __ Frintz(d20, d25);
6608   __ Frintz(d21, d26);
6609   END();
6610 
6611   RUN();
6612 
6613   CHECK_EQUAL_FP32(1.0, s0);
6614   CHECK_EQUAL_FP32(1.0, s1);
6615   CHECK_EQUAL_FP32(1.0, s2);
6616   CHECK_EQUAL_FP32(1.0, s3);
6617   CHECK_EQUAL_FP32(2.0, s4);
6618   CHECK_EQUAL_FP32(-1.0, s5);
6619   CHECK_EQUAL_FP32(-2.0, s6);
6620   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6621   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6622   CHECK_EQUAL_FP32(0.0, s9);
6623   CHECK_EQUAL_FP32(-0.0, s10);
6624   CHECK_EQUAL_FP64(1.0, d11);
6625   CHECK_EQUAL_FP64(1.0, d12);
6626   CHECK_EQUAL_FP64(1.0, d13);
6627   CHECK_EQUAL_FP64(1.0, d14);
6628   CHECK_EQUAL_FP64(2.0, d15);
6629   CHECK_EQUAL_FP64(-1.0, d16);
6630   CHECK_EQUAL_FP64(-2.0, d17);
6631   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d18);
6632   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d19);
6633   CHECK_EQUAL_FP64(0.0, d20);
6634   CHECK_EQUAL_FP64(-0.0, d21);
6635 
6636   TEARDOWN();
6637 }
6638 
6639 
TEST(fcvt_ds) {
  INIT_V8();
  SETUP();

  START();
  // Single-precision inputs: simple values, ties, infinities, signed zeroes,
  // the extremes of the float format, and both quiet and signalling NaNs.
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, 1.9);
  __ Fmov(s20, 2.5);
  __ Fmov(s21, -1.5);
  __ Fmov(s22, -2.5);
  __ Fmov(s23, kFP32PositiveInfinity);
  __ Fmov(s24, kFP32NegativeInfinity);
  __ Fmov(s25, 0.0);
  __ Fmov(s26, -0.0);
  __ Fmov(s27, FLT_MAX);
  __ Fmov(s28, FLT_MIN);
  __ Fmov(s29, rawbits_to_float(0x7fc12345));   // Quiet NaN.
  __ Fmov(s30, rawbits_to_float(0x7f812345));   // Signalling NaN.

  // Widen each input to double precision. Every finite float is exactly
  // representable as a double, so the checks below can compare against the
  // float literals directly (no rounding is involved).
  __ Fcvt(d0, s16);
  __ Fcvt(d1, s17);
  __ Fcvt(d2, s18);
  __ Fcvt(d3, s19);
  __ Fcvt(d4, s20);
  __ Fcvt(d5, s21);
  __ Fcvt(d6, s22);
  __ Fcvt(d7, s23);
  __ Fcvt(d8, s24);
  __ Fcvt(d9, s25);
  __ Fcvt(d10, s26);
  __ Fcvt(d11, s27);
  __ Fcvt(d12, s28);
  __ Fcvt(d13, s29);
  __ Fcvt(d14, s30);
  END();

  RUN();

  CHECK_EQUAL_FP64(1.0f, d0);
  CHECK_EQUAL_FP64(1.1f, d1);
  CHECK_EQUAL_FP64(1.5f, d2);
  CHECK_EQUAL_FP64(1.9f, d3);
  CHECK_EQUAL_FP64(2.5f, d4);
  CHECK_EQUAL_FP64(-1.5f, d5);
  CHECK_EQUAL_FP64(-2.5f, d6);
  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);
  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d8);
  CHECK_EQUAL_FP64(0.0f, d9);
  CHECK_EQUAL_FP64(-0.0f, d10);
  CHECK_EQUAL_FP64(FLT_MAX, d11);
  CHECK_EQUAL_FP64(FLT_MIN, d12);

  // Check that the NaN payload is preserved according to ARM64 conversion
  // rules:
  //  - The sign bit is preserved.
  //  - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
  //  - The remaining mantissa bits are copied until they run out.
  //  - The low-order bits that haven't already been assigned are set to 0.
  CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
  CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);

  TEARDOWN();
}
6705 
6706 
TEST(fcvt_sd) {
  INIT_V8();
  // There are a huge number of corner-cases to check, so this test iterates
  // through a list. The list is then negated and checked again (since the sign
  // is irrelevant in ties-to-even rounding), so the list shouldn't include any
  // negative values.
  //
  // Note that this test only checks ties-to-even rounding, because that is all
  // that the simulator supports.
  // Each entry pairs a double input with the float expected from narrowing it.
  struct {double in; float expected;} test[] = {
    // Check some simple conversions.
    {0.0, 0.0f},
    {1.0, 1.0f},
    {1.5, 1.5f},
    {2.0, 2.0f},
    {FLT_MAX, FLT_MAX},
    //  - The smallest normalized float.
    {pow(2.0, -126), powf(2, -126)},
    //  - Normal floats that need (ties-to-even) rounding.
    //    For normalized numbers:
    //         bit 29 (0x0000000020000000) is the lowest-order bit which will
    //                                     fit in the float's mantissa.
    {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
    {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
    {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
    {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
    {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
    //  - A mantissa that overflows into the exponent during rounding.
    {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
    //  - The largest double that rounds to a normal float.
    {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},

    // Doubles that are too big for a float.
    {kFP64PositiveInfinity, kFP32PositiveInfinity},
    {DBL_MAX, kFP32PositiveInfinity},
    //  - The smallest exponent that's too big for a float.
    {pow(2.0, 128), kFP32PositiveInfinity},
    //  - This exponent is in range, but the value rounds to infinity.
    {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},

    // Doubles that are too small for a float.
    //  - The smallest (subnormal) double.
    {DBL_MIN, 0.0},
    //  - The largest double which is too small for a subnormal float.
    {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},

    // Normal doubles that become subnormal floats.
    //  - The largest subnormal float.
    {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
    //  - The smallest subnormal float.
    {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
    //  - Subnormal floats that need (ties-to-even) rounding.
    //    For these subnormals:
    //         bit 34 (0x0000000400000000) is the lowest-order bit which will
    //                                     fit in the float's mantissa.
    {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
    {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
    {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
    {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
    {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
    //  - The smallest double which rounds up to become a subnormal float.
    {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},

    // Check NaN payload preservation.
    {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
    //  - Signalling NaNs become quiet NaNs.
    {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
    {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
  };
  int count = sizeof(test) / sizeof(test[0]);

  // Run each case (and its negation) through Fcvt in a fresh code buffer.
  for (int i = 0; i < count; i++) {
    double in = test[i].in;
    float expected = test[i].expected;

    // We only expect positive input.
    DCHECK(std::signbit(in) == 0);
    DCHECK(std::signbit(expected) == 0);

    SETUP();
    START();

    __ Fmov(d10, in);
    __ Fcvt(s20, d10);

    // Negate the input to verify that rounding is symmetric in the sign.
    __ Fmov(d11, -in);
    __ Fcvt(s21, d11);

    END();
    RUN();
    CHECK_EQUAL_FP32(expected, s20);
    CHECK_EQUAL_FP32(-expected, s21);
    TEARDOWN();
  }
}
6820 
6821 
TEST(fcvtas) {
  INIT_V8();
  SETUP();

  START();
  // Convert FP to signed integer, rounding to nearest with ties away from
  // zero. Inputs cover ties, infinities, and the largest values that still
  // convert exactly for each source/destination width.
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

  // All four source/destination width combinations: s->w, d->w, s->x, d->x.
  __ Fcvtas(w0, s0);
  __ Fcvtas(w1, s1);
  __ Fcvtas(w2, s2);
  __ Fcvtas(w3, s3);
  __ Fcvtas(w4, s4);
  __ Fcvtas(w5, s5);
  __ Fcvtas(w6, s6);
  __ Fcvtas(w7, s7);
  __ Fcvtas(w8, d8);
  __ Fcvtas(w9, d9);
  __ Fcvtas(w10, d10);
  __ Fcvtas(w11, d11);
  __ Fcvtas(w12, d12);
  __ Fcvtas(w13, d13);
  __ Fcvtas(w14, d14);
  __ Fcvtas(w15, d15);
  __ Fcvtas(x17, s17);
  __ Fcvtas(x18, s18);
  __ Fcvtas(x19, s19);
  __ Fcvtas(x20, s20);
  __ Fcvtas(x21, s21);
  __ Fcvtas(x22, s22);
  __ Fcvtas(x23, s23);
  __ Fcvtas(x24, d24);
  __ Fcvtas(x25, d25);
  __ Fcvtas(x26, d26);
  __ Fcvtas(x27, d27);
  __ Fcvtas(x28, d28);
  __ Fcvtas(x29, d29);
  __ Fcvtas(x30, d30);
  END();

  RUN();

  // Ties round away from zero (2.5 -> 3, -2.5 -> -3); infinities saturate to
  // the destination's INT_MAX/INT_MIN.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(3, x2);
  CHECK_EQUAL_64(0xfffffffd, x3);
  CHECK_EQUAL_64(0x7fffffff, x4);
  CHECK_EQUAL_64(0x80000000, x5);
  CHECK_EQUAL_64(0x7fffff80, x6);
  CHECK_EQUAL_64(0x80000080, x7);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(3, x10);
  CHECK_EQUAL_64(0xfffffffd, x11);
  CHECK_EQUAL_64(0x7fffffff, x12);
  CHECK_EQUAL_64(0x80000000, x13);
  CHECK_EQUAL_64(0x7ffffffe, x14);
  CHECK_EQUAL_64(0x80000001, x15);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(3, x18);
  CHECK_EQUAL_64(0xfffffffffffffffdUL, x19);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
  CHECK_EQUAL_64(0x8000000000000000UL, x21);
  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
  CHECK_EQUAL_64(0x8000008000000000UL, x23);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(3, x25);
  CHECK_EQUAL_64(0xfffffffffffffffdUL, x26);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
  CHECK_EQUAL_64(0x8000000000000000UL, x28);
  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
  CHECK_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
6925 
6926 
TEST(fcvtau) {
  INIT_V8();
  SETUP();

  START();
  // Convert FP to unsigned integer, rounding to nearest with ties away from
  // zero. Negative inputs and -infinity clamp to 0; values too large for the
  // destination saturate to UINT_MAX.
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 2.5);
  __ Fmov(s3, -2.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 2.5);
  __ Fmov(d11, -2.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, 0xfffffffe);
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 2.5);
  __ Fmov(s19, -2.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 2.5);
  __ Fmov(d26, -2.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
  __ Fmov(s30, 0x100000000UL);         // Just above UINT32_MAX.

  __ Fcvtau(w0, s0);
  __ Fcvtau(w1, s1);
  __ Fcvtau(w2, s2);
  __ Fcvtau(w3, s3);
  __ Fcvtau(w4, s4);
  __ Fcvtau(w5, s5);
  __ Fcvtau(w6, s6);
  __ Fcvtau(w8, d8);
  __ Fcvtau(w9, d9);
  __ Fcvtau(w10, d10);
  __ Fcvtau(w11, d11);
  __ Fcvtau(w12, d12);
  __ Fcvtau(w13, d13);
  __ Fcvtau(w14, d14);
  // NOTE(review): d15 is never initialized in this test and x15 is never
  // checked below — this conversion's result is ignored. Presumably a
  // leftover; confirm against upstream before relying on it.
  __ Fcvtau(w15, d15);
  __ Fcvtau(x16, s16);
  __ Fcvtau(x17, s17);
  __ Fcvtau(x18, s18);
  __ Fcvtau(x19, s19);
  __ Fcvtau(x20, s20);
  __ Fcvtau(x21, s21);
  __ Fcvtau(x22, s22);
  __ Fcvtau(x24, d24);
  __ Fcvtau(x25, d25);
  __ Fcvtau(x26, d26);
  __ Fcvtau(x27, d27);
  __ Fcvtau(x28, d28);
  __ Fcvtau(x29, d29);
  __ Fcvtau(w30, s30);
  END();

  RUN();

  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(3, x2);
  CHECK_EQUAL_64(0, x3);
  CHECK_EQUAL_64(0xffffffff, x4);
  CHECK_EQUAL_64(0, x5);
  CHECK_EQUAL_64(0xffffff00, x6);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(3, x10);
  CHECK_EQUAL_64(0, x11);
  CHECK_EQUAL_64(0xffffffff, x12);
  CHECK_EQUAL_64(0, x13);
  CHECK_EQUAL_64(0xfffffffe, x14);
  CHECK_EQUAL_64(1, x16);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(3, x18);
  CHECK_EQUAL_64(0, x19);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
  CHECK_EQUAL_64(0, x21);
  CHECK_EQUAL_64(0xffffff0000000000UL, x22);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(3, x25);
  CHECK_EQUAL_64(0, x26);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
  CHECK_EQUAL_64(0, x28);
  CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
  CHECK_EQUAL_64(0xffffffff, x30);

  TEARDOWN();
}
7025 
7026 
TEST(fcvtms) {
  INIT_V8();
  SETUP();

  START();
  // Convert FP to signed integer, rounding towards minus infinity (floor).
  // Inputs cover fractions, ties, infinities, and near-boundary values.
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

  // All four source/destination width combinations: s->w, d->w, s->x, d->x.
  __ Fcvtms(w0, s0);
  __ Fcvtms(w1, s1);
  __ Fcvtms(w2, s2);
  __ Fcvtms(w3, s3);
  __ Fcvtms(w4, s4);
  __ Fcvtms(w5, s5);
  __ Fcvtms(w6, s6);
  __ Fcvtms(w7, s7);
  __ Fcvtms(w8, d8);
  __ Fcvtms(w9, d9);
  __ Fcvtms(w10, d10);
  __ Fcvtms(w11, d11);
  __ Fcvtms(w12, d12);
  __ Fcvtms(w13, d13);
  __ Fcvtms(w14, d14);
  __ Fcvtms(w15, d15);
  __ Fcvtms(x17, s17);
  __ Fcvtms(x18, s18);
  __ Fcvtms(x19, s19);
  __ Fcvtms(x20, s20);
  __ Fcvtms(x21, s21);
  __ Fcvtms(x22, s22);
  __ Fcvtms(x23, s23);
  __ Fcvtms(x24, d24);
  __ Fcvtms(x25, d25);
  __ Fcvtms(x26, d26);
  __ Fcvtms(x27, d27);
  __ Fcvtms(x28, d28);
  __ Fcvtms(x29, d29);
  __ Fcvtms(x30, d30);
  END();

  RUN();

  // Floor: 1.5 -> 1 but -1.5 -> -2; infinities saturate to INT_MAX/INT_MIN.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(1, x2);
  CHECK_EQUAL_64(0xfffffffe, x3);
  CHECK_EQUAL_64(0x7fffffff, x4);
  CHECK_EQUAL_64(0x80000000, x5);
  CHECK_EQUAL_64(0x7fffff80, x6);
  CHECK_EQUAL_64(0x80000080, x7);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(1, x10);
  CHECK_EQUAL_64(0xfffffffe, x11);
  CHECK_EQUAL_64(0x7fffffff, x12);
  CHECK_EQUAL_64(0x80000000, x13);
  CHECK_EQUAL_64(0x7ffffffe, x14);
  CHECK_EQUAL_64(0x80000001, x15);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(1, x18);
  CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
  CHECK_EQUAL_64(0x8000000000000000UL, x21);
  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
  CHECK_EQUAL_64(0x8000008000000000UL, x23);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(1, x25);
  CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
  CHECK_EQUAL_64(0x8000000000000000UL, x28);
  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
  CHECK_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
7130 
7131 
TEST(fcvtmu) {
  INIT_V8();
  SETUP();

  START();
  // Convert FP to unsigned integer, rounding towards minus infinity (floor).
  // All negative inputs (and -infinity) clamp to 0.
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  // NOTE(review): d15 is loaded here but never converted or checked below
  // (unlike fcvtms, there is no Fcvtmu(w15, d15)); presumably intentional
  // since the negative value would just clamp to 0 — confirm upstream.
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

  __ Fcvtmu(w0, s0);
  __ Fcvtmu(w1, s1);
  __ Fcvtmu(w2, s2);
  __ Fcvtmu(w3, s3);
  __ Fcvtmu(w4, s4);
  __ Fcvtmu(w5, s5);
  __ Fcvtmu(w6, s6);
  __ Fcvtmu(w7, s7);
  __ Fcvtmu(w8, d8);
  __ Fcvtmu(w9, d9);
  __ Fcvtmu(w10, d10);
  __ Fcvtmu(w11, d11);
  __ Fcvtmu(w12, d12);
  __ Fcvtmu(w13, d13);
  __ Fcvtmu(w14, d14);
  __ Fcvtmu(x17, s17);
  __ Fcvtmu(x18, s18);
  __ Fcvtmu(x19, s19);
  __ Fcvtmu(x20, s20);
  __ Fcvtmu(x21, s21);
  __ Fcvtmu(x22, s22);
  __ Fcvtmu(x23, s23);
  __ Fcvtmu(x24, d24);
  __ Fcvtmu(x25, d25);
  __ Fcvtmu(x26, d26);
  __ Fcvtmu(x27, d27);
  __ Fcvtmu(x28, d28);
  __ Fcvtmu(x29, d29);
  __ Fcvtmu(x30, d30);
  END();

  RUN();

  // Floor then clamp: every negative input yields 0; +infinity saturates to
  // the destination's UINT_MAX.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(1, x2);
  CHECK_EQUAL_64(0, x3);
  CHECK_EQUAL_64(0xffffffff, x4);
  CHECK_EQUAL_64(0, x5);
  CHECK_EQUAL_64(0x7fffff80, x6);
  CHECK_EQUAL_64(0, x7);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(1, x10);
  CHECK_EQUAL_64(0, x11);
  CHECK_EQUAL_64(0xffffffff, x12);
  CHECK_EQUAL_64(0, x13);
  CHECK_EQUAL_64(0x7ffffffe, x14);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(1, x18);
  CHECK_EQUAL_64(0x0UL, x19);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
  CHECK_EQUAL_64(0x0UL, x21);
  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
  CHECK_EQUAL_64(0x0UL, x23);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(1, x25);
  CHECK_EQUAL_64(0x0UL, x26);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
  CHECK_EQUAL_64(0x0UL, x28);
  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
  CHECK_EQUAL_64(0x0UL, x30);

  TEARDOWN();
}
7233 
7234 
TEST(fcvtns) {
  INIT_V8();
  SETUP();

  START();
  // Convert FP to signed integer, rounding to nearest with ties to even.
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

  __ Fcvtns(w0, s0);
  __ Fcvtns(w1, s1);
  __ Fcvtns(w2, s2);
  __ Fcvtns(w3, s3);
  __ Fcvtns(w4, s4);
  __ Fcvtns(w5, s5);
  __ Fcvtns(w6, s6);
  __ Fcvtns(w7, s7);
  __ Fcvtns(w8, d8);
  __ Fcvtns(w9, d9);
  __ Fcvtns(w10, d10);
  __ Fcvtns(w11, d11);
  __ Fcvtns(w12, d12);
  __ Fcvtns(w13, d13);
  __ Fcvtns(w14, d14);
  __ Fcvtns(w15, d15);
  __ Fcvtns(x17, s17);
  __ Fcvtns(x18, s18);
  __ Fcvtns(x19, s19);
  __ Fcvtns(x20, s20);
  __ Fcvtns(x21, s21);
  __ Fcvtns(x22, s22);
  __ Fcvtns(x23, s23);
  __ Fcvtns(x24, d24);
  __ Fcvtns(x25, d25);
  __ Fcvtns(x26, d26);
  __ Fcvtns(x27, d27);
  // NOTE(review): the d28 (-infinity -> INT64_MIN) case below is deliberately
  // disabled, together with its check — presumably a known issue; confirm
  // against upstream before re-enabling.
//  __ Fcvtns(x28, d28);
  __ Fcvtns(x29, d29);
  __ Fcvtns(x30, d30);
  END();

  RUN();

  // Ties to even: 1.5 -> 2, -1.5 -> -2; infinities saturate to
  // INT_MAX/INT_MIN of the destination width.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(2, x2);
  CHECK_EQUAL_64(0xfffffffe, x3);
  CHECK_EQUAL_64(0x7fffffff, x4);
  CHECK_EQUAL_64(0x80000000, x5);
  CHECK_EQUAL_64(0x7fffff80, x6);
  CHECK_EQUAL_64(0x80000080, x7);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(2, x10);
  CHECK_EQUAL_64(0xfffffffe, x11);
  CHECK_EQUAL_64(0x7fffffff, x12);
  CHECK_EQUAL_64(0x80000000, x13);
  CHECK_EQUAL_64(0x7ffffffe, x14);
  CHECK_EQUAL_64(0x80000001, x15);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(2, x18);
  CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
  CHECK_EQUAL_64(0x8000000000000000UL, x21);
  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
  CHECK_EQUAL_64(0x8000008000000000UL, x23);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(2, x25);
  CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
//  CHECK_EQUAL_64(0x8000000000000000UL, x28);
  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
  CHECK_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
7338 
7339 
// Test FCVTNU: floating-point convert to unsigned integer, rounding to
// nearest with ties-to-even. Covers float (S) and double (D) sources into
// both 32-bit (W) and 64-bit (X) destinations, including saturation of
// out-of-range, negative and infinite inputs.
TEST(fcvtnu) {
  INIT_V8();
  SETUP();

  START();
  // Inputs for float -> 32-bit conversions (results in w0-w6).
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
  // Inputs for double -> 32-bit conversions (results in w8-w14).
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, 0xfffffffe);
  // Inputs for float -> 64-bit conversions (results in x16-x22).
  __ Fmov(s16, 1.0);
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0xffffff0000000000UL);   // Largest float < UINT64_MAX.
  // Inputs for double -> 64-bit conversions (results in x24-x29).
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0xfffffffffffff800UL);   // Largest double < UINT64_MAX.
  __ Fmov(s30, 0x100000000UL);

  __ Fcvtnu(w0, s0);
  __ Fcvtnu(w1, s1);
  __ Fcvtnu(w2, s2);
  __ Fcvtnu(w3, s3);
  __ Fcvtnu(w4, s4);
  __ Fcvtnu(w5, s5);
  __ Fcvtnu(w6, s6);
  __ Fcvtnu(w8, d8);
  __ Fcvtnu(w9, d9);
  __ Fcvtnu(w10, d10);
  __ Fcvtnu(w11, d11);
  __ Fcvtnu(w12, d12);
  __ Fcvtnu(w13, d13);
  __ Fcvtnu(w14, d14);
  // NOTE(review): d15 is never initialized in this test and x15 is not
  // checked below — confirm this conversion is intentional.
  __ Fcvtnu(w15, d15);
  __ Fcvtnu(x16, s16);
  __ Fcvtnu(x17, s17);
  __ Fcvtnu(x18, s18);
  __ Fcvtnu(x19, s19);
  __ Fcvtnu(x20, s20);
  __ Fcvtnu(x21, s21);
  __ Fcvtnu(x22, s22);
  __ Fcvtnu(x24, d24);
  __ Fcvtnu(x25, d25);
  __ Fcvtnu(x26, d26);
  __ Fcvtnu(x27, d27);
//  __ Fcvtnu(x28, d28);
  __ Fcvtnu(x29, d29);
  __ Fcvtnu(w30, s30);
  END();

  RUN();

  // Negative inputs and -infinity saturate to 0; +infinity and values too
  // large for the destination saturate to the maximum unsigned value.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(2, x2);
  CHECK_EQUAL_64(0, x3);
  CHECK_EQUAL_64(0xffffffff, x4);
  CHECK_EQUAL_64(0, x5);
  CHECK_EQUAL_64(0xffffff00, x6);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(2, x10);
  CHECK_EQUAL_64(0, x11);
  CHECK_EQUAL_64(0xffffffff, x12);
  CHECK_EQUAL_64(0, x13);
  CHECK_EQUAL_64(0xfffffffe, x14);
  CHECK_EQUAL_64(1, x16);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(2, x18);
  CHECK_EQUAL_64(0, x19);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
  CHECK_EQUAL_64(0, x21);
  CHECK_EQUAL_64(0xffffff0000000000UL, x22);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(2, x25);
  CHECK_EQUAL_64(0, x26);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
//  CHECK_EQUAL_64(0, x28);
  CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
  CHECK_EQUAL_64(0xffffffff, x30);

  TEARDOWN();
}
7438 
7439 
// Test FCVTZS: floating-point convert to signed integer, rounding toward
// zero (truncation). Covers float (S) and double (D) sources into both
// 32-bit (W) and 64-bit (X) destinations, including saturation at
// INT32_MIN/MAX and INT64_MIN/MAX for out-of-range and infinite inputs.
TEST(fcvtzs) {
  INIT_V8();
  SETUP();

  START();
  // Inputs for float -> 32-bit conversions (results in w0-w7).
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  // Inputs for double -> 32-bit conversions (results in w8-w15).
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  __ Fmov(d15, kWMinInt + 1);
  // Inputs for float -> 64-bit conversions (results in x17-x23).
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  // Inputs for double -> 64-bit conversions (results in x24-x30).
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

  __ Fcvtzs(w0, s0);
  __ Fcvtzs(w1, s1);
  __ Fcvtzs(w2, s2);
  __ Fcvtzs(w3, s3);
  __ Fcvtzs(w4, s4);
  __ Fcvtzs(w5, s5);
  __ Fcvtzs(w6, s6);
  __ Fcvtzs(w7, s7);
  __ Fcvtzs(w8, d8);
  __ Fcvtzs(w9, d9);
  __ Fcvtzs(w10, d10);
  __ Fcvtzs(w11, d11);
  __ Fcvtzs(w12, d12);
  __ Fcvtzs(w13, d13);
  __ Fcvtzs(w14, d14);
  __ Fcvtzs(w15, d15);
  __ Fcvtzs(x17, s17);
  __ Fcvtzs(x18, s18);
  __ Fcvtzs(x19, s19);
  __ Fcvtzs(x20, s20);
  __ Fcvtzs(x21, s21);
  __ Fcvtzs(x22, s22);
  __ Fcvtzs(x23, s23);
  __ Fcvtzs(x24, d24);
  __ Fcvtzs(x25, d25);
  __ Fcvtzs(x26, d26);
  __ Fcvtzs(x27, d27);
  __ Fcvtzs(x28, d28);
  __ Fcvtzs(x29, d29);
  __ Fcvtzs(x30, d30);
  END();

  RUN();

  // Round-toward-zero truncates: 1.5 -> 1, -1.5 -> -1. Infinities saturate
  // to the signed min/max of the destination width.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(1, x2);
  CHECK_EQUAL_64(0xffffffff, x3);
  CHECK_EQUAL_64(0x7fffffff, x4);
  CHECK_EQUAL_64(0x80000000, x5);
  CHECK_EQUAL_64(0x7fffff80, x6);
  CHECK_EQUAL_64(0x80000080, x7);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(1, x10);
  CHECK_EQUAL_64(0xffffffff, x11);
  CHECK_EQUAL_64(0x7fffffff, x12);
  CHECK_EQUAL_64(0x80000000, x13);
  CHECK_EQUAL_64(0x7ffffffe, x14);
  CHECK_EQUAL_64(0x80000001, x15);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(1, x18);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x19);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
  CHECK_EQUAL_64(0x8000000000000000UL, x21);
  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
  CHECK_EQUAL_64(0x8000008000000000UL, x23);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(1, x25);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x26);
  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
  CHECK_EQUAL_64(0x8000000000000000UL, x28);
  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
  CHECK_EQUAL_64(0x8000000000000400UL, x30);

  TEARDOWN();
}
7543 
7544 
// Test FCVTZU: floating-point convert to unsigned integer, rounding toward
// zero (truncation). Covers float (S) and double (D) sources into both
// 32-bit (W) and 64-bit (X) destinations; negative inputs and -infinity
// saturate to 0, +infinity saturates to the unsigned maximum.
TEST(fcvtzu) {
  INIT_V8();
  SETUP();

  START();
  // Inputs for float -> 32-bit conversions (results in w0-w7).
  __ Fmov(s0, 1.0);
  __ Fmov(s1, 1.1);
  __ Fmov(s2, 1.5);
  __ Fmov(s3, -1.5);
  __ Fmov(s4, kFP32PositiveInfinity);
  __ Fmov(s5, kFP32NegativeInfinity);
  __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
  __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
  // Inputs for double -> 32-bit conversions (results in w8-w14).
  __ Fmov(d8, 1.0);
  __ Fmov(d9, 1.1);
  __ Fmov(d10, 1.5);
  __ Fmov(d11, -1.5);
  __ Fmov(d12, kFP64PositiveInfinity);
  __ Fmov(d13, kFP64NegativeInfinity);
  __ Fmov(d14, kWMaxInt - 1);
  // NOTE(review): d15 is set here but no Fcvtzu reads it and x15 is not
  // checked below — presumably kept for symmetry with TEST(fcvtzs).
  __ Fmov(d15, kWMinInt + 1);
  // Inputs for float -> 64-bit conversions (results in x17-x23).
  __ Fmov(s17, 1.1);
  __ Fmov(s18, 1.5);
  __ Fmov(s19, -1.5);
  __ Fmov(s20, kFP32PositiveInfinity);
  __ Fmov(s21, kFP32NegativeInfinity);
  __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
  __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
  // Inputs for double -> 64-bit conversions (results in x24-x30).
  __ Fmov(d24, 1.1);
  __ Fmov(d25, 1.5);
  __ Fmov(d26, -1.5);
  __ Fmov(d27, kFP64PositiveInfinity);
  __ Fmov(d28, kFP64NegativeInfinity);
  __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
  __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.

  __ Fcvtzu(w0, s0);
  __ Fcvtzu(w1, s1);
  __ Fcvtzu(w2, s2);
  __ Fcvtzu(w3, s3);
  __ Fcvtzu(w4, s4);
  __ Fcvtzu(w5, s5);
  __ Fcvtzu(w6, s6);
  __ Fcvtzu(w7, s7);
  __ Fcvtzu(w8, d8);
  __ Fcvtzu(w9, d9);
  __ Fcvtzu(w10, d10);
  __ Fcvtzu(w11, d11);
  __ Fcvtzu(w12, d12);
  __ Fcvtzu(w13, d13);
  __ Fcvtzu(w14, d14);
  __ Fcvtzu(x17, s17);
  __ Fcvtzu(x18, s18);
  __ Fcvtzu(x19, s19);
  __ Fcvtzu(x20, s20);
  __ Fcvtzu(x21, s21);
  __ Fcvtzu(x22, s22);
  __ Fcvtzu(x23, s23);
  __ Fcvtzu(x24, d24);
  __ Fcvtzu(x25, d25);
  __ Fcvtzu(x26, d26);
  __ Fcvtzu(x27, d27);
  __ Fcvtzu(x28, d28);
  __ Fcvtzu(x29, d29);
  __ Fcvtzu(x30, d30);
  END();

  RUN();

  // Truncation: 1.5 -> 1. All negative inputs (including -infinity and
  // values below INT64_MIN) saturate to 0 for an unsigned destination.
  CHECK_EQUAL_64(1, x0);
  CHECK_EQUAL_64(1, x1);
  CHECK_EQUAL_64(1, x2);
  CHECK_EQUAL_64(0, x3);
  CHECK_EQUAL_64(0xffffffff, x4);
  CHECK_EQUAL_64(0, x5);
  CHECK_EQUAL_64(0x7fffff80, x6);
  CHECK_EQUAL_64(0, x7);
  CHECK_EQUAL_64(1, x8);
  CHECK_EQUAL_64(1, x9);
  CHECK_EQUAL_64(1, x10);
  CHECK_EQUAL_64(0, x11);
  CHECK_EQUAL_64(0xffffffff, x12);
  CHECK_EQUAL_64(0, x13);
  CHECK_EQUAL_64(0x7ffffffe, x14);
  CHECK_EQUAL_64(1, x17);
  CHECK_EQUAL_64(1, x18);
  CHECK_EQUAL_64(0x0UL, x19);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
  CHECK_EQUAL_64(0x0UL, x21);
  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
  CHECK_EQUAL_64(0x0UL, x23);
  CHECK_EQUAL_64(1, x24);
  CHECK_EQUAL_64(1, x25);
  CHECK_EQUAL_64(0x0UL, x26);
  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
  CHECK_EQUAL_64(0x0UL, x28);
  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
  CHECK_EQUAL_64(0x0UL, x30);

  TEARDOWN();
}
7646 
7647 
7648 // Test that scvtf and ucvtf can convert the 64-bit input into the expected
7649 // value. All possible values of 'fbits' are tested. The expected value is
7650 // modified accordingly in each case.
7651 //
7652 // The expected value is specified as the bit encoding of the expected double
7653 // produced by scvtf (expected_scvtf_bits) as well as ucvtf
7654 // (expected_ucvtf_bits).
7655 //
7656 // Where the input value is representable by int32_t or uint32_t, conversions
7657 // from W registers will also be tested.
static void TestUScvtfHelper(uint64_t in,
                             uint64_t expected_scvtf_bits,
                             uint64_t expected_ucvtf_bits) {
  // Reinterpret the input in all four integer flavours that the conversion
  // instructions can consume.
  uint64_t u64 = in;
  uint32_t u32 = u64 & 0xffffffff;
  int64_t s64 = static_cast<int64_t>(in);
  int32_t s32 = s64 & 0x7fffffff;

  // W-register results are only checked when the input value survives the
  // narrowing, i.e. when it is exactly representable in 32 bits.
  bool cvtf_s32 = (s64 == s32);
  bool cvtf_u32 = (u64 == u32);

  // Result slots indexed by fbits: [0, 64] for X-source conversions,
  // [0, 32] for W-source conversions.
  double results_scvtf_x[65];
  double results_ucvtf_x[65];
  double results_scvtf_w[33];
  double results_ucvtf_w[33];

  SETUP();
  START();

  // x0-x3 hold the base addresses of the result arrays.
  __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
  __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
  __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
  __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));

  __ Mov(x10, s64);

  // Corrupt the top word, in case it is accidentally used during W-register
  // conversions.
  __ Mov(x11, 0x5555555555555555);
  __ Bfi(x11, x10, 0, kWRegSizeInBits);

  // Test integer conversions.
  __ Scvtf(d0, x10);
  __ Ucvtf(d1, x10);
  __ Scvtf(d2, w11);
  __ Ucvtf(d3, w11);
  __ Str(d0, MemOperand(x0));
  __ Str(d1, MemOperand(x1));
  __ Str(d2, MemOperand(x2));
  __ Str(d3, MemOperand(x3));

  // Test all possible values of fbits.
  for (int fbits = 1; fbits <= 32; fbits++) {
    __ Scvtf(d0, x10, fbits);
    __ Ucvtf(d1, x10, fbits);
    __ Scvtf(d2, w11, fbits);
    __ Ucvtf(d3, w11, fbits);
    __ Str(d0, MemOperand(x0, fbits * kDRegSize));
    __ Str(d1, MemOperand(x1, fbits * kDRegSize));
    __ Str(d2, MemOperand(x2, fbits * kDRegSize));
    __ Str(d3, MemOperand(x3, fbits * kDRegSize));
  }

  // Conversions from W registers can only handle fbits values <= 32, so just
  // test conversions from X registers for 32 < fbits <= 64.
  for (int fbits = 33; fbits <= 64; fbits++) {
    __ Scvtf(d0, x10, fbits);
    __ Ucvtf(d1, x10, fbits);
    __ Str(d0, MemOperand(x0, fbits * kDRegSize));
    __ Str(d1, MemOperand(x1, fbits * kDRegSize));
  }

  END();
  RUN();

  // Check the results. With fbits fixed-point fraction bits, the expected
  // value is the fbits == 0 expectation scaled down by 2^fbits.
  double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
  double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);

  for (int fbits = 0; fbits <= 32; fbits++) {
    double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
    double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
    CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
    CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
    if (cvtf_s32) CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
    if (cvtf_u32) CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
  }
  for (int fbits = 33; fbits <= 64; fbits++) {
    double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
    double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
    CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
    CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
  }

  TEARDOWN();
}
7744 
7745 
// Test SCVTF/UCVTF to double via TestUScvtfHelper. Arguments are:
// (integer input, expected scvtf double bits, expected ucvtf double bits).
TEST(scvtf_ucvtf_double) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
  TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
  TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
  TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
  TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
  TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
  // Test mantissa extremities.
  TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
  // The largest int32_t that fits in a double.
  TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
  // Values that would be negative if treated as an int32_t.
  TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
  TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
  TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
  // The largest int64_t that fits in a double.
  TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
  // Check for bit pattern reproduction.
  TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
  TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
  TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
  TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);

  // Conversions which require rounding (round-to-nearest, ties-to-even, on
  // the half-ulp boundaries of the 52-bit double mantissa).
  TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
  TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
  TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
  TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
  TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
  TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
  TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
  TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
  TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
}
7810 
7811 
7812 // The same as TestUScvtfHelper, but convert to floats.
TestUScvtf32Helper(uint64_t in,uint32_t expected_scvtf_bits,uint32_t expected_ucvtf_bits)7813 static void TestUScvtf32Helper(uint64_t in,
7814                                uint32_t expected_scvtf_bits,
7815                                uint32_t expected_ucvtf_bits) {
7816   uint64_t u64 = in;
7817   uint32_t u32 = u64 & 0xffffffff;
7818   int64_t s64 = static_cast<int64_t>(in);
7819   int32_t s32 = s64 & 0x7fffffff;
7820 
7821   bool cvtf_s32 = (s64 == s32);
7822   bool cvtf_u32 = (u64 == u32);
7823 
7824   float results_scvtf_x[65];
7825   float results_ucvtf_x[65];
7826   float results_scvtf_w[33];
7827   float results_ucvtf_w[33];
7828 
7829   SETUP();
7830   START();
7831 
7832   __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7833   __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7834   __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7835   __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7836 
7837   __ Mov(x10, s64);
7838 
7839   // Corrupt the top word, in case it is accidentally used during W-register
7840   // conversions.
7841   __ Mov(x11, 0x5555555555555555);
7842   __ Bfi(x11, x10, 0, kWRegSizeInBits);
7843 
7844   // Test integer conversions.
7845   __ Scvtf(s0, x10);
7846   __ Ucvtf(s1, x10);
7847   __ Scvtf(s2, w11);
7848   __ Ucvtf(s3, w11);
7849   __ Str(s0, MemOperand(x0));
7850   __ Str(s1, MemOperand(x1));
7851   __ Str(s2, MemOperand(x2));
7852   __ Str(s3, MemOperand(x3));
7853 
7854   // Test all possible values of fbits.
7855   for (int fbits = 1; fbits <= 32; fbits++) {
7856     __ Scvtf(s0, x10, fbits);
7857     __ Ucvtf(s1, x10, fbits);
7858     __ Scvtf(s2, w11, fbits);
7859     __ Ucvtf(s3, w11, fbits);
7860     __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7861     __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7862     __ Str(s2, MemOperand(x2, fbits * kSRegSize));
7863     __ Str(s3, MemOperand(x3, fbits * kSRegSize));
7864   }
7865 
7866   // Conversions from W registers can only handle fbits values <= 32, so just
7867   // test conversions from X registers for 32 < fbits <= 64.
7868   for (int fbits = 33; fbits <= 64; fbits++) {
7869     __ Scvtf(s0, x10, fbits);
7870     __ Ucvtf(s1, x10, fbits);
7871     __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7872     __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7873   }
7874 
7875   END();
7876   RUN();
7877 
7878   // Check the results.
7879   float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7880   float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
7881 
7882   for (int fbits = 0; fbits <= 32; fbits++) {
7883     float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7884     float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7885     CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7886     CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7887     if (cvtf_s32) CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
7888     if (cvtf_u32) CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
7889     break;
7890   }
7891   for (int fbits = 33; fbits <= 64; fbits++) {
7892     break;
7893     float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7894     float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7895     CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7896     CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7897   }
7898 
7899   TEARDOWN();
7900 }
7901 
7902 
// Test SCVTF/UCVTF to float via TestUScvtf32Helper. Arguments are:
// (integer input, expected scvtf float bits, expected ucvtf float bits).
TEST(scvtf_ucvtf_float) {
  INIT_V8();
  // Simple conversions of positive numbers which require no rounding; the
  // results should not depend on the rounding mode, and ucvtf and scvtf should
  // produce the same result.
  TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
  TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
  TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
  TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
  // Test mantissa extremities.
  TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
  TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
  // The largest int32_t that fits in a float.
  TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
  // Values that would be negative if treated as an int32_t.
  TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
  TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
  // The largest int64_t that fits in a float.
  TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
  // Check for bit pattern reproduction.
  TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);

  // Simple conversions of negative int64_t values. These require no rounding,
  // and the results should not depend on the rounding mode.
  TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
  TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);

  // Conversions which require rounding (round-to-nearest, ties-to-even, on
  // the half-ulp boundaries of the 23-bit float mantissa).
  TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
  TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
  TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
  TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
  TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
  // Check rounding of negative int64_t values (and large uint64_t values).
  TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
  TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
  TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
  TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
  TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
  // Round up to produce a result that's too big for the input to represent.
  TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
  TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
  TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
  TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
  TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
}
7970 
7971 
// Test MRS: reading the NZCV condition flags and the FPCR system register
// into general-purpose registers.
TEST(system_mrs) {
  INIT_V8();
  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 1);
  __ Mov(w2, 0x80000000);

  // Set the Z and C flags.
  __ Cmp(w0, w0);
  __ Mrs(x3, NZCV);

  // Set the N flag.
  __ Cmp(w0, w1);
  __ Mrs(x4, NZCV);

  // Set the Z, C and V flags.
  // (0x80000000 + 0x80000000 produces a zero result with carry and signed
  // overflow.)
  __ Adds(w0, w2, w2);
  __ Mrs(x5, NZCV);

  // Read the default FPCR.
  __ Mrs(x6, FPCR);
  END();

  RUN();

  // NZCV
  CHECK_EQUAL_32(ZCFlag, w3);
  CHECK_EQUAL_32(NFlag, w4);
  CHECK_EQUAL_32(ZCVFlag, w5);

  // FPCR
  // The default FPCR on Linux-based platforms is 0.
  CHECK_EQUAL_32(0, w6);

  TEARDOWN();
}
8010 
8011 
// Test MSR: writing the NZCV condition flags and the FPCR system register
// from general-purpose registers, and that undefined FPCR bits ignore
// writes.
TEST(system_msr) {
  INIT_V8();
  // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
  const uint64_t fpcr_core = 0x07c00000;

  // All FPCR fields (including fields which may be read-as-zero):
  //  Stride, Len
  //  IDE, IXE, UFE, OFE, DZE, IOE
  const uint64_t fpcr_all = fpcr_core | 0x00379f00;

  SETUP();

  START();
  __ Mov(w0, 0);
  __ Mov(w1, 0x7fffffff);

  // x7 counts successful condition checks; it should reach 8.
  __ Mov(x7, 0);

  __ Mov(x10, NVFlag);
  __ Cmp(w0, w0);     // Set Z and C.
  __ Msr(NZCV, x10);  // Set N and V.
  // The Msr should have overwritten every flag set by the Cmp.
  __ Cinc(x7, x7, mi);  // N
  __ Cinc(x7, x7, ne);  // !Z
  __ Cinc(x7, x7, lo);  // !C
  __ Cinc(x7, x7, vs);  // V

  __ Mov(x10, ZCFlag);
  __ Cmn(w1, w1);     // Set N and V.
  __ Msr(NZCV, x10);  // Set Z and C.
  // The Msr should have overwritten every flag set by the Cmn.
  __ Cinc(x7, x7, pl);  // !N
  __ Cinc(x7, x7, eq);  // Z
  __ Cinc(x7, x7, hs);  // C
  __ Cinc(x7, x7, vc);  // !V

  // All core FPCR fields must be writable.
  __ Mov(x8, fpcr_core);
  __ Msr(FPCR, x8);
  __ Mrs(x8, FPCR);

  // All FPCR fields, including optional ones. This part of the test doesn't
  // achieve much other than ensuring that supported fields can be cleared by
  // the next test.
  __ Mov(x9, fpcr_all);
  __ Msr(FPCR, x9);
  __ Mrs(x9, FPCR);
  __ And(x9, x9, fpcr_core);

  // The undefined bits must ignore writes.
  // It's conceivable that a future version of the architecture could use these
  // fields (making this test fail), but in the meantime this is a useful test
  // for the simulator.
  __ Mov(x10, ~fpcr_all);
  __ Msr(FPCR, x10);
  __ Mrs(x10, FPCR);

  END();

  RUN();

  // We should have incremented x7 (from 0) exactly 8 times.
  CHECK_EQUAL_64(8, x7);

  CHECK_EQUAL_64(fpcr_core, x8);
  CHECK_EQUAL_64(fpcr_core, x9);
  CHECK_EQUAL_64(0, x10);

  TEARDOWN();
}
8082 
8083 
// Test that NOP has no visible effect: every register and the NZCV flags
// must be unchanged after it executes.
TEST(system_nop) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Snapshot the machine state immediately before the NOP.
  before.Dump(&masm);
  __ Nop();
  END();

  RUN();

  CHECK_EQUAL_REGISTERS(before);
  CHECK_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}
8101 
8102 
// Test that arithmetic and logical instructions with xzr as the destination
// (and xzr/zero-register operand combinations) behave as NOPs: no register
// is modified. These encodings have sibling forms that target the stack
// pointer, so this guards against accidental csp writes.
TEST(zero_dest) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  // Each xN gets a distinct value: x1 = base, xN = x(N-1) + base.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  for (unsigned i = 2; i < x30.code(); i++) {
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should be NOPs in these forms, but have
  // alternate forms which can write into the stack pointer.
  __ add(xzr, x0, x1);
  __ add(xzr, x1, xzr);
  __ add(xzr, xzr, x1);

  __ and_(xzr, x0, x2);
  __ and_(xzr, x2, xzr);
  __ and_(xzr, xzr, x2);

  __ bic(xzr, x0, x3);
  __ bic(xzr, x3, xzr);
  __ bic(xzr, xzr, x3);

  __ eon(xzr, x0, x4);
  __ eon(xzr, x4, xzr);
  __ eon(xzr, xzr, x4);

  __ eor(xzr, x0, x5);
  __ eor(xzr, x5, xzr);
  __ eor(xzr, xzr, x5);

  __ orr(xzr, x0, x6);
  __ orr(xzr, x6, xzr);
  __ orr(xzr, xzr, x6);

  __ sub(xzr, x0, x7);
  __ sub(xzr, x7, xzr);
  __ sub(xzr, xzr, x7);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  CHECK_EQUAL_REGISTERS(before);
  CHECK_EQUAL_NZCV(before.flags_nzcv());

  TEARDOWN();
}
8168 
8169 
// Like TEST(zero_dest), but for the flag-setting forms (adds/ands/bics/subs)
// with xzr as the destination: they must update only NZCV, leaving every
// register — and in particular csp — untouched. Note that unlike zero_dest,
// NZCV is deliberately NOT checked here, since these instructions do set
// flags.
TEST(zero_dest_setflags) {
  INIT_V8();
  SETUP();
  RegisterDump before;

  START();
  // Preserve the system stack pointer, in case we clobber it.
  __ Mov(x30, csp);
  // Initialize the other registers used in this test.
  // Each xN gets a distinct value: x1 = base, xN = x(N-1) + base.
  uint64_t literal_base = 0x0100001000100101UL;
  __ Mov(x0, 0);
  __ Mov(x1, literal_base);
  for (int i = 2; i < 30; i++) {
    __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
  }
  before.Dump(&masm);

  // All of these instructions should only write to the flags in these forms,
  // but have alternate forms which can write into the stack pointer.
  __ adds(xzr, x0, Operand(x1, UXTX));
  __ adds(xzr, x1, Operand(xzr, UXTX));
  __ adds(xzr, x1, 1234);
  __ adds(xzr, x0, x1);
  __ adds(xzr, x1, xzr);
  __ adds(xzr, xzr, x1);

  __ ands(xzr, x2, ~0xf);
  __ ands(xzr, xzr, ~0xf);
  __ ands(xzr, x0, x2);
  __ ands(xzr, x2, xzr);
  __ ands(xzr, xzr, x2);

  __ bics(xzr, x3, ~0xf);
  __ bics(xzr, xzr, ~0xf);
  __ bics(xzr, x0, x3);
  __ bics(xzr, x3, xzr);
  __ bics(xzr, xzr, x3);

  __ subs(xzr, x0, Operand(x3, UXTX));
  __ subs(xzr, x3, Operand(xzr, UXTX));
  __ subs(xzr, x3, 1234);
  __ subs(xzr, x0, x3);
  __ subs(xzr, x3, xzr);
  __ subs(xzr, xzr, x3);

  // Swap the saved system stack pointer with the real one. If csp was written
  // during the test, it will show up in x30. This is done because the test
  // framework assumes that csp will be valid at the end of the test.
  __ Mov(x29, x30);
  __ Mov(x30, csp);
  __ Mov(csp, x29);
  // We used x29 as a scratch register, so reset it to make sure it doesn't
  // trigger a test failure.
  __ Add(x29, x28, x1);
  END();

  RUN();

  CHECK_EQUAL_REGISTERS(before);

  TEARDOWN();
}
8232 
8233 
TEST(register_bit) {
  // This test only inspects register metadata, so there is no code generation
  // and no need for the usual setup/teardown machinery.

  // Each Xn register occupies bit n of a RegList.
  CHECK((1UL << 0) == x0.Bit());
  CHECK((1UL << 1) == x1.Bit());
  CHECK((1UL << 10) == x10.Bit());

  // Registers with AAPCS64 roles still map onto their hardware codes.
  CHECK((1UL << kFramePointerRegCode) == fp.Bit());
  CHECK((1UL << kLinkRegCode) == lr.Bit());

  // Fixed (hardware) definitions.
  CHECK((1UL << kZeroRegCode) == xzr.Bit());

  // V8-internal ABI definitions. csp uses an internal code so its bit never
  // aliases xzr's, even though both encode as 31 in instructions.
  CHECK((1UL << kJSSPCode) == jssp.Bit());
  CHECK((1UL << kSPRegInternalCode) == csp.Bit());
  CHECK(xzr.Bit() != csp.Bit());

  // A W register always occupies the same bit as the X register that shares
  // its code.
  CHECK(w0.Bit() == x0.Bit());
  CHECK(w1.Bit() == x1.Bit());
  CHECK(w10.Bit() == x10.Bit());
  CHECK(wjssp.Bit() == jssp.Bit());
  CHECK(wzr.Bit() == xzr.Bit());
  CHECK(wcsp.Bit() == csp.Bit());
}
8263 
8264 
TEST(stack_pointer_override) {
  // This test generates some stack maintenance code, but the test only checks
  // the reported state.
  INIT_V8();
  SETUP();
  START();

  // The default stack pointer in V8 is jssp, but the test framework sets it
  // to csp before calling the test.
  CHECK(csp.Is(__ StackPointer()));
  // Override the stack pointer with a plain register, then jssp, then csp
  // again; the macro assembler must report each in turn. Ending on csp leaves
  // the framework's expected stack pointer in place.
  const Register overrides[] = {x0, jssp, csp};
  for (size_t i = 0; i < sizeof(overrides) / sizeof(overrides[0]); i++) {
    __ SetStackPointer(overrides[i]);
    CHECK(overrides[i].Is(__ StackPointer()));
  }

  END();
  RUN();
  TEARDOWN();
}
8286 
8287 
TEST(peek_poke_simple)8288 TEST(peek_poke_simple) {
8289   INIT_V8();
8290   SETUP();
8291   START();
8292 
8293   static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
8294   static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
8295                                     x12.Bit() | x13.Bit();
8296 
8297   // The literal base is chosen to have two useful properties:
8298   //  * When multiplied by small values (such as a register index), this value
8299   //    is clearly readable in the result.
8300   //  * The value is not formed from repeating fixed-size smaller values, so it
8301   //    can be used to detect endianness-related errors.
8302   uint64_t literal_base = 0x0100001000100101UL;
8303 
8304   // Initialize the registers.
8305   __ Mov(x0, literal_base);
8306   __ Add(x1, x0, x0);
8307   __ Add(x2, x1, x0);
8308   __ Add(x3, x2, x0);
8309 
8310   __ Claim(4);
8311 
8312   // Simple exchange.
8313   //  After this test:
8314   //    x0-x3 should be unchanged.
8315   //    w10-w13 should contain the lower words of x0-x3.
8316   __ Poke(x0, 0);
8317   __ Poke(x1, 8);
8318   __ Poke(x2, 16);
8319   __ Poke(x3, 24);
8320   Clobber(&masm, x0_to_x3);
8321   __ Peek(x0, 0);
8322   __ Peek(x1, 8);
8323   __ Peek(x2, 16);
8324   __ Peek(x3, 24);
8325 
8326   __ Poke(w0, 0);
8327   __ Poke(w1, 4);
8328   __ Poke(w2, 8);
8329   __ Poke(w3, 12);
8330   Clobber(&masm, x10_to_x13);
8331   __ Peek(w10, 0);
8332   __ Peek(w11, 4);
8333   __ Peek(w12, 8);
8334   __ Peek(w13, 12);
8335 
8336   __ Drop(4);
8337 
8338   END();
8339   RUN();
8340 
8341   CHECK_EQUAL_64(literal_base * 1, x0);
8342   CHECK_EQUAL_64(literal_base * 2, x1);
8343   CHECK_EQUAL_64(literal_base * 3, x2);
8344   CHECK_EQUAL_64(literal_base * 4, x3);
8345 
8346   CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8347   CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8348   CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8349   CHECK_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
8350 
8351   TEARDOWN();
8352 }
8353 
8354 
TEST(peek_poke_unaligned)8355 TEST(peek_poke_unaligned) {
8356   INIT_V8();
8357   SETUP();
8358   START();
8359 
8360   // The literal base is chosen to have two useful properties:
8361   //  * When multiplied by small values (such as a register index), this value
8362   //    is clearly readable in the result.
8363   //  * The value is not formed from repeating fixed-size smaller values, so it
8364   //    can be used to detect endianness-related errors.
8365   uint64_t literal_base = 0x0100001000100101UL;
8366 
8367   // Initialize the registers.
8368   __ Mov(x0, literal_base);
8369   __ Add(x1, x0, x0);
8370   __ Add(x2, x1, x0);
8371   __ Add(x3, x2, x0);
8372   __ Add(x4, x3, x0);
8373   __ Add(x5, x4, x0);
8374   __ Add(x6, x5, x0);
8375 
8376   __ Claim(4);
8377 
8378   // Unaligned exchanges.
8379   //  After this test:
8380   //    x0-x6 should be unchanged.
8381   //    w10-w12 should contain the lower words of x0-x2.
8382   __ Poke(x0, 1);
8383   Clobber(&masm, x0.Bit());
8384   __ Peek(x0, 1);
8385   __ Poke(x1, 2);
8386   Clobber(&masm, x1.Bit());
8387   __ Peek(x1, 2);
8388   __ Poke(x2, 3);
8389   Clobber(&masm, x2.Bit());
8390   __ Peek(x2, 3);
8391   __ Poke(x3, 4);
8392   Clobber(&masm, x3.Bit());
8393   __ Peek(x3, 4);
8394   __ Poke(x4, 5);
8395   Clobber(&masm, x4.Bit());
8396   __ Peek(x4, 5);
8397   __ Poke(x5, 6);
8398   Clobber(&masm, x5.Bit());
8399   __ Peek(x5, 6);
8400   __ Poke(x6, 7);
8401   Clobber(&masm, x6.Bit());
8402   __ Peek(x6, 7);
8403 
8404   __ Poke(w0, 1);
8405   Clobber(&masm, w10.Bit());
8406   __ Peek(w10, 1);
8407   __ Poke(w1, 2);
8408   Clobber(&masm, w11.Bit());
8409   __ Peek(w11, 2);
8410   __ Poke(w2, 3);
8411   Clobber(&masm, w12.Bit());
8412   __ Peek(w12, 3);
8413 
8414   __ Drop(4);
8415 
8416   END();
8417   RUN();
8418 
8419   CHECK_EQUAL_64(literal_base * 1, x0);
8420   CHECK_EQUAL_64(literal_base * 2, x1);
8421   CHECK_EQUAL_64(literal_base * 3, x2);
8422   CHECK_EQUAL_64(literal_base * 4, x3);
8423   CHECK_EQUAL_64(literal_base * 5, x4);
8424   CHECK_EQUAL_64(literal_base * 6, x5);
8425   CHECK_EQUAL_64(literal_base * 7, x6);
8426 
8427   CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8428   CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8429   CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8430 
8431   TEARDOWN();
8432 }
8433 
8434 
// Check that misaligned Peek accesses observe little-endian byte order: an
// access straddling two identical adjacent copies of a value must read the
// value with its halves swapped.
TEST(peek_poke_endianness) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);

  __ Claim(4);

  // Endianness tests.
  //  After this section:
  //    x4 should match x0[31:0]:x0[63:32]
  //    w5 should match w1[15:0]:w1[31:16]
  // Two copies of x0 are stored at offsets 0 and 8; the X-sized Peek at
  // offset 4 straddles them, reading the top half of the first copy into the
  // low word and the bottom half of the second copy into the high word.
  __ Poke(x0, 0);
  __ Poke(x0, 8);
  __ Peek(x4, 4);

  // The same trick at W size with a half-word misalignment.
  __ Poke(w1, 0);
  __ Poke(w1, 4);
  __ Peek(w5, 2);

  __ Drop(4);

  END();
  RUN();

  // Expected results assume little-endian memory accesses.
  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
  uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x1_expected >> 16) & 0x0000ffff);

  CHECK_EQUAL_64(x0_expected, x0);
  CHECK_EQUAL_64(x1_expected, x1);
  CHECK_EQUAL_64(x4_expected, x4);
  CHECK_EQUAL_64(x5_expected, x5);

  TEARDOWN();
}
8483 
8484 
// Check that Peek and Poke interact correctly with other stack operations
// (Claim, Drop, Push, Pop), including W-sized stack adjustments that require
// a non-csp stack pointer.
TEST(peek_poke_mixed) {
  INIT_V8();
  SETUP();
  START();

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  // Initialize the registers.
  __ Mov(x0, literal_base);
  __ Add(x1, x0, x0);
  __ Add(x2, x1, x0);
  __ Add(x3, x2, x0);

  __ Claim(4);

  // Mix with other stack operations.
  //  After this section:
  //    x0-x3 should be unchanged.
  //    x6 should match x1[31:0]:x0[63:32]
  //    w7 should match x1[15:0]:x0[63:48]
  // Offsets in the comments below are relative to the stack pointer as it is
  // at this point (just after the Claim).
  __ Poke(x1, 8);
  __ Poke(x0, 0);
  {
    DCHECK(__ StackPointer().Is(csp));
    // Temporarily use x4 as the stack pointer, so the stack can be moved by
    // W-sized amounts that csp's alignment would not permit.
    __ Mov(x4, __ StackPointer());
    __ SetStackPointer(x4);

    __ Poke(wzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1, kWRegSize);
    // The stack pointer moved up four bytes, so this reads bytes 4-11: the
    // high word of the original x0 and the low word of the original x1.
    __ Peek(x6, 0);
    __ Claim(1);
    // Stack pointer is now four bytes below its original position, so this
    // reads bytes 6-9: the top half-word of x0 and the bottom half-word of x1.
    __ Peek(w7, 10);
    __ Poke(x3, 28);
    __ Poke(xzr, 0);    // Clobber the space we're about to drop.
    __ Drop(1);
    __ Poke(x2, 12);
    // Push the low word of x0 back into bytes 0-3, restoring the original
    // stack pointer position.
    __ Push(w0);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  // Pop the four X-sized slots, reassembling the original x0-x3 values.
  __ Pop(x0, x1, x2, x3);

  END();
  RUN();

  uint64_t x0_expected = literal_base * 1;
  uint64_t x1_expected = literal_base * 2;
  uint64_t x2_expected = literal_base * 3;
  uint64_t x3_expected = literal_base * 4;
  uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
  uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
                         ((x0_expected >> 48) & 0x0000ffff);

  CHECK_EQUAL_64(x0_expected, x0);
  CHECK_EQUAL_64(x1_expected, x1);
  CHECK_EQUAL_64(x2_expected, x2);
  CHECK_EQUAL_64(x3_expected, x3);
  CHECK_EQUAL_64(x6_expected, x6);
  CHECK_EQUAL_64(x7_expected, x7);

  TEARDOWN();
}
8554 
8555 
// This enum is used only as an argument to the push-pop test helpers, to
// select how registers are transferred to and from the stack. The helpers
// accept independent push and pop methods so that one mechanism can be
// checked against the other.
enum PushPopMethod {
  // Push or Pop using the Push and Pop methods, with blocks of up to four
  // registers. (Smaller blocks will be used if necessary.)
  PushPopByFour,

  // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
  PushPopRegList
};
8565 
8566 
// The maximum number of registers that can be used by the PushPopJssp* tests,
// where a reg_count field is provided.
// -1 is a sentinel: the helpers replace it with the count of all registers
// they are allowed to use.
static int const kPushPopJsspMaxRegCount = -1;
8570 
8571 // Test a simple push-pop pattern:
8572 //  * Claim <claim> bytes to set the stack alignment.
8573 //  * Push <reg_count> registers with size <reg_size>.
8574 //  * Clobber the register contents.
8575 //  * Pop <reg_count> registers to restore the original contents.
8576 //  * Drop <claim> bytes to restore the original stack pointer.
8577 //
8578 // Different push and pop methods can be specified independently to test for
8579 // proper word-endian behaviour.
static void PushPopJsspSimpleHelper(int reg_count,
                                    int claim,
                                    int reg_size,
                                    PushPopMethod push_method,
                                    PushPopMethod pop_method) {
  SETUP();

  START();

  // Registers in the TmpList can be used by the macro assembler for debug code
  // (for example in 'Pop'), so we can't use them here. We can't use jssp
  // because it will be the stack pointer for this test.
  static RegList const allowed = ~(masm.TmpList()->list() | jssp.Bit());
  // kPushPopJsspMaxRegCount (-1) means "use every allowed register".
  if (reg_count == kPushPopJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfRegisters);
  }
  // Work out which registers to use, based on reg_size.
  Register r[kNumberOfRegisters];
  Register x[kNumberOfRegisters];
  RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
                                       allowed);

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  {
    // Use jssp as the stack pointer for the body of the test, restoring csp
    // before END() so the framework can clean up.
    DCHECK(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers.
    for (i = 0; i < reg_count; i++) {
      // Always write into the X register, to ensure that the upper word is
      // properly ignored by Push when testing W registers.
      if (!x[i].IsZero()) {
        __ Mov(x[i], literal_base * i);
      }
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3:  __ Push(r[2], r[1], r[0]); break;
          case 2:  __ Push(r[1], r[0]);       break;
          case 1:  __ Push(r[0]);             break;
          default: DCHECK(i == 0);            break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    Clobber(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3:  __ Pop(r[i], r[i+1], r[i+2]); break;
          case 2:  __ Pop(r[i], r[i+1]);         break;
          case 1:  __ Pop(r[i]);                 break;
          default: DCHECK(i == reg_count);       break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use CHECK_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    if (x[i].IsZero()) {
      // The zero register cannot hold a value; it always reads as zero.
      CHECK_EQUAL_64(0, x[i]);
    } else {
      CHECK_EQUAL_64(literal_base * i, x[i]);
    }
  }

  TEARDOWN();
}
8694 
8695 
TEST(push_pop_jssp_simple_32) {
  INIT_V8();
  // Exercise every combination of push method and pop method for W-sized
  // registers, across a range of claim sizes and register counts.
  const PushPopMethod methods[] = {PushPopByFour, PushPopRegList};
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      for (int push = 0; push < 2; push++) {
        for (int pop = 0; pop < 2; pop++) {
          PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
                                  methods[push], methods[pop]);
        }
      }
    }
    // Test with the maximum number of registers.
    for (int push = 0; push < 2; push++) {
      for (int pop = 0; pop < 2; pop++) {
        PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim,
                                kWRegSizeInBits, methods[push], methods[pop]);
      }
    }
  }
}
8720 
8721 
TEST(push_pop_jssp_simple_64) {
  INIT_V8();
  // Exercise every combination of push method and pop method for X-sized
  // registers, across a range of claim sizes and register counts.
  const PushPopMethod methods[] = {PushPopByFour, PushPopRegList};
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      for (int push = 0; push < 2; push++) {
        for (int pop = 0; pop < 2; pop++) {
          PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
                                  methods[push], methods[pop]);
        }
      }
    }
    // Test with the maximum number of registers.
    for (int push = 0; push < 2; push++) {
      for (int pop = 0; pop < 2; pop++) {
        PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim,
                                kXRegSizeInBits, methods[push], methods[pop]);
      }
    }
  }
}
8746 
8747 
// The maximum number of registers that can be used by the PushPopFPJssp* tests,
// where a reg_count field is provided.
// -1 is a sentinel: the helpers replace it with the count of all FP registers
// they are allowed to use.
static int const kPushPopFPJsspMaxRegCount = -1;
8751 
8752 // Test a simple push-pop pattern:
8753 //  * Claim <claim> bytes to set the stack alignment.
8754 //  * Push <reg_count> FP registers with size <reg_size>.
8755 //  * Clobber the register contents.
8756 //  * Pop <reg_count> FP registers to restore the original contents.
8757 //  * Drop <claim> bytes to restore the original stack pointer.
8758 //
8759 // Different push and pop methods can be specified independently to test for
8760 // proper word-endian behaviour.
static void PushPopFPJsspSimpleHelper(int reg_count,
                                      int claim,
                                      int reg_size,
                                      PushPopMethod push_method,
                                      PushPopMethod pop_method) {
  SETUP();

  START();

  // We can use any floating-point register. None of them are reserved for
  // debug code, for example.
  static RegList const allowed = ~0;
  // kPushPopFPJsspMaxRegCount (-1) means "use every allowed FP register".
  if (reg_count == kPushPopFPJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
  }
  // Work out which registers to use, based on reg_size.
  FPRegister v[kNumberOfRegisters];
  FPRegister d[kNumberOfRegisters];
  RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
                                         allowed);

  // The literal base is chosen to have two useful properties:
  //  * When multiplied (using an integer) by small values (such as a register
  //    index), this value is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  //  * It is never a floating-point NaN, and will therefore always compare
  //    equal to itself.
  uint64_t literal_base = 0x0100001000100101UL;

  {
    // Use jssp as the stack pointer for the body of the test, restoring csp
    // before END() so the framework can clean up.
    DCHECK(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    int i;

    // Initialize the registers, using X registers to load the literal.
    __ Mov(x0, 0);
    __ Mov(x1, literal_base);
    for (i = 0; i < reg_count; i++) {
      // Always write into the D register, to ensure that the upper word is
      // properly ignored by Push when testing S registers.
      // Fmov moves the raw bit pattern, so d[i] holds literal_base * i as
      // uninterpreted bits.
      __ Fmov(d[i], x0);
      // Calculate the next literal.
      __ Add(x0, x0, x1);
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    switch (push_method) {
      case PushPopByFour:
        // Push high-numbered registers first (to the highest addresses).
        for (i = reg_count; i >= 4; i -= 4) {
          __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
        }
        // Finish off the leftovers.
        switch (i) {
          case 3:  __ Push(v[2], v[1], v[0]); break;
          case 2:  __ Push(v[1], v[0]);       break;
          case 1:  __ Push(v[0]);             break;
          default: DCHECK(i == 0);            break;
        }
        break;
      case PushPopRegList:
        __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
        break;
    }

    // Clobber all the registers, to ensure that they get repopulated by Pop.
    ClobberFP(&masm, list);

    switch (pop_method) {
      case PushPopByFour:
        // Pop low-numbered registers first (from the lowest addresses).
        for (i = 0; i <= (reg_count-4); i += 4) {
          __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
        }
        // Finish off the leftovers.
        switch (reg_count - i) {
          case 3:  __ Pop(v[i], v[i+1], v[i+2]); break;
          case 2:  __ Pop(v[i], v[i+1]);         break;
          case 1:  __ Pop(v[i]);                 break;
          default: DCHECK(i == reg_count);       break;
        }
        break;
      case PushPopRegList:
        __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
        break;
    }

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Check that the register contents were preserved.
  // Always use CHECK_EQUAL_FP64, even when testing S registers, so we can
  // test that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
  for (int i = 0; i < reg_count; i++) {
    uint64_t literal = literal_base * i;
    double expected;
    // Reinterpret the integer literal as a double bit pattern, matching what
    // Fmov loaded into the register.
    memcpy(&expected, &literal, sizeof(expected));
    CHECK_EQUAL_FP64(expected, d[i]);
  }

  TEARDOWN();
}
8877 
8878 
TEST(push_pop_fp_jssp_simple_32) {
  INIT_V8();
  // Exercise every combination of push method and pop method for S-sized
  // FP registers, across a range of claim sizes and register counts.
  const PushPopMethod methods[] = {PushPopByFour, PushPopRegList};
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      for (int push = 0; push < 2; push++) {
        for (int pop = 0; pop < 2; pop++) {
          PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
                                    methods[push], methods[pop]);
        }
      }
    }
    // Test with the maximum number of registers.
    for (int push = 0; push < 2; push++) {
      for (int pop = 0; pop < 2; pop++) {
        PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim,
                                  kSRegSizeInBits, methods[push],
                                  methods[pop]);
      }
    }
  }
}
8903 
8904 
TEST(push_pop_fp_jssp_simple_64) {
  INIT_V8();
  // Exercise every combination of push method and pop method for D-sized
  // FP registers, across a range of claim sizes and register counts.
  const PushPopMethod methods[] = {PushPopByFour, PushPopRegList};
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 0; count <= 8; count++) {
      for (int push = 0; push < 2; push++) {
        for (int pop = 0; pop < 2; pop++) {
          PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
                                    methods[push], methods[pop]);
        }
      }
    }
    // Test with the maximum number of registers.
    for (int push = 0; push < 2; push++) {
      for (int pop = 0; pop < 2; pop++) {
        PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim,
                                  kDRegSizeInBits, methods[push],
                                  methods[pop]);
      }
    }
  }
}
8929 
8930 
8931 // Push and pop data using an overlapping combination of Push/Pop and
8932 // RegList-based methods.
static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
  SETUP();

  // Registers x8 and x9 are used by the macro assembler for debug code (for
  // example in 'Pop'), so we can't use them here. We can't use jssp because it
  // will be the stack pointer for this test.
  static RegList const allowed =
      ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
  // Work out which registers to use, based on reg_size.
  Register r[10];
  Register x[10];
  PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);

  // Calculate some handy register lists.
  RegList r0_to_r3 = 0;
  for (int i = 0; i <= 3; i++) {
    r0_to_r3 |= x[i].Bit();
  }
  RegList r4_to_r5 = 0;
  for (int i = 4; i <= 5; i++) {
    r4_to_r5 |= x[i].Bit();
  }
  RegList r6_to_r9 = 0;
  for (int i = 6; i <= 9; i++) {
    r6_to_r9 |= x[i].Bit();
  }

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  uint64_t literal_base = 0x0100001000100101UL;

  START();
  {
    // Use jssp as the stack pointer for the body of the test, restoring csp
    // before END() so the framework can clean up.
    DCHECK(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    __ Mov(x[3], literal_base * 3);
    __ Mov(x[2], literal_base * 2);
    __ Mov(x[1], literal_base * 1);
    __ Mov(x[0], literal_base * 0);

    // The pushes and pops below deliberately overlap, so the values popped
    // come from different push operations than the ones that stored them.
    // This checks that the Push/Pop and Push/PopSizeRegList methods lay out
    // the stack identically. The expected final values are asserted after
    // RUN(), below.
    __ PushSizeRegList(r0_to_r3, reg_size);
    __ Push(r[3], r[2]);

    Clobber(&masm, r0_to_r3);
    __ PopSizeRegList(r0_to_r3, reg_size);

    __ Push(r[2], r[1], r[3], r[0]);

    Clobber(&masm, r4_to_r5);
    __ Pop(r[4], r[5]);
    Clobber(&masm, r6_to_r9);
    __ Pop(r[6], r[7], r[8], r[9]);

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Always use CHECK_EQUAL_64, even when testing W registers, so we can test
  // that the upper word was properly cleared by Pop.
  literal_base &= (0xffffffffffffffffUL >> (64-reg_size));

  CHECK_EQUAL_64(literal_base * 3, x[9]);
  CHECK_EQUAL_64(literal_base * 2, x[8]);
  CHECK_EQUAL_64(literal_base * 0, x[7]);
  CHECK_EQUAL_64(literal_base * 3, x[6]);
  CHECK_EQUAL_64(literal_base * 1, x[5]);
  CHECK_EQUAL_64(literal_base * 2, x[4]);

  TEARDOWN();
}
9018 
9019 
TEST(push_pop_jssp_mixed_methods_64) {
  INIT_V8();
  // Check each claim size from 0 to 8 bytes with X-sized registers.
  int claim = 0;
  while (claim <= 8) {
    PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
    ++claim;
  }
}
9026 
9027 
TEST(push_pop_jssp_mixed_methods_32) {
  INIT_V8();
  // Check each claim size from 0 to 8 bytes with W-sized registers.
  int claim = 0;
  while (claim <= 8) {
    PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
    ++claim;
  }
}
9034 
9035 
// Push and pop data using overlapping X- and W-sized quantities.
//
// `reg_count` is the number of registers to push/pop (pass
// kPushPopJsspMaxRegCount to use every allowed register), and `claim` is a
// number of bytes to claim on the stack first, so that a variety of stack
// alignments are exercised.
static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
  // This test emits rather a lot of code.
  SETUP_SIZE(BUF_SIZE * 2);

  // Work out which registers to use, based on reg_size.
  Register tmp = x8;
  static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
  if (reg_count == kPushPopJsspMaxRegCount) {
    reg_count = CountSetBits(allowed, kNumberOfRegisters);
  }
  // w[i] and x[i] alias the same architectural register.
  Register w[kNumberOfRegisters];
  Register x[kNumberOfRegisters];
  RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);

  // The number of W-sized slots we expect to pop. When we pop, we alternate
  // between W and X registers, so we need reg_count*1.5 W-sized slots.
  int const requested_w_slots = reg_count + reg_count / 2;

  // Track what _should_ be on the stack, using W-sized slots.
  static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
  uint32_t stack[kMaxWSlots];
  for (int i = 0; i < kMaxWSlots; i++) {
    stack[i] = 0xdeadbeef;
  }

  // The literal base is chosen to have two useful properties:
  //  * When multiplied by small values (such as a register index), this value
  //    is clearly readable in the result.
  //  * The value is not formed from repeating fixed-size smaller values, so it
  //    can be used to detect endianness-related errors.
  static uint64_t const literal_base = 0x0100001000100101UL;
  static uint64_t const literal_base_hi = literal_base >> 32;
  static uint64_t const literal_base_lo = literal_base & 0xffffffff;
  static uint64_t const literal_base_w = literal_base & 0xffffffff;

  START();
  {
    // Switch to jssp as the stack pointer for the push/pop sequence.
    DCHECK(__ StackPointer().Is(csp));
    __ Mov(jssp, __ StackPointer());
    __ SetStackPointer(jssp);

    // Initialize the registers.
    for (int i = 0; i < reg_count; i++) {
      // Always write into the X register, to ensure that the upper word is
      // properly ignored by Push when testing W registers.
      if (!x[i].IsZero()) {
        __ Mov(x[i], literal_base * i);
      }
    }

    // Claim memory first, as requested.
    __ Claim(claim, kByteSizeInBytes);

    // The push-pop pattern is as follows:
    // Push:           Pop:
    //  x[0](hi)   ->   w[0]
    //  x[0](lo)   ->   x[1](hi)
    //  w[1]       ->   x[1](lo)
    //  w[1]       ->   w[2]
    //  x[2](hi)   ->   x[2](hi)
    //  x[2](lo)   ->   x[2](lo)
    //  x[2](hi)   ->   w[3]
    //  x[2](lo)   ->   x[4](hi)
    //  x[2](hi)   ->   x[4](lo)
    //  x[2](lo)   ->   w[5]
    //  w[3]       ->   x[5](hi)
    //  w[3]       ->   x[6](lo)
    //  w[3]       ->   w[7]
    //  w[3]       ->   x[8](hi)
    //  x[4](hi)   ->   x[8](lo)
    //  x[4](lo)   ->   w[9]
    // ... pattern continues ...
    //
    // That is, registers are pushed starting with the lower numbers,
    // alternating between x and w registers, and pushing i%4+1 copies of each,
    // where i is the register number.
    // Registers are popped starting with the higher numbers one-by-one,
    // alternating between x and w registers, but only popping one at a time.
    //
    // This pattern provides a wide variety of alignment effects and overlaps.

    // ---- Push ----

    int active_w_slots = 0;
    for (int i = 0; active_w_slots < requested_w_slots; i++) {
      DCHECK(i < reg_count);
      // In order to test various arguments to PushMultipleTimes, and to try to
      // exercise different alignment and overlap effects, we push each
      // register a different number of times.
      int times = i % 4 + 1;
      if (i & 1) {
        // Push odd-numbered registers as W registers.
        if (i & 2) {
          __ PushMultipleTimes(w[i], times);
        } else {
          // Use a register to specify the count.
          __ Mov(tmp.W(), times);
          __ PushMultipleTimes(w[i], tmp.W());
        }
        // Fill in the expected stack slots.
        for (int j = 0; j < times; j++) {
          if (w[i].Is(wzr)) {
            // The zero register always writes zeroes.
            stack[active_w_slots++] = 0;
          } else {
            stack[active_w_slots++] = literal_base_w * i;
          }
        }
      } else {
        // Push even-numbered registers as X registers.
        if (i & 2) {
          __ PushMultipleTimes(x[i], times);
        } else {
          // Use a register to specify the count.
          __ Mov(tmp, times);
          __ PushMultipleTimes(x[i], tmp);
        }
        // Fill in the expected stack slots. An X push occupies two W-sized
        // slots: high word first, then low word.
        for (int j = 0; j < times; j++) {
          if (x[i].IsZero()) {
            // The zero register always writes zeroes.
            stack[active_w_slots++] = 0;
            stack[active_w_slots++] = 0;
          } else {
            stack[active_w_slots++] = literal_base_hi * i;
            stack[active_w_slots++] = literal_base_lo * i;
          }
        }
      }
    }
    // Because we were pushing several registers at a time, we probably pushed
    // more than we needed to.
    if (active_w_slots > requested_w_slots) {
      __ Drop(active_w_slots - requested_w_slots, kWRegSize);
      // Bump the number of active W-sized slots back to where it should be,
      // and fill the empty space with a dummy value.
      // NOTE(review): the first write below targets stack[active_w_slots],
      // one past the last filled slot (already 0xdeadbeef), and
      // stack[requested_w_slots] is left holding its pushed value. This looks
      // off-by-one, but it is harmless because the checks at the end only
      // read slots [0, requested_w_slots) — confirm if refactoring.
      do {
        stack[active_w_slots--] = 0xdeadbeef;
      } while (active_w_slots > requested_w_slots);
    }

    // ---- Pop ----

    // Clobber every pushed register so that the Pop results are observable.
    Clobber(&masm, list);

    // If popping an even number of registers, the first one will be X-sized.
    // Otherwise, the first one will be W-sized.
    bool next_is_64 = !(reg_count & 1);
    for (int i = reg_count-1; i >= 0; i--) {
      if (next_is_64) {
        __ Pop(x[i]);
        active_w_slots -= 2;
      } else {
        __ Pop(w[i]);
        active_w_slots -= 1;
      }
      next_is_64 = !next_is_64;
    }
    // Every requested slot should have been consumed by the pops above.
    DCHECK(active_w_slots == 0);

    // Drop memory to restore jssp.
    __ Drop(claim, kByteSizeInBytes);

    // Restore csp as the stack pointer before leaving the generated code.
    __ Mov(csp, __ StackPointer());
    __ SetStackPointer(csp);
  }

  END();

  RUN();

  // Walk the expected-stack model and compare it against the popped
  // registers, alternating W and X as the pop loop above did.
  int slot = 0;
  for (int i = 0; i < reg_count; i++) {
    // Even-numbered registers were written as W registers.
    // Odd-numbered registers were written as X registers.
    bool expect_64 = (i & 1);
    uint64_t expected;

    if (expect_64) {
      uint64_t hi = stack[slot++];
      uint64_t lo = stack[slot++];
      expected = (hi << 32) | lo;
    } else {
      expected = stack[slot++];
    }

    // Always use CHECK_EQUAL_64, even when testing W registers, so we can
    // test that the upper word was properly cleared by Pop.
    if (x[i].IsZero()) {
      CHECK_EQUAL_64(0, x[i]);
    } else {
      CHECK_EQUAL_64(expected, x[i]);
    }
  }
  DCHECK(slot == requested_w_slots);

  TEARDOWN();
}
9235 
9236 
TEST(push_pop_jssp_wx_overlap) {
  INIT_V8();
  // Each configuration is exercised four times, exactly as before; the
  // repetition is simply expressed as a loop instead of duplicated calls.
  static const int kRepetitions = 4;
  for (int claim = 0; claim <= 8; claim++) {
    for (int count = 1; count <= 8; count++) {
      for (int rep = 0; rep < kRepetitions; rep++) {
        PushPopJsspWXOverlapHelper(count, claim);
      }
    }
    // Test with the maximum number of registers.
    for (int rep = 0; rep < kRepetitions; rep++) {
      PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
    }
  }
}
9253 
9254 
// Test pushing and popping (via operand lists and via RegList bit masks)
// while csp is the stack pointer, checking that values survive round trips
// through the stack and that W pops clear the upper word.
TEST(push_pop_csp) {
  INIT_V8();
  SETUP();

  START();

  // csp is the stack pointer throughout this test.
  DCHECK(csp.Is(__ StackPointer()));

  // Seed x0-x3 with distinctive patterns.
  __ Mov(x3, 0x3333333333333333UL);
  __ Mov(x2, 0x2222222222222222UL);
  __ Mov(x1, 0x1111111111111111UL);
  __ Mov(x0, 0x0000000000000000UL);
  // Mix RegList-based pushes/pops with operand-list forms so values are
  // shuffled between registers; the expectations below follow from the
  // resulting stack layout.
  __ Claim(2);
  __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
  __ Push(x3, x2);
  __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
  __ Push(x2, x1, x3, x0);
  __ Pop(x4, x5);
  __ Pop(x6, x7, x8, x9);

  // Same idea with W-sized pushes and pops.
  __ Claim(2);
  __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
  __ Push(w3, w1, w2, w0);
  __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
  __ Pop(w14, w15, w16, w17);

  // Mix W and X pushes/pops over the same stack area.
  __ Claim(2);
  __ Push(w2, w2, w1, w1);
  __ Push(x3, x3);
  __ Pop(w18, w19, w20, w21);
  __ Pop(x22, x23);

  // Sparse register lists.
  __ Claim(2);
  __ PushXRegList(x1.Bit() | x22.Bit());
  __ PopXRegList(x24.Bit() | x26.Bit());

  __ Claim(2);
  __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
  __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());

  // Degenerate cases: empty lists (no-ops) and a full list push/pop pair.
  __ Claim(2);
  __ PushXRegList(0);
  __ PopXRegList(0);
  __ PushXRegList(0xffffffff);
  __ PopXRegList(0xffffffff);
  // Discard the space claimed by the six Claim(2) calls above.
  __ Drop(12);

  END();

  RUN();

  CHECK_EQUAL_64(0x1111111111111111UL, x3);
  CHECK_EQUAL_64(0x0000000000000000UL, x2);
  CHECK_EQUAL_64(0x3333333333333333UL, x1);
  CHECK_EQUAL_64(0x2222222222222222UL, x0);
  CHECK_EQUAL_64(0x3333333333333333UL, x9);
  CHECK_EQUAL_64(0x2222222222222222UL, x8);
  CHECK_EQUAL_64(0x0000000000000000UL, x7);
  CHECK_EQUAL_64(0x3333333333333333UL, x6);
  CHECK_EQUAL_64(0x1111111111111111UL, x5);
  CHECK_EQUAL_64(0x2222222222222222UL, x4);

  CHECK_EQUAL_32(0x11111111U, w13);
  CHECK_EQUAL_32(0x33333333U, w12);
  CHECK_EQUAL_32(0x00000000U, w11);
  CHECK_EQUAL_32(0x22222222U, w10);
  CHECK_EQUAL_32(0x11111111U, w17);
  CHECK_EQUAL_32(0x00000000U, w16);
  CHECK_EQUAL_32(0x33333333U, w15);
  CHECK_EQUAL_32(0x22222222U, w14);

  CHECK_EQUAL_32(0x11111111U, w18);
  CHECK_EQUAL_32(0x11111111U, w19);
  CHECK_EQUAL_32(0x11111111U, w20);
  CHECK_EQUAL_32(0x11111111U, w21);
  CHECK_EQUAL_64(0x3333333333333333UL, x22);
  CHECK_EQUAL_64(0x0000000000000000UL, x23);

  CHECK_EQUAL_64(0x3333333333333333UL, x24);
  CHECK_EQUAL_64(0x3333333333333333UL, x26);

  CHECK_EQUAL_32(0x33333333U, w25);
  CHECK_EQUAL_32(0x00000000U, w27);
  CHECK_EQUAL_32(0x22222222U, w28);
  CHECK_EQUAL_32(0x33333333U, w29);
  TEARDOWN();
}
9342 
9343 
// Test MacroAssembler::PushPopQueue's PushQueued: registers are queued first,
// then pushed in one batch, and finally popped with conventional Pop calls.
// The original values must survive the round trip.
TEST(push_queued) {
  INIT_V8();
  SETUP();

  START();

  // Use jssp as the stack pointer for the queued pushes.
  DCHECK(__ StackPointer().Is(csp));
  __ Mov(jssp, __ StackPointer());
  __ SetStackPointer(jssp);

  MacroAssembler::PushPopQueue queue(&masm);

  // Queue up registers. Note that queueing records the registers only; the
  // values are assigned below, before PushQueued emits the actual pushes.
  queue.Queue(x0);
  queue.Queue(x1);
  queue.Queue(x2);
  queue.Queue(x3);

  queue.Queue(w4);
  queue.Queue(w5);
  queue.Queue(w6);

  queue.Queue(d0);
  queue.Queue(d1);

  queue.Queue(s2);

  __ Mov(x0, 0x1234000000000000);
  __ Mov(x1, 0x1234000100010001);
  __ Mov(x2, 0x1234000200020002);
  __ Mov(x3, 0x1234000300030003);
  __ Mov(w4, 0x12340004);
  __ Mov(w5, 0x12340005);
  __ Mov(w6, 0x12340006);
  __ Fmov(d0, 123400.0);
  __ Fmov(d1, 123401.0);
  __ Fmov(s2, 123402.0);

  // Actually push them.
  queue.PushQueued();

  // Clobber x0-x6 and d0-d2 so the Pop results are observable.
  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));

  // Pop them conventionally, in reverse of the queueing order.
  __ Pop(s2);
  __ Pop(d1, d0);
  __ Pop(w6, w5, w4);
  __ Pop(x3, x2, x1, x0);

  // Restore csp as the stack pointer.
  __ Mov(csp, __ StackPointer());
  __ SetStackPointer(csp);

  END();

  RUN();

  CHECK_EQUAL_64(0x1234000000000000, x0);
  CHECK_EQUAL_64(0x1234000100010001, x1);
  CHECK_EQUAL_64(0x1234000200020002, x2);
  CHECK_EQUAL_64(0x1234000300030003, x3);

  CHECK_EQUAL_32(0x12340004, w4);
  CHECK_EQUAL_32(0x12340005, w5);
  CHECK_EQUAL_32(0x12340006, w6);

  CHECK_EQUAL_FP64(123400.0, d0);
  CHECK_EQUAL_FP64(123401.0, d1);

  CHECK_EQUAL_FP32(123402.0, s2);

  TEARDOWN();
}
9417 
9418 
// Test MacroAssembler::PushPopQueue's PopQueued: registers are pushed with
// conventional Push calls, queued for popping, then popped in one batch. The
// original values must survive the round trip (W pops clear the upper word).
TEST(pop_queued) {
  INIT_V8();
  SETUP();

  START();

  // Use jssp as the stack pointer for the queued pops.
  DCHECK(__ StackPointer().Is(csp));
  __ Mov(jssp, __ StackPointer());
  __ SetStackPointer(jssp);

  MacroAssembler::PushPopQueue queue(&masm);

  __ Mov(x0, 0x1234000000000000);
  __ Mov(x1, 0x1234000100010001);
  __ Mov(x2, 0x1234000200020002);
  __ Mov(x3, 0x1234000300030003);
  __ Mov(w4, 0x12340004);
  __ Mov(w5, 0x12340005);
  __ Mov(w6, 0x12340006);
  __ Fmov(d0, 123400.0);
  __ Fmov(d1, 123401.0);
  __ Fmov(s2, 123402.0);

  // Push registers conventionally.
  __ Push(x0, x1, x2, x3);
  __ Push(w4, w5, w6);
  __ Push(d0, d1);
  __ Push(s2);

  // Queue up a pop. Registers are queued in reverse of the push order, so
  // each lands back in the register it came from.
  queue.Queue(s2);

  queue.Queue(d1);
  queue.Queue(d0);

  queue.Queue(w6);
  queue.Queue(w5);
  queue.Queue(w4);

  queue.Queue(x3);
  queue.Queue(x2);
  queue.Queue(x1);
  queue.Queue(x0);

  // Clobber x0-x6 and d0-d2 so the queued-pop results are observable.
  Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
  Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));

  // Actually pop them.
  queue.PopQueued();

  // Restore csp as the stack pointer.
  __ Mov(csp, __ StackPointer());
  __ SetStackPointer(csp);

  END();

  RUN();

  CHECK_EQUAL_64(0x1234000000000000, x0);
  CHECK_EQUAL_64(0x1234000100010001, x1);
  CHECK_EQUAL_64(0x1234000200020002, x2);
  CHECK_EQUAL_64(0x1234000300030003, x3);

  // w4-w6 were popped as W registers, so the upper words must be zero.
  CHECK_EQUAL_64(0x0000000012340004, x4);
  CHECK_EQUAL_64(0x0000000012340005, x5);
  CHECK_EQUAL_64(0x0000000012340006, x6);

  CHECK_EQUAL_FP64(123400.0, d0);
  CHECK_EQUAL_FP64(123401.0, d1);

  CHECK_EQUAL_FP32(123402.0, s2);

  TEARDOWN();
}
9492 
9493 
// Test JumpIfBothSmi on every smi/pointer combination of two registers.
// Each check branches to a pass or fail label, which records 1 or 0 in a
// result register (x4-x7) and branches back to the main sequence.
TEST(jump_both_smi) {
  INIT_V8();
  SETUP();

  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
  Label return1, return2, return3, done;

  START();

  // x0/x1 are non-smis (tagged pointers); x2/x3 are smis.
  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
  __ Mov(x2, 0x1234567800000000UL);  // A smi.
  __ Mov(x3, 0x8765432100000000UL);  // A smi.
  // x4-x7 hold the outcome of each check; 0xdead means "not taken".
  __ Mov(x4, 0xdead);
  __ Mov(x5, 0xdead);
  __ Mov(x6, 0xdead);
  __ Mov(x7, 0xdead);

  // The four combinations: (pointer, pointer), (pointer, smi),
  // (smi, pointer) and (smi, smi). Only the last should pass.
  __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
  __ Bind(&return1);
  __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
  __ Bind(&return2);
  __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
  __ Bind(&return3);
  __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);

  // Record the result of each check and resume the main sequence.
  __ Bind(&cond_fail_00);
  __ Mov(x4, 0);
  __ B(&return1);
  __ Bind(&cond_pass_00);
  __ Mov(x4, 1);
  __ B(&return1);

  __ Bind(&cond_fail_01);
  __ Mov(x5, 0);
  __ B(&return2);
  __ Bind(&cond_pass_01);
  __ Mov(x5, 1);
  __ B(&return2);

  __ Bind(&cond_fail_10);
  __ Mov(x6, 0);
  __ B(&return3);
  __ Bind(&cond_pass_10);
  __ Mov(x6, 1);
  __ B(&return3);

  __ Bind(&cond_fail_11);
  __ Mov(x7, 0);
  __ B(&done);
  __ Bind(&cond_pass_11);
  __ Mov(x7, 1);

  __ Bind(&done);

  END();

  RUN();

  // The input registers must be preserved.
  CHECK_EQUAL_64(0x5555555500000001UL, x0);
  CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
  CHECK_EQUAL_64(0x1234567800000000UL, x2);
  CHECK_EQUAL_64(0x8765432100000000UL, x3);
  // Only the (smi, smi) case passes.
  CHECK_EQUAL_64(0, x4);
  CHECK_EQUAL_64(0, x5);
  CHECK_EQUAL_64(0, x6);
  CHECK_EQUAL_64(1, x7);

  TEARDOWN();
}
9565 
9566 
// Test JumpIfEitherSmi on every smi/pointer combination of two registers.
// Same structure as jump_both_smi above, but any combination containing at
// least one smi should take the pass label.
TEST(jump_either_smi) {
  INIT_V8();
  SETUP();

  Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
  Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
  Label return1, return2, return3, done;

  START();

  // x0/x1 are non-smis (tagged pointers); x2/x3 are smis.
  __ Mov(x0, 0x5555555500000001UL);  // A pointer.
  __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
  __ Mov(x2, 0x1234567800000000UL);  // A smi.
  __ Mov(x3, 0x8765432100000000UL);  // A smi.
  // x4-x7 hold the outcome of each check; 0xdead means "not taken".
  __ Mov(x4, 0xdead);
  __ Mov(x5, 0xdead);
  __ Mov(x6, 0xdead);
  __ Mov(x7, 0xdead);

  // The four combinations: only (pointer, pointer) should fail.
  __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
  __ Bind(&return1);
  __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
  __ Bind(&return2);
  __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
  __ Bind(&return3);
  __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);

  // Record the result of each check and resume the main sequence.
  __ Bind(&cond_fail_00);
  __ Mov(x4, 0);
  __ B(&return1);
  __ Bind(&cond_pass_00);
  __ Mov(x4, 1);
  __ B(&return1);

  __ Bind(&cond_fail_01);
  __ Mov(x5, 0);
  __ B(&return2);
  __ Bind(&cond_pass_01);
  __ Mov(x5, 1);
  __ B(&return2);

  __ Bind(&cond_fail_10);
  __ Mov(x6, 0);
  __ B(&return3);
  __ Bind(&cond_pass_10);
  __ Mov(x6, 1);
  __ B(&return3);

  __ Bind(&cond_fail_11);
  __ Mov(x7, 0);
  __ B(&done);
  __ Bind(&cond_pass_11);
  __ Mov(x7, 1);

  __ Bind(&done);

  END();

  RUN();

  // The input registers must be preserved.
  CHECK_EQUAL_64(0x5555555500000001UL, x0);
  CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
  CHECK_EQUAL_64(0x1234567800000000UL, x2);
  CHECK_EQUAL_64(0x8765432100000000UL, x3);
  // Every case with at least one smi passes.
  CHECK_EQUAL_64(0, x4);
  CHECK_EQUAL_64(1, x5);
  CHECK_EQUAL_64(1, x6);
  CHECK_EQUAL_64(1, x7);

  TEARDOWN();
}
9638 
9639 
TEST(noreg) {
  // This test doesn't generate any code, but it verifies some invariants
  // related to NoReg: every flavour of "no register" must alias every other
  // flavour, and each must report itself as none.
  CPURegister const nones[] = {NoReg, NoFPReg, NoCPUReg};
  int const none_count = static_cast<int>(sizeof(nones) / sizeof(nones[0]));

  for (int i = 0; i < none_count; i++) {
    for (int j = 0; j < none_count; j++) {
      if (i != j) {
        CHECK(nones[i].Is(nones[j]));
      }
    }
  }

  for (int i = 0; i < none_count; i++) {
    CHECK(nones[i].IsNone());
  }
}
9654 
9655 
TEST(isvalid) {
  // This test doesn't generate any code, but it verifies some invariants
  // related to IsValid().
  // The "none" registers are never valid.
  CHECK(!NoReg.IsValid());
  CHECK(!NoFPReg.IsValid());
  CHECK(!NoCPUReg.IsValid());

  // Real W/X registers, including the zero registers, are valid.
  CHECK(x0.IsValid());
  CHECK(w0.IsValid());
  CHECK(x30.IsValid());
  CHECK(w30.IsValid());
  CHECK(xzr.IsValid());
  CHECK(wzr.IsValid());

  // The stack pointers are valid registers.
  CHECK(csp.IsValid());
  CHECK(wcsp.IsValid());

  // FP registers are valid.
  CHECK(d0.IsValid());
  CHECK(s0.IsValid());
  CHECK(d31.IsValid());
  CHECK(s31.IsValid());

  // IsValidRegister() and IsValidFPRegister() must be mutually exclusive:
  // core registers are not FP registers and vice versa.
  CHECK(x0.IsValidRegister());
  CHECK(w0.IsValidRegister());
  CHECK(xzr.IsValidRegister());
  CHECK(wzr.IsValidRegister());
  CHECK(csp.IsValidRegister());
  CHECK(wcsp.IsValidRegister());
  CHECK(!x0.IsValidFPRegister());
  CHECK(!w0.IsValidFPRegister());
  CHECK(!xzr.IsValidFPRegister());
  CHECK(!wzr.IsValidFPRegister());
  CHECK(!csp.IsValidFPRegister());
  CHECK(!wcsp.IsValidFPRegister());

  CHECK(d0.IsValidFPRegister());
  CHECK(s0.IsValidFPRegister());
  CHECK(!d0.IsValidRegister());
  CHECK(!s0.IsValidRegister());

  // Test the same as before, but using CPURegister types. This shouldn't make
  // any difference.
  CHECK(static_cast<CPURegister>(x0).IsValid());
  CHECK(static_cast<CPURegister>(w0).IsValid());
  CHECK(static_cast<CPURegister>(x30).IsValid());
  CHECK(static_cast<CPURegister>(w30).IsValid());
  CHECK(static_cast<CPURegister>(xzr).IsValid());
  CHECK(static_cast<CPURegister>(wzr).IsValid());

  CHECK(static_cast<CPURegister>(csp).IsValid());
  CHECK(static_cast<CPURegister>(wcsp).IsValid());

  CHECK(static_cast<CPURegister>(d0).IsValid());
  CHECK(static_cast<CPURegister>(s0).IsValid());
  CHECK(static_cast<CPURegister>(d31).IsValid());
  CHECK(static_cast<CPURegister>(s31).IsValid());

  CHECK(static_cast<CPURegister>(x0).IsValidRegister());
  CHECK(static_cast<CPURegister>(w0).IsValidRegister());
  CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
  CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
  CHECK(static_cast<CPURegister>(csp).IsValidRegister());
  CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
  CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());

  CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
  CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
  CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
  CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
}
9731 
9732 
TEST(cpureglist_utils_x) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of X registers.
  CPURegList test(x0, x1, x2, x3);

  // Each X register in the list must also alias its W counterpart.
  CHECK(test.IncludesAliasOf(x0));
  CHECK(test.IncludesAliasOf(x1));
  CHECK(test.IncludesAliasOf(x2));
  CHECK(test.IncludesAliasOf(x3));
  CHECK(test.IncludesAliasOf(w0));
  CHECK(test.IncludesAliasOf(w1));
  CHECK(test.IncludesAliasOf(w2));
  CHECK(test.IncludesAliasOf(w3));

  // Registers not in the list (including the zero register and the stack
  // pointer) must not be reported as aliases.
  CHECK(!test.IncludesAliasOf(x4));
  CHECK(!test.IncludesAliasOf(x30));
  CHECK(!test.IncludesAliasOf(xzr));
  CHECK(!test.IncludesAliasOf(csp));
  CHECK(!test.IncludesAliasOf(w4));
  CHECK(!test.IncludesAliasOf(w30));
  CHECK(!test.IncludesAliasOf(wzr));
  CHECK(!test.IncludesAliasOf(wcsp));

  // FP registers with the same codes are a different register bank and must
  // never alias core registers.
  CHECK(!test.IncludesAliasOf(d0));
  CHECK(!test.IncludesAliasOf(d1));
  CHECK(!test.IncludesAliasOf(d2));
  CHECK(!test.IncludesAliasOf(d3));
  CHECK(!test.IncludesAliasOf(s0));
  CHECK(!test.IncludesAliasOf(s1));
  CHECK(!test.IncludesAliasOf(s2));
  CHECK(!test.IncludesAliasOf(s3));

  CHECK(!test.IsEmpty());

  CHECK(test.type() == x0.type());

  // PopHighestIndex/PopLowestIndex return the end registers and remove them
  // from the list; the checks below track the shrinking contents.
  CHECK(test.PopHighestIndex().Is(x3));
  CHECK(test.PopLowestIndex().Is(x0));

  CHECK(test.IncludesAliasOf(x1));
  CHECK(test.IncludesAliasOf(x2));
  CHECK(test.IncludesAliasOf(w1));
  CHECK(test.IncludesAliasOf(w2));
  CHECK(!test.IncludesAliasOf(x0));
  CHECK(!test.IncludesAliasOf(x3));
  CHECK(!test.IncludesAliasOf(w0));
  CHECK(!test.IncludesAliasOf(w3));

  CHECK(test.PopHighestIndex().Is(x2));
  CHECK(test.PopLowestIndex().Is(x1));

  CHECK(!test.IncludesAliasOf(x1));
  CHECK(!test.IncludesAliasOf(x2));
  CHECK(!test.IncludesAliasOf(w1));
  CHECK(!test.IncludesAliasOf(w2));

  // After popping all four registers, the list is empty.
  CHECK(test.IsEmpty());
}
9793 
9794 
TEST(cpureglist_utils_w) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of W registers.
  CPURegList test(w10, w11, w12, w13);

  // Each W register in the list must also alias its X counterpart.
  CHECK(test.IncludesAliasOf(x10));
  CHECK(test.IncludesAliasOf(x11));
  CHECK(test.IncludesAliasOf(x12));
  CHECK(test.IncludesAliasOf(x13));
  CHECK(test.IncludesAliasOf(w10));
  CHECK(test.IncludesAliasOf(w11));
  CHECK(test.IncludesAliasOf(w12));
  CHECK(test.IncludesAliasOf(w13));

  // Registers not in the list (including the zero register and the stack
  // pointer) must not be reported as aliases.
  CHECK(!test.IncludesAliasOf(x0));
  CHECK(!test.IncludesAliasOf(x9));
  CHECK(!test.IncludesAliasOf(x14));
  CHECK(!test.IncludesAliasOf(x30));
  CHECK(!test.IncludesAliasOf(xzr));
  CHECK(!test.IncludesAliasOf(csp));
  CHECK(!test.IncludesAliasOf(w0));
  CHECK(!test.IncludesAliasOf(w9));
  CHECK(!test.IncludesAliasOf(w14));
  CHECK(!test.IncludesAliasOf(w30));
  CHECK(!test.IncludesAliasOf(wzr));
  CHECK(!test.IncludesAliasOf(wcsp));

  // FP registers with the same codes must never alias core registers.
  CHECK(!test.IncludesAliasOf(d10));
  CHECK(!test.IncludesAliasOf(d11));
  CHECK(!test.IncludesAliasOf(d12));
  CHECK(!test.IncludesAliasOf(d13));
  CHECK(!test.IncludesAliasOf(s10));
  CHECK(!test.IncludesAliasOf(s11));
  CHECK(!test.IncludesAliasOf(s12));
  CHECK(!test.IncludesAliasOf(s13));

  CHECK(!test.IsEmpty());

  CHECK(test.type() == w10.type());

  // PopHighestIndex/PopLowestIndex return the end registers and remove them
  // from the list; the checks below track the shrinking contents.
  CHECK(test.PopHighestIndex().Is(w13));
  CHECK(test.PopLowestIndex().Is(w10));

  CHECK(test.IncludesAliasOf(x11));
  CHECK(test.IncludesAliasOf(x12));
  CHECK(test.IncludesAliasOf(w11));
  CHECK(test.IncludesAliasOf(w12));
  CHECK(!test.IncludesAliasOf(x10));
  CHECK(!test.IncludesAliasOf(x13));
  CHECK(!test.IncludesAliasOf(w10));
  CHECK(!test.IncludesAliasOf(w13));

  CHECK(test.PopHighestIndex().Is(w12));
  CHECK(test.PopLowestIndex().Is(w11));

  CHECK(!test.IncludesAliasOf(x11));
  CHECK(!test.IncludesAliasOf(x12));
  CHECK(!test.IncludesAliasOf(w11));
  CHECK(!test.IncludesAliasOf(w12));

  // After popping all four registers, the list is empty.
  CHECK(test.IsEmpty());
}
9859 
9860 
TEST(cpureglist_utils_d) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of D registers.
  CPURegList test(d20, d21, d22, d23);

  // Each D register in the list must also alias its S counterpart.
  CHECK(test.IncludesAliasOf(d20));
  CHECK(test.IncludesAliasOf(d21));
  CHECK(test.IncludesAliasOf(d22));
  CHECK(test.IncludesAliasOf(d23));
  CHECK(test.IncludesAliasOf(s20));
  CHECK(test.IncludesAliasOf(s21));
  CHECK(test.IncludesAliasOf(s22));
  CHECK(test.IncludesAliasOf(s23));

  // FP registers not in the list must not be reported as aliases.
  CHECK(!test.IncludesAliasOf(d0));
  CHECK(!test.IncludesAliasOf(d19));
  CHECK(!test.IncludesAliasOf(d24));
  CHECK(!test.IncludesAliasOf(d31));
  CHECK(!test.IncludesAliasOf(s0));
  CHECK(!test.IncludesAliasOf(s19));
  CHECK(!test.IncludesAliasOf(s24));
  CHECK(!test.IncludesAliasOf(s31));

  // Core registers with the same codes are a different register bank and
  // must never alias FP registers.
  CHECK(!test.IncludesAliasOf(x20));
  CHECK(!test.IncludesAliasOf(x21));
  CHECK(!test.IncludesAliasOf(x22));
  CHECK(!test.IncludesAliasOf(x23));
  CHECK(!test.IncludesAliasOf(w20));
  CHECK(!test.IncludesAliasOf(w21));
  CHECK(!test.IncludesAliasOf(w22));
  CHECK(!test.IncludesAliasOf(w23));

  CHECK(!test.IncludesAliasOf(xzr));
  CHECK(!test.IncludesAliasOf(wzr));
  CHECK(!test.IncludesAliasOf(csp));
  CHECK(!test.IncludesAliasOf(wcsp));

  CHECK(!test.IsEmpty());

  CHECK(test.type() == d20.type());

  // PopHighestIndex/PopLowestIndex return the end registers and remove them
  // from the list; the checks below track the shrinking contents.
  CHECK(test.PopHighestIndex().Is(d23));
  CHECK(test.PopLowestIndex().Is(d20));

  CHECK(test.IncludesAliasOf(d21));
  CHECK(test.IncludesAliasOf(d22));
  CHECK(test.IncludesAliasOf(s21));
  CHECK(test.IncludesAliasOf(s22));
  CHECK(!test.IncludesAliasOf(d20));
  CHECK(!test.IncludesAliasOf(d23));
  CHECK(!test.IncludesAliasOf(s20));
  CHECK(!test.IncludesAliasOf(s23));

  CHECK(test.PopHighestIndex().Is(d22));
  CHECK(test.PopLowestIndex().Is(d21));

  CHECK(!test.IncludesAliasOf(d21));
  CHECK(!test.IncludesAliasOf(d22));
  CHECK(!test.IncludesAliasOf(s21));
  CHECK(!test.IncludesAliasOf(s22));

  // After popping all four registers, the list is empty.
  CHECK(test.IsEmpty());
}
9926 
9927 
TEST(cpureglist_utils_s) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test a list of S registers.
  CPURegList test(s20, s21, s22, s23);

  // The type and size mechanisms are already covered, so here we just test
  // that lists of S registers alias individual D registers.
  FPRegister const d_regs[] = {d20, d21, d22, d23};
  FPRegister const s_regs[] = {s20, s21, s22, s23};
  int const count = static_cast<int>(sizeof(s_regs) / sizeof(s_regs[0]));

  for (int i = 0; i < count; i++) {
    CHECK(test.IncludesAliasOf(d_regs[i]));
    CHECK(test.IncludesAliasOf(s_regs[i]));
  }
}
9947 
9948 
TEST(cpureglist_utils_empty) {
  // This test doesn't generate any code, but it verifies the behaviour of
  // the CPURegList utility methods.

  // Test an empty list.
  // Empty lists can have type and size properties. Check that we can create
  // them, and that they are empty.
  CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
  CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
  CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
  CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);

  // Run each phase over every list in turn, matching the original check
  // ordering: emptiness, then PopLowestIndex, then PopHighestIndex, then
  // emptiness again.
  CPURegList* const lists[] = {&reg32, &reg64, &fpreg32, &fpreg64};
  int const list_count = static_cast<int>(sizeof(lists) / sizeof(lists[0]));

  for (int i = 0; i < list_count; i++) {
    CHECK(lists[i]->IsEmpty());
  }

  // Popping from an empty list yields a "none" register and must not crash.
  for (int i = 0; i < list_count; i++) {
    CHECK(lists[i]->PopLowestIndex().IsNone());
  }

  for (int i = 0; i < list_count; i++) {
    CHECK(lists[i]->PopHighestIndex().IsNone());
  }

  // The failed pops must leave the lists empty (and unmodified).
  for (int i = 0; i < list_count; i++) {
    CHECK(lists[i]->IsEmpty());
  }
}
9981 
9982 
// Exercise MacroAssembler::Printf with every supported argument type
// (32/64-bit integers, floats, doubles, C strings, single characters), the
// maximum argument count, callee-saved registers and a non-csp stack pointer.
// The output itself is not checked here (see printf_no_preserve for length
// checks); this test verifies that Printf preserves all registers, using a
// RegisterDump taken before the Printf calls.
TEST(printf) {
  INIT_V8();
  // Printf expands to long sequences, so use a larger code buffer.
  SETUP_SIZE(BUF_SIZE * 2);
  START();

  char const * test_plain_string = "Printf with no arguments.\n";
  char const * test_substring = "'This is a substring.'";
  RegisterDump before;

  // Initialize x29 to the value of the stack pointer. We will use x29 as a
  // temporary stack pointer later, and initializing it in this way allows the
  // RegisterDump check to pass.
  __ Mov(x29, __ StackPointer());

  // Test simple integer arguments.
  __ Mov(x0, 1234);
  __ Mov(x1, 0x1234);

  // Test simple floating-point arguments.
  __ Fmov(d0, 1.234);

  // Test pointer (string) arguments.
  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));

  // Test the maximum number of arguments, and sign extension.
  __ Mov(w3, 0xffffffff);
  __ Mov(w4, 0xffffffff);
  __ Mov(x5, 0xffffffffffffffff);
  __ Mov(x6, 0xffffffffffffffff);
  __ Fmov(s1, 1.234);
  __ Fmov(s2, 2.345);
  __ Fmov(d3, 3.456);
  __ Fmov(d4, 4.567);

  // Test printing callee-saved registers.
  __ Mov(x28, 0x123456789abcdef);
  __ Fmov(d10, 42.0);

  // Test with three arguments.
  __ Mov(x10, 3);
  __ Mov(x11, 40);
  __ Mov(x12, 500);

  // A single character.
  __ Mov(w13, 'x');

  // Check that we don't clobber any registers.
  before.Dump(&masm);

  __ Printf(test_plain_string);   // NOLINT(runtime/printf)
  __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
  __ Printf("w5: %" PRId32 ", x5: %" PRId64"\n", w5, x5);
  __ Printf("d0: %f\n", d0);
  __ Printf("Test %%s: %s\n", x2);
  __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
            "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
            w3, w4, x5, x6);
  __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
  __ Printf("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
  __ Printf("%g\n", d10);
  __ Printf("%%%%%s%%%c%%\n", x2, w13);

  // Print the stack pointer (csp).
  DCHECK(csp.Is(__ StackPointer()));
  __ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
            __ StackPointer(), __ StackPointer().W());

  // Test with a different stack pointer.
  const Register old_stack_pointer = __ StackPointer();
  __ Mov(x29, old_stack_pointer);
  __ SetStackPointer(x29);
  // Print the stack pointer (not csp).
  __ Printf("StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
            __ StackPointer(), __ StackPointer().W());
  // Restore the original stack pointer.
  __ Mov(old_stack_pointer, __ StackPointer());
  __ SetStackPointer(old_stack_pointer);

  // Test with three arguments.
  __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);

  // Mixed argument types (integer and FP arguments interleaved both ways).
  __ Printf("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
            w3, s1, x5, d3);
  __ Printf("s1: %f, d3: %f, w3: %" PRId32 ", x5: %" PRId64 "\n",
            s1, d3, w3, x5);

  END();
  RUN();

  // We cannot easily test the output of the Printf sequences, and because
  // Printf preserves all registers by default, we can't look at the number of
  // bytes that were printed. However, the printf_no_preserve test should check
  // that, and here we just test that we didn't clobber any registers.
  CHECK_EQUAL_REGISTERS(before);

  TEARDOWN();
}
10080 
10081 
// Exercise MacroAssembler::PrintfNoPreserve with the same argument mix as
// TEST(printf). PrintfNoPreserve does not save registers and returns the
// printf result (the number of characters written) in x0; after each call the
// result is stashed in a callee-saved register (x19-x29) and compared against
// the known string length at the end.
TEST(printf_no_preserve) {
  INIT_V8();
  SETUP();
  START();

  char const * test_plain_string = "Printf with no arguments.\n";
  char const * test_substring = "'This is a substring.'";

  __ PrintfNoPreserve(test_plain_string);
  // Save the length returned in x0 for the checks below.
  __ Mov(x19, x0);

  // Test simple integer arguments.
  __ Mov(x0, 1234);
  __ Mov(x1, 0x1234);
  __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
  __ Mov(x20, x0);

  // Test simple floating-point arguments.
  __ Fmov(d0, 1.234);
  __ PrintfNoPreserve("d0: %f\n", d0);
  __ Mov(x21, x0);

  // Test pointer (string) arguments.
  __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
  __ PrintfNoPreserve("Test %%s: %s\n", x2);
  __ Mov(x22, x0);

  // Test the maximum number of arguments, and sign extension.
  __ Mov(w3, 0xffffffff);
  __ Mov(w4, 0xffffffff);
  __ Mov(x5, 0xffffffffffffffff);
  __ Mov(x6, 0xffffffffffffffff);
  __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
                      "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
                      w3, w4, x5, x6);
  __ Mov(x23, x0);

  __ Fmov(s1, 1.234);
  __ Fmov(s2, 2.345);
  __ Fmov(d3, 3.456);
  __ Fmov(d4, 4.567);
  __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
  __ Mov(x24, x0);

  // Test printing callee-saved registers.
  __ Mov(x28, 0x123456789abcdef);
  __ PrintfNoPreserve("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
  __ Mov(x25, x0);

  __ Fmov(d10, 42.0);
  __ PrintfNoPreserve("%g\n", d10);
  __ Mov(x26, x0);

  // Test with a different stack pointer.
  const Register old_stack_pointer = __ StackPointer();
  __ Mov(x29, old_stack_pointer);
  __ SetStackPointer(x29);
  // Print the stack pointer (not csp).
  __ PrintfNoPreserve(
      "StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
      __ StackPointer(), __ StackPointer().W());
  __ Mov(x27, x0);
  // Restore the original stack pointer.
  __ Mov(old_stack_pointer, __ StackPointer());
  __ SetStackPointer(old_stack_pointer);

  // Test with three arguments.
  __ Mov(x3, 3);
  __ Mov(x4, 40);
  __ Mov(x5, 500);
  __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
  __ Mov(x28, x0);

  // Mixed argument types.
  __ Mov(w3, 0xffffffff);
  __ Fmov(s1, 1.234);
  __ Mov(x5, 0xffffffffffffffff);
  __ Fmov(d3, 3.456);
  __ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
                      w3, s1, x5, d3);
  __ Mov(x29, x0);

  END();
  RUN();

  // We cannot easily test the exact output of the Printf sequences, but we can
  // use the return code to check that the string length was correct.

  // Printf with no arguments.
  CHECK_EQUAL_64(strlen(test_plain_string), x19);
  // x0: 1234, x1: 0x00001234
  CHECK_EQUAL_64(25, x20);
  // d0: 1.234000
  CHECK_EQUAL_64(13, x21);
  // Test %s: 'This is a substring.'
  CHECK_EQUAL_64(32, x22);
  // w3(uint32): 4294967295
  // w4(int32): -1
  // x5(uint64): 18446744073709551615
  // x6(int64): -1
  CHECK_EQUAL_64(23 + 14 + 33 + 14, x23);
  // %f: 1.234000
  // %g: 2.345
  // %e: 3.456000e+00
  // %E: 4.567000E+00
  CHECK_EQUAL_64(13 + 10 + 17 + 17, x24);
  // 0x89abcdef, 0x123456789abcdef
  CHECK_EQUAL_64(30, x25);
  // 42
  CHECK_EQUAL_64(3, x26);
  // StackPointer(not csp): 0x00007fb037ae2370, 0x37ae2370
  // Note: This is an example value, but the field width is fixed here so the
  // string length is still predictable.
  CHECK_EQUAL_64(54, x27);
  // 3=3, 4=40, 5=500
  CHECK_EQUAL_64(17, x28);
  // w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
  CHECK_EQUAL_64(69, x29);

  TEARDOWN();
}
10202 
10203 
10204 // This is a V8-specific test.
// This is a V8-specific test.
// Helper for TEST(copyfields): runs MacroAssembler::CopyFields with the given
// list of temporary registers, copying 0 to 11 tagged fields from a static
// source buffer to freshly allocated destinations, then verifies each copy.
// Varying the field count (and, via the caller, the temp-register count)
// exercises as many code paths in CopyFields as possible.
static void CopyFieldsHelper(CPURegList temps) {
  static const uint64_t kLiteralBase = 0x0100001000100101UL;
  // Source fields: distinct multiples of kLiteralBase so any misplaced or
  // missing copy is detectable.
  static const uint64_t src[] = {kLiteralBase * 1,
                                 kLiteralBase * 2,
                                 kLiteralBase * 3,
                                 kLiteralBase * 4,
                                 kLiteralBase * 5,
                                 kLiteralBase * 6,
                                 kLiteralBase * 7,
                                 kLiteralBase * 8,
                                 kLiteralBase * 9,
                                 kLiteralBase * 10,
                                 kLiteralBase * 11};
  // CopyFields operates on tagged (heap-object-style) pointers.
  static const uint64_t src_tagged =
      reinterpret_cast<uint64_t>(src) + kHeapObjectTag;

  static const unsigned kTestCount = sizeof(src) / sizeof(src[0]) + 1;
  uint64_t* dst[kTestCount];
  uint64_t dst_tagged[kTestCount];

  // The first test will be to copy 0 fields. The destination (and source)
  // should not be accessed in any way.
  dst[0] = NULL;
  dst_tagged[0] = kHeapObjectTag;

  // Allocate memory for each other test. Each test <n> will have <n> fields.
  // This is intended to exercise as many paths in CopyFields as possible.
  for (unsigned i = 1; i < kTestCount; i++) {
    dst[i] = new uint64_t[i];
    memset(dst[i], 0, i * sizeof(kLiteralBase));
    dst_tagged[i] = reinterpret_cast<uint64_t>(dst[i]) + kHeapObjectTag;
  }

  SETUP();
  START();

  // Zero-field copy: source pointer is deliberately bogus (0) to catch any
  // spurious access.
  __ Mov(x0, dst_tagged[0]);
  __ Mov(x1, 0);
  __ CopyFields(x0, x1, temps, 0);
  for (unsigned i = 1; i < kTestCount; i++) {
    __ Mov(x0, dst_tagged[i]);
    __ Mov(x1, src_tagged);
    __ CopyFields(x0, x1, temps, i);
  }

  END();
  RUN();
  TEARDOWN();

  // Verify every copied field, then release the destination buffers.
  for (unsigned i = 1; i < kTestCount; i++) {
    for (unsigned j = 0; j < i; j++) {
      CHECK(src[j] == dst[i][j]);
    }
    delete [] dst[i];
  }
}
10261 
10262 
10263 // This is a V8-specific test.
// This is a V8-specific test.
// Run CopyFieldsHelper with one to four temporary registers, so CopyFields is
// exercised with every temp-list size it supports.
TEST(copyfields) {
  INIT_V8();
  CopyFieldsHelper(CPURegList(x10));
  CopyFieldsHelper(CPURegList(x10, x11));
  CopyFieldsHelper(CPURegList(x10, x11, x12));
  CopyFieldsHelper(CPURegList(x10, x11, x12, x13));
}
10271 
10272 
TEST(blr_lr) {
  // A simple test to check that the simulator correctly handles "blr lr":
  // branching to the address currently in lr while blr itself writes the
  // return address into lr.
  INIT_V8();
  SETUP();

  START();
  Label target;
  Label end;

  __ Mov(x0, 0x0);
  __ Adr(lr, &target);

  // Branch to 'target'; the fall-through marker below must be skipped.
  __ Blr(lr);
  __ Mov(x0, 0xdeadbeef);  // Only reached if blr falls through (a bug).
  __ B(&end);

  __ Bind(&target);
  __ Mov(x0, 0xc001c0de);  // Expected result.

  __ Bind(&end);
  END();

  RUN();

  CHECK_EQUAL_64(0xc001c0de, x0);

  TEARDOWN();
}
10301 
10302 
TEST(barriers) {
  // Generate all supported barriers, this is just a smoke test: every
  // combination of DMB/DSB domain (full-system, inner/outer/non-shareable)
  // and access type (all, reads, writes, other), plus ISB. There is nothing
  // to check afterwards beyond "it assembles and runs".
  INIT_V8();
  SETUP();

  START();

  // DMB
  __ Dmb(FullSystem, BarrierAll);
  __ Dmb(FullSystem, BarrierReads);
  __ Dmb(FullSystem, BarrierWrites);
  __ Dmb(FullSystem, BarrierOther);

  __ Dmb(InnerShareable, BarrierAll);
  __ Dmb(InnerShareable, BarrierReads);
  __ Dmb(InnerShareable, BarrierWrites);
  __ Dmb(InnerShareable, BarrierOther);

  __ Dmb(NonShareable, BarrierAll);
  __ Dmb(NonShareable, BarrierReads);
  __ Dmb(NonShareable, BarrierWrites);
  __ Dmb(NonShareable, BarrierOther);

  __ Dmb(OuterShareable, BarrierAll);
  __ Dmb(OuterShareable, BarrierReads);
  __ Dmb(OuterShareable, BarrierWrites);
  __ Dmb(OuterShareable, BarrierOther);

  // DSB
  __ Dsb(FullSystem, BarrierAll);
  __ Dsb(FullSystem, BarrierReads);
  __ Dsb(FullSystem, BarrierWrites);
  __ Dsb(FullSystem, BarrierOther);

  __ Dsb(InnerShareable, BarrierAll);
  __ Dsb(InnerShareable, BarrierReads);
  __ Dsb(InnerShareable, BarrierWrites);
  __ Dsb(InnerShareable, BarrierOther);

  __ Dsb(NonShareable, BarrierAll);
  __ Dsb(NonShareable, BarrierReads);
  __ Dsb(NonShareable, BarrierWrites);
  __ Dsb(NonShareable, BarrierOther);

  __ Dsb(OuterShareable, BarrierAll);
  __ Dsb(OuterShareable, BarrierReads);
  __ Dsb(OuterShareable, BarrierWrites);
  __ Dsb(OuterShareable, BarrierOther);

  // ISB
  __ Isb();

  END();

  RUN();

  TEARDOWN();
}
10361 
10362 
// Check single-operand NaN handling for double-precision operations:
// Fmov/Fabs/Fneg must propagate NaNs (even signalling ones) unchanged apart
// from the sign bit, while Fsqrt/Frint* go through ProcessNaN, which quietens
// a signalling NaN (sets the top mantissa bit) and leaves a quiet NaN as-is.
TEST(process_nan_double) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  double sn = rawbits_to_double(0x7ff5555511111111);
  double qn = rawbits_to_double(0x7ffaaaaa11111111);
  DCHECK(IsSignallingNaN(sn));
  DCHECK(IsQuietNaN(qn));

  // The input NaNs after passing through ProcessNaN.
  // sn_proc is sn with the quiet bit set; qn is already quiet so it passes
  // through unchanged.
  double sn_proc = rawbits_to_double(0x7ffd555511111111);
  double qn_proc = qn;
  DCHECK(IsQuietNaN(sn_proc));
  DCHECK(IsQuietNaN(qn_proc));

  SETUP();
  START();

  // Execute a number of instructions which all use ProcessNaN, and check that
  // they all handle the NaN correctly.
  __ Fmov(d0, sn);
  __ Fmov(d10, qn);

  // Operations that always propagate NaNs unchanged, even signalling NaNs.
  //   - Signalling NaN
  __ Fmov(d1, d0);
  __ Fabs(d2, d0);
  __ Fneg(d3, d0);
  //   - Quiet NaN
  __ Fmov(d11, d10);
  __ Fabs(d12, d10);
  __ Fneg(d13, d10);

  // Operations that use ProcessNaN.
  //   - Signalling NaN
  __ Fsqrt(d4, d0);
  __ Frinta(d5, d0);
  __ Frintn(d6, d0);
  __ Frintz(d7, d0);
  //   - Quiet NaN
  __ Fsqrt(d14, d10);
  __ Frinta(d15, d10);
  __ Frintn(d16, d10);
  __ Frintz(d17, d10);

  // The behaviour of fcvt is checked in TEST(fcvt_sd).

  END();
  RUN();

  uint64_t qn_raw = double_to_rawbits(qn);
  uint64_t sn_raw = double_to_rawbits(sn);

  //   - Signalling NaN
  CHECK_EQUAL_FP64(sn, d1);
  CHECK_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
  CHECK_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
  //   - Quiet NaN
  CHECK_EQUAL_FP64(qn, d11);
  CHECK_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
  CHECK_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);

  //   - Signalling NaN
  CHECK_EQUAL_FP64(sn_proc, d4);
  CHECK_EQUAL_FP64(sn_proc, d5);
  CHECK_EQUAL_FP64(sn_proc, d6);
  CHECK_EQUAL_FP64(sn_proc, d7);
  //   - Quiet NaN
  CHECK_EQUAL_FP64(qn_proc, d14);
  CHECK_EQUAL_FP64(qn_proc, d15);
  CHECK_EQUAL_FP64(qn_proc, d16);
  CHECK_EQUAL_FP64(qn_proc, d17);

  TEARDOWN();
}
10437 
10438 
// Single-precision counterpart of TEST(process_nan_double): Fmov/Fabs/Fneg
// propagate NaNs unchanged (modulo the sign bit), while Fsqrt/Frint* quieten
// a signalling NaN via ProcessNaN and leave a quiet NaN unchanged.
TEST(process_nan_float) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  float sn = rawbits_to_float(0x7f951111);
  float qn = rawbits_to_float(0x7fea1111);
  DCHECK(IsSignallingNaN(sn));
  DCHECK(IsQuietNaN(qn));

  // The input NaNs after passing through ProcessNaN.
  // sn_proc is sn with the quiet bit set; qn is already quiet.
  float sn_proc = rawbits_to_float(0x7fd51111);
  float qn_proc = qn;
  DCHECK(IsQuietNaN(sn_proc));
  DCHECK(IsQuietNaN(qn_proc));

  SETUP();
  START();

  // Execute a number of instructions which all use ProcessNaN, and check that
  // they all handle the NaN correctly.
  __ Fmov(s0, sn);
  __ Fmov(s10, qn);

  // Operations that always propagate NaNs unchanged, even signalling NaNs.
  //   - Signalling NaN
  __ Fmov(s1, s0);
  __ Fabs(s2, s0);
  __ Fneg(s3, s0);
  //   - Quiet NaN
  __ Fmov(s11, s10);
  __ Fabs(s12, s10);
  __ Fneg(s13, s10);

  // Operations that use ProcessNaN.
  //   - Signalling NaN
  __ Fsqrt(s4, s0);
  __ Frinta(s5, s0);
  __ Frintn(s6, s0);
  __ Frintz(s7, s0);
  //   - Quiet NaN
  __ Fsqrt(s14, s10);
  __ Frinta(s15, s10);
  __ Frintn(s16, s10);
  __ Frintz(s17, s10);

  // The behaviour of fcvt is checked in TEST(fcvt_sd).

  END();
  RUN();

  uint32_t qn_raw = float_to_rawbits(qn);
  uint32_t sn_raw = float_to_rawbits(sn);

  //   - Signalling NaN
  CHECK_EQUAL_FP32(sn, s1);
  CHECK_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
  CHECK_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
  //   - Quiet NaN
  CHECK_EQUAL_FP32(qn, s11);
  CHECK_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
  CHECK_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);

  //   - Signalling NaN
  CHECK_EQUAL_FP32(sn_proc, s4);
  CHECK_EQUAL_FP32(sn_proc, s5);
  CHECK_EQUAL_FP32(sn_proc, s6);
  CHECK_EQUAL_FP32(sn_proc, s7);
  //   - Quiet NaN
  CHECK_EQUAL_FP32(qn_proc, s14);
  CHECK_EQUAL_FP32(qn_proc, s15);
  CHECK_EQUAL_FP32(qn_proc, s16);
  CHECK_EQUAL_FP32(qn_proc, s17);

  TEARDOWN();
}
10513 
10514 
// Helper for TEST(process_nans_double): run all two-operand double-precision
// FP instructions that use ProcessNaNs on the pair (n, m) — at least one of
// which must be a NaN — and check that every result equals 'expected' (the
// correctly selected and quietened NaN).
static void ProcessNaNsHelper(double n, double m, double expected) {
  DCHECK(std::isnan(n) || std::isnan(m));
  DCHECK(std::isnan(expected));

  SETUP();
  START();

  // Execute a number of instructions which all use ProcessNaNs, and check that
  // they all propagate NaNs correctly.
  __ Fmov(d0, n);
  __ Fmov(d1, m);

  __ Fadd(d2, d0, d1);
  __ Fsub(d3, d0, d1);
  __ Fmul(d4, d0, d1);
  __ Fdiv(d5, d0, d1);
  __ Fmax(d6, d0, d1);
  __ Fmin(d7, d0, d1);

  END();
  RUN();

  CHECK_EQUAL_FP64(expected, d2);
  CHECK_EQUAL_FP64(expected, d3);
  CHECK_EQUAL_FP64(expected, d4);
  CHECK_EQUAL_FP64(expected, d5);
  CHECK_EQUAL_FP64(expected, d6);
  CHECK_EQUAL_FP64(expected, d7);

  TEARDOWN();
}
10546 
10547 
// Check two-operand NaN selection for doubles: a lone NaN operand is
// propagated (quietened if signalling), and when both operands are NaNs a
// signalling NaN takes precedence over a quiet one.
TEST(process_nans_double) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  double sn = rawbits_to_double(0x7ff5555511111111);
  double sm = rawbits_to_double(0x7ff5555522222222);
  double qn = rawbits_to_double(0x7ffaaaaa11111111);
  double qm = rawbits_to_double(0x7ffaaaaa22222222);
  DCHECK(IsSignallingNaN(sn));
  DCHECK(IsSignallingNaN(sm));
  DCHECK(IsQuietNaN(qn));
  DCHECK(IsQuietNaN(qm));

  // The input NaNs after passing through ProcessNaN.
  double sn_proc = rawbits_to_double(0x7ffd555511111111);
  double sm_proc = rawbits_to_double(0x7ffd555522222222);
  double qn_proc = qn;
  double qm_proc = qm;
  DCHECK(IsQuietNaN(sn_proc));
  DCHECK(IsQuietNaN(sm_proc));
  DCHECK(IsQuietNaN(qn_proc));
  DCHECK(IsQuietNaN(qm_proc));

  // Quiet NaNs are propagated.
  ProcessNaNsHelper(qn, 0, qn_proc);
  ProcessNaNsHelper(0, qm, qm_proc);
  ProcessNaNsHelper(qn, qm, qn_proc);

  // Signalling NaNs are propagated, and made quiet.
  ProcessNaNsHelper(sn, 0, sn_proc);
  ProcessNaNsHelper(0, sm, sm_proc);
  ProcessNaNsHelper(sn, sm, sn_proc);

  // Signalling NaNs take precedence over quiet NaNs.
  ProcessNaNsHelper(sn, qm, sn_proc);
  ProcessNaNsHelper(qn, sm, sm_proc);
  // NOTE(review): this (sn, sm) case repeats the one in the group above; the
  // mixed signalling/quiet cases are the two calls just before it.
  ProcessNaNsHelper(sn, sm, sn_proc);
}
10585 
10586 
// Single-precision overload of ProcessNaNsHelper: run all two-operand float
// FP instructions that use ProcessNaNs on (n, m) — at least one a NaN — and
// check that every result equals 'expected'.
static void ProcessNaNsHelper(float n, float m, float expected) {
  DCHECK(std::isnan(n) || std::isnan(m));
  DCHECK(std::isnan(expected));

  SETUP();
  START();

  // Execute a number of instructions which all use ProcessNaNs, and check that
  // they all propagate NaNs correctly.
  __ Fmov(s0, n);
  __ Fmov(s1, m);

  __ Fadd(s2, s0, s1);
  __ Fsub(s3, s0, s1);
  __ Fmul(s4, s0, s1);
  __ Fdiv(s5, s0, s1);
  __ Fmax(s6, s0, s1);
  __ Fmin(s7, s0, s1);

  END();
  RUN();

  CHECK_EQUAL_FP32(expected, s2);
  CHECK_EQUAL_FP32(expected, s3);
  CHECK_EQUAL_FP32(expected, s4);
  CHECK_EQUAL_FP32(expected, s5);
  CHECK_EQUAL_FP32(expected, s6);
  CHECK_EQUAL_FP32(expected, s7);

  TEARDOWN();
}
10618 
10619 
// Single-precision counterpart of TEST(process_nans_double): a lone NaN
// operand is propagated (quietened if signalling), and a signalling NaN takes
// precedence over a quiet one when both operands are NaNs.
TEST(process_nans_float) {
  INIT_V8();
  // Make sure that NaN propagation works correctly.
  float sn = rawbits_to_float(0x7f951111);
  float sm = rawbits_to_float(0x7f952222);
  float qn = rawbits_to_float(0x7fea1111);
  float qm = rawbits_to_float(0x7fea2222);
  DCHECK(IsSignallingNaN(sn));
  DCHECK(IsSignallingNaN(sm));
  DCHECK(IsQuietNaN(qn));
  DCHECK(IsQuietNaN(qm));

  // The input NaNs after passing through ProcessNaN.
  float sn_proc = rawbits_to_float(0x7fd51111);
  float sm_proc = rawbits_to_float(0x7fd52222);
  float qn_proc = qn;
  float qm_proc = qm;
  DCHECK(IsQuietNaN(sn_proc));
  DCHECK(IsQuietNaN(sm_proc));
  DCHECK(IsQuietNaN(qn_proc));
  DCHECK(IsQuietNaN(qm_proc));

  // Quiet NaNs are propagated.
  ProcessNaNsHelper(qn, 0, qn_proc);
  ProcessNaNsHelper(0, qm, qm_proc);
  ProcessNaNsHelper(qn, qm, qn_proc);

  // Signalling NaNs are propagated, and made quiet.
  ProcessNaNsHelper(sn, 0, sn_proc);
  ProcessNaNsHelper(0, sm, sm_proc);
  ProcessNaNsHelper(sn, sm, sn_proc);

  // Signalling NaNs take precedence over quiet NaNs.
  ProcessNaNsHelper(sn, qm, sn_proc);
  ProcessNaNsHelper(qn, sm, sm_proc);
  // NOTE(review): this (sn, sm) case repeats the one in the group above; the
  // mixed signalling/quiet cases are the two calls just before it.
  ProcessNaNsHelper(sn, sm, sn_proc);
}
10657 
10658 
// Helper for TEST(default_nan_float): with FPCR.DN (Default NaN) enabled,
// every single-precision operation that processes a NaN input must produce
// the default NaN (kFP32DefaultNaN) — except Fmov/Fabs/Fneg, which still
// propagate the input NaN unchanged apart from the sign bit.
// Exactly one of n, m, a needs to be NaN for a given call; 1-operand checks
// run only when n is NaN, 2-operand checks when n or m is, and the 3-operand
// Fmadd family is checked unconditionally.
static void DefaultNaNHelper(float n, float m, float a) {
  DCHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));

  bool test_1op = std::isnan(n);
  bool test_2op = std::isnan(n) || std::isnan(m);

  SETUP();
  START();

  // Enable Default-NaN mode in the FPCR.
  __ Mrs(x0, FPCR);
  __ Orr(x1, x0, DN_mask);
  __ Msr(FPCR, x1);

  // Execute a number of instructions which all use ProcessNaNs, and check that
  // they all produce the default NaN.
  __ Fmov(s0, n);
  __ Fmov(s1, m);
  __ Fmov(s2, a);

  if (test_1op) {
    // Operations that always propagate NaNs unchanged, even signalling NaNs.
    __ Fmov(s10, s0);
    __ Fabs(s11, s0);
    __ Fneg(s12, s0);

    // Operations that use ProcessNaN.
    __ Fsqrt(s13, s0);
    __ Frinta(s14, s0);
    __ Frintn(s15, s0);
    __ Frintz(s16, s0);

    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
    __ Fcvt(d17, s0);
  }

  if (test_2op) {
    __ Fadd(s18, s0, s1);
    __ Fsub(s19, s0, s1);
    __ Fmul(s20, s0, s1);
    __ Fdiv(s21, s0, s1);
    __ Fmax(s22, s0, s1);
    __ Fmin(s23, s0, s1);
  }

  __ Fmadd(s24, s0, s1, s2);
  __ Fmsub(s25, s0, s1, s2);
  __ Fnmadd(s26, s0, s1, s2);
  __ Fnmsub(s27, s0, s1, s2);

  // Restore FPCR.
  __ Msr(FPCR, x0);

  END();
  RUN();

  if (test_1op) {
    uint32_t n_raw = float_to_rawbits(n);
    // Fmov/Fabs/Fneg are unaffected by default-NaN mode.
    CHECK_EQUAL_FP32(n, s10);
    CHECK_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
    CHECK_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s13);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s14);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s15);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s16);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d17);
  }

  if (test_2op) {
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s18);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s19);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s20);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s21);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s22);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s23);
  }

  CHECK_EQUAL_FP32(kFP32DefaultNaN, s24);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s25);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s26);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s27);

  TEARDOWN();
}
10743 
10744 
// Drive DefaultNaNHelper(float) over every combination of NaN positions:
// signalling-only, quiet-only, and mixed, in each of the three operand slots.
TEST(default_nan_float) {
  INIT_V8();
  float sn = rawbits_to_float(0x7f951111);
  float sm = rawbits_to_float(0x7f952222);
  float sa = rawbits_to_float(0x7f95aaaa);
  float qn = rawbits_to_float(0x7fea1111);
  float qm = rawbits_to_float(0x7fea2222);
  float qa = rawbits_to_float(0x7feaaaaa);
  DCHECK(IsSignallingNaN(sn));
  DCHECK(IsSignallingNaN(sm));
  DCHECK(IsSignallingNaN(sa));
  DCHECK(IsQuietNaN(qn));
  DCHECK(IsQuietNaN(qm));
  DCHECK(IsQuietNaN(qa));

  //   - Signalling NaNs
  DefaultNaNHelper(sn, 0.0f, 0.0f);
  DefaultNaNHelper(0.0f, sm, 0.0f);
  DefaultNaNHelper(0.0f, 0.0f, sa);
  DefaultNaNHelper(sn, sm, 0.0f);
  DefaultNaNHelper(0.0f, sm, sa);
  DefaultNaNHelper(sn, 0.0f, sa);
  DefaultNaNHelper(sn, sm, sa);
  //   - Quiet NaNs
  DefaultNaNHelper(qn, 0.0f, 0.0f);
  DefaultNaNHelper(0.0f, qm, 0.0f);
  DefaultNaNHelper(0.0f, 0.0f, qa);
  DefaultNaNHelper(qn, qm, 0.0f);
  DefaultNaNHelper(0.0f, qm, qa);
  DefaultNaNHelper(qn, 0.0f, qa);
  DefaultNaNHelper(qn, qm, qa);
  //   - Mixed NaNs
  DefaultNaNHelper(qn, sm, sa);
  DefaultNaNHelper(sn, qm, sa);
  DefaultNaNHelper(sn, sm, qa);
  DefaultNaNHelper(qn, qm, sa);
  DefaultNaNHelper(sn, qm, qa);
  DefaultNaNHelper(qn, sm, qa);
  DefaultNaNHelper(qn, qm, qa);
}
10785 
10786 
// Double-precision overload of DefaultNaNHelper: with FPCR.DN enabled, every
// double operation that processes a NaN input must produce kFP64DefaultNaN —
// except Fmov/Fabs/Fneg, which still propagate the input NaN unchanged apart
// from the sign bit. See the float overload above for the test_1op/test_2op
// gating logic.
static void DefaultNaNHelper(double n, double m, double a) {
  DCHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));

  bool test_1op = std::isnan(n);
  bool test_2op = std::isnan(n) || std::isnan(m);

  SETUP();
  START();

  // Enable Default-NaN mode in the FPCR.
  __ Mrs(x0, FPCR);
  __ Orr(x1, x0, DN_mask);
  __ Msr(FPCR, x1);

  // Execute a number of instructions which all use ProcessNaNs, and check that
  // they all produce the default NaN.
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmov(d2, a);

  if (test_1op) {
    // Operations that always propagate NaNs unchanged, even signalling NaNs.
    __ Fmov(d10, d0);
    __ Fabs(d11, d0);
    __ Fneg(d12, d0);

    // Operations that use ProcessNaN.
    __ Fsqrt(d13, d0);
    __ Frinta(d14, d0);
    __ Frintn(d15, d0);
    __ Frintz(d16, d0);

    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
    __ Fcvt(s17, d0);
  }

  if (test_2op) {
    __ Fadd(d18, d0, d1);
    __ Fsub(d19, d0, d1);
    __ Fmul(d20, d0, d1);
    __ Fdiv(d21, d0, d1);
    __ Fmax(d22, d0, d1);
    __ Fmin(d23, d0, d1);
  }

  __ Fmadd(d24, d0, d1, d2);
  __ Fmsub(d25, d0, d1, d2);
  __ Fnmadd(d26, d0, d1, d2);
  __ Fnmsub(d27, d0, d1, d2);

  // Restore FPCR.
  __ Msr(FPCR, x0);

  END();
  RUN();

  if (test_1op) {
    uint64_t n_raw = double_to_rawbits(n);
    // Fmov/Fabs/Fneg are unaffected by default-NaN mode.
    CHECK_EQUAL_FP64(n, d10);
    CHECK_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
    CHECK_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d14);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d15);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d16);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s17);
  }

  if (test_2op) {
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d18);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d19);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d20);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d21);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d22);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d23);
  }

  CHECK_EQUAL_FP64(kFP64DefaultNaN, d24);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d25);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d26);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d27);

  TEARDOWN();
}
10871 
10872 
// Drive DefaultNaNHelper(double) over every combination of NaN positions:
// signalling-only, quiet-only, and mixed, in each of the three operand slots.
TEST(default_nan_double) {
  INIT_V8();
  double sn = rawbits_to_double(0x7ff5555511111111);
  double sm = rawbits_to_double(0x7ff5555522222222);
  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
  double qn = rawbits_to_double(0x7ffaaaaa11111111);
  double qm = rawbits_to_double(0x7ffaaaaa22222222);
  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
  DCHECK(IsSignallingNaN(sn));
  DCHECK(IsSignallingNaN(sm));
  DCHECK(IsSignallingNaN(sa));
  DCHECK(IsQuietNaN(qn));
  DCHECK(IsQuietNaN(qm));
  DCHECK(IsQuietNaN(qa));

  //   - Signalling NaNs
  DefaultNaNHelper(sn, 0.0, 0.0);
  DefaultNaNHelper(0.0, sm, 0.0);
  DefaultNaNHelper(0.0, 0.0, sa);
  DefaultNaNHelper(sn, sm, 0.0);
  DefaultNaNHelper(0.0, sm, sa);
  DefaultNaNHelper(sn, 0.0, sa);
  DefaultNaNHelper(sn, sm, sa);
  //   - Quiet NaNs
  DefaultNaNHelper(qn, 0.0, 0.0);
  DefaultNaNHelper(0.0, qm, 0.0);
  DefaultNaNHelper(0.0, 0.0, qa);
  DefaultNaNHelper(qn, qm, 0.0);
  DefaultNaNHelper(0.0, qm, qa);
  DefaultNaNHelper(qn, 0.0, qa);
  DefaultNaNHelper(qn, qm, qa);
  //   - Mixed NaNs
  DefaultNaNHelper(qn, sm, sa);
  DefaultNaNHelper(sn, qm, sa);
  DefaultNaNHelper(sn, sm, qa);
  DefaultNaNHelper(qn, qm, sa);
  DefaultNaNHelper(sn, qm, qa);
  DefaultNaNHelper(qn, sm, qa);
  DefaultNaNHelper(qn, qm, qa);
}
10913 
10914 
// Check that a Call emitted with RelocInfo::NONE64 (no relocation) works, and
// that Assembler::return_address_from_call_start correctly computes the
// return address of such a call sequence.
TEST(call_no_relocation) {
  Address call_start;
  Address return_address;

  INIT_V8();
  SETUP();

  START();

  Label function;
  Label test;

  // Skip over the callee to the test code.
  __ B(&test);

  // A trivial callee: set x0 and return.
  __ Bind(&function);
  __ Mov(x0, 0x1);
  __ Ret();

  __ Bind(&test);
  __ Mov(x0, 0x0);
  // Preserve lr across the call (paired with xzr to keep csp aligned).
  __ Push(lr, xzr);
  {
    // Block the constant pool so the call sequence is contiguous and its
    // start/end offsets are meaningful.
    Assembler::BlockConstPoolScope scope(&masm);
    call_start = buf + __ pc_offset();
    __ Call(buf + function.pos(), RelocInfo::NONE64);
    return_address = buf + __ pc_offset();
  }
  __ Pop(xzr, lr);
  END();

  RUN();

  // The callee must have run.
  CHECK_EQUAL_64(1, x0);

  // The return_address_from_call_start function doesn't currently encounter any
  // non-relocatable sequences, so we check it here to make sure it works.
  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
  // non-relocatable calls at all.
  CHECK(return_address ==
        Assembler::return_address_from_call_start(call_start));

  TEARDOWN();
}
10958 
10959 
// Helper for the Abs tests: check MacroAssembler::Abs on a 64-bit register
// for 'value', including all optional label forms. The first label is taken
// when the result is NOT representable (only for kXMinInt, whose absolute
// value overflows int64_t), the second when it IS representable. Reaching
// 'fail' sets x0 to -1; the checks require x0 to remain 0.
static void AbsHelperX(int64_t value) {
  int64_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  __ Mov(x0, 0);
  __ Mov(x1, value);

  if (value != kXMinInt) {
    expected = labs(value);

    Label next;
    // The result is representable: the not-representable label must never be
    // taken, and the representable label must be.
    __ Abs(x10, x1);
    __ Abs(x11, x1, &fail);
    __ Abs(x12, x1, &fail, &next);
    __ Bind(&next);
    __ Abs(x13, x1, NULL, &done);
  } else {
    // labs is undefined for kXMinInt but our implementation in the
    // MacroAssembler will return kXMinInt in such a case.
    expected = kXMinInt;

    Label next;
    // The result is not representable: the representable label must never be
    // taken, and the not-representable label must be.
    __ Abs(x10, x1);
    __ Abs(x11, x1, NULL, &fail);
    __ Abs(x12, x1, &next, &fail);
    __ Bind(&next);
    __ Abs(x13, x1, &done);
  }

  // Only reached if a label was taken incorrectly.
  __ Bind(&fail);
  __ Mov(x0, -1);

  __ Bind(&done);

  END();
  RUN();

  CHECK_EQUAL_64(0, x0);
  CHECK_EQUAL_64(value, x1);  // The input must be preserved.
  CHECK_EQUAL_64(expected, x10);
  CHECK_EQUAL_64(expected, x11);
  CHECK_EQUAL_64(expected, x12);
  CHECK_EQUAL_64(expected, x13);

  TEARDOWN();
}
11013 
11014 
// W-register counterpart of AbsHelperX: exercises every
// MacroAssembler::Abs() variant on W registers for the given input. w0 acts
// as a status flag: it remains 0 on the expected control-flow path and is
// set to -1 if any optional branch label goes the wrong way.
static void AbsHelperW(int32_t value) {
  int32_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  // w0 == 0 means "no unexpected branch taken".
  __ Mov(w0, 0);
  // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
  // Once it is fixed, we should remove the cast.
  __ Mov(w1, static_cast<uint32_t>(value));

  if (value != kWMinInt) {
    expected = abs(value);

    Label next;
    // The result is representable.
    __ Abs(w10, w1);
    __ Abs(w11, w1, &fail);         // The failure label must not be taken.
    __ Abs(w12, w1, &fail, &next);  // The success label must be taken.
    __ Bind(&next);
    __ Abs(w13, w1, NULL, &done);   // Branches to done, skipping `fail`.
  } else {
    // abs is undefined for kWMinInt but our implementation in the
    // MacroAssembler will return kWMinInt in such a case.
    expected = kWMinInt;

    Label next;
    // The result is not representable.
    __ Abs(w10, w1);
    __ Abs(w11, w1, NULL, &fail);   // The success label must not be taken.
    __ Abs(w12, w1, &next, &fail);  // The failure label must be taken.
    __ Bind(&next);
    __ Abs(w13, w1, &done);         // Branches to done, skipping `fail`.
  }

  // Reached only if one of the Abs branches above went the wrong way.
  __ Bind(&fail);
  __ Mov(w0, -1);

  __ Bind(&done);

  END();
  RUN();

  CHECK_EQUAL_32(0, w0);        // No unexpected branch was taken.
  CHECK_EQUAL_32(value, w1);    // The input register is preserved.
  CHECK_EQUAL_32(expected, w10);
  CHECK_EQUAL_32(expected, w11);
  CHECK_EQUAL_32(expected, w12);
  CHECK_EQUAL_32(expected, w13);

  TEARDOWN();
}
11070 
11071 
// Exercise Abs() for both register widths across zero, small positive and
// negative values, and the extreme representable integers (the min-int case
// takes the "not representable" path inside the helpers).
TEST(abs) {
  INIT_V8();

  static const int64_t x_inputs[] = {0, 42, -42, kXMinInt, kXMaxInt};
  for (size_t i = 0; i < sizeof(x_inputs) / sizeof(x_inputs[0]); ++i) {
    AbsHelperX(x_inputs[i]);
  }

  static const int32_t w_inputs[] = {0, 42, -42, kWMinInt, kWMaxInt};
  for (size_t i = 0; i < sizeof(w_inputs) / sizeof(w_inputs[0]); ++i) {
    AbsHelperW(w_inputs[i]);
  }
}
11086 
11087 
// Check that the sizes of the constant and veneer pools are correctly
// recorded in, and read back from, the RelocInfo.
TEST(pool_size) {
  INIT_V8();
  SETUP();

  // This test does not execute any code. It only tests that the size of the
  // pools is read correctly from the RelocInfo.

  Label exit;
  __ b(&exit);

  const unsigned constant_pool_size = 312;
  const unsigned veneer_pool_size = 184;

  // Emit a fake constant pool of exactly the announced size (dc32 emits
  // 4 bytes per call).
  __ RecordConstPool(constant_pool_size);
  for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
    __ dc32(0);
  }

  // Emit a fake veneer pool of exactly the announced size.
  __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
  for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
    __ nop();
  }

  __ bind(&exit);

  HandleScope handle_scope(isolate);
  CodeDesc desc;
  masm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(desc, 0, masm.CodeObject());

  // Walk the relocation info and verify that each pool entry reports the
  // size that was recorded above.
  unsigned pool_count = 0;
  int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
                  RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
  for (RelocIterator it(*code, pool_mask); !it.done(); it.next()) {
    RelocInfo* info = it.rinfo();
    if (RelocInfo::IsConstPool(info->rmode())) {
      // Use CHECK rather than DCHECK: DCHECK compiles away in release
      // builds, which would leave this test asserting nothing.
      CHECK(info->data() == constant_pool_size);
      ++pool_count;
    }
    if (RelocInfo::IsVeneerPool(info->rmode())) {
      CHECK(info->data() == veneer_pool_size);
      ++pool_count;
    }
  }

  // Exactly one entry of each pool kind must have been seen.
  CHECK(pool_count == 2);

  TEARDOWN();
}
11137