1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 //     * Redistributions of source code must retain the above copyright
7 //       notice, this list of conditions and the following disclaimer.
8 //     * Redistributions in binary form must reproduce the above
9 //       copyright notice, this list of conditions and the following
10 //       disclaimer in the documentation and/or other materials provided
11 //       with the distribution.
12 //     * Neither the name of Google Inc. nor the names of its
13 //       contributors may be used to endorse or promote products derived
14 //       from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 
28 #include <stdio.h>
29 #include <stdlib.h>
30 #include <string.h>
31 #include <cmath>
32 #include <limits>
33 
34 #include "src/v8.h"
35 
36 #include "src/arm64/decoder-arm64-inl.h"
37 #include "src/arm64/disasm-arm64.h"
38 #include "src/arm64/simulator-arm64.h"
39 #include "src/arm64/utils-arm64.h"
40 #include "src/base/utils/random-number-generator.h"
41 #include "src/macro-assembler.h"
42 #include "test/cctest/cctest.h"
43 #include "test/cctest/test-utils-arm64.h"
44 
45 using namespace v8::internal;
46 
47 // Test infrastructure.
48 //
49 // Tests are functions which accept no parameters and have no return values.
50 // The testing code should not perform an explicit return once completed. For
51 // example, a very simple test of the mov immediate instruction would be:
52 //
53 //   TEST(mov_x0_one) {
54 //     SETUP();
55 //
56 //     START();
57 //     __ mov(x0, Operand(1));
58 //     END();
59 //
60 //     RUN();
61 //
62 //     CHECK_EQUAL_64(1, x0);
63 //
64 //     TEARDOWN();
65 //   }
66 //
67 // Within a START ... END block all registers but sp can be modified. sp has to
68 // be explicitly saved/restored. The END() macro replaces the function return
69 // so it may appear multiple times in a test if the test has multiple exit
70 // points.
71 //
72 // Once the test has been run, all integer and floating point registers, as
73 // well as the flags, are accessible through a RegisterDump instance; see
74 // test-utils-arm64.cc for more info on RegisterDump.
75 //
76 // We provide some helper assert macros to handle common cases:
77 //
78 //   CHECK_EQUAL_32(int32_t, int_32t)
79 //   CHECK_EQUAL_FP32(float, float)
80 //   CHECK_EQUAL_32(int32_t, W register)
81 //   CHECK_EQUAL_FP32(float, S register)
82 //   CHECK_EQUAL_64(int64_t, int_64t)
83 //   CHECK_EQUAL_FP64(double, double)
84 //   CHECK_EQUAL_64(int64_t, X register)
85 //   CHECK_EQUAL_64(X register, X register)
86 //   CHECK_EQUAL_FP64(double, D register)
87 //
88 // e.g. CHECK_EQUAL_FP64(0.5, d30);
89 //
90 // If more advanced computation is required before the assert, access the
91 // RegisterDump named core directly:
92 //
93 //   CHECK_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
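//
// Processor flags can be checked in a similar way; for example, several of
// the tests below use:
//
//   CHECK_EQUAL_NZCV(ZFlag);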
94 
95 
96 #if 0  // TODO(all): enable.
97 static v8::Persistent<v8::Context> env;
98 
99 static void InitializeVM() {
100   if (env.IsEmpty()) {
101     env = v8::Context::New();
102   }
103 }
104 #endif
105 
106 #define __ masm.
107 
108 #define BUF_SIZE 8192
109 #define SETUP() SETUP_SIZE(BUF_SIZE)
110 
111 #define INIT_V8()                                                              \
112   CcTest::InitializeVM();
113 
114 #ifdef USE_SIMULATOR
115 
116 // Run tests with the simulator.
117 #define SETUP_SIZE(buf_size)                                   \
118   Isolate* isolate = CcTest::i_isolate();                      \
119   HandleScope scope(isolate);                                  \
120   CHECK(isolate != NULL);                                      \
121   byte* buf = new byte[buf_size];                              \
122   MacroAssembler masm(isolate, buf, buf_size,                  \
123                       v8::internal::CodeObjectRequired::kYes); \
124   Decoder<DispatchingDecoderVisitor>* decoder =                \
125       new Decoder<DispatchingDecoderVisitor>();                \
126   Simulator simulator(decoder);                                \
127   PrintDisassembler* pdis = NULL;                              \
128   RegisterDump core;
129 
130 /*  if (CcTest::trace_sim()) {                                                 \
131     pdis = new PrintDisassembler(stdout);                                      \
132     decoder->PrependVisitor(pdis);                                             \
133   }                                                                            \
134   */
135 
136 // Reset the assembler and simulator, so that instructions can be generated,
137 // but don't actually emit any code. This can be used by tests that need to
138 // emit instructions at the start of the buffer. Note that START_AFTER_RESET
139 // must be called before any callee-saved register is modified, and before an
140 // END is encountered.
141 //
142 // Most tests should call START, rather than call RESET directly.
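//
// A test that does need code at the very start of the buffer follows roughly
// this pattern (see branch_at_start below; the label name is illustrative):
//
//   RESET();
//   __ B(&start_label);
//   START_AFTER_RESET();
//   ...
//   END();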
143 #define RESET()                                                                \
144   __ Reset();                                                                  \
145   simulator.ResetState();
146 
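// The Debug pseudo-instructions emitted below turn simulator tracing on at
// the start of the test; END() turns it off again before the register dump.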
147 #define START_AFTER_RESET()                                                    \
148   __ SetStackPointer(csp);                                                     \
149   __ PushCalleeSavedRegisters();                                               \
150   __ Debug("Start test.", __LINE__, TRACE_ENABLE | LOG_ALL);
151 
152 #define START()                                                                \
153   RESET();                                                                     \
154   START_AFTER_RESET();
155 
156 #define RUN()                                                                  \
157   simulator.RunFrom(reinterpret_cast<Instruction*>(buf))
158 
159 #define END()                                                                  \
160   __ Debug("End test.", __LINE__, TRACE_DISABLE | LOG_ALL);                    \
161   core.Dump(&masm);                                                            \
162   __ PopCalleeSavedRegisters();                                                \
163   __ Ret();                                                                    \
164   __ GetCode(NULL);
165 
166 #define TEARDOWN()                                                             \
167   delete pdis;                                                                 \
168   delete[] buf;
169 
170 #else  // ifdef USE_SIMULATOR.
171 // Run the test on real hardware or models.
172 #define SETUP_SIZE(buf_size)                                   \
173   Isolate* isolate = CcTest::i_isolate();                      \
174   HandleScope scope(isolate);                                  \
175   CHECK(isolate != NULL);                                      \
176   byte* buf = new byte[buf_size];                              \
177   MacroAssembler masm(isolate, buf, buf_size,                  \
178                       v8::internal::CodeObjectRequired::kYes); \
179   RegisterDump core;
180 
181 #define RESET()                                                                \
182   __ Reset();                                                                  \
183   /* Reset the machine state (like simulator.ResetState()). */                 \
184   __ Msr(NZCV, xzr);                                                           \
185   __ Msr(FPCR, xzr);
186 
187 
188 #define START_AFTER_RESET()                                                    \
189   __ SetStackPointer(csp);                                                     \
190   __ PushCalleeSavedRegisters();
191 
192 #define START()                                                                \
193   RESET();                                                                     \
194   START_AFTER_RESET();
195 
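// On hardware, RUN() flushes the instruction cache for the generated code and
// then calls the buffer directly. The function pointer is obtained via memcpy
// rather than a plain cast, avoiding a non-portable conversion from a data
// pointer to a function pointer.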
196 #define RUN()                                                       \
197   Assembler::FlushICache(isolate, buf, masm.SizeOfGeneratedCode()); \
198   {                                                                 \
199     void (*test_function)(void);                                    \
200     memcpy(&test_function, &buf, sizeof(buf));                      \
201     test_function();                                                \
202   }
203 
204 #define END()                                                                  \
205   core.Dump(&masm);                                                            \
206   __ PopCalleeSavedRegisters();                                                \
207   __ Ret();                                                                    \
208   __ GetCode(NULL);
209 
210 #define TEARDOWN()                                                             \
211   delete[] buf;
212 
213 #endif  // ifdef USE_SIMULATOR.
214 
215 #define CHECK_EQUAL_NZCV(expected)                                            \
216   CHECK(EqualNzcv(expected, core.flags_nzcv()))
217 
218 #define CHECK_EQUAL_REGISTERS(expected)                                       \
219   CHECK(EqualRegisters(&expected, &core))
220 
221 #define CHECK_EQUAL_32(expected, result)                                      \
222   CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
223 
224 #define CHECK_EQUAL_FP32(expected, result)                                    \
225   CHECK(EqualFP32(expected, &core, result))
226 
227 #define CHECK_EQUAL_64(expected, result)                                      \
228   CHECK(Equal64(expected, &core, result))
229 
230 #define CHECK_EQUAL_FP64(expected, result)                                    \
231   CHECK(EqualFP64(expected, &core, result))
232 
233 #ifdef DEBUG
234 #define CHECK_LITERAL_POOL_SIZE(expected) \
235   CHECK((expected) == (__ LiteralPoolSize()))
236 #else
237 #define CHECK_LITERAL_POOL_SIZE(expected) ((void)0)
238 #endif
239 
240 
241 TEST(stack_ops) {
242   INIT_V8();
243   SETUP();
244 
245   START();
246   // Save csp.
247   __ Mov(x29, csp);
248 
249   // Set the csp to a known value.
250   __ Mov(x16, 0x1000);
251   __ Mov(csp, x16);
252   __ Mov(x0, csp);
253 
254   // Add immediate to the csp, and move the result to a normal register.
255   __ Add(csp, csp, Operand(0x50));
256   __ Mov(x1, csp);
257 
258   // Add extended to the csp, and move the result to a normal register.
259   __ Mov(x17, 0xfff);
260   __ Add(csp, csp, Operand(x17, SXTB));
261   __ Mov(x2, csp);
262 
263   // Write csp using a logical instruction, and move it to a normal register.
264   __ Orr(csp, xzr, Operand(0x1fff));
265   __ Mov(x3, csp);
266 
267   // Write wcsp using a logical instruction.
268   __ Orr(wcsp, wzr, Operand(0xfffffff8L));
269   __ Mov(x4, csp);
270 
271   // Write csp, and read back wcsp.
272   __ Orr(csp, xzr, Operand(0xfffffff8L));
273   __ Mov(w5, wcsp);
274 
275   // Restore csp.
276   __ Mov(csp, x29);
277   END();
278 
279   RUN();
280 
281   CHECK_EQUAL_64(0x1000, x0);
282   CHECK_EQUAL_64(0x1050, x1);
283   CHECK_EQUAL_64(0x104f, x2);
284   CHECK_EQUAL_64(0x1fff, x3);
285   CHECK_EQUAL_64(0xfffffff8, x4);
286   CHECK_EQUAL_64(0xfffffff8, x5);
287 
288   TEARDOWN();
289 }
290 
291 
292 TEST(mvn) {
293   INIT_V8();
294   SETUP();
295 
296   START();
297   __ Mvn(w0, 0xfff);
298   __ Mvn(x1, 0xfff);
299   __ Mvn(w2, Operand(w0, LSL, 1));
300   __ Mvn(x3, Operand(x1, LSL, 2));
301   __ Mvn(w4, Operand(w0, LSR, 3));
302   __ Mvn(x5, Operand(x1, LSR, 4));
303   __ Mvn(w6, Operand(w0, ASR, 11));
304   __ Mvn(x7, Operand(x1, ASR, 12));
305   __ Mvn(w8, Operand(w0, ROR, 13));
306   __ Mvn(x9, Operand(x1, ROR, 14));
307   __ Mvn(w10, Operand(w2, UXTB));
308   __ Mvn(x11, Operand(x2, SXTB, 1));
309   __ Mvn(w12, Operand(w2, UXTH, 2));
310   __ Mvn(x13, Operand(x2, SXTH, 3));
311   __ Mvn(x14, Operand(w2, UXTW, 4));
312   __ Mvn(x15, Operand(w2, SXTW, 4));
313   END();
314 
315   RUN();
316 
317   CHECK_EQUAL_64(0xfffff000, x0);
318   CHECK_EQUAL_64(0xfffffffffffff000UL, x1);
319   CHECK_EQUAL_64(0x00001fff, x2);
320   CHECK_EQUAL_64(0x0000000000003fffUL, x3);
321   CHECK_EQUAL_64(0xe00001ff, x4);
322   CHECK_EQUAL_64(0xf0000000000000ffUL, x5);
323   CHECK_EQUAL_64(0x00000001, x6);
324   CHECK_EQUAL_64(0x0, x7);
325   CHECK_EQUAL_64(0x7ff80000, x8);
326   CHECK_EQUAL_64(0x3ffc000000000000UL, x9);
327   CHECK_EQUAL_64(0xffffff00, x10);
328   CHECK_EQUAL_64(0x0000000000000001UL, x11);
329   CHECK_EQUAL_64(0xffff8003, x12);
330   CHECK_EQUAL_64(0xffffffffffff0007UL, x13);
331   CHECK_EQUAL_64(0xfffffffffffe000fUL, x14);
332   CHECK_EQUAL_64(0xfffffffffffe000fUL, x15);
333 
334   TEARDOWN();
335 }
336 
337 
338 TEST(mov) {
339   INIT_V8();
340   SETUP();
341 
342   START();
343   __ Mov(x0, 0xffffffffffffffffL);
344   __ Mov(x1, 0xffffffffffffffffL);
345   __ Mov(x2, 0xffffffffffffffffL);
346   __ Mov(x3, 0xffffffffffffffffL);
347 
348   __ Mov(x0, 0x0123456789abcdefL);
349 
350   __ movz(x1, 0xabcdL << 16);
351   __ movk(x2, 0xabcdL << 32);
352   __ movn(x3, 0xabcdL << 48);
353 
354   __ Mov(x4, 0x0123456789abcdefL);
355   __ Mov(x5, x4);
356 
357   __ Mov(w6, -1);
358 
359   // Test that moves back to the same register have the desired effect. This
360   // is a no-op for X registers, and a truncation for W registers.
361   __ Mov(x7, 0x0123456789abcdefL);
362   __ Mov(x7, x7);
363   __ Mov(x8, 0x0123456789abcdefL);
364   __ Mov(w8, w8);
365   __ Mov(x9, 0x0123456789abcdefL);
366   __ Mov(x9, Operand(x9));
367   __ Mov(x10, 0x0123456789abcdefL);
368   __ Mov(w10, Operand(w10));
369 
370   __ Mov(w11, 0xfff);
371   __ Mov(x12, 0xfff);
372   __ Mov(w13, Operand(w11, LSL, 1));
373   __ Mov(x14, Operand(x12, LSL, 2));
374   __ Mov(w15, Operand(w11, LSR, 3));
375   __ Mov(x18, Operand(x12, LSR, 4));
376   __ Mov(w19, Operand(w11, ASR, 11));
377   __ Mov(x20, Operand(x12, ASR, 12));
378   __ Mov(w21, Operand(w11, ROR, 13));
379   __ Mov(x22, Operand(x12, ROR, 14));
380   __ Mov(w23, Operand(w13, UXTB));
381   __ Mov(x24, Operand(x13, SXTB, 1));
382   __ Mov(w25, Operand(w13, UXTH, 2));
383   __ Mov(x26, Operand(x13, SXTH, 3));
384   __ Mov(x27, Operand(w13, UXTW, 4));
385   END();
386 
387   RUN();
388 
389   CHECK_EQUAL_64(0x0123456789abcdefL, x0);
390   CHECK_EQUAL_64(0x00000000abcd0000L, x1);
391   CHECK_EQUAL_64(0xffffabcdffffffffL, x2);
392   CHECK_EQUAL_64(0x5432ffffffffffffL, x3);
393   CHECK_EQUAL_64(x4, x5);
394   CHECK_EQUAL_32(-1, w6);
395   CHECK_EQUAL_64(0x0123456789abcdefL, x7);
396   CHECK_EQUAL_32(0x89abcdefL, w8);
397   CHECK_EQUAL_64(0x0123456789abcdefL, x9);
398   CHECK_EQUAL_32(0x89abcdefL, w10);
399   CHECK_EQUAL_64(0x00000fff, x11);
400   CHECK_EQUAL_64(0x0000000000000fffUL, x12);
401   CHECK_EQUAL_64(0x00001ffe, x13);
402   CHECK_EQUAL_64(0x0000000000003ffcUL, x14);
403   CHECK_EQUAL_64(0x000001ff, x15);
404   CHECK_EQUAL_64(0x00000000000000ffUL, x18);
405   CHECK_EQUAL_64(0x00000001, x19);
406   CHECK_EQUAL_64(0x0, x20);
407   CHECK_EQUAL_64(0x7ff80000, x21);
408   CHECK_EQUAL_64(0x3ffc000000000000UL, x22);
409   CHECK_EQUAL_64(0x000000fe, x23);
410   CHECK_EQUAL_64(0xfffffffffffffffcUL, x24);
411   CHECK_EQUAL_64(0x00007ff8, x25);
412   CHECK_EQUAL_64(0x000000000000fff0UL, x26);
413   CHECK_EQUAL_64(0x000000000001ffe0UL, x27);
414 
415   TEARDOWN();
416 }
417 
418 
419 TEST(mov_imm_w) {
420   INIT_V8();
421   SETUP();
422 
423   START();
424   __ Mov(w0, 0xffffffffL);
425   __ Mov(w1, 0xffff1234L);
426   __ Mov(w2, 0x1234ffffL);
427   __ Mov(w3, 0x00000000L);
428   __ Mov(w4, 0x00001234L);
429   __ Mov(w5, 0x12340000L);
430   __ Mov(w6, 0x12345678L);
431   __ Mov(w7, (int32_t)0x80000000);
432   __ Mov(w8, (int32_t)0xffff0000);
433   __ Mov(w9, kWMinInt);
434   END();
435 
436   RUN();
437 
438   CHECK_EQUAL_64(0xffffffffL, x0);
439   CHECK_EQUAL_64(0xffff1234L, x1);
440   CHECK_EQUAL_64(0x1234ffffL, x2);
441   CHECK_EQUAL_64(0x00000000L, x3);
442   CHECK_EQUAL_64(0x00001234L, x4);
443   CHECK_EQUAL_64(0x12340000L, x5);
444   CHECK_EQUAL_64(0x12345678L, x6);
445   CHECK_EQUAL_64(0x80000000L, x7);
446   CHECK_EQUAL_64(0xffff0000L, x8);
447   CHECK_EQUAL_32(kWMinInt, w9);
448 
449   TEARDOWN();
450 }
451 
452 
453 TEST(mov_imm_x) {
454   INIT_V8();
455   SETUP();
456 
457   START();
458   __ Mov(x0, 0xffffffffffffffffL);
459   __ Mov(x1, 0xffffffffffff1234L);
460   __ Mov(x2, 0xffffffff12345678L);
461   __ Mov(x3, 0xffff1234ffff5678L);
462   __ Mov(x4, 0x1234ffffffff5678L);
463   __ Mov(x5, 0x1234ffff5678ffffL);
464   __ Mov(x6, 0x12345678ffffffffL);
465   __ Mov(x7, 0x1234ffffffffffffL);
466   __ Mov(x8, 0x123456789abcffffL);
467   __ Mov(x9, 0x12345678ffff9abcL);
468   __ Mov(x10, 0x1234ffff56789abcL);
469   __ Mov(x11, 0xffff123456789abcL);
470   __ Mov(x12, 0x0000000000000000L);
471   __ Mov(x13, 0x0000000000001234L);
472   __ Mov(x14, 0x0000000012345678L);
473   __ Mov(x15, 0x0000123400005678L);
474   __ Mov(x18, 0x1234000000005678L);
475   __ Mov(x19, 0x1234000056780000L);
476   __ Mov(x20, 0x1234567800000000L);
477   __ Mov(x21, 0x1234000000000000L);
478   __ Mov(x22, 0x123456789abc0000L);
479   __ Mov(x23, 0x1234567800009abcL);
480   __ Mov(x24, 0x1234000056789abcL);
481   __ Mov(x25, 0x0000123456789abcL);
482   __ Mov(x26, 0x123456789abcdef0L);
483   __ Mov(x27, 0xffff000000000001L);
484   __ Mov(x28, 0x8000ffff00000000L);
485   END();
486 
487   RUN();
488 
489   CHECK_EQUAL_64(0xffffffffffff1234L, x1);
490   CHECK_EQUAL_64(0xffffffff12345678L, x2);
491   CHECK_EQUAL_64(0xffff1234ffff5678L, x3);
492   CHECK_EQUAL_64(0x1234ffffffff5678L, x4);
493   CHECK_EQUAL_64(0x1234ffff5678ffffL, x5);
494   CHECK_EQUAL_64(0x12345678ffffffffL, x6);
495   CHECK_EQUAL_64(0x1234ffffffffffffL, x7);
496   CHECK_EQUAL_64(0x123456789abcffffL, x8);
497   CHECK_EQUAL_64(0x12345678ffff9abcL, x9);
498   CHECK_EQUAL_64(0x1234ffff56789abcL, x10);
499   CHECK_EQUAL_64(0xffff123456789abcL, x11);
500   CHECK_EQUAL_64(0x0000000000000000L, x12);
501   CHECK_EQUAL_64(0x0000000000001234L, x13);
502   CHECK_EQUAL_64(0x0000000012345678L, x14);
503   CHECK_EQUAL_64(0x0000123400005678L, x15);
504   CHECK_EQUAL_64(0x1234000000005678L, x18);
505   CHECK_EQUAL_64(0x1234000056780000L, x19);
506   CHECK_EQUAL_64(0x1234567800000000L, x20);
507   CHECK_EQUAL_64(0x1234000000000000L, x21);
508   CHECK_EQUAL_64(0x123456789abc0000L, x22);
509   CHECK_EQUAL_64(0x1234567800009abcL, x23);
510   CHECK_EQUAL_64(0x1234000056789abcL, x24);
511   CHECK_EQUAL_64(0x0000123456789abcL, x25);
512   CHECK_EQUAL_64(0x123456789abcdef0L, x26);
513   CHECK_EQUAL_64(0xffff000000000001L, x27);
514   CHECK_EQUAL_64(0x8000ffff00000000L, x28);
515 
516   TEARDOWN();
517 }
518 
519 
520 TEST(orr) {
521   INIT_V8();
522   SETUP();
523 
524   START();
525   __ Mov(x0, 0xf0f0);
526   __ Mov(x1, 0xf00000ff);
527 
528   __ Orr(x2, x0, Operand(x1));
529   __ Orr(w3, w0, Operand(w1, LSL, 28));
530   __ Orr(x4, x0, Operand(x1, LSL, 32));
531   __ Orr(x5, x0, Operand(x1, LSR, 4));
532   __ Orr(w6, w0, Operand(w1, ASR, 4));
533   __ Orr(x7, x0, Operand(x1, ASR, 4));
534   __ Orr(w8, w0, Operand(w1, ROR, 12));
535   __ Orr(x9, x0, Operand(x1, ROR, 12));
536   __ Orr(w10, w0, Operand(0xf));
537   __ Orr(x11, x0, Operand(0xf0000000f0000000L));
538   END();
539 
540   RUN();
541 
542   CHECK_EQUAL_64(0xf000f0ff, x2);
543   CHECK_EQUAL_64(0xf000f0f0, x3);
544   CHECK_EQUAL_64(0xf00000ff0000f0f0L, x4);
545   CHECK_EQUAL_64(0x0f00f0ff, x5);
546   CHECK_EQUAL_64(0xff00f0ff, x6);
547   CHECK_EQUAL_64(0x0f00f0ff, x7);
548   CHECK_EQUAL_64(0x0ffff0f0, x8);
549   CHECK_EQUAL_64(0x0ff00000000ff0f0L, x9);
550   CHECK_EQUAL_64(0xf0ff, x10);
551   CHECK_EQUAL_64(0xf0000000f000f0f0L, x11);
552 
553   TEARDOWN();
554 }
555 
556 
557 TEST(orr_extend) {
558   INIT_V8();
559   SETUP();
560 
561   START();
562   __ Mov(x0, 1);
563   __ Mov(x1, 0x8000000080008080UL);
564   __ Orr(w6, w0, Operand(w1, UXTB));
565   __ Orr(x7, x0, Operand(x1, UXTH, 1));
566   __ Orr(w8, w0, Operand(w1, UXTW, 2));
567   __ Orr(x9, x0, Operand(x1, UXTX, 3));
568   __ Orr(w10, w0, Operand(w1, SXTB));
569   __ Orr(x11, x0, Operand(x1, SXTH, 1));
570   __ Orr(x12, x0, Operand(x1, SXTW, 2));
571   __ Orr(x13, x0, Operand(x1, SXTX, 3));
572   END();
573 
574   RUN();
575 
576   CHECK_EQUAL_64(0x00000081, x6);
577   CHECK_EQUAL_64(0x00010101, x7);
578   CHECK_EQUAL_64(0x00020201, x8);
579   CHECK_EQUAL_64(0x0000000400040401UL, x9);
580   CHECK_EQUAL_64(0x00000000ffffff81UL, x10);
581   CHECK_EQUAL_64(0xffffffffffff0101UL, x11);
582   CHECK_EQUAL_64(0xfffffffe00020201UL, x12);
583   CHECK_EQUAL_64(0x0000000400040401UL, x13);
584 
585   TEARDOWN();
586 }
587 
588 
589 TEST(bitwise_wide_imm) {
590   INIT_V8();
591   SETUP();
592 
593   START();
594   __ Mov(x0, 0);
595   __ Mov(x1, 0xf0f0f0f0f0f0f0f0UL);
596 
597   __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
598   __ Orr(w11, w1, Operand(0x90abcdef));
599 
600   __ Orr(w12, w0, kWMinInt);
601   __ Eor(w13, w0, kWMinInt);
602   END();
603 
604   RUN();
605 
606   CHECK_EQUAL_64(0, x0);
607   CHECK_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
608   CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
609   CHECK_EQUAL_64(0xf0fbfdffUL, x11);
610   CHECK_EQUAL_32(kWMinInt, w12);
611   CHECK_EQUAL_32(kWMinInt, w13);
612 
613   TEARDOWN();
614 }
615 
616 
617 TEST(orn) {
618   INIT_V8();
619   SETUP();
620 
621   START();
622   __ Mov(x0, 0xf0f0);
623   __ Mov(x1, 0xf00000ff);
624 
625   __ Orn(x2, x0, Operand(x1));
626   __ Orn(w3, w0, Operand(w1, LSL, 4));
627   __ Orn(x4, x0, Operand(x1, LSL, 4));
628   __ Orn(x5, x0, Operand(x1, LSR, 1));
629   __ Orn(w6, w0, Operand(w1, ASR, 1));
630   __ Orn(x7, x0, Operand(x1, ASR, 1));
631   __ Orn(w8, w0, Operand(w1, ROR, 16));
632   __ Orn(x9, x0, Operand(x1, ROR, 16));
633   __ Orn(w10, w0, Operand(0xffff));
634   __ Orn(x11, x0, Operand(0xffff0000ffffL));
635   END();
636 
637   RUN();
638 
639   CHECK_EQUAL_64(0xffffffff0ffffff0L, x2);
640   CHECK_EQUAL_64(0xfffff0ff, x3);
641   CHECK_EQUAL_64(0xfffffff0fffff0ffL, x4);
642   CHECK_EQUAL_64(0xffffffff87fffff0L, x5);
643   CHECK_EQUAL_64(0x07fffff0, x6);
644   CHECK_EQUAL_64(0xffffffff87fffff0L, x7);
645   CHECK_EQUAL_64(0xff00ffff, x8);
646   CHECK_EQUAL_64(0xff00ffffffffffffL, x9);
647   CHECK_EQUAL_64(0xfffff0f0, x10);
648   CHECK_EQUAL_64(0xffff0000fffff0f0L, x11);
649 
650   TEARDOWN();
651 }
652 
653 
654 TEST(orn_extend) {
655   INIT_V8();
656   SETUP();
657 
658   START();
659   __ Mov(x0, 1);
660   __ Mov(x1, 0x8000000080008081UL);
661   __ Orn(w6, w0, Operand(w1, UXTB));
662   __ Orn(x7, x0, Operand(x1, UXTH, 1));
663   __ Orn(w8, w0, Operand(w1, UXTW, 2));
664   __ Orn(x9, x0, Operand(x1, UXTX, 3));
665   __ Orn(w10, w0, Operand(w1, SXTB));
666   __ Orn(x11, x0, Operand(x1, SXTH, 1));
667   __ Orn(x12, x0, Operand(x1, SXTW, 2));
668   __ Orn(x13, x0, Operand(x1, SXTX, 3));
669   END();
670 
671   RUN();
672 
673   CHECK_EQUAL_64(0xffffff7f, x6);
674   CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
675   CHECK_EQUAL_64(0xfffdfdfb, x8);
676   CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
677   CHECK_EQUAL_64(0x0000007f, x10);
678   CHECK_EQUAL_64(0x0000fefd, x11);
679   CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
680   CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
681 
682   TEARDOWN();
683 }
684 
685 
686 TEST(and_) {
687   INIT_V8();
688   SETUP();
689 
690   START();
691   __ Mov(x0, 0xfff0);
692   __ Mov(x1, 0xf00000ff);
693 
694   __ And(x2, x0, Operand(x1));
695   __ And(w3, w0, Operand(w1, LSL, 4));
696   __ And(x4, x0, Operand(x1, LSL, 4));
697   __ And(x5, x0, Operand(x1, LSR, 1));
698   __ And(w6, w0, Operand(w1, ASR, 20));
699   __ And(x7, x0, Operand(x1, ASR, 20));
700   __ And(w8, w0, Operand(w1, ROR, 28));
701   __ And(x9, x0, Operand(x1, ROR, 28));
702   __ And(w10, w0, Operand(0xff00));
703   __ And(x11, x0, Operand(0xff));
704   END();
705 
706   RUN();
707 
708   CHECK_EQUAL_64(0x000000f0, x2);
709   CHECK_EQUAL_64(0x00000ff0, x3);
710   CHECK_EQUAL_64(0x00000ff0, x4);
711   CHECK_EQUAL_64(0x00000070, x5);
712   CHECK_EQUAL_64(0x0000ff00, x6);
713   CHECK_EQUAL_64(0x00000f00, x7);
714   CHECK_EQUAL_64(0x00000ff0, x8);
715   CHECK_EQUAL_64(0x00000000, x9);
716   CHECK_EQUAL_64(0x0000ff00, x10);
717   CHECK_EQUAL_64(0x000000f0, x11);
718 
719   TEARDOWN();
720 }
721 
722 
723 TEST(and_extend) {
724   INIT_V8();
725   SETUP();
726 
727   START();
728   __ Mov(x0, 0xffffffffffffffffUL);
729   __ Mov(x1, 0x8000000080008081UL);
730   __ And(w6, w0, Operand(w1, UXTB));
731   __ And(x7, x0, Operand(x1, UXTH, 1));
732   __ And(w8, w0, Operand(w1, UXTW, 2));
733   __ And(x9, x0, Operand(x1, UXTX, 3));
734   __ And(w10, w0, Operand(w1, SXTB));
735   __ And(x11, x0, Operand(x1, SXTH, 1));
736   __ And(x12, x0, Operand(x1, SXTW, 2));
737   __ And(x13, x0, Operand(x1, SXTX, 3));
738   END();
739 
740   RUN();
741 
742   CHECK_EQUAL_64(0x00000081, x6);
743   CHECK_EQUAL_64(0x00010102, x7);
744   CHECK_EQUAL_64(0x00020204, x8);
745   CHECK_EQUAL_64(0x0000000400040408UL, x9);
746   CHECK_EQUAL_64(0xffffff81, x10);
747   CHECK_EQUAL_64(0xffffffffffff0102UL, x11);
748   CHECK_EQUAL_64(0xfffffffe00020204UL, x12);
749   CHECK_EQUAL_64(0x0000000400040408UL, x13);
750 
751   TEARDOWN();
752 }
753 
754 
755 TEST(ands) {
756   INIT_V8();
757   SETUP();
758 
759   START();
760   __ Mov(x1, 0xf00000ff);
761   __ Ands(w0, w1, Operand(w1));
762   END();
763 
764   RUN();
765 
766   CHECK_EQUAL_NZCV(NFlag);
767   CHECK_EQUAL_64(0xf00000ff, x0);
768 
769   START();
770   __ Mov(x0, 0xfff0);
771   __ Mov(x1, 0xf00000ff);
772   __ Ands(w0, w0, Operand(w1, LSR, 4));
773   END();
774 
775   RUN();
776 
777   CHECK_EQUAL_NZCV(ZFlag);
778   CHECK_EQUAL_64(0x00000000, x0);
779 
780   START();
781   __ Mov(x0, 0x8000000000000000L);
782   __ Mov(x1, 0x00000001);
783   __ Ands(x0, x0, Operand(x1, ROR, 1));
784   END();
785 
786   RUN();
787 
788   CHECK_EQUAL_NZCV(NFlag);
789   CHECK_EQUAL_64(0x8000000000000000L, x0);
790 
791   START();
792   __ Mov(x0, 0xfff0);
793   __ Ands(w0, w0, Operand(0xf));
794   END();
795 
796   RUN();
797 
798   CHECK_EQUAL_NZCV(ZFlag);
799   CHECK_EQUAL_64(0x00000000, x0);
800 
801   START();
802   __ Mov(x0, 0xff000000);
803   __ Ands(w0, w0, Operand(0x80000000));
804   END();
805 
806   RUN();
807 
808   CHECK_EQUAL_NZCV(NFlag);
809   CHECK_EQUAL_64(0x80000000, x0);
810 
811   TEARDOWN();
812 }
813 
814 
815 TEST(bic) {
816   INIT_V8();
817   SETUP();
818 
819   START();
820   __ Mov(x0, 0xfff0);
821   __ Mov(x1, 0xf00000ff);
822 
823   __ Bic(x2, x0, Operand(x1));
824   __ Bic(w3, w0, Operand(w1, LSL, 4));
825   __ Bic(x4, x0, Operand(x1, LSL, 4));
826   __ Bic(x5, x0, Operand(x1, LSR, 1));
827   __ Bic(w6, w0, Operand(w1, ASR, 20));
828   __ Bic(x7, x0, Operand(x1, ASR, 20));
829   __ Bic(w8, w0, Operand(w1, ROR, 28));
830   __ Bic(x9, x0, Operand(x1, ROR, 24));
831   __ Bic(x10, x0, Operand(0x1f));
832   __ Bic(x11, x0, Operand(0x100));
833 
834   // Test bic into csp when the constant cannot be encoded in the immediate
835   // field.
836   // Use x20 to preserve csp. We check the result via x21 because the
837   // test infrastructure requires that csp be restored to its original value.
838   __ Mov(x20, csp);
839   __ Mov(x0, 0xffffff);
840   __ Bic(csp, x0, Operand(0xabcdef));
841   __ Mov(x21, csp);
842   __ Mov(csp, x20);
843   END();
844 
845   RUN();
846 
847   CHECK_EQUAL_64(0x0000ff00, x2);
848   CHECK_EQUAL_64(0x0000f000, x3);
849   CHECK_EQUAL_64(0x0000f000, x4);
850   CHECK_EQUAL_64(0x0000ff80, x5);
851   CHECK_EQUAL_64(0x000000f0, x6);
852   CHECK_EQUAL_64(0x0000f0f0, x7);
853   CHECK_EQUAL_64(0x0000f000, x8);
854   CHECK_EQUAL_64(0x0000ff00, x9);
855   CHECK_EQUAL_64(0x0000ffe0, x10);
856   CHECK_EQUAL_64(0x0000fef0, x11);
857 
858   CHECK_EQUAL_64(0x543210, x21);
859 
860   TEARDOWN();
861 }
862 
863 
864 TEST(bic_extend) {
865   INIT_V8();
866   SETUP();
867 
868   START();
869   __ Mov(x0, 0xffffffffffffffffUL);
870   __ Mov(x1, 0x8000000080008081UL);
871   __ Bic(w6, w0, Operand(w1, UXTB));
872   __ Bic(x7, x0, Operand(x1, UXTH, 1));
873   __ Bic(w8, w0, Operand(w1, UXTW, 2));
874   __ Bic(x9, x0, Operand(x1, UXTX, 3));
875   __ Bic(w10, w0, Operand(w1, SXTB));
876   __ Bic(x11, x0, Operand(x1, SXTH, 1));
877   __ Bic(x12, x0, Operand(x1, SXTW, 2));
878   __ Bic(x13, x0, Operand(x1, SXTX, 3));
879   END();
880 
881   RUN();
882 
883   CHECK_EQUAL_64(0xffffff7e, x6);
884   CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
885   CHECK_EQUAL_64(0xfffdfdfb, x8);
886   CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
887   CHECK_EQUAL_64(0x0000007e, x10);
888   CHECK_EQUAL_64(0x0000fefd, x11);
889   CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
890   CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
891 
892   TEARDOWN();
893 }
894 
895 
896 TEST(bics) {
897   INIT_V8();
898   SETUP();
899 
900   START();
901   __ Mov(x1, 0xffff);
902   __ Bics(w0, w1, Operand(w1));
903   END();
904 
905   RUN();
906 
907   CHECK_EQUAL_NZCV(ZFlag);
908   CHECK_EQUAL_64(0x00000000, x0);
909 
910   START();
911   __ Mov(x0, 0xffffffff);
912   __ Bics(w0, w0, Operand(w0, LSR, 1));
913   END();
914 
915   RUN();
916 
917   CHECK_EQUAL_NZCV(NFlag);
918   CHECK_EQUAL_64(0x80000000, x0);
919 
920   START();
921   __ Mov(x0, 0x8000000000000000L);
922   __ Mov(x1, 0x00000001);
923   __ Bics(x0, x0, Operand(x1, ROR, 1));
924   END();
925 
926   RUN();
927 
928   CHECK_EQUAL_NZCV(ZFlag);
929   CHECK_EQUAL_64(0x00000000, x0);
930 
931   START();
932   __ Mov(x0, 0xffffffffffffffffL);
933   __ Bics(x0, x0, Operand(0x7fffffffffffffffL));
934   END();
935 
936   RUN();
937 
938   CHECK_EQUAL_NZCV(NFlag);
939   CHECK_EQUAL_64(0x8000000000000000L, x0);
940 
941   START();
942   __ Mov(w0, 0xffff0000);
943   __ Bics(w0, w0, Operand(0xfffffff0));
944   END();
945 
946   RUN();
947 
948   CHECK_EQUAL_NZCV(ZFlag);
949   CHECK_EQUAL_64(0x00000000, x0);
950 
951   TEARDOWN();
952 }
953 
954 
955 TEST(eor) {
956   INIT_V8();
957   SETUP();
958 
959   START();
960   __ Mov(x0, 0xfff0);
961   __ Mov(x1, 0xf00000ff);
962 
963   __ Eor(x2, x0, Operand(x1));
964   __ Eor(w3, w0, Operand(w1, LSL, 4));
965   __ Eor(x4, x0, Operand(x1, LSL, 4));
966   __ Eor(x5, x0, Operand(x1, LSR, 1));
967   __ Eor(w6, w0, Operand(w1, ASR, 20));
968   __ Eor(x7, x0, Operand(x1, ASR, 20));
969   __ Eor(w8, w0, Operand(w1, ROR, 28));
970   __ Eor(x9, x0, Operand(x1, ROR, 28));
971   __ Eor(w10, w0, Operand(0xff00ff00));
972   __ Eor(x11, x0, Operand(0xff00ff00ff00ff00L));
973   END();
974 
975   RUN();
976 
977   CHECK_EQUAL_64(0xf000ff0f, x2);
978   CHECK_EQUAL_64(0x0000f000, x3);
979   CHECK_EQUAL_64(0x0000000f0000f000L, x4);
980   CHECK_EQUAL_64(0x7800ff8f, x5);
981   CHECK_EQUAL_64(0xffff00f0, x6);
982   CHECK_EQUAL_64(0x0000f0f0, x7);
983   CHECK_EQUAL_64(0x0000f00f, x8);
984   CHECK_EQUAL_64(0x00000ff00000ffffL, x9);
985   CHECK_EQUAL_64(0xff0000f0, x10);
986   CHECK_EQUAL_64(0xff00ff00ff0000f0L, x11);
987 
988   TEARDOWN();
989 }
990 
991 
992 TEST(eor_extend) {
993   INIT_V8();
994   SETUP();
995 
996   START();
997   __ Mov(x0, 0x1111111111111111UL);
998   __ Mov(x1, 0x8000000080008081UL);
999   __ Eor(w6, w0, Operand(w1, UXTB));
1000   __ Eor(x7, x0, Operand(x1, UXTH, 1));
1001   __ Eor(w8, w0, Operand(w1, UXTW, 2));
1002   __ Eor(x9, x0, Operand(x1, UXTX, 3));
1003   __ Eor(w10, w0, Operand(w1, SXTB));
1004   __ Eor(x11, x0, Operand(x1, SXTH, 1));
1005   __ Eor(x12, x0, Operand(x1, SXTW, 2));
1006   __ Eor(x13, x0, Operand(x1, SXTX, 3));
1007   END();
1008 
1009   RUN();
1010 
1011   CHECK_EQUAL_64(0x11111190, x6);
1012   CHECK_EQUAL_64(0x1111111111101013UL, x7);
1013   CHECK_EQUAL_64(0x11131315, x8);
1014   CHECK_EQUAL_64(0x1111111511151519UL, x9);
1015   CHECK_EQUAL_64(0xeeeeee90, x10);
1016   CHECK_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
1017   CHECK_EQUAL_64(0xeeeeeeef11131315UL, x12);
1018   CHECK_EQUAL_64(0x1111111511151519UL, x13);
1019 
1020   TEARDOWN();
1021 }
1022 
1023 
1024 TEST(eon) {
1025   INIT_V8();
1026   SETUP();
1027 
1028   START();
1029   __ Mov(x0, 0xfff0);
1030   __ Mov(x1, 0xf00000ff);
1031 
1032   __ Eon(x2, x0, Operand(x1));
1033   __ Eon(w3, w0, Operand(w1, LSL, 4));
1034   __ Eon(x4, x0, Operand(x1, LSL, 4));
1035   __ Eon(x5, x0, Operand(x1, LSR, 1));
1036   __ Eon(w6, w0, Operand(w1, ASR, 20));
1037   __ Eon(x7, x0, Operand(x1, ASR, 20));
1038   __ Eon(w8, w0, Operand(w1, ROR, 28));
1039   __ Eon(x9, x0, Operand(x1, ROR, 28));
1040   __ Eon(w10, w0, Operand(0x03c003c0));
1041   __ Eon(x11, x0, Operand(0x0000100000001000L));
1042   END();
1043 
1044   RUN();
1045 
1046   CHECK_EQUAL_64(0xffffffff0fff00f0L, x2);
1047   CHECK_EQUAL_64(0xffff0fff, x3);
1048   CHECK_EQUAL_64(0xfffffff0ffff0fffL, x4);
1049   CHECK_EQUAL_64(0xffffffff87ff0070L, x5);
1050   CHECK_EQUAL_64(0x0000ff0f, x6);
1051   CHECK_EQUAL_64(0xffffffffffff0f0fL, x7);
1052   CHECK_EQUAL_64(0xffff0ff0, x8);
1053   CHECK_EQUAL_64(0xfffff00fffff0000L, x9);
1054   CHECK_EQUAL_64(0xfc3f03cf, x10);
1055   CHECK_EQUAL_64(0xffffefffffff100fL, x11);
1056 
1057   TEARDOWN();
1058 }
1059 
1060 
1061 TEST(eon_extend) {
1062   INIT_V8();
1063   SETUP();
1064 
1065   START();
1066   __ Mov(x0, 0x1111111111111111UL);
1067   __ Mov(x1, 0x8000000080008081UL);
1068   __ Eon(w6, w0, Operand(w1, UXTB));
1069   __ Eon(x7, x0, Operand(x1, UXTH, 1));
1070   __ Eon(w8, w0, Operand(w1, UXTW, 2));
1071   __ Eon(x9, x0, Operand(x1, UXTX, 3));
1072   __ Eon(w10, w0, Operand(w1, SXTB));
1073   __ Eon(x11, x0, Operand(x1, SXTH, 1));
1074   __ Eon(x12, x0, Operand(x1, SXTW, 2));
1075   __ Eon(x13, x0, Operand(x1, SXTX, 3));
1076   END();
1077 
1078   RUN();
1079 
1080   CHECK_EQUAL_64(0xeeeeee6f, x6);
1081   CHECK_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
1082   CHECK_EQUAL_64(0xeeececea, x8);
1083   CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
1084   CHECK_EQUAL_64(0x1111116f, x10);
1085   CHECK_EQUAL_64(0x111111111111efecUL, x11);
1086   CHECK_EQUAL_64(0x11111110eeececeaUL, x12);
1087   CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
1088 
1089   TEARDOWN();
1090 }
1091 
1092 
1093 TEST(mul) {
1094   INIT_V8();
1095   SETUP();
1096 
1097   START();
1098   __ Mov(x16, 0);
1099   __ Mov(x17, 1);
1100   __ Mov(x18, 0xffffffff);
1101   __ Mov(x19, 0xffffffffffffffffUL);
1102 
1103   __ Mul(w0, w16, w16);
1104   __ Mul(w1, w16, w17);
1105   __ Mul(w2, w17, w18);
1106   __ Mul(w3, w18, w19);
1107   __ Mul(x4, x16, x16);
1108   __ Mul(x5, x17, x18);
1109   __ Mul(x6, x18, x19);
1110   __ Mul(x7, x19, x19);
1111   __ Smull(x8, w17, w18);
1112   __ Smull(x9, w18, w18);
1113   __ Smull(x10, w19, w19);
1114   __ Mneg(w11, w16, w16);
1115   __ Mneg(w12, w16, w17);
1116   __ Mneg(w13, w17, w18);
1117   __ Mneg(w14, w18, w19);
1118   __ Mneg(x20, x16, x16);
1119   __ Mneg(x21, x17, x18);
1120   __ Mneg(x22, x18, x19);
1121   __ Mneg(x23, x19, x19);
1122   END();
1123 
1124   RUN();
1125 
1126   CHECK_EQUAL_64(0, x0);
1127   CHECK_EQUAL_64(0, x1);
1128   CHECK_EQUAL_64(0xffffffff, x2);
1129   CHECK_EQUAL_64(1, x3);
1130   CHECK_EQUAL_64(0, x4);
1131   CHECK_EQUAL_64(0xffffffff, x5);
1132   CHECK_EQUAL_64(0xffffffff00000001UL, x6);
1133   CHECK_EQUAL_64(1, x7);
1134   CHECK_EQUAL_64(0xffffffffffffffffUL, x8);
1135   CHECK_EQUAL_64(1, x9);
1136   CHECK_EQUAL_64(1, x10);
1137   CHECK_EQUAL_64(0, x11);
1138   CHECK_EQUAL_64(0, x12);
1139   CHECK_EQUAL_64(1, x13);
1140   CHECK_EQUAL_64(0xffffffff, x14);
1141   CHECK_EQUAL_64(0, x20);
1142   CHECK_EQUAL_64(0xffffffff00000001UL, x21);
1143   CHECK_EQUAL_64(0xffffffff, x22);
1144   CHECK_EQUAL_64(0xffffffffffffffffUL, x23);
1145 
1146   TEARDOWN();
1147 }
1148 
1149 
1150 static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1151   SETUP();
1152   START();
1153   __ Mov(w0, a);
1154   __ Mov(w1, b);
1155   __ Smull(x2, w0, w1);
1156   END();
1157   RUN();
1158   CHECK_EQUAL_64(expected, x2);
1159   TEARDOWN();
1160 }
1161 
1162 
1163 TEST(smull) {
1164   INIT_V8();
1165   SmullHelper(0, 0, 0);
1166   SmullHelper(1, 1, 1);
1167   SmullHelper(-1, -1, 1);
1168   SmullHelper(1, -1, -1);
1169   SmullHelper(0xffffffff80000000, 0x80000000, 1);
1170   SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
1171 }
1172 
1173 
1174 TEST(madd) {
1175   INIT_V8();
1176   SETUP();
1177 
1178   START();
1179   __ Mov(x16, 0);
1180   __ Mov(x17, 1);
1181   __ Mov(x18, 0xffffffff);
1182   __ Mov(x19, 0xffffffffffffffffUL);
1183 
1184   __ Madd(w0, w16, w16, w16);
1185   __ Madd(w1, w16, w16, w17);
1186   __ Madd(w2, w16, w16, w18);
1187   __ Madd(w3, w16, w16, w19);
1188   __ Madd(w4, w16, w17, w17);
1189   __ Madd(w5, w17, w17, w18);
1190   __ Madd(w6, w17, w17, w19);
1191   __ Madd(w7, w17, w18, w16);
1192   __ Madd(w8, w17, w18, w18);
1193   __ Madd(w9, w18, w18, w17);
1194   __ Madd(w10, w18, w19, w18);
1195   __ Madd(w11, w19, w19, w19);
1196 
1197   __ Madd(x12, x16, x16, x16);
1198   __ Madd(x13, x16, x16, x17);
1199   __ Madd(x14, x16, x16, x18);
1200   __ Madd(x15, x16, x16, x19);
1201   __ Madd(x20, x16, x17, x17);
1202   __ Madd(x21, x17, x17, x18);
1203   __ Madd(x22, x17, x17, x19);
1204   __ Madd(x23, x17, x18, x16);
1205   __ Madd(x24, x17, x18, x18);
1206   __ Madd(x25, x18, x18, x17);
1207   __ Madd(x26, x18, x19, x18);
1208   __ Madd(x27, x19, x19, x19);
1209 
1210   END();
1211 
1212   RUN();
1213 
1214   CHECK_EQUAL_64(0, x0);
1215   CHECK_EQUAL_64(1, x1);
1216   CHECK_EQUAL_64(0xffffffff, x2);
1217   CHECK_EQUAL_64(0xffffffff, x3);
1218   CHECK_EQUAL_64(1, x4);
1219   CHECK_EQUAL_64(0, x5);
1220   CHECK_EQUAL_64(0, x6);
1221   CHECK_EQUAL_64(0xffffffff, x7);
1222   CHECK_EQUAL_64(0xfffffffe, x8);
1223   CHECK_EQUAL_64(2, x9);
1224   CHECK_EQUAL_64(0, x10);
1225   CHECK_EQUAL_64(0, x11);
1226 
1227   CHECK_EQUAL_64(0, x12);
1228   CHECK_EQUAL_64(1, x13);
1229   CHECK_EQUAL_64(0xffffffff, x14);
1230   CHECK_EQUAL_64(0xffffffffffffffff, x15);
1231   CHECK_EQUAL_64(1, x20);
1232   CHECK_EQUAL_64(0x100000000UL, x21);
1233   CHECK_EQUAL_64(0, x22);
1234   CHECK_EQUAL_64(0xffffffff, x23);
1235   CHECK_EQUAL_64(0x1fffffffe, x24);
1236   CHECK_EQUAL_64(0xfffffffe00000002UL, x25);
1237   CHECK_EQUAL_64(0, x26);
1238   CHECK_EQUAL_64(0, x27);
1239 
1240   TEARDOWN();
1241 }
1242 
1243 
1244 TEST(msub) {
1245   INIT_V8();
1246   SETUP();
1247 
1248   START();
1249   __ Mov(x16, 0);
1250   __ Mov(x17, 1);
1251   __ Mov(x18, 0xffffffff);
1252   __ Mov(x19, 0xffffffffffffffffUL);
1253 
1254   __ Msub(w0, w16, w16, w16);
1255   __ Msub(w1, w16, w16, w17);
1256   __ Msub(w2, w16, w16, w18);
1257   __ Msub(w3, w16, w16, w19);
1258   __ Msub(w4, w16, w17, w17);
1259   __ Msub(w5, w17, w17, w18);
1260   __ Msub(w6, w17, w17, w19);
1261   __ Msub(w7, w17, w18, w16);
1262   __ Msub(w8, w17, w18, w18);
1263   __ Msub(w9, w18, w18, w17);
1264   __ Msub(w10, w18, w19, w18);
1265   __ Msub(w11, w19, w19, w19);
1266 
1267   __ Msub(x12, x16, x16, x16);
1268   __ Msub(x13, x16, x16, x17);
1269   __ Msub(x14, x16, x16, x18);
1270   __ Msub(x15, x16, x16, x19);
1271   __ Msub(x20, x16, x17, x17);
1272   __ Msub(x21, x17, x17, x18);
1273   __ Msub(x22, x17, x17, x19);
1274   __ Msub(x23, x17, x18, x16);
1275   __ Msub(x24, x17, x18, x18);
1276   __ Msub(x25, x18, x18, x17);
1277   __ Msub(x26, x18, x19, x18);
1278   __ Msub(x27, x19, x19, x19);
1279 
1280   END();
1281 
1282   RUN();
1283 
1284   CHECK_EQUAL_64(0, x0);
1285   CHECK_EQUAL_64(1, x1);
1286   CHECK_EQUAL_64(0xffffffff, x2);
1287   CHECK_EQUAL_64(0xffffffff, x3);
1288   CHECK_EQUAL_64(1, x4);
1289   CHECK_EQUAL_64(0xfffffffe, x5);
1290   CHECK_EQUAL_64(0xfffffffe, x6);
1291   CHECK_EQUAL_64(1, x7);
1292   CHECK_EQUAL_64(0, x8);
1293   CHECK_EQUAL_64(0, x9);
1294   CHECK_EQUAL_64(0xfffffffe, x10);
1295   CHECK_EQUAL_64(0xfffffffe, x11);
1296 
1297   CHECK_EQUAL_64(0, x12);
1298   CHECK_EQUAL_64(1, x13);
1299   CHECK_EQUAL_64(0xffffffff, x14);
1300   CHECK_EQUAL_64(0xffffffffffffffffUL, x15);
1301   CHECK_EQUAL_64(1, x20);
1302   CHECK_EQUAL_64(0xfffffffeUL, x21);
1303   CHECK_EQUAL_64(0xfffffffffffffffeUL, x22);
1304   CHECK_EQUAL_64(0xffffffff00000001UL, x23);
1305   CHECK_EQUAL_64(0, x24);
1306   CHECK_EQUAL_64(0x200000000UL, x25);
1307   CHECK_EQUAL_64(0x1fffffffeUL, x26);
1308   CHECK_EQUAL_64(0xfffffffffffffffeUL, x27);
1309 
1310   TEARDOWN();
1311 }
1312 
1313 
1314 TEST(smulh) {
1315   INIT_V8();
1316   SETUP();
1317 
1318   START();
1319   __ Mov(x20, 0);
1320   __ Mov(x21, 1);
1321   __ Mov(x22, 0x0000000100000000L);
1322   __ Mov(x23, 0x12345678);
1323   __ Mov(x24, 0x0123456789abcdefL);
1324   __ Mov(x25, 0x0000000200000000L);
1325   __ Mov(x26, 0x8000000000000000UL);
1326   __ Mov(x27, 0xffffffffffffffffUL);
1327   __ Mov(x28, 0x5555555555555555UL);
1328   __ Mov(x29, 0xaaaaaaaaaaaaaaaaUL);
1329 
1330   __ Smulh(x0, x20, x24);
1331   __ Smulh(x1, x21, x24);
1332   __ Smulh(x2, x22, x23);
1333   __ Smulh(x3, x22, x24);
1334   __ Smulh(x4, x24, x25);
1335   __ Smulh(x5, x23, x27);
1336   __ Smulh(x6, x26, x26);
1337   __ Smulh(x7, x26, x27);
1338   __ Smulh(x8, x27, x27);
1339   __ Smulh(x9, x28, x28);
1340   __ Smulh(x10, x28, x29);
1341   __ Smulh(x11, x29, x29);
1342   END();
1343 
1344   RUN();
1345 
1346   CHECK_EQUAL_64(0, x0);
1347   CHECK_EQUAL_64(0, x1);
1348   CHECK_EQUAL_64(0, x2);
1349   CHECK_EQUAL_64(0x01234567, x3);
1350   CHECK_EQUAL_64(0x02468acf, x4);
1351   CHECK_EQUAL_64(0xffffffffffffffffUL, x5);
1352   CHECK_EQUAL_64(0x4000000000000000UL, x6);
1353   CHECK_EQUAL_64(0, x7);
1354   CHECK_EQUAL_64(0, x8);
1355   CHECK_EQUAL_64(0x1c71c71c71c71c71UL, x9);
1356   CHECK_EQUAL_64(0xe38e38e38e38e38eUL, x10);
1357   CHECK_EQUAL_64(0x1c71c71c71c71c72UL, x11);
1358 
1359   TEARDOWN();
1360 }
1361 
1362 
1363 TEST(smaddl_umaddl) {
1364   INIT_V8();
1365   SETUP();
1366 
1367   START();
1368   __ Mov(x17, 1);
1369   __ Mov(x18, 0xffffffff);
1370   __ Mov(x19, 0xffffffffffffffffUL);
1371   __ Mov(x20, 4);
1372   __ Mov(x21, 0x200000000UL);
1373 
1374   __ Smaddl(x9, w17, w18, x20);
1375   __ Smaddl(x10, w18, w18, x20);
1376   __ Smaddl(x11, w19, w19, x20);
1377   __ Smaddl(x12, w19, w19, x21);
1378   __ Umaddl(x13, w17, w18, x20);
1379   __ Umaddl(x14, w18, w18, x20);
1380   __ Umaddl(x15, w19, w19, x20);
1381   __ Umaddl(x22, w19, w19, x21);
1382   END();
1383 
1384   RUN();
1385 
1386   CHECK_EQUAL_64(3, x9);
1387   CHECK_EQUAL_64(5, x10);
1388   CHECK_EQUAL_64(5, x11);
1389   CHECK_EQUAL_64(0x200000001UL, x12);
1390   CHECK_EQUAL_64(0x100000003UL, x13);
1391   CHECK_EQUAL_64(0xfffffffe00000005UL, x14);
1392   CHECK_EQUAL_64(0xfffffffe00000005UL, x15);
1393   CHECK_EQUAL_64(0x1, x22);
1394 
1395   TEARDOWN();
1396 }
1397 
1398 
1399 TEST(smsubl_umsubl) {
1400   INIT_V8();
1401   SETUP();
1402 
1403   START();
1404   __ Mov(x17, 1);
1405   __ Mov(x18, 0xffffffff);
1406   __ Mov(x19, 0xffffffffffffffffUL);
1407   __ Mov(x20, 4);
1408   __ Mov(x21, 0x200000000UL);
1409 
1410   __ Smsubl(x9, w17, w18, x20);
1411   __ Smsubl(x10, w18, w18, x20);
1412   __ Smsubl(x11, w19, w19, x20);
1413   __ Smsubl(x12, w19, w19, x21);
1414   __ Umsubl(x13, w17, w18, x20);
1415   __ Umsubl(x14, w18, w18, x20);
1416   __ Umsubl(x15, w19, w19, x20);
1417   __ Umsubl(x22, w19, w19, x21);
1418   END();
1419 
1420   RUN();
1421 
1422   CHECK_EQUAL_64(5, x9);
1423   CHECK_EQUAL_64(3, x10);
1424   CHECK_EQUAL_64(3, x11);
1425   CHECK_EQUAL_64(0x1ffffffffUL, x12);
1426   CHECK_EQUAL_64(0xffffffff00000005UL, x13);
1427   CHECK_EQUAL_64(0x200000003UL, x14);
1428   CHECK_EQUAL_64(0x200000003UL, x15);
1429   CHECK_EQUAL_64(0x3ffffffffUL, x22);
1430 
1431   TEARDOWN();
1432 }
1433 
1434 
1435 TEST(div) {
1436   INIT_V8();
1437   SETUP();
1438 
1439   START();
1440   __ Mov(x16, 1);
1441   __ Mov(x17, 0xffffffff);
1442   __ Mov(x18, 0xffffffffffffffffUL);
1443   __ Mov(x19, 0x80000000);
1444   __ Mov(x20, 0x8000000000000000UL);
1445   __ Mov(x21, 2);
1446 
1447   __ Udiv(w0, w16, w16);
1448   __ Udiv(w1, w17, w16);
1449   __ Sdiv(w2, w16, w16);
1450   __ Sdiv(w3, w16, w17);
1451   __ Sdiv(w4, w17, w18);
1452 
1453   __ Udiv(x5, x16, x16);
1454   __ Udiv(x6, x17, x18);
1455   __ Sdiv(x7, x16, x16);
1456   __ Sdiv(x8, x16, x17);
1457   __ Sdiv(x9, x17, x18);
1458 
1459   __ Udiv(w10, w19, w21);
1460   __ Sdiv(w11, w19, w21);
1461   __ Udiv(x12, x19, x21);
1462   __ Sdiv(x13, x19, x21);
1463   __ Udiv(x14, x20, x21);
1464   __ Sdiv(x15, x20, x21);
1465 
1466   __ Udiv(w22, w19, w17);
1467   __ Sdiv(w23, w19, w17);
1468   __ Udiv(x24, x20, x18);
1469   __ Sdiv(x25, x20, x18);
1470 
1471   __ Udiv(x26, x16, x21);
1472   __ Sdiv(x27, x16, x21);
1473   __ Udiv(x28, x18, x21);
1474   __ Sdiv(x29, x18, x21);
1475 
1476   __ Mov(x17, 0);
1477   __ Udiv(w18, w16, w17);
1478   __ Sdiv(w19, w16, w17);
1479   __ Udiv(x20, x16, x17);
1480   __ Sdiv(x21, x16, x17);
1481   END();
1482 
1483   RUN();
1484 
1485   CHECK_EQUAL_64(1, x0);
1486   CHECK_EQUAL_64(0xffffffff, x1);
1487   CHECK_EQUAL_64(1, x2);
1488   CHECK_EQUAL_64(0xffffffff, x3);
1489   CHECK_EQUAL_64(1, x4);
1490   CHECK_EQUAL_64(1, x5);
1491   CHECK_EQUAL_64(0, x6);
1492   CHECK_EQUAL_64(1, x7);
1493   CHECK_EQUAL_64(0, x8);
1494   CHECK_EQUAL_64(0xffffffff00000001UL, x9);
1495   CHECK_EQUAL_64(0x40000000, x10);
1496   CHECK_EQUAL_64(0xC0000000, x11);
1497   CHECK_EQUAL_64(0x40000000, x12);
1498   CHECK_EQUAL_64(0x40000000, x13);
1499   CHECK_EQUAL_64(0x4000000000000000UL, x14);
1500   CHECK_EQUAL_64(0xC000000000000000UL, x15);
1501   CHECK_EQUAL_64(0, x22);
1502   CHECK_EQUAL_64(0x80000000, x23);
1503   CHECK_EQUAL_64(0, x24);
1504   CHECK_EQUAL_64(0x8000000000000000UL, x25);
1505   CHECK_EQUAL_64(0, x26);
1506   CHECK_EQUAL_64(0, x27);
1507   CHECK_EQUAL_64(0x7fffffffffffffffUL, x28);
1508   CHECK_EQUAL_64(0, x29);
1509   CHECK_EQUAL_64(0, x18);
1510   CHECK_EQUAL_64(0, x19);
1511   CHECK_EQUAL_64(0, x20);
1512   CHECK_EQUAL_64(0, x21);
1513 
1514   TEARDOWN();
1515 }
1516 
1517 
1518 TEST(rbit_rev) {
1519   INIT_V8();
1520   SETUP();
1521 
1522   START();
1523   __ Mov(x24, 0xfedcba9876543210UL);
1524   __ Rbit(w0, w24);
1525   __ Rbit(x1, x24);
1526   __ Rev16(w2, w24);
1527   __ Rev16(x3, x24);
1528   __ Rev(w4, w24);
1529   __ Rev32(x5, x24);
1530   __ Rev(x6, x24);
1531   END();
1532 
1533   RUN();
1534 
1535   CHECK_EQUAL_64(0x084c2a6e, x0);
1536   CHECK_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
1537   CHECK_EQUAL_64(0x54761032, x2);
1538   CHECK_EQUAL_64(0xdcfe98ba54761032UL, x3);
1539   CHECK_EQUAL_64(0x10325476, x4);
1540   CHECK_EQUAL_64(0x98badcfe10325476UL, x5);
1541   CHECK_EQUAL_64(0x1032547698badcfeUL, x6);
1542 
1543   TEARDOWN();
1544 }
1545 
1546 
1547 TEST(clz_cls) {
1548   INIT_V8();
1549   SETUP();
1550 
1551   START();
1552   __ Mov(x24, 0x0008000000800000UL);
1553   __ Mov(x25, 0xff800000fff80000UL);
1554   __ Mov(x26, 0);
1555   __ Clz(w0, w24);
1556   __ Clz(x1, x24);
1557   __ Clz(w2, w25);
1558   __ Clz(x3, x25);
1559   __ Clz(w4, w26);
1560   __ Clz(x5, x26);
1561   __ Cls(w6, w24);
1562   __ Cls(x7, x24);
1563   __ Cls(w8, w25);
1564   __ Cls(x9, x25);
1565   __ Cls(w10, w26);
1566   __ Cls(x11, x26);
1567   END();
1568 
1569   RUN();
1570 
1571   CHECK_EQUAL_64(8, x0);
1572   CHECK_EQUAL_64(12, x1);
1573   CHECK_EQUAL_64(0, x2);
1574   CHECK_EQUAL_64(0, x3);
1575   CHECK_EQUAL_64(32, x4);
1576   CHECK_EQUAL_64(64, x5);
1577   CHECK_EQUAL_64(7, x6);
1578   CHECK_EQUAL_64(11, x7);
1579   CHECK_EQUAL_64(12, x8);
1580   CHECK_EQUAL_64(8, x9);
1581   CHECK_EQUAL_64(31, x10);
1582   CHECK_EQUAL_64(63, x11);
1583 
1584   TEARDOWN();
1585 }
1586 
1587 
1588 TEST(label) {
1589   INIT_V8();
1590   SETUP();
1591 
1592   Label label_1, label_2, label_3, label_4;
1593 
1594   START();
1595   __ Mov(x0, 0x1);
1596   __ Mov(x1, 0x0);
1597   __ Mov(x22, lr);    // Save lr.
1598 
1599   __ B(&label_1);
1600   __ B(&label_1);
1601   __ B(&label_1);     // Multiple branches to the same label.
1602   __ Mov(x0, 0x0);
1603   __ Bind(&label_2);
1604   __ B(&label_3);     // Forward branch.
1605   __ Mov(x0, 0x0);
1606   __ Bind(&label_1);
1607   __ B(&label_2);     // Backward branch.
1608   __ Mov(x0, 0x0);
1609   __ Bind(&label_3);
1610   __ Bl(&label_4);
1611   END();
1612 
1613   __ Bind(&label_4);
1614   __ Mov(x1, 0x1);
1615   __ Mov(lr, x22);
1616   END();
1617 
1618   RUN();
1619 
1620   CHECK_EQUAL_64(0x1, x0);
1621   CHECK_EQUAL_64(0x1, x1);
1622 
1623   TEARDOWN();
1624 }
1625 
1626 
1627 TEST(branch_at_start) {
1628   INIT_V8();
1629   SETUP();
1630 
1631   Label good, exit;
1632 
1633   // Test that branches can exist at the start of the buffer. (This is a
1634   // boundary condition in the label-handling code.) To achieve this, we have
1635   // to work around the code generated by START.
1636   RESET();
1637   __ B(&good);
1638 
1639   START_AFTER_RESET();
1640   __ Mov(x0, 0x0);
1641   END();
1642 
1643   __ Bind(&exit);
1644   START_AFTER_RESET();
1645   __ Mov(x0, 0x1);
1646   END();
1647 
1648   __ Bind(&good);
1649   __ B(&exit);
1650   END();
1651 
1652   RUN();
1653 
1654   CHECK_EQUAL_64(0x1, x0);
1655   TEARDOWN();
1656 }
1657 
1658 
1659 TEST(adr) {
1660   INIT_V8();
1661   SETUP();
1662 
1663   Label label_1, label_2, label_3, label_4;
1664 
1665   START();
1666   __ Mov(x0, 0x0);        // Set to non-zero to indicate failure.
1667   __ Adr(x1, &label_3);   // Set to zero to indicate success.
1668 
1669   __ Adr(x2, &label_1);   // Multiple forward references to the same label.
1670   __ Adr(x3, &label_1);
1671   __ Adr(x4, &label_1);
1672 
1673   __ Bind(&label_2);
1674   __ Eor(x5, x2, Operand(x3));  // Ensure that x2, x3 and x4 are identical.
1675   __ Eor(x6, x2, Operand(x4));
1676   __ Orr(x0, x0, Operand(x5));
1677   __ Orr(x0, x0, Operand(x6));
1678   __ Br(x2);  // label_1, label_3
1679 
1680   __ Bind(&label_3);
1681   __ Adr(x2, &label_3);   // Self-reference (offset 0).
1682   __ Eor(x1, x1, Operand(x2));
1683   __ Adr(x2, &label_4);   // Simple forward reference.
1684   __ Br(x2);  // label_4
1685 
1686   __ Bind(&label_1);
1687   __ Adr(x2, &label_3);   // Multiple reverse references to the same label.
1688   __ Adr(x3, &label_3);
1689   __ Adr(x4, &label_3);
1690   __ Adr(x5, &label_2);   // Simple reverse reference.
1691   __ Br(x5);  // label_2
1692 
1693   __ Bind(&label_4);
1694   END();
1695 
1696   RUN();
1697 
1698   CHECK_EQUAL_64(0x0, x0);
1699   CHECK_EQUAL_64(0x0, x1);
1700 
1701   TEARDOWN();
1702 }
1703 
1704 
1705 TEST(adr_far) {
1706   INIT_V8();
1707 
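  // The buffer is sized so that labels can be placed beyond the PC-relative
  // range of a single adr instruction, forcing the kAdrFar sequences used
  // below to be exercised.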
1708   int max_range = 1 << (Instruction::ImmPCRelRangeBitwidth - 1);
1709   SETUP_SIZE(max_range + 1000 * kInstructionSize);
1710 
1711   Label done, fail;
1712   Label test_near, near_forward, near_backward;
1713   Label test_far, far_forward, far_backward;
1714 
1715   START();
1716   __ Mov(x0, 0x0);
1717 
1718   __ Bind(&test_near);
1719   __ Adr(x10, &near_forward, MacroAssembler::kAdrFar);
1720   __ Br(x10);
1721   __ B(&fail);
1722   __ Bind(&near_backward);
1723   __ Orr(x0, x0, 1 << 1);
1724   __ B(&test_far);
1725 
1726   __ Bind(&near_forward);
1727   __ Orr(x0, x0, 1 << 0);
1728   __ Adr(x10, &near_backward, MacroAssembler::kAdrFar);
1729   __ Br(x10);
1730 
1731   __ Bind(&test_far);
1732   __ Adr(x10, &far_forward, MacroAssembler::kAdrFar);
1733   __ Br(x10);
1734   __ B(&fail);
1735   __ Bind(&far_backward);
1736   __ Orr(x0, x0, 1 << 3);
1737   __ B(&done);
1738 
1739   for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
1740     if (i % 100 == 0) {
1741       // If we do land in this code, we do not want to execute so many nops
1742       // before reaching the end of the test (especially if tracing is on).
1743       __ b(&fail);
1744     } else {
1745       __ nop();
1746     }
1747   }
1748 
1749 
1750   __ Bind(&far_forward);
1751   __ Orr(x0, x0, 1 << 2);
1752   __ Adr(x10, &far_backward, MacroAssembler::kAdrFar);
1753   __ Br(x10);
1754 
1755   __ B(&done);
1756   __ Bind(&fail);
1757   __ Orr(x0, x0, 1 << 4);
1758   __ Bind(&done);
1759 
1760   END();
1761 
1762   RUN();
1763 
1764   CHECK_EQUAL_64(0xf, x0);
1765 
1766   TEARDOWN();
1767 }
1768 
1769 
1770 TEST(branch_cond) {
1771   INIT_V8();
1772   SETUP();
1773 
1774   Label wrong;
1775 
1776   START();
1777   __ Mov(x0, 0x1);
1778   __ Mov(x1, 0x1);
1779   __ Mov(x2, 0x8000000000000000L);
1780 
1781   // For each 'cmp' instruction below, condition codes other than the ones
1782   // following it would branch.
1783 
1784   __ Cmp(x1, 0);
1785   __ B(&wrong, eq);
1786   __ B(&wrong, lo);
1787   __ B(&wrong, mi);
1788   __ B(&wrong, vs);
1789   __ B(&wrong, ls);
1790   __ B(&wrong, lt);
1791   __ B(&wrong, le);
1792   Label ok_1;
1793   __ B(&ok_1, ne);
1794   __ Mov(x0, 0x0);
1795   __ Bind(&ok_1);
1796 
1797   __ Cmp(x1, 1);
1798   __ B(&wrong, ne);
1799   __ B(&wrong, lo);
1800   __ B(&wrong, mi);
1801   __ B(&wrong, vs);
1802   __ B(&wrong, hi);
1803   __ B(&wrong, lt);
1804   __ B(&wrong, gt);
1805   Label ok_2;
1806   __ B(&ok_2, pl);
1807   __ Mov(x0, 0x0);
1808   __ Bind(&ok_2);
1809 
1810   __ Cmp(x1, 2);
1811   __ B(&wrong, eq);
1812   __ B(&wrong, hs);
1813   __ B(&wrong, pl);
1814   __ B(&wrong, vs);
1815   __ B(&wrong, hi);
1816   __ B(&wrong, ge);
1817   __ B(&wrong, gt);
1818   Label ok_3;
1819   __ B(&ok_3, vc);
1820   __ Mov(x0, 0x0);
1821   __ Bind(&ok_3);
1822 
1823   __ Cmp(x2, 1);
1824   __ B(&wrong, eq);
1825   __ B(&wrong, lo);
1826   __ B(&wrong, mi);
1827   __ B(&wrong, vc);
1828   __ B(&wrong, ls);
1829   __ B(&wrong, ge);
1830   __ B(&wrong, gt);
1831   Label ok_4;
1832   __ B(&ok_4, le);
1833   __ Mov(x0, 0x0);
1834   __ Bind(&ok_4);
1835 
1836   Label ok_5;
1837   __ b(&ok_5, al);
1838   __ Mov(x0, 0x0);
1839   __ Bind(&ok_5);
1840 
1841   Label ok_6;
1842   __ b(&ok_6, nv);
1843   __ Mov(x0, 0x0);
1844   __ Bind(&ok_6);
1845 
1846   END();
1847 
1848   __ Bind(&wrong);
1849   __ Mov(x0, 0x0);
1850   END();
1851 
1852   RUN();
1853 
1854   CHECK_EQUAL_64(0x1, x0);
1855 
1856   TEARDOWN();
1857 }
1858 
1859 
1860 TEST(branch_to_reg) {
1861   INIT_V8();
1862   SETUP();
1863 
1864   // Test br.
1865   Label fn1, after_fn1;
1866 
1867   START();
1868   __ Mov(x29, lr);
1869 
1870   __ Mov(x1, 0);
1871   __ B(&after_fn1);
1872 
1873   __ Bind(&fn1);
1874   __ Mov(x0, lr);
1875   __ Mov(x1, 42);
1876   __ Br(x0);
1877 
1878   __ Bind(&after_fn1);
1879   __ Bl(&fn1);
1880 
1881   // Test blr.
1882   Label fn2, after_fn2;
1883 
1884   __ Mov(x2, 0);
1885   __ B(&after_fn2);
1886 
1887   __ Bind(&fn2);
1888   __ Mov(x0, lr);
1889   __ Mov(x2, 84);
1890   __ Blr(x0);
1891 
1892   __ Bind(&after_fn2);
1893   __ Bl(&fn2);
1894   __ Mov(x3, lr);
1895 
1896   __ Mov(lr, x29);
1897   END();
1898 
1899   RUN();
1900 
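  // x0 holds the link address captured inside fn2, i.e. the address of the
  // instruction following Bl(&fn2). x3 holds the address of Bl(&fn2) itself
  // (the value lr received from the Blr inside fn2), so the two differ by
  // exactly one instruction.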
1901   CHECK_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
1902   CHECK_EQUAL_64(42, x1);
1903   CHECK_EQUAL_64(84, x2);
1904 
1905   TEARDOWN();
1906 }
1907 
1908 
1909 TEST(compare_branch) {
1910   INIT_V8();
1911   SETUP();
1912 
1913   START();
1914   __ Mov(x0, 0);
1915   __ Mov(x1, 0);
1916   __ Mov(x2, 0);
1917   __ Mov(x3, 0);
1918   __ Mov(x4, 0);
1919   __ Mov(x5, 0);
1920   __ Mov(x16, 0);
1921   __ Mov(x17, 42);
1922 
1923   Label zt, zt_end;
1924   __ Cbz(w16, &zt);
1925   __ B(&zt_end);
1926   __ Bind(&zt);
1927   __ Mov(x0, 1);
1928   __ Bind(&zt_end);
1929 
1930   Label zf, zf_end;
1931   __ Cbz(x17, &zf);
1932   __ B(&zf_end);
1933   __ Bind(&zf);
1934   __ Mov(x1, 1);
1935   __ Bind(&zf_end);
1936 
1937   Label nzt, nzt_end;
1938   __ Cbnz(w17, &nzt);
1939   __ B(&nzt_end);
1940   __ Bind(&nzt);
1941   __ Mov(x2, 1);
1942   __ Bind(&nzt_end);
1943 
1944   Label nzf, nzf_end;
1945   __ Cbnz(x16, &nzf);
1946   __ B(&nzf_end);
1947   __ Bind(&nzf);
1948   __ Mov(x3, 1);
1949   __ Bind(&nzf_end);
1950 
1951   __ Mov(x18, 0xffffffff00000000UL);
1952 
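  // x18 has its high 32 bits set and its low 32 bits clear, so the W-register
  // forms below should only look at the low 32 bits: Cbz(w18, ...) is
  // expected to be taken and Cbnz(w18, ...) is not.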
1953   Label a, a_end;
1954   __ Cbz(w18, &a);
1955   __ B(&a_end);
1956   __ Bind(&a);
1957   __ Mov(x4, 1);
1958   __ Bind(&a_end);
1959 
1960   Label b, b_end;
1961   __ Cbnz(w18, &b);
1962   __ B(&b_end);
1963   __ Bind(&b);
1964   __ Mov(x5, 1);
1965   __ Bind(&b_end);
1966 
1967   END();
1968 
1969   RUN();
1970 
1971   CHECK_EQUAL_64(1, x0);
1972   CHECK_EQUAL_64(0, x1);
1973   CHECK_EQUAL_64(1, x2);
1974   CHECK_EQUAL_64(0, x3);
1975   CHECK_EQUAL_64(1, x4);
1976   CHECK_EQUAL_64(0, x5);
1977 
1978   TEARDOWN();
1979 }
1980 
1981 
1982 TEST(test_branch) {
1983   INIT_V8();
1984   SETUP();
1985 
1986   START();
1987   __ Mov(x0, 0);
1988   __ Mov(x1, 0);
1989   __ Mov(x2, 0);
1990   __ Mov(x3, 0);
1991   __ Mov(x16, 0xaaaaaaaaaaaaaaaaUL);
1992 
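  // 0xaaaa...aaaa has every odd-numbered bit set and every even-numbered bit
  // clear, so Tbz is taken for even bit positions (0, 2, ...) and not for odd
  // ones (61, 63, ...), with Tbnz behaving the other way around.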
1993   Label bz, bz_end;
1994   __ Tbz(w16, 0, &bz);
1995   __ B(&bz_end);
1996   __ Bind(&bz);
1997   __ Mov(x0, 1);
1998   __ Bind(&bz_end);
1999 
2000   Label bo, bo_end;
2001   __ Tbz(x16, 63, &bo);
2002   __ B(&bo_end);
2003   __ Bind(&bo);
2004   __ Mov(x1, 1);
2005   __ Bind(&bo_end);
2006 
2007   Label nbz, nbz_end;
2008   __ Tbnz(x16, 61, &nbz);
2009   __ B(&nbz_end);
2010   __ Bind(&nbz);
2011   __ Mov(x2, 1);
2012   __ Bind(&nbz_end);
2013 
2014   Label nbo, nbo_end;
2015   __ Tbnz(w16, 2, &nbo);
2016   __ B(&nbo_end);
2017   __ Bind(&nbo);
2018   __ Mov(x3, 1);
2019   __ Bind(&nbo_end);
2020   END();
2021 
2022   RUN();
2023 
2024   CHECK_EQUAL_64(1, x0);
2025   CHECK_EQUAL_64(0, x1);
2026   CHECK_EQUAL_64(1, x2);
2027   CHECK_EQUAL_64(0, x3);
2028 
2029   TEARDOWN();
2030 }
2031 
2032 
2033 TEST(far_branch_backward) {
2034   INIT_V8();
2035 
2036   // Test that the MacroAssembler correctly resolves backward branches to labels
2037   // that are outside the immediate range of branch instructions.
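  // On AArch64 the encodable branch offsets differ by instruction type:
  // tbz/tbnz use a 14-bit immediate (roughly +/-32KB), cbz/cbnz and b.cond
  // use a 19-bit immediate (roughly +/-1MB), and unconditional b/bl use a
  // 26-bit immediate (roughly +/-128MB). max_range below is the largest of
  // the short-range forms exercised by this test.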
2038   int max_range =
2039     std::max(Instruction::ImmBranchRange(TestBranchType),
2040              std::max(Instruction::ImmBranchRange(CompareBranchType),
2041                       Instruction::ImmBranchRange(CondBranchType)));
2042 
2043   SETUP_SIZE(max_range + 1000 * kInstructionSize);
2044 
2045   START();
2046 
2047   Label done, fail;
2048   Label test_tbz, test_cbz, test_bcond;
2049   Label success_tbz, success_cbz, success_bcond;
2050 
2051   __ Mov(x0, 0);
2052   __ Mov(x1, 1);
2053   __ Mov(x10, 0);
2054 
2055   __ B(&test_tbz);
2056   __ Bind(&success_tbz);
2057   __ Orr(x0, x0, 1 << 0);
2058   __ B(&test_cbz);
2059   __ Bind(&success_cbz);
2060   __ Orr(x0, x0, 1 << 1);
2061   __ B(&test_bcond);
2062   __ Bind(&success_bcond);
2063   __ Orr(x0, x0, 1 << 2);
2064 
2065   __ B(&done);
2066 
2067   // Generate enough code to overflow the immediate range of the three types of
2068   // branches below.
2069   for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2070     if (i % 100 == 0) {
2071       // If we do land in this code, we do not want to execute so many nops
2072       // before reaching the end of the test (especially if tracing is activated).
2073       __ B(&fail);
2074     } else {
2075       __ Nop();
2076     }
2077   }
2078   __ B(&fail);
2079 
2080   __ Bind(&test_tbz);
2081   __ Tbz(x10, 7, &success_tbz);
2082   __ Bind(&test_cbz);
2083   __ Cbz(x10, &success_cbz);
2084   __ Bind(&test_bcond);
2085   __ Cmp(x10, 0);
2086   __ B(eq, &success_bcond);
2087 
2088   // For each out-of-range branch instruction, at least two instructions should
2089   // have been generated.
2090   CHECK_GE(7 * kInstructionSize, __ SizeOfCodeGeneratedSince(&test_tbz));
2091 
2092   __ Bind(&fail);
2093   __ Mov(x1, 0);
2094   __ Bind(&done);
2095 
2096   END();
2097 
2098   RUN();
2099 
2100   CHECK_EQUAL_64(0x7, x0);
2101   CHECK_EQUAL_64(0x1, x1);
2102 
2103   TEARDOWN();
2104 }
2105 
2106 
2107 TEST(far_branch_simple_veneer) {
2108   INIT_V8();
2109 
2110   // Test that the MacroAssembler correctly emits veneers for forward branches
2111   // to labels that are outside the immediate range of branch instructions.
2112   int max_range =
2113     std::max(Instruction::ImmBranchRange(TestBranchType),
2114              std::max(Instruction::ImmBranchRange(CompareBranchType),
2115                       Instruction::ImmBranchRange(CondBranchType)));
2116 
2117   SETUP_SIZE(max_range + 1000 * kInstructionSize);
2118 
2119   START();
2120 
2121   Label done, fail;
2122   Label test_tbz, test_cbz, test_bcond;
2123   Label success_tbz, success_cbz, success_bcond;
2124 
2125   __ Mov(x0, 0);
2126   __ Mov(x1, 1);
2127   __ Mov(x10, 0);
2128 
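  // The three branches below are forward branches to labels that are only
  // bound after the long block of filler code, so their targets cannot be
  // encoded directly. The MacroAssembler is expected to emit veneers (nearby
  // unconditional branches that relay to the real targets) while generating
  // the filler.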
2129   __ Bind(&test_tbz);
2130   __ Tbz(x10, 7, &success_tbz);
2131   __ Bind(&test_cbz);
2132   __ Cbz(x10, &success_cbz);
2133   __ Bind(&test_bcond);
2134   __ Cmp(x10, 0);
2135   __ B(eq, &success_bcond);
2136 
2137   // Generate enough code to overflow the immediate range of the three types of
2138   // branches above.
2139   for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2140     if (i % 100 == 0) {
2141       // If we do land in this code, we do not want to execute so many nops
2142       // before reaching the end of the test (especially if tracing is activated).
2143       // Also, the branches give the MacroAssembler the opportunity to emit the
2144       // veneers.
2145       __ B(&fail);
2146     } else {
2147       __ Nop();
2148     }
2149   }
2150   __ B(&fail);
2151 
2152   __ Bind(&success_tbz);
2153   __ Orr(x0, x0, 1 << 0);
2154   __ B(&test_cbz);
2155   __ Bind(&success_cbz);
2156   __ Orr(x0, x0, 1 << 1);
2157   __ B(&test_bcond);
2158   __ Bind(&success_bcond);
2159   __ Orr(x0, x0, 1 << 2);
2160 
2161   __ B(&done);
2162   __ Bind(&fail);
2163   __ Mov(x1, 0);
2164   __ Bind(&done);
2165 
2166   END();
2167 
2168   RUN();
2169 
2170   CHECK_EQUAL_64(0x7, x0);
2171   CHECK_EQUAL_64(0x1, x1);
2172 
2173   TEARDOWN();
2174 }
2175 
2176 
2177 TEST(far_branch_veneer_link_chain) {
2178   INIT_V8();
2179 
2180   // Test that the MacroAssembler correctly emits veneers for forward branches
2181   // that target out-of-range labels and are part of a chain of multiple
2182   // instructions jumping to the same label.
2183   //
2184   // We test three situations, each with a different type of instruction:
2185   // (1) When the branch is at the start of the chain, with tbz.
2186   // (2) When the branch is in the middle of the chain, with cbz.
2187   // (3) When the branch is at the end of the chain, with bcond.
2188   int max_range =
2189     std::max(Instruction::ImmBranchRange(TestBranchType),
2190              std::max(Instruction::ImmBranchRange(CompareBranchType),
2191                       Instruction::ImmBranchRange(CondBranchType)));
2192 
2193   SETUP_SIZE(max_range + 1000 * kInstructionSize);
2194 
2195   START();
2196 
2197   Label skip, fail, done;
2198   Label test_tbz, test_cbz, test_bcond;
2199   Label success_tbz, success_cbz, success_bcond;
2200 
2201   __ Mov(x0, 0);
2202   __ Mov(x1, 1);
2203   __ Mov(x10, 0);
2204 
2205   __ B(&skip);
2206   // Branches at the start of the chain for situations (2) and (3).
2207   __ B(&success_cbz);
2208   __ B(&success_bcond);
2209   __ Nop();
2210   __ B(&success_bcond);
2211   __ B(&success_cbz);
2212   __ Bind(&skip);
2213 
2214   __ Bind(&test_tbz);
2215   __ Tbz(x10, 7, &success_tbz);
2216   __ Bind(&test_cbz);
2217   __ Cbz(x10, &success_cbz);
2218   __ Bind(&test_bcond);
2219   __ Cmp(x10, 0);
2220   __ B(eq, &success_bcond);
2221 
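  // The label 'skip' has already been bound and used above; Unuse() below is
  // (presumably) what allows the same Label object to be reused for the
  // second skip region.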
2222   skip.Unuse();
2223   __ B(&skip);
2224   // Branches at the end of the chain for situations (1) and (2).
2225   __ B(&success_cbz);
2226   __ B(&success_tbz);
2227   __ Nop();
2228   __ B(&success_tbz);
2229   __ B(&success_cbz);
2230   __ Bind(&skip);
2231 
2232   // Generate enough code to overflow the immediate range of the three types of
2233   // branches above.
2234   for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
2235     if (i % 100 == 0) {
2236       // If we do land in this code, we do not want to execute so many nops
2237       // before reaching the end of the test (especially if tracing is activated).
2238       // Also, the branches give the MacroAssembler the opportunity to emit the
2239       // veneers.
2240       __ B(&fail);
2241     } else {
2242       __ Nop();
2243     }
2244   }
2245   __ B(&fail);
2246 
2247   __ Bind(&success_tbz);
2248   __ Orr(x0, x0, 1 << 0);
2249   __ B(&test_cbz);
2250   __ Bind(&success_cbz);
2251   __ Orr(x0, x0, 1 << 1);
2252   __ B(&test_bcond);
2253   __ Bind(&success_bcond);
2254   __ Orr(x0, x0, 1 << 2);
2255 
2256   __ B(&done);
2257   __ Bind(&fail);
2258   __ Mov(x1, 0);
2259   __ Bind(&done);
2260 
2261   END();
2262 
2263   RUN();
2264 
2265   CHECK_EQUAL_64(0x7, x0);
2266   CHECK_EQUAL_64(0x1, x1);
2267 
2268   TEARDOWN();
2269 }
2270 
2271 
2272 TEST(far_branch_veneer_broken_link_chain) {
2273   INIT_V8();
2274 
2275   // Check that the MacroAssembler correctly handles the situation where a branch
2276   // is removed from the link chain of a label and the two links on either side of
2277   // the removed branch cannot be linked together because they are out of range.
2278   //
2279   // We test with tbz because it has a small range.
2280   int max_range = Instruction::ImmBranchRange(TestBranchType);
2281   int inter_range = max_range / 2 + max_range / 10;
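  // inter_range is roughly 0.6 * max_range, so a single hop between adjacent
  // links in the chain is within tbz range, but spanning two such gaps (as
  // happens when a link in the middle is removed) is not.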
2282 
2283   SETUP_SIZE(3 * inter_range + 1000 * kInstructionSize);
2284 
2285   START();
2286 
2287   Label skip, fail, done;
2288   Label test_1, test_2, test_3;
2289   Label far_target;
2290 
2291   __ Mov(x0, 0);  // Indicates the origin of the branch.
2292   __ Mov(x1, 1);
2293   __ Mov(x10, 0);
2294 
2295   // First instruction in the label chain.
2296   __ Bind(&test_1);
2297   __ Mov(x0, 1);
2298   __ B(&far_target);
2299 
2300   for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2301     if (i % 100 == 0) {
2302       // Do not allow generating veneers. They should not be needed.
2303       __ b(&fail);
2304     } else {
2305       __ Nop();
2306     }
2307   }
2308 
2309   // Will need a veneer to reach the target.
2310   __ Bind(&test_2);
2311   __ Mov(x0, 2);
2312   __ Tbz(x10, 7, &far_target);
2313 
2314   for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2315     if (i % 100 == 0) {
2316       // Do not allow generating veneers. They should not be needed.
2317       __ b(&fail);
2318     } else {
2319       __ Nop();
2320     }
2321   }
2322 
2323   // Does not need a veneer to reach the target, but the initial branch
2324   // instruction is out of range.
2325   __ Bind(&test_3);
2326   __ Mov(x0, 3);
2327   __ Tbz(x10, 7, &far_target);
2328 
2329   for (unsigned i = 0; i < inter_range / kInstructionSize; ++i) {
2330     if (i % 100 == 0) {
2331       // Allow generating veneers.
2332       __ B(&fail);
2333     } else {
2334       __ Nop();
2335     }
2336   }
2337 
2338   __ B(&fail);
2339 
2340   __ Bind(&far_target);
2341   __ Cmp(x0, 1);
2342   __ B(eq, &test_2);
2343   __ Cmp(x0, 2);
2344   __ B(eq, &test_3);
2345 
2346   __ B(&done);
2347   __ Bind(&fail);
2348   __ Mov(x1, 0);
2349   __ Bind(&done);
2350 
2351   END();
2352 
2353   RUN();
2354 
2355   CHECK_EQUAL_64(0x3, x0);
2356   CHECK_EQUAL_64(0x1, x1);
2357 
2358   TEARDOWN();
2359 }
2360 
2361 
2362 TEST(branch_type) {
2363   INIT_V8();
2364 
2365   SETUP();
2366 
2367   Label fail, done;
2368 
2369   START();
2370   __ Mov(x0, 0x0);
2371   __ Mov(x10, 0x7);
2372   __ Mov(x11, 0x0);
2373 
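  // These B() overloads take a BranchType: the condition-code forms map to
  // b.cond, reg_zero/reg_not_zero to cbz/cbnz, and reg_bit_clear/reg_bit_set
  // to tbz/tbnz (this mapping is inferred from the operand forms used below).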
2374   // Test non-taken branches.
2375   __ Cmp(x10, 0x7);
2376   __ B(&fail, ne);
2377   __ B(&fail, never);
2378   __ B(&fail, reg_zero, x10);
2379   __ B(&fail, reg_not_zero, x11);
2380   __ B(&fail, reg_bit_clear, x10, 0);
2381   __ B(&fail, reg_bit_set, x10, 3);
2382 
2383   // Test taken branches.
2384   Label l1, l2, l3, l4, l5;
2385   __ Cmp(x10, 0x7);
2386   __ B(&l1, eq);
2387   __ B(&fail);
2388   __ Bind(&l1);
2389   __ B(&l2, always);
2390   __ B(&fail);
2391   __ Bind(&l2);
2392   __ B(&l3, reg_not_zero, x10);
2393   __ B(&fail);
2394   __ Bind(&l3);
2395   __ B(&l4, reg_bit_clear, x10, 15);
2396   __ B(&fail);
2397   __ Bind(&l4);
2398   __ B(&l5, reg_bit_set, x10, 1);
2399   __ B(&fail);
2400   __ Bind(&l5);
2401 
2402   __ B(&done);
2403 
2404   __ Bind(&fail);
2405   __ Mov(x0, 0x1);
2406 
2407   __ Bind(&done);
2408 
2409   END();
2410 
2411   RUN();
2412 
2413   CHECK_EQUAL_64(0x0, x0);
2414 
2415   TEARDOWN();
2416 }
2417 
2418 
2419 TEST(ldr_str_offset) {
2420   INIT_V8();
2421   SETUP();
2422 
2423   uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2424   uint64_t dst[5] = {0, 0, 0, 0, 0};
2425   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2426   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2427 
2428   START();
2429   __ Mov(x17, src_base);
2430   __ Mov(x18, dst_base);
2431   __ Ldr(w0, MemOperand(x17));
2432   __ Str(w0, MemOperand(x18));
2433   __ Ldr(w1, MemOperand(x17, 4));
2434   __ Str(w1, MemOperand(x18, 12));
2435   __ Ldr(x2, MemOperand(x17, 8));
2436   __ Str(x2, MemOperand(x18, 16));
2437   __ Ldrb(w3, MemOperand(x17, 1));
2438   __ Strb(w3, MemOperand(x18, 25));
2439   __ Ldrh(w4, MemOperand(x17, 2));
2440   __ Strh(w4, MemOperand(x18, 33));
2441   END();
2442 
2443   RUN();
2444 
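  // The dst[] expectations below assume a little-endian layout: the Strb of
  // 0x32 at byte offset 25 lands in byte 1 of dst[3], giving 0x3200, and the
  // Strh of 0x7654 at offset 33 lands in bytes 1-2 of dst[4], giving
  // 0x765400.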
2445   CHECK_EQUAL_64(0x76543210, x0);
2446   CHECK_EQUAL_64(0x76543210, dst[0]);
2447   CHECK_EQUAL_64(0xfedcba98, x1);
2448   CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2449   CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
2450   CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2451   CHECK_EQUAL_64(0x32, x3);
2452   CHECK_EQUAL_64(0x3200, dst[3]);
2453   CHECK_EQUAL_64(0x7654, x4);
2454   CHECK_EQUAL_64(0x765400, dst[4]);
2455   CHECK_EQUAL_64(src_base, x17);
2456   CHECK_EQUAL_64(dst_base, x18);
2457 
2458   TEARDOWN();
2459 }
2460 
2461 
2462 TEST(ldr_str_wide) {
2463   INIT_V8();
2464   SETUP();
2465 
2466   uint32_t src[8192];
2467   uint32_t dst[8192];
2468   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2469   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2470   memset(src, 0xaa, 8192 * sizeof(src[0]));
2471   memset(dst, 0xaa, 8192 * sizeof(dst[0]));
2472   src[0] = 0;
2473   src[6144] = 6144;
2474   src[8191] = 8191;
2475 
2476   START();
2477   __ Mov(x22, src_base);
2478   __ Mov(x23, dst_base);
2479   __ Mov(x24, src_base);
2480   __ Mov(x25, dst_base);
2481   __ Mov(x26, src_base);
2482   __ Mov(x27, dst_base);
2483 
2484   __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
2485   __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
2486   __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
2487   __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
2488   __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
2489   __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
2490   END();
2491 
2492   RUN();
2493 
2494   CHECK_EQUAL_32(8191, w0);
2495   CHECK_EQUAL_32(8191, dst[8191]);
2496   CHECK_EQUAL_64(src_base, x22);
2497   CHECK_EQUAL_64(dst_base, x23);
2498   CHECK_EQUAL_32(0, w1);
2499   CHECK_EQUAL_32(0, dst[0]);
2500   CHECK_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2501   CHECK_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2502   CHECK_EQUAL_32(6144, w2);
2503   CHECK_EQUAL_32(6144, dst[6144]);
2504   CHECK_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2505   CHECK_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2506 
2507   TEARDOWN();
2508 }
2509 
2510 
2511 TEST(ldr_str_preindex) {
2512   INIT_V8();
2513   SETUP();
2514 
2515   uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2516   uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2517   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2518   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2519 
2520   START();
2521   __ Mov(x17, src_base);
2522   __ Mov(x18, dst_base);
2523   __ Mov(x19, src_base);
2524   __ Mov(x20, dst_base);
2525   __ Mov(x21, src_base + 16);
2526   __ Mov(x22, dst_base + 40);
2527   __ Mov(x23, src_base);
2528   __ Mov(x24, dst_base);
2529   __ Mov(x25, src_base);
2530   __ Mov(x26, dst_base);
2531   __ Ldr(w0, MemOperand(x17, 4, PreIndex));
2532   __ Str(w0, MemOperand(x18, 12, PreIndex));
2533   __ Ldr(x1, MemOperand(x19, 8, PreIndex));
2534   __ Str(x1, MemOperand(x20, 16, PreIndex));
2535   __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2536   __ Str(w2, MemOperand(x22, -4, PreIndex));
2537   __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
2538   __ Strb(w3, MemOperand(x24, 25, PreIndex));
2539   __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
2540   __ Strh(w4, MemOperand(x26, 41, PreIndex));
2541   END();
2542 
2543   RUN();
2544 
2545   CHECK_EQUAL_64(0xfedcba98, x0);
2546   CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2547   CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
2548   CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2549   CHECK_EQUAL_64(0x01234567, x2);
2550   CHECK_EQUAL_64(0x0123456700000000UL, dst[4]);
2551   CHECK_EQUAL_64(0x32, x3);
2552   CHECK_EQUAL_64(0x3200, dst[3]);
2553   CHECK_EQUAL_64(0x9876, x4);
2554   CHECK_EQUAL_64(0x987600, dst[5]);
2555   CHECK_EQUAL_64(src_base + 4, x17);
2556   CHECK_EQUAL_64(dst_base + 12, x18);
2557   CHECK_EQUAL_64(src_base + 8, x19);
2558   CHECK_EQUAL_64(dst_base + 16, x20);
2559   CHECK_EQUAL_64(src_base + 12, x21);
2560   CHECK_EQUAL_64(dst_base + 36, x22);
2561   CHECK_EQUAL_64(src_base + 1, x23);
2562   CHECK_EQUAL_64(dst_base + 25, x24);
2563   CHECK_EQUAL_64(src_base + 3, x25);
2564   CHECK_EQUAL_64(dst_base + 41, x26);
2565 
2566   TEARDOWN();
2567 }
2568 
2569 
2570 TEST(ldr_str_postindex) {
2571   INIT_V8();
2572   SETUP();
2573 
2574   uint64_t src[2] = {0xfedcba9876543210UL, 0x0123456789abcdefUL};
2575   uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2576   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2577   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2578 
2579   START();
2580   __ Mov(x17, src_base + 4);
2581   __ Mov(x18, dst_base + 12);
2582   __ Mov(x19, src_base + 8);
2583   __ Mov(x20, dst_base + 16);
2584   __ Mov(x21, src_base + 8);
2585   __ Mov(x22, dst_base + 32);
2586   __ Mov(x23, src_base + 1);
2587   __ Mov(x24, dst_base + 25);
2588   __ Mov(x25, src_base + 3);
2589   __ Mov(x26, dst_base + 41);
2590   __ Ldr(w0, MemOperand(x17, 4, PostIndex));
2591   __ Str(w0, MemOperand(x18, 12, PostIndex));
2592   __ Ldr(x1, MemOperand(x19, 8, PostIndex));
2593   __ Str(x1, MemOperand(x20, 16, PostIndex));
2594   __ Ldr(x2, MemOperand(x21, -8, PostIndex));
2595   __ Str(x2, MemOperand(x22, -32, PostIndex));
2596   __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
2597   __ Strb(w3, MemOperand(x24, 5, PostIndex));
2598   __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
2599   __ Strh(w4, MemOperand(x26, -41, PostIndex));
2600   END();
2601 
2602   RUN();
2603 
2604   CHECK_EQUAL_64(0xfedcba98, x0);
2605   CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
2606   CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
2607   CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
2608   CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
2609   CHECK_EQUAL_64(0x0123456789abcdefUL, dst[4]);
2610   CHECK_EQUAL_64(0x32, x3);
2611   CHECK_EQUAL_64(0x3200, dst[3]);
2612   CHECK_EQUAL_64(0x9876, x4);
2613   CHECK_EQUAL_64(0x987600, dst[5]);
2614   CHECK_EQUAL_64(src_base + 8, x17);
2615   CHECK_EQUAL_64(dst_base + 24, x18);
2616   CHECK_EQUAL_64(src_base + 16, x19);
2617   CHECK_EQUAL_64(dst_base + 32, x20);
2618   CHECK_EQUAL_64(src_base, x21);
2619   CHECK_EQUAL_64(dst_base, x22);
2620   CHECK_EQUAL_64(src_base + 2, x23);
2621   CHECK_EQUAL_64(dst_base + 30, x24);
2622   CHECK_EQUAL_64(src_base, x25);
2623   CHECK_EQUAL_64(dst_base, x26);
2624 
2625   TEARDOWN();
2626 }
2627 
2628 
2629 TEST(load_signed) {
2630   INIT_V8();
2631   SETUP();
2632 
2633   uint32_t src[2] = {0x80008080, 0x7fff7f7f};
2634   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2635 
2636   START();
2637   __ Mov(x24, src_base);
2638   __ Ldrsb(w0, MemOperand(x24));
2639   __ Ldrsb(w1, MemOperand(x24, 4));
2640   __ Ldrsh(w2, MemOperand(x24));
2641   __ Ldrsh(w3, MemOperand(x24, 4));
2642   __ Ldrsb(x4, MemOperand(x24));
2643   __ Ldrsb(x5, MemOperand(x24, 4));
2644   __ Ldrsh(x6, MemOperand(x24));
2645   __ Ldrsh(x7, MemOperand(x24, 4));
2646   __ Ldrsw(x8, MemOperand(x24));
2647   __ Ldrsw(x9, MemOperand(x24, 4));
2648   END();
2649 
2650   RUN();
2651 
2652   CHECK_EQUAL_64(0xffffff80, x0);
2653   CHECK_EQUAL_64(0x0000007f, x1);
2654   CHECK_EQUAL_64(0xffff8080, x2);
2655   CHECK_EQUAL_64(0x00007f7f, x3);
2656   CHECK_EQUAL_64(0xffffffffffffff80UL, x4);
2657   CHECK_EQUAL_64(0x000000000000007fUL, x5);
2658   CHECK_EQUAL_64(0xffffffffffff8080UL, x6);
2659   CHECK_EQUAL_64(0x0000000000007f7fUL, x7);
2660   CHECK_EQUAL_64(0xffffffff80008080UL, x8);
2661   CHECK_EQUAL_64(0x000000007fff7f7fUL, x9);
2662 
2663   TEARDOWN();
2664 }
2665 
2666 
2667 TEST(load_store_regoffset) {
2668   INIT_V8();
2669   SETUP();
2670 
2671   uint32_t src[3] = {1, 2, 3};
2672   uint32_t dst[4] = {0, 0, 0, 0};
2673   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2674   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2675 
2676   START();
2677   __ Mov(x16, src_base);
2678   __ Mov(x17, dst_base);
2679   __ Mov(x18, src_base + 3 * sizeof(src[0]));
2680   __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
2681   __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
2682   __ Mov(x24, 0);
2683   __ Mov(x25, 4);
2684   __ Mov(x26, -4);
2685   __ Mov(x27, 0xfffffffc);  // 32-bit -4.
2686   __ Mov(x28, 0xfffffffe);  // 32-bit -2.
2687   __ Mov(x29, 0xffffffff);  // 32-bit -1.
2688 
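  // Register-offset addressing: MemOperand(base, Xm) adds the 64-bit register
  // directly, while MemOperand(base, Wm, SXTW, shift) sign-extends the 32-bit
  // register and shifts it left by 'shift'. For example, x28 = 0xfffffffe is
  // treated as -2, so MemOperand(x18, x28, SXTW, 2) addresses x18 - 8.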
2689   __ Ldr(w0, MemOperand(x16, x24));
2690   __ Ldr(x1, MemOperand(x16, x25));
2691   __ Ldr(w2, MemOperand(x18, x26));
2692   __ Ldr(w3, MemOperand(x18, x27, SXTW));
2693   __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
2694   __ Str(w0, MemOperand(x17, x24));
2695   __ Str(x1, MemOperand(x17, x25));
2696   __ Str(w2, MemOperand(x20, x29, SXTW, 2));
2697   END();
2698 
2699   RUN();
2700 
2701   CHECK_EQUAL_64(1, x0);
2702   CHECK_EQUAL_64(0x0000000300000002UL, x1);
2703   CHECK_EQUAL_64(3, x2);
2704   CHECK_EQUAL_64(3, x3);
2705   CHECK_EQUAL_64(2, x4);
2706   CHECK_EQUAL_32(1, dst[0]);
2707   CHECK_EQUAL_32(2, dst[1]);
2708   CHECK_EQUAL_32(3, dst[2]);
2709   CHECK_EQUAL_32(3, dst[3]);
2710 
2711   TEARDOWN();
2712 }
2713 
2714 
2715 TEST(load_store_float) {
2716   INIT_V8();
2717   SETUP();
2718 
2719   float src[3] = {1.0, 2.0, 3.0};
2720   float dst[3] = {0.0, 0.0, 0.0};
2721   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2722   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2723 
2724   START();
2725   __ Mov(x17, src_base);
2726   __ Mov(x18, dst_base);
2727   __ Mov(x19, src_base);
2728   __ Mov(x20, dst_base);
2729   __ Mov(x21, src_base);
2730   __ Mov(x22, dst_base);
2731   __ Ldr(s0, MemOperand(x17, sizeof(src[0])));
2732   __ Str(s0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2733   __ Ldr(s1, MemOperand(x19, sizeof(src[0]), PostIndex));
2734   __ Str(s1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2735   __ Ldr(s2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2736   __ Str(s2, MemOperand(x22, sizeof(dst[0])));
2737   END();
2738 
2739   RUN();
2740 
2741   CHECK_EQUAL_FP32(2.0, s0);
2742   CHECK_EQUAL_FP32(2.0, dst[0]);
2743   CHECK_EQUAL_FP32(1.0, s1);
2744   CHECK_EQUAL_FP32(1.0, dst[2]);
2745   CHECK_EQUAL_FP32(3.0, s2);
2746   CHECK_EQUAL_FP32(3.0, dst[1]);
2747   CHECK_EQUAL_64(src_base, x17);
2748   CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2749   CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
2750   CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2751   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2752   CHECK_EQUAL_64(dst_base, x22);
2753 
2754   TEARDOWN();
2755 }
2756 
2757 
2758 TEST(load_store_double) {
2759   INIT_V8();
2760   SETUP();
2761 
2762   double src[3] = {1.0, 2.0, 3.0};
2763   double dst[3] = {0.0, 0.0, 0.0};
2764   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2765   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2766 
2767   START();
2768   __ Mov(x17, src_base);
2769   __ Mov(x18, dst_base);
2770   __ Mov(x19, src_base);
2771   __ Mov(x20, dst_base);
2772   __ Mov(x21, src_base);
2773   __ Mov(x22, dst_base);
2774   __ Ldr(d0, MemOperand(x17, sizeof(src[0])));
2775   __ Str(d0, MemOperand(x18, sizeof(dst[0]), PostIndex));
2776   __ Ldr(d1, MemOperand(x19, sizeof(src[0]), PostIndex));
2777   __ Str(d1, MemOperand(x20, 2 * sizeof(dst[0]), PreIndex));
2778   __ Ldr(d2, MemOperand(x21, 2 * sizeof(src[0]), PreIndex));
2779   __ Str(d2, MemOperand(x22, sizeof(dst[0])));
2780   END();
2781 
2782   RUN();
2783 
2784   CHECK_EQUAL_FP64(2.0, d0);
2785   CHECK_EQUAL_FP64(2.0, dst[0]);
2786   CHECK_EQUAL_FP64(1.0, d1);
2787   CHECK_EQUAL_FP64(1.0, dst[2]);
2788   CHECK_EQUAL_FP64(3.0, d2);
2789   CHECK_EQUAL_FP64(3.0, dst[1]);
2790   CHECK_EQUAL_64(src_base, x17);
2791   CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
2792   CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
2793   CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
2794   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
2795   CHECK_EQUAL_64(dst_base, x22);
2796 
2797   TEARDOWN();
2798 }
2799 
2800 
2801 TEST(ldp_stp_float) {
2802   INIT_V8();
2803   SETUP();
2804 
2805   float src[2] = {1.0, 2.0};
2806   float dst[3] = {0.0, 0.0, 0.0};
2807   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2808   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2809 
2810   START();
2811   __ Mov(x16, src_base);
2812   __ Mov(x17, dst_base);
2813   __ Ldp(s31, s0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2814   __ Stp(s0, s31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2815   END();
2816 
2817   RUN();
2818 
2819   CHECK_EQUAL_FP32(1.0, s31);
2820   CHECK_EQUAL_FP32(2.0, s0);
2821   CHECK_EQUAL_FP32(0.0, dst[0]);
2822   CHECK_EQUAL_FP32(2.0, dst[1]);
2823   CHECK_EQUAL_FP32(1.0, dst[2]);
2824   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2825   CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2826 
2827   TEARDOWN();
2828 }
2829 
2830 
2831 TEST(ldp_stp_double) {
2832   INIT_V8();
2833   SETUP();
2834 
2835   double src[2] = {1.0, 2.0};
2836   double dst[3] = {0.0, 0.0, 0.0};
2837   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2838   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2839 
2840   START();
2841   __ Mov(x16, src_base);
2842   __ Mov(x17, dst_base);
2843   __ Ldp(d31, d0, MemOperand(x16, 2 * sizeof(src[0]), PostIndex));
2844   __ Stp(d0, d31, MemOperand(x17, sizeof(dst[1]), PreIndex));
2845   END();
2846 
2847   RUN();
2848 
2849   CHECK_EQUAL_FP64(1.0, d31);
2850   CHECK_EQUAL_FP64(2.0, d0);
2851   CHECK_EQUAL_FP64(0.0, dst[0]);
2852   CHECK_EQUAL_FP64(2.0, dst[1]);
2853   CHECK_EQUAL_FP64(1.0, dst[2]);
2854   CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
2855   CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
2856 
2857   TEARDOWN();
2858 }
2859 
2860 
2861 TEST(ldp_stp_offset) {
2862   INIT_V8();
2863   SETUP();
2864 
2865   uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2866                      0xffeeddccbbaa9988UL};
2867   uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2868   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2869   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2870 
2871   START();
2872   __ Mov(x16, src_base);
2873   __ Mov(x17, dst_base);
2874   __ Mov(x18, src_base + 24);
2875   __ Mov(x19, dst_base + 56);
2876   __ Ldp(w0, w1, MemOperand(x16));
2877   __ Ldp(w2, w3, MemOperand(x16, 4));
2878   __ Ldp(x4, x5, MemOperand(x16, 8));
2879   __ Ldp(w6, w7, MemOperand(x18, -12));
2880   __ Ldp(x8, x9, MemOperand(x18, -16));
2881   __ Stp(w0, w1, MemOperand(x17));
2882   __ Stp(w2, w3, MemOperand(x17, 8));
2883   __ Stp(x4, x5, MemOperand(x17, 16));
2884   __ Stp(w6, w7, MemOperand(x19, -24));
2885   __ Stp(x8, x9, MemOperand(x19, -16));
2886   END();
2887 
2888   RUN();
2889 
2890   CHECK_EQUAL_64(0x44556677, x0);
2891   CHECK_EQUAL_64(0x00112233, x1);
2892   CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
2893   CHECK_EQUAL_64(0x00112233, x2);
2894   CHECK_EQUAL_64(0xccddeeff, x3);
2895   CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
2896   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
2897   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
2898   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2899   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
2900   CHECK_EQUAL_64(0x8899aabb, x6);
2901   CHECK_EQUAL_64(0xbbaa9988, x7);
2902   CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
2903   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
2904   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
2905   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
2906   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
2907   CHECK_EQUAL_64(src_base, x16);
2908   CHECK_EQUAL_64(dst_base, x17);
2909   CHECK_EQUAL_64(src_base + 24, x18);
2910   CHECK_EQUAL_64(dst_base + 56, x19);
2911 
2912   TEARDOWN();
2913 }
2914 
2915 
2916 TEST(ldp_stp_offset_wide) {
2917   INIT_V8();
2918   SETUP();
2919 
2920   uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
2921                      0xffeeddccbbaa9988};
2922   uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
2923   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2924   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2925   // Move the base far enough away from the array that the offsets cannot be
2926   // encoded directly, forcing multiple instructions to be emitted.
2927   const int64_t base_offset = 1024;
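  // Ldp/Stp can only encode a small scaled immediate offset (a signed 7-bit
  // field, roughly +/-256 bytes for W pairs and +/-512 bytes for X pairs), so
  // a 1024-byte offset forces the address to be computed with extra
  // instructions.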
2928 
2929   START();
2930   __ Mov(x20, src_base - base_offset);
2931   __ Mov(x21, dst_base - base_offset);
2932   __ Mov(x18, src_base + base_offset + 24);
2933   __ Mov(x19, dst_base + base_offset + 56);
2934   __ Ldp(w0, w1, MemOperand(x20, base_offset));
2935   __ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
2936   __ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
2937   __ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
2938   __ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
2939   __ Stp(w0, w1, MemOperand(x21, base_offset));
2940   __ Stp(w2, w3, MemOperand(x21, base_offset + 8));
2941   __ Stp(x4, x5, MemOperand(x21, base_offset + 16));
2942   __ Stp(w6, w7, MemOperand(x19, -24 - base_offset));
2943   __ Stp(x8, x9, MemOperand(x19, -16 - base_offset));
2944   END();
2945 
2946   RUN();
2947 
2948   CHECK_EQUAL_64(0x44556677, x0);
2949   CHECK_EQUAL_64(0x00112233, x1);
2950   CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
2951   CHECK_EQUAL_64(0x00112233, x2);
2952   CHECK_EQUAL_64(0xccddeeff, x3);
2953   CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
2954   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
2955   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
2956   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
2957   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
2958   CHECK_EQUAL_64(0x8899aabb, x6);
2959   CHECK_EQUAL_64(0xbbaa9988, x7);
2960   CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
2961   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
2962   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
2963   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
2964   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
2965   CHECK_EQUAL_64(src_base - base_offset, x20);
2966   CHECK_EQUAL_64(dst_base - base_offset, x21);
2967   CHECK_EQUAL_64(src_base + base_offset + 24, x18);
2968   CHECK_EQUAL_64(dst_base + base_offset + 56, x19);
2969 
2970   TEARDOWN();
2971 }
2972 
2973 
2974 TEST(ldp_stp_preindex) {
2975   INIT_V8();
2976   SETUP();
2977 
2978   uint64_t src[3] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
2979                      0xffeeddccbbaa9988UL};
2980   uint64_t dst[5] = {0, 0, 0, 0, 0};
2981   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2982   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2983 
2984   START();
2985   __ Mov(x16, src_base);
2986   __ Mov(x17, dst_base);
2987   __ Mov(x18, dst_base + 16);
2988   __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
2989   __ Mov(x19, x16);
2990   __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
2991   __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
2992   __ Mov(x20, x17);
2993   __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
2994   __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
2995   __ Mov(x21, x16);
2996   __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
2997   __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
2998   __ Mov(x22, x18);
2999   __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
3000   END();
3001 
3002   RUN();
3003 
3004   CHECK_EQUAL_64(0x00112233, x0);
3005   CHECK_EQUAL_64(0xccddeeff, x1);
3006   CHECK_EQUAL_64(0x44556677, x2);
3007   CHECK_EQUAL_64(0x00112233, x3);
3008   CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
3009   CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
3010   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
3011   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
3012   CHECK_EQUAL_64(0x0011223344556677UL, x6);
3013   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
3014   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
3015   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
3016   CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
3017   CHECK_EQUAL_64(src_base, x16);
3018   CHECK_EQUAL_64(dst_base, x17);
3019   CHECK_EQUAL_64(dst_base + 16, x18);
3020   CHECK_EQUAL_64(src_base + 4, x19);
3021   CHECK_EQUAL_64(dst_base + 4, x20);
3022   CHECK_EQUAL_64(src_base + 8, x21);
3023   CHECK_EQUAL_64(dst_base + 24, x22);
3024 
3025   TEARDOWN();
3026 }
3027 
3028 
3029 TEST(ldp_stp_preindex_wide) {
3030   INIT_V8();
3031   SETUP();
3032 
3033   uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
3034                      0xffeeddccbbaa9988};
3035   uint64_t dst[5] = {0, 0, 0, 0, 0};
3036   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3037   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3038   // Move the base far enough away from the array that the offsets cannot be
3039   // encoded directly, forcing multiple instructions to be emitted.
3040   const int64_t base_offset = 1024;
3041 
3042   START();
3043   __ Mov(x24, src_base - base_offset);
3044   __ Mov(x25, dst_base + base_offset);
3045   __ Mov(x18, dst_base + base_offset + 16);
3046   __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
3047   __ Mov(x19, x24);
3048   __ Mov(x24, src_base - base_offset + 4);
3049   __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PreIndex));
3050   __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PreIndex));
3051   __ Mov(x20, x25);
3052   __ Mov(x25, dst_base + base_offset + 4);
3053   __ Mov(x24, src_base - base_offset);
3054   __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PreIndex));
3055   __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PreIndex));
3056   __ Mov(x21, x24);
3057   __ Mov(x24, src_base - base_offset + 8);
3058   __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
3059   __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
3060   __ Mov(x22, x18);
3061   __ Mov(x18, dst_base + base_offset + 16 + 8);
3062   __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
3063   END();
3064 
3065   RUN();
3066 
3067   CHECK_EQUAL_64(0x00112233, x0);
3068   CHECK_EQUAL_64(0xccddeeff, x1);
3069   CHECK_EQUAL_64(0x44556677, x2);
3070   CHECK_EQUAL_64(0x00112233, x3);
3071   CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
3072   CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
3073   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
3074   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
3075   CHECK_EQUAL_64(0x0011223344556677UL, x6);
3076   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
3077   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
3078   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
3079   CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
3080   CHECK_EQUAL_64(src_base, x24);
3081   CHECK_EQUAL_64(dst_base, x25);
3082   CHECK_EQUAL_64(dst_base + 16, x18);
3083   CHECK_EQUAL_64(src_base + 4, x19);
3084   CHECK_EQUAL_64(dst_base + 4, x20);
3085   CHECK_EQUAL_64(src_base + 8, x21);
3086   CHECK_EQUAL_64(dst_base + 24, x22);
3087 
3088   TEARDOWN();
3089 }
3090 
3091 
3092 TEST(ldp_stp_postindex) {
3093   INIT_V8();
3094   SETUP();
3095 
3096   uint64_t src[4] = {0x0011223344556677UL, 0x8899aabbccddeeffUL,
3097                      0xffeeddccbbaa9988UL, 0x7766554433221100UL};
3098   uint64_t dst[5] = {0, 0, 0, 0, 0};
3099   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3100   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3101 
3102   START();
3103   __ Mov(x16, src_base);
3104   __ Mov(x17, dst_base);
3105   __ Mov(x18, dst_base + 16);
3106   __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
3107   __ Mov(x19, x16);
3108   __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
3109   __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
3110   __ Mov(x20, x17);
3111   __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
3112   __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
3113   __ Mov(x21, x16);
3114   __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
3115   __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
3116   __ Mov(x22, x18);
3117   __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
3118   END();
3119 
3120   RUN();
3121 
3122   CHECK_EQUAL_64(0x44556677, x0);
3123   CHECK_EQUAL_64(0x00112233, x1);
3124   CHECK_EQUAL_64(0x00112233, x2);
3125   CHECK_EQUAL_64(0xccddeeff, x3);
3126   CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
3127   CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
3128   CHECK_EQUAL_64(0x0011223344556677UL, x4);
3129   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
3130   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
3131   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
3132   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
3133   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
3134   CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
3135   CHECK_EQUAL_64(src_base, x16);
3136   CHECK_EQUAL_64(dst_base, x17);
3137   CHECK_EQUAL_64(dst_base + 16, x18);
3138   CHECK_EQUAL_64(src_base + 4, x19);
3139   CHECK_EQUAL_64(dst_base + 4, x20);
3140   CHECK_EQUAL_64(src_base + 8, x21);
3141   CHECK_EQUAL_64(dst_base + 24, x22);
3142 
3143   TEARDOWN();
3144 }
3145 
3146 
3147 TEST(ldp_stp_postindex_wide) {
3148   INIT_V8();
3149   SETUP();
3150 
3151   uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff, 0xffeeddccbbaa9988,
3152                      0x7766554433221100};
3153   uint64_t dst[5] = {0, 0, 0, 0, 0};
3154   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3155   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3156   // Move the base far enough away from the array that the offsets cannot be
3157   // encoded directly, forcing multiple instructions to be emitted.
3158   const int64_t base_offset = 1024;
3159 
3160   START();
3161   __ Mov(x24, src_base);
3162   __ Mov(x25, dst_base);
3163   __ Mov(x18, dst_base + 16);
3164   __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
3165   __ Mov(x19, x24);
3166   __ Sub(x24, x24, base_offset);
3167   __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PostIndex));
3168   __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PostIndex));
3169   __ Mov(x20, x25);
3170   __ Sub(x24, x24, base_offset);
3171   __ Add(x25, x25, base_offset);
3172   __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PostIndex));
3173   __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PostIndex));
3174   __ Mov(x21, x24);
3175   __ Sub(x24, x24, base_offset);
3176   __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
3177   __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
3178   __ Mov(x22, x18);
3179   __ Add(x18, x18, base_offset);
3180   __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
3181   END();
3182 
3183   RUN();
3184 
3185   CHECK_EQUAL_64(0x44556677, x0);
3186   CHECK_EQUAL_64(0x00112233, x1);
3187   CHECK_EQUAL_64(0x00112233, x2);
3188   CHECK_EQUAL_64(0xccddeeff, x3);
3189   CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
3190   CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
3191   CHECK_EQUAL_64(0x0011223344556677UL, x4);
3192   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
3193   CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
3194   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
3195   CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
3196   CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
3197   CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
3198   CHECK_EQUAL_64(src_base + base_offset, x24);
3199   CHECK_EQUAL_64(dst_base - base_offset, x25);
3200   CHECK_EQUAL_64(dst_base - base_offset + 16, x18);
3201   CHECK_EQUAL_64(src_base + base_offset + 4, x19);
3202   CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
3203   CHECK_EQUAL_64(src_base + base_offset + 8, x21);
3204   CHECK_EQUAL_64(dst_base - base_offset + 24, x22);
3205 
3206   TEARDOWN();
3207 }
3208 
3209 
3210 TEST(ldp_sign_extend) {
3211   INIT_V8();
3212   SETUP();
3213 
3214   uint32_t src[2] = {0x80000000, 0x7fffffff};
3215   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3216 
3217   START();
3218   __ Mov(x24, src_base);
3219   __ Ldpsw(x0, x1, MemOperand(x24));
3220   END();
3221 
3222   RUN();
3223 
3224   CHECK_EQUAL_64(0xffffffff80000000UL, x0);
3225   CHECK_EQUAL_64(0x000000007fffffffUL, x1);
3226 
3227   TEARDOWN();
3228 }
3229 
3230 
3231 TEST(ldur_stur) {
3232   INIT_V8();
3233   SETUP();
3234 
3235   int64_t src[2] = {0x0123456789abcdefUL, 0x0123456789abcdefUL};
3236   int64_t dst[5] = {0, 0, 0, 0, 0};
3237   uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3238   uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3239 
3240   START();
3241   __ Mov(x17, src_base);
3242   __ Mov(x18, dst_base);
3243   __ Mov(x19, src_base + 16);
3244   __ Mov(x20, dst_base + 32);
3245   __ Mov(x21, dst_base + 40);
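  // The byte offsets used below (1, 3, 9, ...) are not multiples of the
  // access size, so they cannot be encoded as scaled unsigned offsets; the
  // assembler should fall back to the unscaled ldur/stur forms, which take a
  // signed 9-bit byte offset.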
3246   __ Ldr(w0, MemOperand(x17, 1));
3247   __ Str(w0, MemOperand(x18, 2));
3248   __ Ldr(x1, MemOperand(x17, 3));
3249   __ Str(x1, MemOperand(x18, 9));
3250   __ Ldr(w2, MemOperand(x19, -9));
3251   __ Str(w2, MemOperand(x20, -5));
3252   __ Ldrb(w3, MemOperand(x19, -1));
3253   __ Strb(w3, MemOperand(x21, -1));
3254   END();
3255 
3256   RUN();
3257 
3258   CHECK_EQUAL_64(0x6789abcd, x0);
3259   CHECK_EQUAL_64(0x6789abcd0000L, dst[0]);
3260   CHECK_EQUAL_64(0xabcdef0123456789L, x1);
3261   CHECK_EQUAL_64(0xcdef012345678900L, dst[1]);
3262   CHECK_EQUAL_64(0x000000ab, dst[2]);
3263   CHECK_EQUAL_64(0xabcdef01, x2);
3264   CHECK_EQUAL_64(0x00abcdef01000000L, dst[3]);
3265   CHECK_EQUAL_64(0x00000001, x3);
3266   CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
3267   CHECK_EQUAL_64(src_base, x17);
3268   CHECK_EQUAL_64(dst_base, x18);
3269   CHECK_EQUAL_64(src_base + 16, x19);
3270   CHECK_EQUAL_64(dst_base + 32, x20);
3271 
3272   TEARDOWN();
3273 }
3274 
3275 
3276 #if 0  // TODO(all) enable.
3277 // TODO(rodolph): Adapt w16 Literal tests for RelocInfo.
3278 TEST(ldr_literal) {
3279   INIT_V8();
3280   SETUP();
3281 
3282   START();
3283   __ Ldr(x2, 0x1234567890abcdefUL);
3284   __ Ldr(w3, 0xfedcba09);
3285   __ Ldr(d13, 1.234);
3286   __ Ldr(s25, 2.5);
3287   END();
3288 
3289   RUN();
3290 
3291   CHECK_EQUAL_64(0x1234567890abcdefUL, x2);
3292   CHECK_EQUAL_64(0xfedcba09, x3);
3293   CHECK_EQUAL_FP64(1.234, d13);
3294   CHECK_EQUAL_FP32(2.5, s25);
3295 
3296   TEARDOWN();
3297 }
3298 
3299 
3300 static void LdrLiteralRangeHelper(ptrdiff_t range_,
3301                                   LiteralPoolEmitOption option,
3302                                   bool expect_dump) {
3303   CHECK(range_ > 0);
3304   SETUP_SIZE(range_ + 1024);
3305 
3306   Label label_1, label_2;
3307 
3308   size_t range = static_cast<size_t>(range_);
3309   size_t code_size = 0;
3310   size_t pool_guard_size;
3311 
3312   if (option == NoJumpRequired) {
3313     // Space for an explicit branch.
3314     pool_guard_size = sizeof(Instr);
3315   } else {
3316     pool_guard_size = 0;
3317   }
3318 
3319   START();
3320   // Force a pool dump so the pool starts off empty.
3321   __ EmitLiteralPool(JumpRequired);
3322   CHECK_LITERAL_POOL_SIZE(0);
3323 
3324   __ Ldr(x0, 0x1234567890abcdefUL);
3325   __ Ldr(w1, 0xfedcba09);
3326   __ Ldr(d0, 1.234);
3327   __ Ldr(s1, 2.5);
3328   CHECK_LITERAL_POOL_SIZE(4);
3329 
3330   code_size += 4 * sizeof(Instr);
3331 
3332   // Check that the requested range (allowing space for a branch over the pool)
3333   // can be handled by this test.
3334   CHECK((code_size + pool_guard_size) <= range);
3335 
3336   // Emit NOPs up to 'range', leaving space for the pool guard.
3337   while ((code_size + pool_guard_size) < range) {
3338     __ Nop();
3339     code_size += sizeof(Instr);
3340   }
3341 
3342   // Emit the guard sequence before the literal pool.
3343   if (option == NoJumpRequired) {
3344     __ B(&label_1);
3345     code_size += sizeof(Instr);
3346   }
3347 
3348   CHECK(code_size == range);
3349   CHECK_LITERAL_POOL_SIZE(4);
3350 
3351   // Possibly generate a literal pool.
3352   __ CheckLiteralPool(option);
3353   __ Bind(&label_1);
3354   if (expect_dump) {
3355     CHECK_LITERAL_POOL_SIZE(0);
3356   } else {
3357     CHECK_LITERAL_POOL_SIZE(4);
3358   }
3359 
3360   // Force a pool flush to check that a second pool functions correctly.
3361   __ EmitLiteralPool(JumpRequired);
3362   CHECK_LITERAL_POOL_SIZE(0);
3363 
3364   // These loads should be after the pool (and will require a new one).
3365   __ Ldr(x4, 0x34567890abcdef12UL);
3366   __ Ldr(w5, 0xdcba09fe);
3367   __ Ldr(d4, 123.4);
3368   __ Ldr(s5, 250.0);
3369   CHECK_LITERAL_POOL_SIZE(4);
3370   END();
3371 
3372   RUN();
3373 
3374   // Check that the literals loaded correctly.
3375   CHECK_EQUAL_64(0x1234567890abcdefUL, x0);
3376   CHECK_EQUAL_64(0xfedcba09, x1);
3377   CHECK_EQUAL_FP64(1.234, d0);
3378   CHECK_EQUAL_FP32(2.5, s1);
3379   CHECK_EQUAL_64(0x34567890abcdef12UL, x4);
3380   CHECK_EQUAL_64(0xdcba09fe, x5);
3381   CHECK_EQUAL_FP64(123.4, d4);
3382   CHECK_EQUAL_FP32(250.0, s5);
3383 
3384   TEARDOWN();
3385 }
3386 
3387 
3388 TEST(ldr_literal_range_1) {
3389   INIT_V8();
3390   LdrLiteralRangeHelper(kRecommendedLiteralPoolRange,
3391                         NoJumpRequired,
3392                         true);
3393 }
3394 
3395 
3396 TEST(ldr_literal_range_2) {
3397   INIT_V8();
3398   LdrLiteralRangeHelper(kRecommendedLiteralPoolRange-sizeof(Instr),
3399                         NoJumpRequired,
3400                         false);
3401 }
3402 
3403 
3404 TEST(ldr_literal_range_3) {
3405   INIT_V8();
3406   LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange,
3407                         JumpRequired,
3408                         true);
3409 }
3410 
3411 
3412 TEST(ldr_literal_range_4) {
3413   INIT_V8();
3414   LdrLiteralRangeHelper(2 * kRecommendedLiteralPoolRange-sizeof(Instr),
3415                         JumpRequired,
3416                         false);
3417 }
3418 
3419 
3420 TEST(ldr_literal_range_5) {
3421   INIT_V8();
3422   LdrLiteralRangeHelper(kLiteralPoolCheckInterval,
3423                         JumpRequired,
3424                         false);
3425 }
3426 
3427 
3428 TEST(ldr_literal_range_6) {
3429   INIT_V8();
3430   LdrLiteralRangeHelper(kLiteralPoolCheckInterval-sizeof(Instr),
3431                         JumpRequired,
3432                         false);
3433 }
3434 #endif
3435 
3436 TEST(add_sub_imm) {
3437   INIT_V8();
3438   SETUP();
3439 
3440   START();
3441   __ Mov(x0, 0x0);
3442   __ Mov(x1, 0x1111);
3443   __ Mov(x2, 0xffffffffffffffffL);
3444   __ Mov(x3, 0x8000000000000000L);
3445 
3446   __ Add(x10, x0, Operand(0x123));
3447   __ Add(x11, x1, Operand(0x122000));
3448   __ Add(x12, x0, Operand(0xabc << 12));
3449   __ Add(x13, x2, Operand(1));
3450 
3451   __ Add(w14, w0, Operand(0x123));
3452   __ Add(w15, w1, Operand(0x122000));
3453   __ Add(w16, w0, Operand(0xabc << 12));
3454   __ Add(w17, w2, Operand(1));
3455 
3456   __ Sub(x20, x0, Operand(0x1));
3457   __ Sub(x21, x1, Operand(0x111));
3458   __ Sub(x22, x1, Operand(0x1 << 12));
3459   __ Sub(x23, x3, Operand(1));
3460 
3461   __ Sub(w24, w0, Operand(0x1));
3462   __ Sub(w25, w1, Operand(0x111));
3463   __ Sub(w26, w1, Operand(0x1 << 12));
3464   __ Sub(w27, w3, Operand(1));
3465   END();
3466 
3467   RUN();
3468 
3469   CHECK_EQUAL_64(0x123, x10);
3470   CHECK_EQUAL_64(0x123111, x11);
3471   CHECK_EQUAL_64(0xabc000, x12);
3472   CHECK_EQUAL_64(0x0, x13);
3473 
3474   CHECK_EQUAL_32(0x123, w14);
3475   CHECK_EQUAL_32(0x123111, w15);
3476   CHECK_EQUAL_32(0xabc000, w16);
3477   CHECK_EQUAL_32(0x0, w17);
3478 
3479   CHECK_EQUAL_64(0xffffffffffffffffL, x20);
3480   CHECK_EQUAL_64(0x1000, x21);
3481   CHECK_EQUAL_64(0x111, x22);
3482   CHECK_EQUAL_64(0x7fffffffffffffffL, x23);
3483 
3484   CHECK_EQUAL_32(0xffffffff, w24);
3485   CHECK_EQUAL_32(0x1000, w25);
3486   CHECK_EQUAL_32(0x111, w26);
3487   CHECK_EQUAL_32(0xffffffff, w27);
3488 
3489   TEARDOWN();
3490 }
3491 
3492 
3493 TEST(add_sub_wide_imm) {
3494   INIT_V8();
3495   SETUP();
3496 
3497   START();
3498   __ Mov(x0, 0x0);
3499   __ Mov(x1, 0x1);
3500 
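  // These immediates do not fit the 12-bit (optionally LSL #12) add/sub
  // immediate encoding, so the MacroAssembler has to materialize them in a
  // scratch register and then use a register-register add or sub.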
3501   __ Add(x10, x0, Operand(0x1234567890abcdefUL));
3502   __ Add(x11, x1, Operand(0xffffffff));
3503 
3504   __ Add(w12, w0, Operand(0x12345678));
3505   __ Add(w13, w1, Operand(0xffffffff));
3506 
3507   __ Add(w18, w0, Operand(kWMinInt));
3508   __ Sub(w19, w0, Operand(kWMinInt));
3509 
3510   __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
3511   __ Sub(w21, w0, Operand(0x12345678));
3512   END();
3513 
3514   RUN();
3515 
3516   CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
3517   CHECK_EQUAL_64(0x100000000UL, x11);
3518 
3519   CHECK_EQUAL_32(0x12345678, w12);
3520   CHECK_EQUAL_64(0x0, x13);
3521 
3522   CHECK_EQUAL_32(kWMinInt, w18);
3523   CHECK_EQUAL_32(kWMinInt, w19);
3524 
3525   CHECK_EQUAL_64(-0x1234567890abcdefUL, x20);
3526   CHECK_EQUAL_32(-0x12345678, w21);
3527 
3528   TEARDOWN();
3529 }
3530 
3531 
3532 TEST(add_sub_shifted) {
3533   INIT_V8();
3534   SETUP();
3535 
3536   START();
3537   __ Mov(x0, 0);
3538   __ Mov(x1, 0x0123456789abcdefL);
3539   __ Mov(x2, 0xfedcba9876543210L);
3540   __ Mov(x3, 0xffffffffffffffffL);
3541 
3542   __ Add(x10, x1, Operand(x2));
3543   __ Add(x11, x0, Operand(x1, LSL, 8));
3544   __ Add(x12, x0, Operand(x1, LSR, 8));
3545   __ Add(x13, x0, Operand(x1, ASR, 8));
3546   __ Add(x14, x0, Operand(x2, ASR, 8));
3547   __ Add(w15, w0, Operand(w1, ASR, 8));
3548   __ Add(w18, w3, Operand(w1, ROR, 8));
3549   __ Add(x19, x3, Operand(x1, ROR, 8));
3550 
3551   __ Sub(x20, x3, Operand(x2));
3552   __ Sub(x21, x3, Operand(x1, LSL, 8));
3553   __ Sub(x22, x3, Operand(x1, LSR, 8));
3554   __ Sub(x23, x3, Operand(x1, ASR, 8));
3555   __ Sub(x24, x3, Operand(x2, ASR, 8));
3556   __ Sub(w25, w3, Operand(w1, ASR, 8));
3557   __ Sub(w26, w3, Operand(w1, ROR, 8));
3558   __ Sub(x27, x3, Operand(x1, ROR, 8));
3559   END();
3560 
3561   RUN();
3562 
3563   CHECK_EQUAL_64(0xffffffffffffffffL, x10);
3564   CHECK_EQUAL_64(0x23456789abcdef00L, x11);
3565   CHECK_EQUAL_64(0x000123456789abcdL, x12);
3566   CHECK_EQUAL_64(0x000123456789abcdL, x13);
3567   CHECK_EQUAL_64(0xfffedcba98765432L, x14);
3568   CHECK_EQUAL_64(0xff89abcd, x15);
3569   CHECK_EQUAL_64(0xef89abcc, x18);
3570   CHECK_EQUAL_64(0xef0123456789abccL, x19);
3571 
3572   CHECK_EQUAL_64(0x0123456789abcdefL, x20);
3573   CHECK_EQUAL_64(0xdcba9876543210ffL, x21);
3574   CHECK_EQUAL_64(0xfffedcba98765432L, x22);
3575   CHECK_EQUAL_64(0xfffedcba98765432L, x23);
3576   CHECK_EQUAL_64(0x000123456789abcdL, x24);
3577   CHECK_EQUAL_64(0x00765432, x25);
3578   CHECK_EQUAL_64(0x10765432, x26);
3579   CHECK_EQUAL_64(0x10fedcba98765432L, x27);
3580 
3581   TEARDOWN();
3582 }
3583 
3584 
3585 TEST(add_sub_extended) {
3586   INIT_V8();
3587   SETUP();
3588 
3589   START();
3590   __ Mov(x0, 0);
3591   __ Mov(x1, 0x0123456789abcdefL);
3592   __ Mov(x2, 0xfedcba9876543210L);
3593   __ Mov(w3, 0x80);
3594 
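  // Extended-register operands: Operand(reg, UXTB, s) means (reg & 0xff) << s,
  // UXTH masks to 16 bits, UXTW/SXTW to 32 bits, and the SXT* forms
  // sign-extend before the optional left shift (0 to 4).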
3595   __ Add(x10, x0, Operand(x1, UXTB, 0));
3596   __ Add(x11, x0, Operand(x1, UXTB, 1));
3597   __ Add(x12, x0, Operand(x1, UXTH, 2));
3598   __ Add(x13, x0, Operand(x1, UXTW, 4));
3599 
3600   __ Add(x14, x0, Operand(x1, SXTB, 0));
3601   __ Add(x15, x0, Operand(x1, SXTB, 1));
3602   __ Add(x16, x0, Operand(x1, SXTH, 2));
3603   __ Add(x17, x0, Operand(x1, SXTW, 3));
3604   __ Add(x18, x0, Operand(x2, SXTB, 0));
3605   __ Add(x19, x0, Operand(x2, SXTB, 1));
3606   __ Add(x20, x0, Operand(x2, SXTH, 2));
3607   __ Add(x21, x0, Operand(x2, SXTW, 3));
3608 
3609   __ Add(x22, x1, Operand(x2, SXTB, 1));
3610   __ Sub(x23, x1, Operand(x2, SXTB, 1));
3611 
3612   __ Add(w24, w1, Operand(w2, UXTB, 2));
3613   __ Add(w25, w0, Operand(w1, SXTB, 0));
3614   __ Add(w26, w0, Operand(w1, SXTB, 1));
3615   __ Add(w27, w2, Operand(w1, SXTW, 3));
3616 
3617   __ Add(w28, w0, Operand(w1, SXTW, 3));
3618   __ Add(x29, x0, Operand(w1, SXTW, 3));
3619 
3620   __ Sub(x30, x0, Operand(w3, SXTB, 1));
3621   END();
3622 
3623   RUN();
3624 
3625   CHECK_EQUAL_64(0xefL, x10);
3626   CHECK_EQUAL_64(0x1deL, x11);
3627   CHECK_EQUAL_64(0x337bcL, x12);
3628   CHECK_EQUAL_64(0x89abcdef0L, x13);
3629 
3630   CHECK_EQUAL_64(0xffffffffffffffefL, x14);
3631   CHECK_EQUAL_64(0xffffffffffffffdeL, x15);
3632   CHECK_EQUAL_64(0xffffffffffff37bcL, x16);
3633   CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x17);
3634   CHECK_EQUAL_64(0x10L, x18);
3635   CHECK_EQUAL_64(0x20L, x19);
3636   CHECK_EQUAL_64(0xc840L, x20);
3637   CHECK_EQUAL_64(0x3b2a19080L, x21);
3638 
3639   CHECK_EQUAL_64(0x0123456789abce0fL, x22);
3640   CHECK_EQUAL_64(0x0123456789abcdcfL, x23);
3641 
3642   CHECK_EQUAL_32(0x89abce2f, w24);
3643   CHECK_EQUAL_32(0xffffffef, w25);
3644   CHECK_EQUAL_32(0xffffffde, w26);
3645   CHECK_EQUAL_32(0xc3b2a188, w27);
3646 
3647   CHECK_EQUAL_32(0x4d5e6f78, w28);
3648   CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x29);
3649 
3650   CHECK_EQUAL_64(256, x30);
3651 
3652   TEARDOWN();
3653 }
3654 
3655 
3656 TEST(add_sub_negative) {
3657   INIT_V8();
3658   SETUP();
3659 
3660   START();
3661   __ Mov(x0, 0);
3662   __ Mov(x1, 4687);
3663   __ Mov(x2, 0x1122334455667788);
3664   __ Mov(w3, 0x11223344);
3665   __ Mov(w4, 400000);
3666 
3667   __ Add(x10, x0, -42);
3668   __ Add(x11, x1, -687);
3669   __ Add(x12, x2, -0x88);
3670 
3671   __ Sub(x13, x0, -600);
3672   __ Sub(x14, x1, -313);
3673   __ Sub(x15, x2, -0x555);
3674 
3675   __ Add(w19, w3, -0x344);
3676   __ Add(w20, w4, -2000);
3677 
3678   __ Sub(w21, w3, -0xbc);
3679   __ Sub(w22, w4, -2000);
3680   END();
3681 
3682   RUN();
3683 
3684   CHECK_EQUAL_64(-42, x10);
3685   CHECK_EQUAL_64(4000, x11);
3686   CHECK_EQUAL_64(0x1122334455667700, x12);
3687 
3688   CHECK_EQUAL_64(600, x13);
3689   CHECK_EQUAL_64(5000, x14);
3690   CHECK_EQUAL_64(0x1122334455667cdd, x15);
3691 
3692   CHECK_EQUAL_32(0x11223000, w19);
3693   CHECK_EQUAL_32(398000, w20);
3694 
3695   CHECK_EQUAL_32(0x11223400, w21);
3696   CHECK_EQUAL_32(402000, w22);
3697 
3698   TEARDOWN();
3699 }
3700 
3701 
3702 TEST(add_sub_zero) {
3703   INIT_V8();
3704   SETUP();
3705 
3706   START();
3707   __ Mov(x0, 0);
3708   __ Mov(x1, 0);
3709   __ Mov(x2, 0);
3710 
3711   Label blob1;
3712   __ Bind(&blob1);
3713   __ Add(x0, x0, 0);
3714   __ Sub(x1, x1, 0);
3715   __ Sub(x2, x2, xzr);
3716   CHECK_EQ(0u, __ SizeOfCodeGeneratedSince(&blob1));
3717 
3718   Label blob2;
3719   __ Bind(&blob2);
3720   __ Add(w3, w3, 0);
3721   CHECK_NE(0u, __ SizeOfCodeGeneratedSince(&blob2));
3722 
3723   Label blob3;
3724   __ Bind(&blob3);
3725   __ Sub(w3, w3, wzr);
3726   CHECK_NE(0u, __ SizeOfCodeGeneratedSince(&blob3));
3727 
3728   END();
3729 
3730   RUN();
3731 
3732   CHECK_EQUAL_64(0, x0);
3733   CHECK_EQUAL_64(0, x1);
3734   CHECK_EQUAL_64(0, x2);
3735 
3736   TEARDOWN();
3737 }
3738 
3739 
3740 TEST(claim_drop_zero) {
3741   INIT_V8();
3742   SETUP();
3743 
3744   START();
3745 
3746   Label start;
3747   __ Bind(&start);
3748   __ Claim(0);
3749   __ Drop(0);
3750   __ Claim(xzr, 8);
3751   __ Drop(xzr, 8);
3752   __ Claim(xzr, 0);
3753   __ Drop(xzr, 0);
3754   __ Claim(x7, 0);
3755   __ Drop(x7, 0);
3756   __ ClaimBySMI(xzr, 8);
3757   __ DropBySMI(xzr, 8);
3758   __ ClaimBySMI(xzr, 0);
3759   __ DropBySMI(xzr, 0);
3760   CHECK_EQ(0u, __ SizeOfCodeGeneratedSince(&start));
3761 
3762   END();
3763 
3764   RUN();
3765 
3766   TEARDOWN();
3767 }
3768 
3769 
3770 TEST(neg) {
3771   INIT_V8();
3772   SETUP();
3773 
3774   START();
3775   __ Mov(x0, 0xf123456789abcdefL);
3776 
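  // Neg(rd, operand) is an alias for a subtraction from the zero register
  // (Sub(rd, zr, operand)), so the shifted and extended operand forms of Sub
  // are all available here as well.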
3777   // Immediate.
3778   __ Neg(x1, 0x123);
3779   __ Neg(w2, 0x123);
3780 
3781   // Shifted.
3782   __ Neg(x3, Operand(x0, LSL, 1));
3783   __ Neg(w4, Operand(w0, LSL, 2));
3784   __ Neg(x5, Operand(x0, LSR, 3));
3785   __ Neg(w6, Operand(w0, LSR, 4));
3786   __ Neg(x7, Operand(x0, ASR, 5));
3787   __ Neg(w8, Operand(w0, ASR, 6));
3788 
3789   // Extended.
3790   __ Neg(w9, Operand(w0, UXTB));
3791   __ Neg(x10, Operand(x0, SXTB, 1));
3792   __ Neg(w11, Operand(w0, UXTH, 2));
3793   __ Neg(x12, Operand(x0, SXTH, 3));
3794   __ Neg(w13, Operand(w0, UXTW, 4));
3795   __ Neg(x14, Operand(x0, SXTW, 4));
3796   END();
3797 
3798   RUN();
3799 
3800   CHECK_EQUAL_64(0xfffffffffffffeddUL, x1);
3801   CHECK_EQUAL_64(0xfffffedd, x2);
3802   CHECK_EQUAL_64(0x1db97530eca86422UL, x3);
3803   CHECK_EQUAL_64(0xd950c844, x4);
3804   CHECK_EQUAL_64(0xe1db97530eca8643UL, x5);
3805   CHECK_EQUAL_64(0xf7654322, x6);
3806   CHECK_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
3807   CHECK_EQUAL_64(0x01d950c9, x8);
3808   CHECK_EQUAL_64(0xffffff11, x9);
3809   CHECK_EQUAL_64(0x0000000000000022UL, x10);
3810   CHECK_EQUAL_64(0xfffcc844, x11);
3811   CHECK_EQUAL_64(0x0000000000019088UL, x12);
3812   CHECK_EQUAL_64(0x65432110, x13);
3813   CHECK_EQUAL_64(0x0000000765432110UL, x14);
3814 
3815   TEARDOWN();
3816 }
3817 
3818 
3819 TEST(adc_sbc_shift) {
3820   INIT_V8();
3821   SETUP();
3822 
3823   START();
3824   __ Mov(x0, 0);
3825   __ Mov(x1, 1);
3826   __ Mov(x2, 0x0123456789abcdefL);
3827   __ Mov(x3, 0xfedcba9876543210L);
3828   __ Mov(x4, 0xffffffffffffffffL);
3829 
3830   // Clear the C flag.
3831   __ Adds(x0, x0, Operand(0));
3832 
3833   __ Adc(x5, x2, Operand(x3));
3834   __ Adc(x6, x0, Operand(x1, LSL, 60));
3835   __ Sbc(x7, x4, Operand(x3, LSR, 4));
3836   __ Adc(x8, x2, Operand(x3, ASR, 4));
3837   __ Adc(x9, x2, Operand(x3, ROR, 8));
3838 
3839   __ Adc(w10, w2, Operand(w3));
3840   __ Adc(w11, w0, Operand(w1, LSL, 30));
3841   __ Sbc(w12, w4, Operand(w3, LSR, 4));
3842   __ Adc(w13, w2, Operand(w3, ASR, 4));
3843   __ Adc(w14, w2, Operand(w3, ROR, 8));
3844 
3845   // Set the C flag.
3846   __ Cmp(w0, Operand(w0));
3847 
3848   __ Adc(x18, x2, Operand(x3));
3849   __ Adc(x19, x0, Operand(x1, LSL, 60));
3850   __ Sbc(x20, x4, Operand(x3, LSR, 4));
3851   __ Adc(x21, x2, Operand(x3, ASR, 4));
3852   __ Adc(x22, x2, Operand(x3, ROR, 8));
3853 
3854   __ Adc(w23, w2, Operand(w3));
3855   __ Adc(w24, w0, Operand(w1, LSL, 30));
3856   __ Sbc(w25, w4, Operand(w3, LSR, 4));
3857   __ Adc(w26, w2, Operand(w3, ASR, 4));
3858   __ Adc(w27, w2, Operand(w3, ROR, 8));
3859   END();
3860 
3861   RUN();
3862 
3863   CHECK_EQUAL_64(0xffffffffffffffffL, x5);
3864   CHECK_EQUAL_64(1L << 60, x6);
3865   CHECK_EQUAL_64(0xf0123456789abcddL, x7);
3866   CHECK_EQUAL_64(0x0111111111111110L, x8);
3867   CHECK_EQUAL_64(0x1222222222222221L, x9);
3868 
3869   CHECK_EQUAL_32(0xffffffff, w10);
3870   CHECK_EQUAL_32(1 << 30, w11);
3871   CHECK_EQUAL_32(0xf89abcdd, w12);
3872   CHECK_EQUAL_32(0x91111110, w13);
3873   CHECK_EQUAL_32(0x9a222221, w14);
3874 
3875   CHECK_EQUAL_64(0xffffffffffffffffL + 1, x18);
3876   CHECK_EQUAL_64((1L << 60) + 1, x19);
3877   CHECK_EQUAL_64(0xf0123456789abcddL + 1, x20);
3878   CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
3879   CHECK_EQUAL_64(0x1222222222222221L + 1, x22);
3880 
3881   CHECK_EQUAL_32(0xffffffff + 1, w23);
3882   CHECK_EQUAL_32((1 << 30) + 1, w24);
3883   CHECK_EQUAL_32(0xf89abcdd + 1, w25);
3884   CHECK_EQUAL_32(0x91111110 + 1, w26);
3885   CHECK_EQUAL_32(0x9a222221 + 1, w27);
3886 
3887   // Check that adc correctly sets the condition flags.
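  // With the carry flag clear, 1 + 0xffffffffffffffff wraps to zero, so the
  // result below is expected to set Z and C without signed overflow.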
3888   START();
3889   __ Mov(x0, 1);
3890   __ Mov(x1, 0xffffffffffffffffL);
3891   // Clear the C flag.
3892   __ Adds(x0, x0, Operand(0));
3893   __ Adcs(x10, x0, Operand(x1));
3894   END();
3895 
3896   RUN();
3897 
3898   CHECK_EQUAL_NZCV(ZCFlag);
3899   CHECK_EQUAL_64(0, x10);
3900 
3901   START();
3902   __ Mov(x0, 1);
3903   __ Mov(x1, 0x8000000000000000L);
3904   // Clear the C flag.
3905   __ Adds(x0, x0, Operand(0));
3906   __ Adcs(x10, x0, Operand(x1, ASR, 63));
3907   END();
3908 
3909   RUN();
3910 
3911   CHECK_EQUAL_NZCV(ZCFlag);
3912   CHECK_EQUAL_64(0, x10);
3913 
3914   START();
3915   __ Mov(x0, 0x10);
3916   __ Mov(x1, 0x07ffffffffffffffL);
3917   // Clear the C flag.
3918   __ Adds(x0, x0, Operand(0));
3919   __ Adcs(x10, x0, Operand(x1, LSL, 4));
3920   END();
3921 
3922   RUN();
3923 
3924   CHECK_EQUAL_NZCV(NVFlag);
3925   CHECK_EQUAL_64(0x8000000000000000L, x10);
3926 
3927   // Check that sbc correctly sets the condition flags.
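  // With the carry flag clear, sbc computes 0 - 0xffffffffffffffff - 1, which
  // wraps to zero with a borrow, so only Z is expected to be set.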
3928   START();
3929   __ Mov(x0, 0);
3930   __ Mov(x1, 0xffffffffffffffffL);
3931   // Clear the C flag.
3932   __ Adds(x0, x0, Operand(0));
3933   __ Sbcs(x10, x0, Operand(x1));
3934   END();
3935 
3936   RUN();
3937 
3938   CHECK_EQUAL_NZCV(ZFlag);
3939   CHECK_EQUAL_64(0, x10);
3940 
3941   START();
3942   __ Mov(x0, 1);
3943   __ Mov(x1, 0xffffffffffffffffL);
3944   // Clear the C flag.
3945   __ Adds(x0, x0, Operand(0));
3946   __ Sbcs(x10, x0, Operand(x1, LSR, 1));
3947   END();
3948 
3949   RUN();
3950 
3951   CHECK_EQUAL_NZCV(NFlag);
3952   CHECK_EQUAL_64(0x8000000000000001L, x10);
3953 
3954   START();
3955   __ Mov(x0, 0);
3956   // Clear the C flag.
3957   __ Adds(x0, x0, Operand(0));
3958   __ Sbcs(x10, x0, Operand(0xffffffffffffffffL));
3959   END();
3960 
3961   RUN();
3962 
3963   CHECK_EQUAL_NZCV(ZFlag);
3964   CHECK_EQUAL_64(0, x10);
3965 
3966   START();
3967   __ Mov(w0, 0x7fffffff);
3968   // Clear the C flag.
3969   __ Adds(x0, x0, Operand(0));
3970   __ Ngcs(w10, w0);
3971   END();
3972 
3973   RUN();
3974 
3975   CHECK_EQUAL_NZCV(NFlag);
3976   CHECK_EQUAL_64(0x80000000, x10);
3977 
3978   START();
3979   // Clear the C flag.
3980   __ Adds(x0, x0, Operand(0));
3981   __ Ngcs(x10, 0x7fffffffffffffffL);
3982   END();
3983 
3984   RUN();
3985 
3986   CHECK_EQUAL_NZCV(NFlag);
3987   CHECK_EQUAL_64(0x8000000000000000L, x10);
3988 
3989   START();
3990   __ Mov(x0, 0);
3991   // Set the C flag.
3992   __ Cmp(x0, Operand(x0));
3993   __ Sbcs(x10, x0, Operand(1));
3994   END();
3995 
3996   RUN();
3997 
3998   CHECK_EQUAL_NZCV(NFlag);
3999   CHECK_EQUAL_64(0xffffffffffffffffL, x10);
4000 
4001   START();
4002   __ Mov(x0, 0);
4003   // Set the C flag.
4004   __ Cmp(x0, Operand(x0));
4005   __ Ngcs(x10, 0x7fffffffffffffffL);
4006   END();
4007 
4008   RUN();
4009 
4010   CHECK_EQUAL_NZCV(NFlag);
4011   CHECK_EQUAL_64(0x8000000000000001L, x10);
4012 
4013   TEARDOWN();
4014 }
4015 
4016 
4017 TEST(adc_sbc_extend) {
4018   INIT_V8();
4019   SETUP();
4020 
4021   START();
4022   // Clear the C flag.
4023   __ Adds(x0, x0, Operand(0));
4024 
4025   __ Mov(x0, 0);
4026   __ Mov(x1, 1);
4027   __ Mov(x2, 0x0123456789abcdefL);
4028 
4029   __ Adc(x10, x1, Operand(w2, UXTB, 1));
4030   __ Adc(x11, x1, Operand(x2, SXTH, 2));
4031   __ Sbc(x12, x1, Operand(w2, UXTW, 4));
4032   __ Adc(x13, x1, Operand(x2, UXTX, 4));
4033 
4034   __ Adc(w14, w1, Operand(w2, UXTB, 1));
4035   __ Adc(w15, w1, Operand(w2, SXTH, 2));
4036   __ Adc(w9, w1, Operand(w2, UXTW, 4));
4037 
4038   // Set the C flag.
4039   __ Cmp(w0, Operand(w0));
4040 
4041   __ Adc(x20, x1, Operand(w2, UXTB, 1));
4042   __ Adc(x21, x1, Operand(x2, SXTH, 2));
4043   __ Sbc(x22, x1, Operand(w2, UXTW, 4));
4044   __ Adc(x23, x1, Operand(x2, UXTX, 4));
4045 
4046   __ Adc(w24, w1, Operand(w2, UXTB, 1));
4047   __ Adc(w25, w1, Operand(w2, SXTH, 2));
4048   __ Adc(w26, w1, Operand(w2, UXTW, 4));
4049   END();
4050 
4051   RUN();
4052 
4053   CHECK_EQUAL_64(0x1df, x10);
4054   CHECK_EQUAL_64(0xffffffffffff37bdL, x11);
4055   CHECK_EQUAL_64(0xfffffff765432110L, x12);
4056   CHECK_EQUAL_64(0x123456789abcdef1L, x13);
4057 
4058   CHECK_EQUAL_32(0x1df, w14);
4059   CHECK_EQUAL_32(0xffff37bd, w15);
4060   CHECK_EQUAL_32(0x9abcdef1, w9);
4061 
4062   CHECK_EQUAL_64(0x1df + 1, x20);
4063   CHECK_EQUAL_64(0xffffffffffff37bdL + 1, x21);
4064   CHECK_EQUAL_64(0xfffffff765432110L + 1, x22);
4065   CHECK_EQUAL_64(0x123456789abcdef1L + 1, x23);
4066 
4067   CHECK_EQUAL_32(0x1df + 1, w24);
4068   CHECK_EQUAL_32(0xffff37bd + 1, w25);
4069   CHECK_EQUAL_32(0x9abcdef1 + 1, w26);
4070 
4071   // Check that adc correctly sets the condition flags.
4072   START();
4073   __ Mov(x0, 0xff);
4074   __ Mov(x1, 0xffffffffffffffffL);
4075   // Clear the C flag.
4076   __ Adds(x0, x0, Operand(0));
4077   __ Adcs(x10, x0, Operand(x1, SXTX, 1));
4078   END();
4079 
4080   RUN();
4081 
4082   CHECK_EQUAL_NZCV(CFlag);
4083 
4084   START();
4085   __ Mov(x0, 0x7fffffffffffffffL);
4086   __ Mov(x1, 1);
4087   // Clear the C flag.
4088   __ Adds(x0, x0, Operand(0));
4089   __ Adcs(x10, x0, Operand(x1, UXTB, 2));
4090   END();
4091 
4092   RUN();
4093 
4094   CHECK_EQUAL_NZCV(NVFlag);
4095 
4096   START();
4097   __ Mov(x0, 0x7fffffffffffffffL);
4098   // Clear the C flag.
4099   __ Adds(x0, x0, Operand(0));
4100   __ Adcs(x10, x0, Operand(1));
4101   END();
4102 
4103   RUN();
4104 
4105   CHECK_EQUAL_NZCV(NVFlag);
4106 
4107   TEARDOWN();
4108 }
4109 
4110 
4111 TEST(adc_sbc_wide_imm) {
4112   INIT_V8();
4113   SETUP();
4114 
4115   START();
4116   __ Mov(x0, 0);
4117 
4118   // Clear the C flag.
4119   __ Adds(x0, x0, Operand(0));
4120 
4121   __ Adc(x7, x0, Operand(0x1234567890abcdefUL));
4122   __ Adc(w8, w0, Operand(0xffffffff));
4123   __ Sbc(x9, x0, Operand(0x1234567890abcdefUL));
4124   __ Sbc(w10, w0, Operand(0xffffffff));
4125   __ Ngc(x11, Operand(0xffffffff00000000UL));
4126   __ Ngc(w12, Operand(0xffff0000));
4127 
4128   // Set the C flag.
4129   __ Cmp(w0, Operand(w0));
4130 
4131   __ Adc(x18, x0, Operand(0x1234567890abcdefUL));
4132   __ Adc(w19, w0, Operand(0xffffffff));
4133   __ Sbc(x20, x0, Operand(0x1234567890abcdefUL));
4134   __ Sbc(w21, w0, Operand(0xffffffff));
4135   __ Ngc(x22, Operand(0xffffffff00000000UL));
4136   __ Ngc(w23, Operand(0xffff0000));
4137   END();
4138 
4139   RUN();
4140 
4141   CHECK_EQUAL_64(0x1234567890abcdefUL, x7);
4142   CHECK_EQUAL_64(0xffffffff, x8);
4143   CHECK_EQUAL_64(0xedcba9876f543210UL, x9);
4144   CHECK_EQUAL_64(0, x10);
4145   CHECK_EQUAL_64(0xffffffff, x11);
4146   CHECK_EQUAL_64(0xffff, x12);
4147 
4148   CHECK_EQUAL_64(0x1234567890abcdefUL + 1, x18);
4149   CHECK_EQUAL_64(0, x19);
4150   CHECK_EQUAL_64(0xedcba9876f543211UL, x20);
4151   CHECK_EQUAL_64(1, x21);
4152   CHECK_EQUAL_64(0x100000000UL, x22);
4153   CHECK_EQUAL_64(0x10000, x23);
4154 
4155   TEARDOWN();
4156 }
4157 
4158 
4159 TEST(flags) {
4160   INIT_V8();
4161   SETUP();
4162 
4163   START();
4164   __ Mov(x0, 0);
4165   __ Mov(x1, 0x1111111111111111L);
4166   __ Neg(x10, Operand(x0));
4167   __ Neg(x11, Operand(x1));
4168   __ Neg(w12, Operand(w1));
4169   // Clear the C flag.
4170   __ Adds(x0, x0, Operand(0));
4171   __ Ngc(x13, Operand(x0));
4172   // Set the C flag.
4173   __ Cmp(x0, Operand(x0));
4174   __ Ngc(w14, Operand(w0));
4175   END();
4176 
4177   RUN();
4178 
4179   CHECK_EQUAL_64(0, x10);
4180   CHECK_EQUAL_64(-0x1111111111111111L, x11);
4181   CHECK_EQUAL_32(-0x11111111, w12);
4182   CHECK_EQUAL_64(-1L, x13);
4183   CHECK_EQUAL_32(0, w14);
4184 
4185   START();
4186   __ Mov(x0, 0);
4187   __ Cmp(x0, Operand(x0));
4188   END();
4189 
4190   RUN();
4191 
4192   CHECK_EQUAL_NZCV(ZCFlag);
4193 
4194   START();
4195   __ Mov(w0, 0);
4196   __ Cmp(w0, Operand(w0));
4197   END();
4198 
4199   RUN();
4200 
4201   CHECK_EQUAL_NZCV(ZCFlag);
4202 
4203   START();
4204   __ Mov(x0, 0);
4205   __ Mov(x1, 0x1111111111111111L);
4206   __ Cmp(x0, Operand(x1));
4207   END();
4208 
4209   RUN();
4210 
4211   CHECK_EQUAL_NZCV(NFlag);
4212 
4213   START();
4214   __ Mov(w0, 0);
4215   __ Mov(w1, 0x11111111);
4216   __ Cmp(w0, Operand(w1));
4217   END();
4218 
4219   RUN();
4220 
4221   CHECK_EQUAL_NZCV(NFlag);
4222 
4223   START();
4224   __ Mov(x1, 0x1111111111111111L);
4225   __ Cmp(x1, Operand(0));
4226   END();
4227 
4228   RUN();
4229 
4230   CHECK_EQUAL_NZCV(CFlag);
4231 
4232   START();
4233   __ Mov(w1, 0x11111111);
4234   __ Cmp(w1, Operand(0));
4235   END();
4236 
4237   RUN();
4238 
4239   CHECK_EQUAL_NZCV(CFlag);
4240 
4241   START();
4242   __ Mov(x0, 1);
4243   __ Mov(x1, 0x7fffffffffffffffL);
4244   __ Cmn(x1, Operand(x0));
4245   END();
4246 
4247   RUN();
4248 
4249   CHECK_EQUAL_NZCV(NVFlag);
4250 
4251   START();
4252   __ Mov(w0, 1);
4253   __ Mov(w1, 0x7fffffff);
4254   __ Cmn(w1, Operand(w0));
4255   END();
4256 
4257   RUN();
4258 
4259   CHECK_EQUAL_NZCV(NVFlag);
4260 
4261   START();
4262   __ Mov(x0, 1);
4263   __ Mov(x1, 0xffffffffffffffffL);
4264   __ Cmn(x1, Operand(x0));
4265   END();
4266 
4267   RUN();
4268 
4269   CHECK_EQUAL_NZCV(ZCFlag);
4270 
4271   START();
4272   __ Mov(w0, 1);
4273   __ Mov(w1, 0xffffffff);
4274   __ Cmn(w1, Operand(w0));
4275   END();
4276 
4277   RUN();
4278 
4279   CHECK_EQUAL_NZCV(ZCFlag);
4280 
4281   START();
4282   __ Mov(w0, 0);
4283   __ Mov(w1, 1);
4284   // Clear the C flag.
4285   __ Adds(w0, w0, Operand(0));
4286   __ Ngcs(w0, Operand(w1));
4287   END();
4288 
4289   RUN();
4290 
4291   CHECK_EQUAL_NZCV(NFlag);
4292 
4293   START();
4294   __ Mov(w0, 0);
4295   __ Mov(w1, 0);
4296   // Set the C flag.
4297   __ Cmp(w0, Operand(w0));
4298   __ Ngcs(w0, Operand(w1));
4299   END();
4300 
4301   RUN();
4302 
4303   CHECK_EQUAL_NZCV(ZCFlag);
4304 
4305   TEARDOWN();
4306 }
4307 
4308 
4309 TEST(cmp_shift) {
4310   INIT_V8();
4311   SETUP();
4312 
4313   START();
4314   __ Mov(x18, 0xf0000000);
4315   __ Mov(x19, 0xf000000010000000UL);
4316   __ Mov(x20, 0xf0000000f0000000UL);
4317   __ Mov(x21, 0x7800000078000000UL);
4318   __ Mov(x22, 0x3c0000003c000000UL);
4319   __ Mov(x23, 0x8000000780000000UL);
4320   __ Mov(x24, 0x0000000f00000000UL);
4321   __ Mov(x25, 0x00000003c0000000UL);
4322   __ Mov(x26, 0x8000000780000000UL);
4323   __ Mov(x27, 0xc0000003);
4324 
4325   __ Cmp(w20, Operand(w21, LSL, 1));
4326   __ Mrs(x0, NZCV);
4327 
4328   __ Cmp(x20, Operand(x22, LSL, 2));
4329   __ Mrs(x1, NZCV);
4330 
4331   __ Cmp(w19, Operand(w23, LSR, 3));
4332   __ Mrs(x2, NZCV);
4333 
4334   __ Cmp(x18, Operand(x24, LSR, 4));
4335   __ Mrs(x3, NZCV);
4336 
4337   __ Cmp(w20, Operand(w25, ASR, 2));
4338   __ Mrs(x4, NZCV);
4339 
4340   __ Cmp(x20, Operand(x26, ASR, 3));
4341   __ Mrs(x5, NZCV);
4342 
4343   __ Cmp(w27, Operand(w22, ROR, 28));
4344   __ Mrs(x6, NZCV);
4345 
4346   __ Cmp(x20, Operand(x21, ROR, 31));
4347   __ Mrs(x7, NZCV);
4348   END();
4349 
4350   RUN();
4351 
4352   CHECK_EQUAL_32(ZCFlag, w0);
4353   CHECK_EQUAL_32(ZCFlag, w1);
4354   CHECK_EQUAL_32(ZCFlag, w2);
4355   CHECK_EQUAL_32(ZCFlag, w3);
4356   CHECK_EQUAL_32(ZCFlag, w4);
4357   CHECK_EQUAL_32(ZCFlag, w5);
4358   CHECK_EQUAL_32(ZCFlag, w6);
4359   CHECK_EQUAL_32(ZCFlag, w7);
4360 
4361   TEARDOWN();
4362 }
4363 
4364 
4365 TEST(cmp_extend) {
4366   INIT_V8();
4367   SETUP();
4368 
4369   START();
4370   __ Mov(w20, 0x2);
4371   __ Mov(w21, 0x1);
4372   __ Mov(x22, 0xffffffffffffffffUL);
4373   __ Mov(x23, 0xff);
4374   __ Mov(x24, 0xfffffffffffffffeUL);
4375   __ Mov(x25, 0xffff);
4376   __ Mov(x26, 0xffffffff);
4377 
4378   __ Cmp(w20, Operand(w21, LSL, 1));
4379   __ Mrs(x0, NZCV);
4380 
4381   __ Cmp(x22, Operand(x23, SXTB, 0));
4382   __ Mrs(x1, NZCV);
4383 
4384   __ Cmp(x24, Operand(x23, SXTB, 1));
4385   __ Mrs(x2, NZCV);
4386 
4387   __ Cmp(x24, Operand(x23, UXTB, 1));
4388   __ Mrs(x3, NZCV);
4389 
4390   __ Cmp(w22, Operand(w25, UXTH));
4391   __ Mrs(x4, NZCV);
4392 
4393   __ Cmp(x22, Operand(x25, SXTH));
4394   __ Mrs(x5, NZCV);
4395 
4396   __ Cmp(x22, Operand(x26, UXTW));
4397   __ Mrs(x6, NZCV);
4398 
4399   __ Cmp(x24, Operand(x26, SXTW, 1));
4400   __ Mrs(x7, NZCV);
4401   END();
4402 
4403   RUN();
4404 
4405   CHECK_EQUAL_32(ZCFlag, w0);
4406   CHECK_EQUAL_32(ZCFlag, w1);
4407   CHECK_EQUAL_32(ZCFlag, w2);
4408   CHECK_EQUAL_32(NCFlag, w3);
4409   CHECK_EQUAL_32(NCFlag, w4);
4410   CHECK_EQUAL_32(ZCFlag, w5);
4411   CHECK_EQUAL_32(NCFlag, w6);
4412   CHECK_EQUAL_32(ZCFlag, w7);
4413 
4414   TEARDOWN();
4415 }
4416 
4417 
4418 TEST(ccmp) {
4419   INIT_V8();
4420   SETUP();
4421 
4422   START();
4423   __ Mov(w16, 0);
4424   __ Mov(w17, 1);
4425   __ Cmp(w16, w16);
4426   __ Ccmp(w16, w17, NCFlag, eq);
4427   __ Mrs(x0, NZCV);
4428 
4429   __ Cmp(w16, w16);
4430   __ Ccmp(w16, w17, NCFlag, ne);
4431   __ Mrs(x1, NZCV);
4432 
4433   __ Cmp(x16, x16);
4434   __ Ccmn(x16, 2, NZCVFlag, eq);
4435   __ Mrs(x2, NZCV);
4436 
4437   __ Cmp(x16, x16);
4438   __ Ccmn(x16, 2, NZCVFlag, ne);
4439   __ Mrs(x3, NZCV);
4440 
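  // The al and nv conditions always hold, so these comparisons are always
  // performed and the NZCVFlag immediate is ignored; comparing x16 with itself
  // gives Z and C. (The raw assembler form is used here on the assumption that
  // the Ccmp macro does not accept al or nv.)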
4441   __ ccmp(x16, x16, NZCVFlag, al);
4442   __ Mrs(x4, NZCV);
4443 
4444   __ ccmp(x16, x16, NZCVFlag, nv);
4445   __ Mrs(x5, NZCV);
4446 
4447   END();
4448 
4449   RUN();
4450 
4451   CHECK_EQUAL_32(NFlag, w0);
4452   CHECK_EQUAL_32(NCFlag, w1);
4453   CHECK_EQUAL_32(NoFlag, w2);
4454   CHECK_EQUAL_32(NZCVFlag, w3);
4455   CHECK_EQUAL_32(ZCFlag, w4);
4456   CHECK_EQUAL_32(ZCFlag, w5);
4457 
4458   TEARDOWN();
4459 }
4460 
4461 
4462 TEST(ccmp_wide_imm) {
4463   INIT_V8();
4464   SETUP();
4465 
4466   START();
4467   __ Mov(w20, 0);
4468 
4469   __ Cmp(w20, Operand(w20));
4470   __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
4471   __ Mrs(x0, NZCV);
4472 
4473   __ Cmp(w20, Operand(w20));
4474   __ Ccmp(x20, Operand(0xffffffffffffffffUL), NZCVFlag, eq);
4475   __ Mrs(x1, NZCV);
4476   END();
4477 
4478   RUN();
4479 
4480   CHECK_EQUAL_32(NFlag, w0);
4481   CHECK_EQUAL_32(NoFlag, w1);
4482 
4483   TEARDOWN();
4484 }
4485 
4486 
4487 TEST(ccmp_shift_extend) {
4488   INIT_V8();
4489   SETUP();
4490 
4491   START();
4492   __ Mov(w20, 0x2);
4493   __ Mov(w21, 0x1);
4494   __ Mov(x22, 0xffffffffffffffffUL);
4495   __ Mov(x23, 0xff);
4496   __ Mov(x24, 0xfffffffffffffffeUL);
4497 
4498   __ Cmp(w20, Operand(w20));
4499   __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
4500   __ Mrs(x0, NZCV);
4501 
4502   __ Cmp(w20, Operand(w20));
4503   __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
4504   __ Mrs(x1, NZCV);
4505 
4506   __ Cmp(w20, Operand(w20));
4507   __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
4508   __ Mrs(x2, NZCV);
4509 
4510   __ Cmp(w20, Operand(w20));
4511   __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
4512   __ Mrs(x3, NZCV);
4513 
4514   __ Cmp(w20, Operand(w20));
4515   __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
4516   __ Mrs(x4, NZCV);
4517   END();
4518 
4519   RUN();
4520 
4521   CHECK_EQUAL_32(ZCFlag, w0);
4522   CHECK_EQUAL_32(ZCFlag, w1);
4523   CHECK_EQUAL_32(ZCFlag, w2);
4524   CHECK_EQUAL_32(NCFlag, w3);
4525   CHECK_EQUAL_32(NZCVFlag, w4);
4526 
4527   TEARDOWN();
4528 }
4529 
4530 
4531 TEST(csel) {
4532   INIT_V8();
4533   SETUP();
4534 
4535   START();
4536   __ Mov(x16, 0);
4537   __ Mov(x24, 0x0000000f0000000fUL);
4538   __ Mov(x25, 0x0000001f0000001fUL);
4539   __ Mov(x26, 0);
4540   __ Mov(x27, 0);
4541 
4542   __ Cmp(w16, 0);
4543   __ Csel(w0, w24, w25, eq);
4544   __ Csel(w1, w24, w25, ne);
4545   __ Csinc(w2, w24, w25, mi);
4546   __ Csinc(w3, w24, w25, pl);
4547 
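  // The al and nv conditions always hold, so these raw csel instructions
  // simply select the first source register.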
4548   __ csel(w13, w24, w25, al);
4549   __ csel(x14, x24, x25, nv);
4550 
4551   __ Cmp(x16, 1);
4552   __ Csinv(x4, x24, x25, gt);
4553   __ Csinv(x5, x24, x25, le);
4554   __ Csneg(x6, x24, x25, hs);
4555   __ Csneg(x7, x24, x25, lo);
4556 
4557   __ Cset(w8, ne);
4558   __ Csetm(w9, ne);
4559   __ Cinc(x10, x25, ne);
4560   __ Cinv(x11, x24, ne);
4561   __ Cneg(x12, x24, ne);
4562 
4563   __ csel(w15, w24, w25, al);
4564   __ csel(x18, x24, x25, nv);
4565 
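  // CzeroX zeroes its register when the condition holds, and CmovX copies the
  // source when the condition holds; ne holds here because x16 (0) != 1.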
4566   __ CzeroX(x24, ne);
4567   __ CzeroX(x25, eq);
4568 
4569   __ CmovX(x26, x25, ne);
4570   __ CmovX(x27, x25, eq);
4571   END();
4572 
4573   RUN();
4574 
4575   CHECK_EQUAL_64(0x0000000f, x0);
4576   CHECK_EQUAL_64(0x0000001f, x1);
4577   CHECK_EQUAL_64(0x00000020, x2);
4578   CHECK_EQUAL_64(0x0000000f, x3);
4579   CHECK_EQUAL_64(0xffffffe0ffffffe0UL, x4);
4580   CHECK_EQUAL_64(0x0000000f0000000fUL, x5);
4581   CHECK_EQUAL_64(0xffffffe0ffffffe1UL, x6);
4582   CHECK_EQUAL_64(0x0000000f0000000fUL, x7);
4583   CHECK_EQUAL_64(0x00000001, x8);
4584   CHECK_EQUAL_64(0xffffffff, x9);
4585   CHECK_EQUAL_64(0x0000001f00000020UL, x10);
4586   CHECK_EQUAL_64(0xfffffff0fffffff0UL, x11);
4587   CHECK_EQUAL_64(0xfffffff0fffffff1UL, x12);
4588   CHECK_EQUAL_64(0x0000000f, x13);
4589   CHECK_EQUAL_64(0x0000000f0000000fUL, x14);
4590   CHECK_EQUAL_64(0x0000000f, x15);
4591   CHECK_EQUAL_64(0x0000000f0000000fUL, x18);
4592   CHECK_EQUAL_64(0, x24);
4593   CHECK_EQUAL_64(0x0000001f0000001fUL, x25);
4594   CHECK_EQUAL_64(0x0000001f0000001fUL, x26);
4595   CHECK_EQUAL_64(0, x27);
4596 
4597   TEARDOWN();
4598 }
4599 
4600 
4601 TEST(csel_imm) {
4602   INIT_V8();
4603   SETUP();
4604 
4605   START();
4606   __ Mov(x18, 0);
4607   __ Mov(x19, 0x80000000);
4608   __ Mov(x20, 0x8000000000000000UL);
4609 
4610   __ Cmp(x18, Operand(0));
4611   __ Csel(w0, w19, -2, ne);
4612   __ Csel(w1, w19, -1, ne);
4613   __ Csel(w2, w19, 0, ne);
4614   __ Csel(w3, w19, 1, ne);
4615   __ Csel(w4, w19, 2, ne);
4616   __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
4617   __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
4618   __ Csel(w7, w19, 3, eq);
4619 
4620   __ Csel(x8, x20, -2, ne);
4621   __ Csel(x9, x20, -1, ne);
4622   __ Csel(x10, x20, 0, ne);
4623   __ Csel(x11, x20, 1, ne);
4624   __ Csel(x12, x20, 2, ne);
4625   __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
4626   __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
4627   __ Csel(x15, x20, 3, eq);
4628 
4629   END();
4630 
4631   RUN();
4632 
4633   CHECK_EQUAL_32(-2, w0);
4634   CHECK_EQUAL_32(-1, w1);
4635   CHECK_EQUAL_32(0, w2);
4636   CHECK_EQUAL_32(1, w3);
4637   CHECK_EQUAL_32(2, w4);
4638   CHECK_EQUAL_32(-1, w5);
4639   CHECK_EQUAL_32(0x40000000, w6);
4640   CHECK_EQUAL_32(0x80000000, w7);
4641 
4642   CHECK_EQUAL_64(-2, x8);
4643   CHECK_EQUAL_64(-1, x9);
4644   CHECK_EQUAL_64(0, x10);
4645   CHECK_EQUAL_64(1, x11);
4646   CHECK_EQUAL_64(2, x12);
4647   CHECK_EQUAL_64(-1, x13);
4648   CHECK_EQUAL_64(0x4000000000000000UL, x14);
4649   CHECK_EQUAL_64(0x8000000000000000UL, x15);
4650 
4651   TEARDOWN();
4652 }
4653 
4654 
4655 TEST(lslv) {
4656   INIT_V8();
4657   SETUP();
4658 
4659   uint64_t value = 0x0123456789abcdefUL;
4660   int shift[] = {1, 3, 5, 9, 17, 33};
4661 
4662   START();
4663   __ Mov(x0, value);
4664   __ Mov(w1, shift[0]);
4665   __ Mov(w2, shift[1]);
4666   __ Mov(w3, shift[2]);
4667   __ Mov(w4, shift[3]);
4668   __ Mov(w5, shift[4]);
4669   __ Mov(w6, shift[5]);
4670 
4671   __ lslv(x0, x0, xzr);
4672 
4673   __ Lsl(x16, x0, x1);
4674   __ Lsl(x17, x0, x2);
4675   __ Lsl(x18, x0, x3);
4676   __ Lsl(x19, x0, x4);
4677   __ Lsl(x20, x0, x5);
4678   __ Lsl(x21, x0, x6);
4679 
4680   __ Lsl(w22, w0, w1);
4681   __ Lsl(w23, w0, w2);
4682   __ Lsl(w24, w0, w3);
4683   __ Lsl(w25, w0, w4);
4684   __ Lsl(w26, w0, w5);
4685   __ Lsl(w27, w0, w6);
4686   END();
4687 
4688   RUN();
4689 
4690   CHECK_EQUAL_64(value, x0);
4691   CHECK_EQUAL_64(value << (shift[0] & 63), x16);
4692   CHECK_EQUAL_64(value << (shift[1] & 63), x17);
4693   CHECK_EQUAL_64(value << (shift[2] & 63), x18);
4694   CHECK_EQUAL_64(value << (shift[3] & 63), x19);
4695   CHECK_EQUAL_64(value << (shift[4] & 63), x20);
4696   CHECK_EQUAL_64(value << (shift[5] & 63), x21);
4697   CHECK_EQUAL_32(value << (shift[0] & 31), w22);
4698   CHECK_EQUAL_32(value << (shift[1] & 31), w23);
4699   CHECK_EQUAL_32(value << (shift[2] & 31), w24);
4700   CHECK_EQUAL_32(value << (shift[3] & 31), w25);
4701   CHECK_EQUAL_32(value << (shift[4] & 31), w26);
4702   CHECK_EQUAL_32(value << (shift[5] & 31), w27);
4703 
4704   TEARDOWN();
4705 }
4706 
4707 
4708 TEST(lsrv) {
4709   INIT_V8();
4710   SETUP();
4711 
4712   uint64_t value = 0x0123456789abcdefUL;
4713   int shift[] = {1, 3, 5, 9, 17, 33};
4714 
4715   START();
4716   __ Mov(x0, value);
4717   __ Mov(w1, shift[0]);
4718   __ Mov(w2, shift[1]);
4719   __ Mov(w3, shift[2]);
4720   __ Mov(w4, shift[3]);
4721   __ Mov(w5, shift[4]);
4722   __ Mov(w6, shift[5]);
4723 
4724   __ lsrv(x0, x0, xzr);
4725 
4726   __ Lsr(x16, x0, x1);
4727   __ Lsr(x17, x0, x2);
4728   __ Lsr(x18, x0, x3);
4729   __ Lsr(x19, x0, x4);
4730   __ Lsr(x20, x0, x5);
4731   __ Lsr(x21, x0, x6);
4732 
4733   __ Lsr(w22, w0, w1);
4734   __ Lsr(w23, w0, w2);
4735   __ Lsr(w24, w0, w3);
4736   __ Lsr(w25, w0, w4);
4737   __ Lsr(w26, w0, w5);
4738   __ Lsr(w27, w0, w6);
4739   END();
4740 
4741   RUN();
4742 
4743   CHECK_EQUAL_64(value, x0);
4744   CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
4745   CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
4746   CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
4747   CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
4748   CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
4749   CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
4750 
4751   value &= 0xffffffffUL;
4752   CHECK_EQUAL_32(value >> (shift[0] & 31), w22);
4753   CHECK_EQUAL_32(value >> (shift[1] & 31), w23);
4754   CHECK_EQUAL_32(value >> (shift[2] & 31), w24);
4755   CHECK_EQUAL_32(value >> (shift[3] & 31), w25);
4756   CHECK_EQUAL_32(value >> (shift[4] & 31), w26);
4757   CHECK_EQUAL_32(value >> (shift[5] & 31), w27);
4758 
4759   TEARDOWN();
4760 }
4761 
4762 
4763 TEST(asrv) {
4764   INIT_V8();
4765   SETUP();
4766 
4767   int64_t value = 0xfedcba98fedcba98UL;
4768   int shift[] = {1, 3, 5, 9, 17, 33};
4769 
4770   START();
4771   __ Mov(x0, value);
4772   __ Mov(w1, shift[0]);
4773   __ Mov(w2, shift[1]);
4774   __ Mov(w3, shift[2]);
4775   __ Mov(w4, shift[3]);
4776   __ Mov(w5, shift[4]);
4777   __ Mov(w6, shift[5]);
4778 
4779   __ asrv(x0, x0, xzr);
4780 
4781   __ Asr(x16, x0, x1);
4782   __ Asr(x17, x0, x2);
4783   __ Asr(x18, x0, x3);
4784   __ Asr(x19, x0, x4);
4785   __ Asr(x20, x0, x5);
4786   __ Asr(x21, x0, x6);
4787 
4788   __ Asr(w22, w0, w1);
4789   __ Asr(w23, w0, w2);
4790   __ Asr(w24, w0, w3);
4791   __ Asr(w25, w0, w4);
4792   __ Asr(w26, w0, w5);
4793   __ Asr(w27, w0, w6);
4794   END();
4795 
4796   RUN();
4797 
4798   CHECK_EQUAL_64(value, x0);
4799   CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
4800   CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
4801   CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
4802   CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
4803   CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
4804   CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
4805 
4806   int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
4807   CHECK_EQUAL_32(value32 >> (shift[0] & 31), w22);
4808   CHECK_EQUAL_32(value32 >> (shift[1] & 31), w23);
4809   CHECK_EQUAL_32(value32 >> (shift[2] & 31), w24);
4810   CHECK_EQUAL_32(value32 >> (shift[3] & 31), w25);
4811   CHECK_EQUAL_32(value32 >> (shift[4] & 31), w26);
4812   CHECK_EQUAL_32(value32 >> (shift[5] & 31), w27);
4813 
4814   TEARDOWN();
4815 }
4816 
4817 
4818 TEST(rorv) {
4819   INIT_V8();
4820   SETUP();
4821 
4822   uint64_t value = 0x0123456789abcdefUL;
4823   int shift[] = {4, 8, 12, 16, 24, 36};
4824 
4825   START();
4826   __ Mov(x0, value);
4827   __ Mov(w1, shift[0]);
4828   __ Mov(w2, shift[1]);
4829   __ Mov(w3, shift[2]);
4830   __ Mov(w4, shift[3]);
4831   __ Mov(w5, shift[4]);
4832   __ Mov(w6, shift[5]);
4833 
4834   __ rorv(x0, x0, xzr);
4835 
4836   __ Ror(x16, x0, x1);
4837   __ Ror(x17, x0, x2);
4838   __ Ror(x18, x0, x3);
4839   __ Ror(x19, x0, x4);
4840   __ Ror(x20, x0, x5);
4841   __ Ror(x21, x0, x6);
4842 
4843   __ Ror(w22, w0, w1);
4844   __ Ror(w23, w0, w2);
4845   __ Ror(w24, w0, w3);
4846   __ Ror(w25, w0, w4);
4847   __ Ror(w26, w0, w5);
4848   __ Ror(w27, w0, w6);
4849   END();
4850 
4851   RUN();
4852 
4853   CHECK_EQUAL_64(value, x0);
4854   CHECK_EQUAL_64(0xf0123456789abcdeUL, x16);
4855   CHECK_EQUAL_64(0xef0123456789abcdUL, x17);
4856   CHECK_EQUAL_64(0xdef0123456789abcUL, x18);
4857   CHECK_EQUAL_64(0xcdef0123456789abUL, x19);
4858   CHECK_EQUAL_64(0xabcdef0123456789UL, x20);
4859   CHECK_EQUAL_64(0x789abcdef0123456UL, x21);
4860   CHECK_EQUAL_32(0xf89abcde, w22);
4861   CHECK_EQUAL_32(0xef89abcd, w23);
4862   CHECK_EQUAL_32(0xdef89abc, w24);
4863   CHECK_EQUAL_32(0xcdef89ab, w25);
4864   CHECK_EQUAL_32(0xabcdef89, w26);
4865   CHECK_EQUAL_32(0xf89abcde, w27);
4866 
4867   TEARDOWN();
4868 }
4869 
4870 
4871 TEST(bfm) {
4872   INIT_V8();
4873   SETUP();
4874 
4875   START();
4876   __ Mov(x1, 0x0123456789abcdefL);
4877 
4878   __ Mov(x10, 0x8888888888888888L);
4879   __ Mov(x11, 0x8888888888888888L);
4880   __ Mov(x12, 0x8888888888888888L);
4881   __ Mov(x13, 0x8888888888888888L);
4882   __ Mov(w20, 0x88888888);
4883   __ Mov(w21, 0x88888888);
4884 
4885   __ bfm(x10, x1, 16, 31);
4886   __ bfm(x11, x1, 32, 15);
4887 
4888   __ bfm(w20, w1, 16, 23);
4889   __ bfm(w21, w1, 24, 15);
4890 
4891   // Aliases.
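  // Bfi inserts the low 'width' bits of the source at 'lsb' in the
  // destination; Bfxil extracts 'width' bits starting at 'lsb' from the source
  // into the low bits of the destination. Both leave the other bits unchanged.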
4892   __ Bfi(x12, x1, 16, 8);
4893   __ Bfxil(x13, x1, 16, 8);
4894   END();
4895 
4896   RUN();
4897 
4898 
4899   CHECK_EQUAL_64(0x88888888888889abL, x10);
4900   CHECK_EQUAL_64(0x8888cdef88888888L, x11);
4901 
4902   CHECK_EQUAL_32(0x888888ab, w20);
4903   CHECK_EQUAL_32(0x88cdef88, w21);
4904 
4905   CHECK_EQUAL_64(0x8888888888ef8888L, x12);
4906   CHECK_EQUAL_64(0x88888888888888abL, x13);
4907 
4908   TEARDOWN();
4909 }
4910 
4911 
4912 TEST(sbfm) {
4913   INIT_V8();
4914   SETUP();
4915 
4916   START();
4917   __ Mov(x1, 0x0123456789abcdefL);
4918   __ Mov(x2, 0xfedcba9876543210L);
4919 
4920   __ sbfm(x10, x1, 16, 31);
4921   __ sbfm(x11, x1, 32, 15);
4922   __ sbfm(x12, x1, 32, 47);
4923   __ sbfm(x13, x1, 48, 35);
4924 
4925   __ sbfm(w14, w1, 16, 23);
4926   __ sbfm(w15, w1, 24, 15);
4927   __ sbfm(w16, w2, 16, 23);
4928   __ sbfm(w17, w2, 24, 15);
4929 
4930   // Aliases.
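  // Asr (immediate), Sbfiz, Sbfx, Sxtb, Sxth and Sxtw are all sign-extending
  // aliases of sbfm.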
4931   __ Asr(x18, x1, 32);
4932   __ Asr(x19, x2, 32);
4933   __ Sbfiz(x20, x1, 8, 16);
4934   __ Sbfiz(x21, x2, 8, 16);
4935   __ Sbfx(x22, x1, 8, 16);
4936   __ Sbfx(x23, x2, 8, 16);
4937   __ Sxtb(x24, w1);
4938   __ Sxtb(x25, x2);
4939   __ Sxth(x26, w1);
4940   __ Sxth(x27, x2);
4941   __ Sxtw(x28, w1);
4942   __ Sxtw(x29, x2);
4943   END();
4944 
4945   RUN();
4946 
4947 
4948   CHECK_EQUAL_64(0xffffffffffff89abL, x10);
4949   CHECK_EQUAL_64(0xffffcdef00000000L, x11);
4950   CHECK_EQUAL_64(0x4567L, x12);
4951   CHECK_EQUAL_64(0x789abcdef0000L, x13);
4952 
4953   CHECK_EQUAL_32(0xffffffab, w14);
4954   CHECK_EQUAL_32(0xffcdef00, w15);
4955   CHECK_EQUAL_32(0x54, w16);
4956   CHECK_EQUAL_32(0x00321000, w17);
4957 
4958   CHECK_EQUAL_64(0x01234567L, x18);
4959   CHECK_EQUAL_64(0xfffffffffedcba98L, x19);
4960   CHECK_EQUAL_64(0xffffffffffcdef00L, x20);
4961   CHECK_EQUAL_64(0x321000L, x21);
4962   CHECK_EQUAL_64(0xffffffffffffabcdL, x22);
4963   CHECK_EQUAL_64(0x5432L, x23);
4964   CHECK_EQUAL_64(0xffffffffffffffefL, x24);
4965   CHECK_EQUAL_64(0x10, x25);
4966   CHECK_EQUAL_64(0xffffffffffffcdefL, x26);
4967   CHECK_EQUAL_64(0x3210, x27);
4968   CHECK_EQUAL_64(0xffffffff89abcdefL, x28);
4969   CHECK_EQUAL_64(0x76543210, x29);
4970 
4971   TEARDOWN();
4972 }
4973 
4974 
4975 TEST(ubfm) {
4976   INIT_V8();
4977   SETUP();
4978 
4979   START();
4980   __ Mov(x1, 0x0123456789abcdefL);
4981   __ Mov(x2, 0xfedcba9876543210L);
4982 
4983   __ Mov(x10, 0x8888888888888888L);
4984   __ Mov(x11, 0x8888888888888888L);
4985 
4986   __ ubfm(x10, x1, 16, 31);
4987   __ ubfm(x11, x1, 32, 15);
4988   __ ubfm(x12, x1, 32, 47);
4989   __ ubfm(x13, x1, 48, 35);
4990 
4991   __ ubfm(w25, w1, 16, 23);
4992   __ ubfm(w26, w1, 24, 15);
4993   __ ubfm(w27, w2, 16, 23);
4994   __ ubfm(w28, w2, 24, 15);
4995 
4996   // Aliases.
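  // Lsl and Lsr (immediate), Ubfiz, Ubfx, Uxtb, Uxth and Uxtw are
  // zero-extending aliases of ubfm.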
4997   __ Lsl(x15, x1, 63);
4998   __ Lsl(x16, x1, 0);
4999   __ Lsr(x17, x1, 32);
5000   __ Ubfiz(x18, x1, 8, 16);
5001   __ Ubfx(x19, x1, 8, 16);
5002   __ Uxtb(x20, x1);
5003   __ Uxth(x21, x1);
5004   __ Uxtw(x22, x1);
5005   END();
5006 
5007   RUN();
5008 
5009   CHECK_EQUAL_64(0x00000000000089abL, x10);
5010   CHECK_EQUAL_64(0x0000cdef00000000L, x11);
5011   CHECK_EQUAL_64(0x4567L, x12);
5012   CHECK_EQUAL_64(0x789abcdef0000L, x13);
5013 
5014   CHECK_EQUAL_32(0x000000ab, w25);
5015   CHECK_EQUAL_32(0x00cdef00, w26);
5016   CHECK_EQUAL_32(0x54, w27);
5017   CHECK_EQUAL_32(0x00321000, w28);
5018 
5019   CHECK_EQUAL_64(0x8000000000000000L, x15);
5020   CHECK_EQUAL_64(0x0123456789abcdefL, x16);
5021   CHECK_EQUAL_64(0x01234567L, x17);
5022   CHECK_EQUAL_64(0xcdef00L, x18);
5023   CHECK_EQUAL_64(0xabcdL, x19);
5024   CHECK_EQUAL_64(0xefL, x20);
5025   CHECK_EQUAL_64(0xcdefL, x21);
5026   CHECK_EQUAL_64(0x89abcdefL, x22);
5027 
5028   TEARDOWN();
5029 }
5030 
5031 
5032 TEST(extr) {
5033   INIT_V8();
5034   SETUP();
5035 
5036   START();
5037   __ Mov(x1, 0x0123456789abcdefL);
5038   __ Mov(x2, 0xfedcba9876543210L);
5039 
5040   __ Extr(w10, w1, w2, 0);
5041   __ Extr(x11, x1, x2, 0);
5042   __ Extr(w12, w1, w2, 1);
5043   __ Extr(x13, x2, x1, 2);
5044 
5045   __ Ror(w20, w1, 0);
5046   __ Ror(x21, x1, 0);
5047   __ Ror(w22, w2, 17);
5048   __ Ror(w23, w1, 31);
5049   __ Ror(x24, x2, 1);
5050   __ Ror(x25, x1, 63);
5051   END();
5052 
5053   RUN();
5054 
5055   CHECK_EQUAL_64(0x76543210, x10);
5056   CHECK_EQUAL_64(0xfedcba9876543210L, x11);
5057   CHECK_EQUAL_64(0xbb2a1908, x12);
5058   CHECK_EQUAL_64(0x0048d159e26af37bUL, x13);
5059   CHECK_EQUAL_64(0x89abcdef, x20);
5060   CHECK_EQUAL_64(0x0123456789abcdefL, x21);
5061   CHECK_EQUAL_64(0x19083b2a, x22);
5062   CHECK_EQUAL_64(0x13579bdf, x23);
5063   CHECK_EQUAL_64(0x7f6e5d4c3b2a1908UL, x24);
5064   CHECK_EQUAL_64(0x02468acf13579bdeUL, x25);
5065 
5066   TEARDOWN();
5067 }
5068 
5069 
5070 TEST(fmov_imm) {
5071   INIT_V8();
5072   SETUP();
5073 
5074   START();
5075   __ Fmov(s11, 1.0);
5076   __ Fmov(d22, -13.0);
5077   __ Fmov(s1, 255.0);
5078   __ Fmov(d2, 12.34567);
5079   __ Fmov(s3, 0.0);
5080   __ Fmov(d4, 0.0);
5081   __ Fmov(s5, kFP32PositiveInfinity);
5082   __ Fmov(d6, kFP64NegativeInfinity);
5083   END();
5084 
5085   RUN();
5086 
5087   CHECK_EQUAL_FP32(1.0, s11);
5088   CHECK_EQUAL_FP64(-13.0, d22);
5089   CHECK_EQUAL_FP32(255.0, s1);
5090   CHECK_EQUAL_FP64(12.34567, d2);
5091   CHECK_EQUAL_FP32(0.0, s3);
5092   CHECK_EQUAL_FP64(0.0, d4);
5093   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
5094   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d6);
5095 
5096   TEARDOWN();
5097 }
5098 
5099 
5100 TEST(fmov_reg) {
5101   INIT_V8();
5102   SETUP();
5103 
5104   START();
5105   __ Fmov(s20, 1.0);
5106   __ Fmov(w10, s20);
5107   __ Fmov(s30, w10);
5108   __ Fmov(s5, s20);
5109   __ Fmov(d1, -13.0);
5110   __ Fmov(x1, d1);
5111   __ Fmov(d2, x1);
5112   __ Fmov(d4, d1);
5113   __ Fmov(d6, rawbits_to_double(0x0123456789abcdefL));
5114   __ Fmov(s6, s6);
5115   END();
5116 
5117   RUN();
5118 
5119   CHECK_EQUAL_32(float_to_rawbits(1.0), w10);
5120   CHECK_EQUAL_FP32(1.0, s30);
5121   CHECK_EQUAL_FP32(1.0, s5);
5122   CHECK_EQUAL_64(double_to_rawbits(-13.0), x1);
5123   CHECK_EQUAL_FP64(-13.0, d2);
5124   CHECK_EQUAL_FP64(-13.0, d4);
5125   CHECK_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
5126 
5127   TEARDOWN();
5128 }
5129 
5130 
5131 TEST(fadd) {
5132   INIT_V8();
5133   SETUP();
5134 
5135   START();
5136   __ Fmov(s14, -0.0f);
5137   __ Fmov(s15, kFP32PositiveInfinity);
5138   __ Fmov(s16, kFP32NegativeInfinity);
5139   __ Fmov(s17, 3.25f);
5140   __ Fmov(s18, 1.0f);
5141   __ Fmov(s19, 0.0f);
5142 
5143   __ Fmov(d26, -0.0);
5144   __ Fmov(d27, kFP64PositiveInfinity);
5145   __ Fmov(d28, kFP64NegativeInfinity);
5146   __ Fmov(d29, 0.0);
5147   __ Fmov(d30, -2.0);
5148   __ Fmov(d31, 2.25);
5149 
5150   __ Fadd(s0, s17, s18);
5151   __ Fadd(s1, s18, s19);
5152   __ Fadd(s2, s14, s18);
5153   __ Fadd(s3, s15, s18);
5154   __ Fadd(s4, s16, s18);
5155   __ Fadd(s5, s15, s16);
5156   __ Fadd(s6, s16, s15);
5157 
5158   __ Fadd(d7, d30, d31);
5159   __ Fadd(d8, d29, d31);
5160   __ Fadd(d9, d26, d31);
5161   __ Fadd(d10, d27, d31);
5162   __ Fadd(d11, d28, d31);
5163   __ Fadd(d12, d27, d28);
5164   __ Fadd(d13, d28, d27);
5165   END();
5166 
5167   RUN();
5168 
5169   CHECK_EQUAL_FP32(4.25, s0);
5170   CHECK_EQUAL_FP32(1.0, s1);
5171   CHECK_EQUAL_FP32(1.0, s2);
5172   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s3);
5173   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s4);
5174   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5175   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5176   CHECK_EQUAL_FP64(0.25, d7);
5177   CHECK_EQUAL_FP64(2.25, d8);
5178   CHECK_EQUAL_FP64(2.25, d9);
5179   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d10);
5180   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d11);
5181   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5182   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5183 
5184   TEARDOWN();
5185 }
5186 
5187 
5188 TEST(fsub) {
5189   INIT_V8();
5190   SETUP();
5191 
5192   START();
5193   __ Fmov(s14, -0.0f);
5194   __ Fmov(s15, kFP32PositiveInfinity);
5195   __ Fmov(s16, kFP32NegativeInfinity);
5196   __ Fmov(s17, 3.25f);
5197   __ Fmov(s18, 1.0f);
5198   __ Fmov(s19, 0.0f);
5199 
5200   __ Fmov(d26, -0.0);
5201   __ Fmov(d27, kFP64PositiveInfinity);
5202   __ Fmov(d28, kFP64NegativeInfinity);
5203   __ Fmov(d29, 0.0);
5204   __ Fmov(d30, -2.0);
5205   __ Fmov(d31, 2.25);
5206 
5207   __ Fsub(s0, s17, s18);
5208   __ Fsub(s1, s18, s19);
5209   __ Fsub(s2, s14, s18);
5210   __ Fsub(s3, s18, s15);
5211   __ Fsub(s4, s18, s16);
5212   __ Fsub(s5, s15, s15);
5213   __ Fsub(s6, s16, s16);
5214 
5215   __ Fsub(d7, d30, d31);
5216   __ Fsub(d8, d29, d31);
5217   __ Fsub(d9, d26, d31);
5218   __ Fsub(d10, d31, d27);
5219   __ Fsub(d11, d31, d28);
5220   __ Fsub(d12, d27, d27);
5221   __ Fsub(d13, d28, d28);
5222   END();
5223 
5224   RUN();
5225 
5226   CHECK_EQUAL_FP32(2.25, s0);
5227   CHECK_EQUAL_FP32(1.0, s1);
5228   CHECK_EQUAL_FP32(-1.0, s2);
5229   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s3);
5230   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s4);
5231   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5232   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5233   CHECK_EQUAL_FP64(-4.25, d7);
5234   CHECK_EQUAL_FP64(-2.25, d8);
5235   CHECK_EQUAL_FP64(-2.25, d9);
5236   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
5237   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
5238   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5239   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5240 
5241   TEARDOWN();
5242 }
5243 
5244 
5245 TEST(fmul) {
5246   INIT_V8();
5247   SETUP();
5248 
5249   START();
5250   __ Fmov(s14, -0.0f);
5251   __ Fmov(s15, kFP32PositiveInfinity);
5252   __ Fmov(s16, kFP32NegativeInfinity);
5253   __ Fmov(s17, 3.25f);
5254   __ Fmov(s18, 2.0f);
5255   __ Fmov(s19, 0.0f);
5256   __ Fmov(s20, -2.0f);
5257 
5258   __ Fmov(d26, -0.0);
5259   __ Fmov(d27, kFP64PositiveInfinity);
5260   __ Fmov(d28, kFP64NegativeInfinity);
5261   __ Fmov(d29, 0.0);
5262   __ Fmov(d30, -2.0);
5263   __ Fmov(d31, 2.25);
5264 
5265   __ Fmul(s0, s17, s18);
5266   __ Fmul(s1, s18, s19);
5267   __ Fmul(s2, s14, s14);
5268   __ Fmul(s3, s15, s20);
5269   __ Fmul(s4, s16, s20);
5270   __ Fmul(s5, s15, s19);
5271   __ Fmul(s6, s19, s16);
5272 
5273   __ Fmul(d7, d30, d31);
5274   __ Fmul(d8, d29, d31);
5275   __ Fmul(d9, d26, d26);
5276   __ Fmul(d10, d27, d30);
5277   __ Fmul(d11, d28, d30);
5278   __ Fmul(d12, d27, d29);
5279   __ Fmul(d13, d29, d28);
5280   END();
5281 
5282   RUN();
5283 
5284   CHECK_EQUAL_FP32(6.5, s0);
5285   CHECK_EQUAL_FP32(0.0, s1);
5286   CHECK_EQUAL_FP32(0.0, s2);
5287   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s3);
5288   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s4);
5289   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5290   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5291   CHECK_EQUAL_FP64(-4.5, d7);
5292   CHECK_EQUAL_FP64(0.0, d8);
5293   CHECK_EQUAL_FP64(0.0, d9);
5294   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
5295   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
5296   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5297   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5298 
5299   TEARDOWN();
5300 }
5301 
5302 
5303 static void FmaddFmsubHelper(double n, double m, double a,
5304                              double fmadd, double fmsub,
5305                              double fnmadd, double fnmsub) {
5306   SETUP();
5307   START();
5308 
5309   __ Fmov(d0, n);
5310   __ Fmov(d1, m);
5311   __ Fmov(d2, a);
5312   __ Fmadd(d28, d0, d1, d2);
5313   __ Fmsub(d29, d0, d1, d2);
5314   __ Fnmadd(d30, d0, d1, d2);
5315   __ Fnmsub(d31, d0, d1, d2);
5316 
5317   END();
5318   RUN();
5319 
5320   CHECK_EQUAL_FP64(fmadd, d28);
5321   CHECK_EQUAL_FP64(fmsub, d29);
5322   CHECK_EQUAL_FP64(fnmadd, d30);
5323   CHECK_EQUAL_FP64(fnmsub, d31);
5324 
5325   TEARDOWN();
5326 }
5327 
5328 
5329 TEST(fmadd_fmsub_double) {
5330   INIT_V8();
5331 
5332   // It's hard to check the result of fused operations because the only way to
5333   // calculate the result is to use fma, which is what the simulator uses anyway.
5334   // TODO(jbramley): Add tests to check behaviour against a hardware trace.
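  //
  // As a rough cross-check (a sketch only, not executed by the test), the
  // expected values correspond to the C library's fused primitive:
  //   fmadd  == std::fma(n, m, a)     //  a + (n * m)
  //   fmsub  == std::fma(-n, m, a)    //  a - (n * m)
  //   fnmadd == std::fma(-n, m, -a)   // -a - (n * m)
  //   fnmsub == std::fma(n, m, -a)    // -a + (n * m)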
5335 
5336   // Basic operation.
5337   FmaddFmsubHelper(1.0, 2.0, 3.0, 5.0, 1.0, -5.0, -1.0);
5338   FmaddFmsubHelper(-1.0, 2.0, 3.0, 1.0, 5.0, -1.0, -5.0);
5339 
5340   // Check the sign of exact zeroes.
5341   //               n     m     a     fmadd  fmsub  fnmadd fnmsub
5342   FmaddFmsubHelper(-0.0, +0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
5343   FmaddFmsubHelper(+0.0, +0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
5344   FmaddFmsubHelper(+0.0, +0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
5345   FmaddFmsubHelper(-0.0, +0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
5346   FmaddFmsubHelper(+0.0, -0.0, -0.0, -0.0,  +0.0,  +0.0,  +0.0);
5347   FmaddFmsubHelper(-0.0, -0.0, -0.0, +0.0,  -0.0,  +0.0,  +0.0);
5348   FmaddFmsubHelper(-0.0, -0.0, +0.0, +0.0,  +0.0,  -0.0,  +0.0);
5349   FmaddFmsubHelper(+0.0, -0.0, +0.0, +0.0,  +0.0,  +0.0,  -0.0);
5350 
5351   // Check NaN generation.
5352   FmaddFmsubHelper(kFP64PositiveInfinity, 0.0, 42.0,
5353                    kFP64DefaultNaN, kFP64DefaultNaN,
5354                    kFP64DefaultNaN, kFP64DefaultNaN);
5355   FmaddFmsubHelper(0.0, kFP64PositiveInfinity, 42.0,
5356                    kFP64DefaultNaN, kFP64DefaultNaN,
5357                    kFP64DefaultNaN, kFP64DefaultNaN);
5358   FmaddFmsubHelper(kFP64PositiveInfinity, 1.0, kFP64PositiveInfinity,
5359                    kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
5360                    kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
5361                    kFP64NegativeInfinity,   // -inf + (-inf * 1) = -inf
5362                    kFP64DefaultNaN);        // -inf + ( inf * 1) = NaN
5363   FmaddFmsubHelper(kFP64NegativeInfinity, 1.0, kFP64PositiveInfinity,
5364                    kFP64DefaultNaN,         //  inf + (-inf * 1) = NaN
5365                    kFP64PositiveInfinity,   //  inf + ( inf * 1) = inf
5366                    kFP64DefaultNaN,         // -inf + ( inf * 1) = NaN
5367                    kFP64NegativeInfinity);  // -inf + (-inf * 1) = -inf
5368 }
5369 
5370 
5371 static void FmaddFmsubHelper(float n, float m, float a,
5372                              float fmadd, float fmsub,
5373                              float fnmadd, float fnmsub) {
5374   SETUP();
5375   START();
5376 
5377   __ Fmov(s0, n);
5378   __ Fmov(s1, m);
5379   __ Fmov(s2, a);
5380   __ Fmadd(s28, s0, s1, s2);
5381   __ Fmsub(s29, s0, s1, s2);
5382   __ Fnmadd(s30, s0, s1, s2);
5383   __ Fnmsub(s31, s0, s1, s2);
5384 
5385   END();
5386   RUN();
5387 
5388   CHECK_EQUAL_FP32(fmadd, s28);
5389   CHECK_EQUAL_FP32(fmsub, s29);
5390   CHECK_EQUAL_FP32(fnmadd, s30);
5391   CHECK_EQUAL_FP32(fnmsub, s31);
5392 
5393   TEARDOWN();
5394 }
5395 
5396 
5397 TEST(fmadd_fmsub_float) {
5398   INIT_V8();
5399   // It's hard to check the result of fused operations because the only way to
5400   // calculate the result is to use fma, which is what the simulator uses anyway.
5401   // TODO(jbramley): Add tests to check behaviour against a hardware trace.
5402 
5403   // Basic operation.
5404   FmaddFmsubHelper(1.0f, 2.0f, 3.0f, 5.0f, 1.0f, -5.0f, -1.0f);
5405   FmaddFmsubHelper(-1.0f, 2.0f, 3.0f, 1.0f, 5.0f, -1.0f, -5.0f);
5406 
5407   // Check the sign of exact zeroes.
5408   //               n      m      a      fmadd  fmsub  fnmadd fnmsub
5409   FmaddFmsubHelper(-0.0f, +0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5410   FmaddFmsubHelper(+0.0f, +0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5411   FmaddFmsubHelper(+0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5412   FmaddFmsubHelper(-0.0f, +0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5413   FmaddFmsubHelper(+0.0f, -0.0f, -0.0f, -0.0f, +0.0f, +0.0f, +0.0f);
5414   FmaddFmsubHelper(-0.0f, -0.0f, -0.0f, +0.0f, -0.0f, +0.0f, +0.0f);
5415   FmaddFmsubHelper(-0.0f, -0.0f, +0.0f, +0.0f, +0.0f, -0.0f, +0.0f);
5416   FmaddFmsubHelper(+0.0f, -0.0f, +0.0f, +0.0f, +0.0f, +0.0f, -0.0f);
5417 
5418   // Check NaN generation.
5419   FmaddFmsubHelper(kFP32PositiveInfinity, 0.0f, 42.0f,
5420                    kFP32DefaultNaN, kFP32DefaultNaN,
5421                    kFP32DefaultNaN, kFP32DefaultNaN);
5422   FmaddFmsubHelper(0.0f, kFP32PositiveInfinity, 42.0f,
5423                    kFP32DefaultNaN, kFP32DefaultNaN,
5424                    kFP32DefaultNaN, kFP32DefaultNaN);
5425   FmaddFmsubHelper(kFP32PositiveInfinity, 1.0f, kFP32PositiveInfinity,
5426                    kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
5427                    kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
5428                    kFP32NegativeInfinity,   // -inf + (-inf * 1) = -inf
5429                    kFP32DefaultNaN);        // -inf + ( inf * 1) = NaN
5430   FmaddFmsubHelper(kFP32NegativeInfinity, 1.0f, kFP32PositiveInfinity,
5431                    kFP32DefaultNaN,         //  inf + (-inf * 1) = NaN
5432                    kFP32PositiveInfinity,   //  inf + ( inf * 1) = inf
5433                    kFP32DefaultNaN,         // -inf + ( inf * 1) = NaN
5434                    kFP32NegativeInfinity);  // -inf + (-inf * 1) = -inf
5435 }
5436 
5437 
5438 TEST(fmadd_fmsub_double_nans) {
5439   INIT_V8();
5440   // Make sure that NaN propagation works correctly.
5441   double s1 = rawbits_to_double(0x7ff5555511111111);
5442   double s2 = rawbits_to_double(0x7ff5555522222222);
5443   double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
5444   double q1 = rawbits_to_double(0x7ffaaaaa11111111);
5445   double q2 = rawbits_to_double(0x7ffaaaaa22222222);
5446   double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
5447   CHECK(IsSignallingNaN(s1));
5448   CHECK(IsSignallingNaN(s2));
5449   CHECK(IsSignallingNaN(sa));
5450   CHECK(IsQuietNaN(q1));
5451   CHECK(IsQuietNaN(q2));
5452   CHECK(IsQuietNaN(qa));
5453 
5454   // The input NaNs after passing through ProcessNaN.
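  // (Judging by the bit patterns below, a signalling NaN is quietened by
  // setting the most significant fraction bit while preserving the payload,
  // e.g. 0x7ff55555... becomes 0x7ffd5555...; quiet NaNs are unchanged.)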
5455   double s1_proc = rawbits_to_double(0x7ffd555511111111);
5456   double s2_proc = rawbits_to_double(0x7ffd555522222222);
5457   double sa_proc = rawbits_to_double(0x7ffd5555aaaaaaaa);
5458   double q1_proc = q1;
5459   double q2_proc = q2;
5460   double qa_proc = qa;
5461   CHECK(IsQuietNaN(s1_proc));
5462   CHECK(IsQuietNaN(s2_proc));
5463   CHECK(IsQuietNaN(sa_proc));
5464   CHECK(IsQuietNaN(q1_proc));
5465   CHECK(IsQuietNaN(q2_proc));
5466   CHECK(IsQuietNaN(qa_proc));
5467 
5468   // Negated NaNs as it would be done on ARMv8 hardware.
5469   double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
5470   double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
5471   double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
5472   double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
5473   CHECK(IsQuietNaN(s1_proc_neg));
5474   CHECK(IsQuietNaN(sa_proc_neg));
5475   CHECK(IsQuietNaN(q1_proc_neg));
5476   CHECK(IsQuietNaN(qa_proc_neg));
5477 
5478   // Quiet NaNs are propagated.
5479   FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5480   FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5481   FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5482   FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5483   FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5484   FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5485   FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5486 
5487   // Signalling NaNs are propagated, and made quiet.
5488   FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5489   FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
5490   FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5491   FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5492   FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5493   FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5494   FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5495 
5496   // Signalling NaNs take precedence over quiet NaNs.
5497   FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5498   FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
5499   FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5500   FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5501   FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5502   FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5503   FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5504 
5505   // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5506   FmaddFmsubHelper(0, kFP64PositiveInfinity, qa,
5507                    kFP64DefaultNaN, kFP64DefaultNaN,
5508                    kFP64DefaultNaN, kFP64DefaultNaN);
5509   FmaddFmsubHelper(kFP64PositiveInfinity, 0, qa,
5510                    kFP64DefaultNaN, kFP64DefaultNaN,
5511                    kFP64DefaultNaN, kFP64DefaultNaN);
5512   FmaddFmsubHelper(0, kFP64NegativeInfinity, qa,
5513                    kFP64DefaultNaN, kFP64DefaultNaN,
5514                    kFP64DefaultNaN, kFP64DefaultNaN);
5515   FmaddFmsubHelper(kFP64NegativeInfinity, 0, qa,
5516                    kFP64DefaultNaN, kFP64DefaultNaN,
5517                    kFP64DefaultNaN, kFP64DefaultNaN);
5518 }
5519 
5520 
5521 TEST(fmadd_fmsub_float_nans) {
5522   INIT_V8();
5523   // Make sure that NaN propagation works correctly.
5524   float s1 = rawbits_to_float(0x7f951111);
5525   float s2 = rawbits_to_float(0x7f952222);
5526   float sa = rawbits_to_float(0x7f95aaaa);
5527   float q1 = rawbits_to_float(0x7fea1111);
5528   float q2 = rawbits_to_float(0x7fea2222);
5529   float qa = rawbits_to_float(0x7feaaaaa);
5530   CHECK(IsSignallingNaN(s1));
5531   CHECK(IsSignallingNaN(s2));
5532   CHECK(IsSignallingNaN(sa));
5533   CHECK(IsQuietNaN(q1));
5534   CHECK(IsQuietNaN(q2));
5535   CHECK(IsQuietNaN(qa));
5536 
5537   // The input NaNs after passing through ProcessNaN.
5538   float s1_proc = rawbits_to_float(0x7fd51111);
5539   float s2_proc = rawbits_to_float(0x7fd52222);
5540   float sa_proc = rawbits_to_float(0x7fd5aaaa);
5541   float q1_proc = q1;
5542   float q2_proc = q2;
5543   float qa_proc = qa;
5544   CHECK(IsQuietNaN(s1_proc));
5545   CHECK(IsQuietNaN(s2_proc));
5546   CHECK(IsQuietNaN(sa_proc));
5547   CHECK(IsQuietNaN(q1_proc));
5548   CHECK(IsQuietNaN(q2_proc));
5549   CHECK(IsQuietNaN(qa_proc));
5550 
5551   // Negated NaNs as it would be done on ARMv8 hardware.
5552   float s1_proc_neg = rawbits_to_float(0xffd51111);
5553   float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
5554   float q1_proc_neg = rawbits_to_float(0xffea1111);
5555   float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
5556   CHECK(IsQuietNaN(s1_proc_neg));
5557   CHECK(IsQuietNaN(sa_proc_neg));
5558   CHECK(IsQuietNaN(q1_proc_neg));
5559   CHECK(IsQuietNaN(qa_proc_neg));
5560 
5561   // Quiet NaNs are propagated.
5562   FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5563   FmaddFmsubHelper(0, q2, 0, q2_proc, q2_proc, q2_proc, q2_proc);
5564   FmaddFmsubHelper(0, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5565   FmaddFmsubHelper(q1, q2, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
5566   FmaddFmsubHelper(0, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5567   FmaddFmsubHelper(q1, 0, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5568   FmaddFmsubHelper(q1, q2, qa, qa_proc, qa_proc, qa_proc_neg, qa_proc_neg);
5569 
5570   // Signalling NaNs are propagated, and made quiet.
5571   FmaddFmsubHelper(s1, 0, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5572   FmaddFmsubHelper(0, s2, 0, s2_proc, s2_proc, s2_proc, s2_proc);
5573   FmaddFmsubHelper(0, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5574   FmaddFmsubHelper(s1, s2, 0, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5575   FmaddFmsubHelper(0, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5576   FmaddFmsubHelper(s1, 0, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5577   FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5578 
5579   // Signalling NaNs take precedence over quiet NaNs.
5580   FmaddFmsubHelper(s1, q2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5581   FmaddFmsubHelper(q1, s2, qa, s2_proc, s2_proc, s2_proc, s2_proc);
5582   FmaddFmsubHelper(q1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5583   FmaddFmsubHelper(s1, s2, qa, s1_proc, s1_proc_neg, s1_proc_neg, s1_proc);
5584   FmaddFmsubHelper(q1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5585   FmaddFmsubHelper(s1, q2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5586   FmaddFmsubHelper(s1, s2, sa, sa_proc, sa_proc, sa_proc_neg, sa_proc_neg);
5587 
5588   // A NaN generated by the intermediate op1 * op2 overrides a quiet NaN in a.
5589   FmaddFmsubHelper(0, kFP32PositiveInfinity, qa,
5590                    kFP32DefaultNaN, kFP32DefaultNaN,
5591                    kFP32DefaultNaN, kFP32DefaultNaN);
5592   FmaddFmsubHelper(kFP32PositiveInfinity, 0, qa,
5593                    kFP32DefaultNaN, kFP32DefaultNaN,
5594                    kFP32DefaultNaN, kFP32DefaultNaN);
5595   FmaddFmsubHelper(0, kFP32NegativeInfinity, qa,
5596                    kFP32DefaultNaN, kFP32DefaultNaN,
5597                    kFP32DefaultNaN, kFP32DefaultNaN);
5598   FmaddFmsubHelper(kFP32NegativeInfinity, 0, qa,
5599                    kFP32DefaultNaN, kFP32DefaultNaN,
5600                    kFP32DefaultNaN, kFP32DefaultNaN);
5601 }
5602 
5603 
5604 TEST(fdiv) {
5605   INIT_V8();
5606   SETUP();
5607 
5608   START();
5609   __ Fmov(s14, -0.0f);
5610   __ Fmov(s15, kFP32PositiveInfinity);
5611   __ Fmov(s16, kFP32NegativeInfinity);
5612   __ Fmov(s17, 3.25f);
5613   __ Fmov(s18, 2.0f);
5614   __ Fmov(s19, 2.0f);
5615   __ Fmov(s20, -2.0f);
5616 
5617   __ Fmov(d26, -0.0);
5618   __ Fmov(d27, kFP64PositiveInfinity);
5619   __ Fmov(d28, kFP64NegativeInfinity);
5620   __ Fmov(d29, 0.0);
5621   __ Fmov(d30, -2.0);
5622   __ Fmov(d31, 2.25);
5623 
5624   __ Fdiv(s0, s17, s18);
5625   __ Fdiv(s1, s18, s19);
5626   __ Fdiv(s2, s14, s18);
5627   __ Fdiv(s3, s18, s15);
5628   __ Fdiv(s4, s18, s16);
5629   __ Fdiv(s5, s15, s16);
5630   __ Fdiv(s6, s14, s14);
5631 
5632   __ Fdiv(d7, d31, d30);
5633   __ Fdiv(d8, d29, d31);
5634   __ Fdiv(d9, d26, d31);
5635   __ Fdiv(d10, d31, d27);
5636   __ Fdiv(d11, d31, d28);
5637   __ Fdiv(d12, d28, d27);
5638   __ Fdiv(d13, d29, d29);
5639   END();
5640 
5641   RUN();
5642 
5643   CHECK_EQUAL_FP32(1.625f, s0);
5644   CHECK_EQUAL_FP32(1.0f, s1);
5645   CHECK_EQUAL_FP32(-0.0f, s2);
5646   CHECK_EQUAL_FP32(0.0f, s3);
5647   CHECK_EQUAL_FP32(-0.0f, s4);
5648   CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
5649   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
5650   CHECK_EQUAL_FP64(-1.125, d7);
5651   CHECK_EQUAL_FP64(0.0, d8);
5652   CHECK_EQUAL_FP64(-0.0, d9);
5653   CHECK_EQUAL_FP64(0.0, d10);
5654   CHECK_EQUAL_FP64(-0.0, d11);
5655   CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
5656   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
5657 
5658   TEARDOWN();
5659 }
5660 
5661 
5662 static float MinMaxHelper(float n,
5663                           float m,
5664                           bool min,
5665                           float quiet_nan_substitute = 0.0) {
5666   uint32_t raw_n = float_to_rawbits(n);
5667   uint32_t raw_m = float_to_rawbits(m);
5668 
5669   if (std::isnan(n) && ((raw_n & kSQuietNanMask) == 0)) {
5670     // n is signalling NaN.
5671     return rawbits_to_float(raw_n | kSQuietNanMask);
5672   } else if (std::isnan(m) && ((raw_m & kSQuietNanMask) == 0)) {
5673     // m is signalling NaN.
5674     return rawbits_to_float(raw_m | kSQuietNanMask);
5675   } else if (quiet_nan_substitute == 0.0) {
5676     if (std::isnan(n)) {
5677       // n is quiet NaN.
5678       return n;
5679     } else if (std::isnan(m)) {
5680       // m is quiet NaN.
5681       return m;
5682     }
5683   } else {
5684     // Substitute n or m if one is quiet, but not both.
5685     if (std::isnan(n) && !std::isnan(m)) {
5686       // n is quiet NaN: replace with substitute.
5687       n = quiet_nan_substitute;
5688     } else if (!std::isnan(n) && std::isnan(m)) {
5689       // m is quiet NaN: replace with substitute.
5690       m = quiet_nan_substitute;
5691     }
5692   }
5693 
5694   if ((n == 0.0) && (m == 0.0) &&
5695       (copysign(1.0, n) != copysign(1.0, m))) {
5696     return min ? -0.0 : 0.0;
5697   }
5698 
5699   return min ? fminf(n, m) : fmaxf(n, m);
5700 }
5701 
5702 
5703 static double MinMaxHelper(double n,
5704                            double m,
5705                            bool min,
5706                            double quiet_nan_substitute = 0.0) {
5707   uint64_t raw_n = double_to_rawbits(n);
5708   uint64_t raw_m = double_to_rawbits(m);
5709 
5710   if (std::isnan(n) && ((raw_n & kDQuietNanMask) == 0)) {
5711     // n is signalling NaN.
5712     return rawbits_to_double(raw_n | kDQuietNanMask);
5713   } else if (std::isnan(m) && ((raw_m & kDQuietNanMask) == 0)) {
5714     // m is signalling NaN.
5715     return rawbits_to_double(raw_m | kDQuietNanMask);
5716   } else if (quiet_nan_substitute == 0.0) {
5717     if (std::isnan(n)) {
5718       // n is quiet NaN.
5719       return n;
5720     } else if (std::isnan(m)) {
5721       // m is quiet NaN.
5722       return m;
5723     }
5724   } else {
5725     // Substitute n or m if one is quiet, but not both.
5726     if (std::isnan(n) && !std::isnan(m)) {
5727       // n is quiet NaN: replace with substitute.
5728       n = quiet_nan_substitute;
5729     } else if (!std::isnan(n) && std::isnan(m)) {
5730       // m is quiet NaN: replace with substitute.
5731       m = quiet_nan_substitute;
5732     }
5733   }
5734 
5735   if ((n == 0.0) && (m == 0.0) &&
5736       (copysign(1.0, n) != copysign(1.0, m))) {
5737     return min ? -0.0 : 0.0;
5738   }
5739 
5740   return min ? fmin(n, m) : fmax(n, m);
5741 }
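// Illustration of the reference semantics: MinMaxHelper(0.0, snan, true)
// returns snan with its quiet bit set, mirroring Fmin, whereas
// MinMaxHelper(0.0, qnan, true, kFP64PositiveInfinity) returns 0.0, mirroring
// Fminnm, because the lone quiet NaN is replaced by the substitute before the
// comparison.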
5742 
5743 
5744 static void FminFmaxDoubleHelper(double n, double m, double min, double max,
5745                                  double minnm, double maxnm) {
5746   SETUP();
5747 
5748   START();
5749   __ Fmov(d0, n);
5750   __ Fmov(d1, m);
5751   __ Fmin(d28, d0, d1);
5752   __ Fmax(d29, d0, d1);
5753   __ Fminnm(d30, d0, d1);
5754   __ Fmaxnm(d31, d0, d1);
5755   END();
5756 
5757   RUN();
5758 
5759   CHECK_EQUAL_FP64(min, d28);
5760   CHECK_EQUAL_FP64(max, d29);
5761   CHECK_EQUAL_FP64(minnm, d30);
5762   CHECK_EQUAL_FP64(maxnm, d31);
5763 
5764   TEARDOWN();
5765 }
5766 
5767 
5768 TEST(fmax_fmin_d) {
5769   INIT_V8();
5770   // Use non-standard NaNs to check that the payload bits are preserved.
5771   double snan = rawbits_to_double(0x7ff5555512345678);
5772   double qnan = rawbits_to_double(0x7ffaaaaa87654321);
5773 
5774   double snan_processed = rawbits_to_double(0x7ffd555512345678);
5775   double qnan_processed = qnan;
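  // Processing a signalling NaN sets the most-significant mantissa bit (the
  // quiet bit), which is why 0x7ff5555512345678 becomes 0x7ffd555512345678;
  // quiet NaNs pass through with their payload untouched.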
5776 
5777   CHECK(IsSignallingNaN(snan));
5778   CHECK(IsQuietNaN(qnan));
5779   CHECK(IsQuietNaN(snan_processed));
5780   CHECK(IsQuietNaN(qnan_processed));
5781 
5782   // Bootstrap tests.
5783   FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
5784   FminFmaxDoubleHelper(0, 1, 0, 1, 0, 1);
5785   FminFmaxDoubleHelper(kFP64PositiveInfinity, kFP64NegativeInfinity,
5786                        kFP64NegativeInfinity, kFP64PositiveInfinity,
5787                        kFP64NegativeInfinity, kFP64PositiveInfinity);
5788   FminFmaxDoubleHelper(snan, 0,
5789                        snan_processed, snan_processed,
5790                        snan_processed, snan_processed);
5791   FminFmaxDoubleHelper(0, snan,
5792                        snan_processed, snan_processed,
5793                        snan_processed, snan_processed);
5794   FminFmaxDoubleHelper(qnan, 0,
5795                        qnan_processed, qnan_processed,
5796                        0, 0);
5797   FminFmaxDoubleHelper(0, qnan,
5798                        qnan_processed, qnan_processed,
5799                        0, 0);
5800   FminFmaxDoubleHelper(qnan, snan,
5801                        snan_processed, snan_processed,
5802                        snan_processed, snan_processed);
5803   FminFmaxDoubleHelper(snan, qnan,
5804                        snan_processed, snan_processed,
5805                        snan_processed, snan_processed);
5806 
5807   // Iterate over all combinations of inputs.
5808   double inputs[] = { DBL_MAX, DBL_MIN, 1.0, 0.0,
5809                       -DBL_MAX, -DBL_MIN, -1.0, -0.0,
5810                       kFP64PositiveInfinity, kFP64NegativeInfinity,
5811                       kFP64QuietNaN, kFP64SignallingNaN };
5812 
5813   const int count = sizeof(inputs) / sizeof(inputs[0]);
5814 
5815   for (int in = 0; in < count; in++) {
5816     double n = inputs[in];
5817     for (int im = 0; im < count; im++) {
5818       double m = inputs[im];
5819       FminFmaxDoubleHelper(n, m,
5820                            MinMaxHelper(n, m, true),
5821                            MinMaxHelper(n, m, false),
5822                            MinMaxHelper(n, m, true, kFP64PositiveInfinity),
5823                            MinMaxHelper(n, m, false, kFP64NegativeInfinity));
5824     }
5825   }
5826 }
5827 
5828 
5829 static void FminFmaxFloatHelper(float n, float m, float min, float max,
5830                                 float minnm, float maxnm) {
5831   SETUP();
5832 
5833   START();
5834   __ Fmov(s0, n);
5835   __ Fmov(s1, m);
5836   __ Fmin(s28, s0, s1);
5837   __ Fmax(s29, s0, s1);
5838   __ Fminnm(s30, s0, s1);
5839   __ Fmaxnm(s31, s0, s1);
5840   END();
5841 
5842   RUN();
5843 
5844   CHECK_EQUAL_FP32(min, s28);
5845   CHECK_EQUAL_FP32(max, s29);
5846   CHECK_EQUAL_FP32(minnm, s30);
5847   CHECK_EQUAL_FP32(maxnm, s31);
5848 
5849   TEARDOWN();
5850 }
5851 
5852 
5853 TEST(fmax_fmin_s) {
5854   INIT_V8();
5855   // Use non-standard NaNs to check that the payload bits are preserved.
5856   float snan = rawbits_to_float(0x7f951234);
5857   float qnan = rawbits_to_float(0x7fea8765);
5858 
5859   float snan_processed = rawbits_to_float(0x7fd51234);
5860   float qnan_processed = qnan;
5861 
5862   CHECK(IsSignallingNaN(snan));
5863   CHECK(IsQuietNaN(qnan));
5864   CHECK(IsQuietNaN(snan_processed));
5865   CHECK(IsQuietNaN(qnan_processed));
5866 
5867   // Bootstrap tests.
5868   FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
5869   FminFmaxFloatHelper(0, 1, 0, 1, 0, 1);
5870   FminFmaxFloatHelper(kFP32PositiveInfinity, kFP32NegativeInfinity,
5871                       kFP32NegativeInfinity, kFP32PositiveInfinity,
5872                       kFP32NegativeInfinity, kFP32PositiveInfinity);
5873   FminFmaxFloatHelper(snan, 0,
5874                       snan_processed, snan_processed,
5875                       snan_processed, snan_processed);
5876   FminFmaxFloatHelper(0, snan,
5877                       snan_processed, snan_processed,
5878                       snan_processed, snan_processed);
5879   FminFmaxFloatHelper(qnan, 0,
5880                       qnan_processed, qnan_processed,
5881                       0, 0);
5882   FminFmaxFloatHelper(0, qnan,
5883                       qnan_processed, qnan_processed,
5884                       0, 0);
5885   FminFmaxFloatHelper(qnan, snan,
5886                       snan_processed, snan_processed,
5887                       snan_processed, snan_processed);
5888   FminFmaxFloatHelper(snan, qnan,
5889                       snan_processed, snan_processed,
5890                       snan_processed, snan_processed);
5891 
5892   // Iterate over all combinations of inputs.
5893   float inputs[] = { FLT_MAX, FLT_MIN, 1.0, 0.0,
5894                      -FLT_MAX, -FLT_MIN, -1.0, -0.0,
5895                      kFP32PositiveInfinity, kFP32NegativeInfinity,
5896                      kFP32QuietNaN, kFP32SignallingNaN };
5897 
5898   const int count = sizeof(inputs) / sizeof(inputs[0]);
5899 
5900   for (int in = 0; in < count; in++) {
5901     float n = inputs[in];
5902     for (int im = 0; im < count; im++) {
5903       float m = inputs[im];
5904       FminFmaxFloatHelper(n, m,
5905                           MinMaxHelper(n, m, true),
5906                           MinMaxHelper(n, m, false),
5907                           MinMaxHelper(n, m, true, kFP32PositiveInfinity),
5908                           MinMaxHelper(n, m, false, kFP32NegativeInfinity));
5909     }
5910   }
5911 }
5912 
5913 
5914 TEST(fccmp) {
5915   INIT_V8();
5916   SETUP();
5917 
5918   START();
5919   __ Fmov(s16, 0.0);
5920   __ Fmov(s17, 0.5);
5921   __ Fmov(d18, -0.5);
5922   __ Fmov(d19, -1.0);
5923   __ Mov(x20, 0);
5924 
5925   __ Cmp(x20, 0);
5926   __ Fccmp(s16, s16, NoFlag, eq);
5927   __ Mrs(x0, NZCV);
5928 
5929   __ Cmp(x20, 0);
5930   __ Fccmp(s16, s16, VFlag, ne);
5931   __ Mrs(x1, NZCV);
5932 
5933   __ Cmp(x20, 0);
5934   __ Fccmp(s16, s17, CFlag, ge);
5935   __ Mrs(x2, NZCV);
5936 
5937   __ Cmp(x20, 0);
5938   __ Fccmp(s16, s17, CVFlag, lt);
5939   __ Mrs(x3, NZCV);
5940 
5941   __ Cmp(x20, 0);
5942   __ Fccmp(d18, d18, ZFlag, le);
5943   __ Mrs(x4, NZCV);
5944 
5945   __ Cmp(x20, 0);
5946   __ Fccmp(d18, d18, ZVFlag, gt);
5947   __ Mrs(x5, NZCV);
5948 
5949   __ Cmp(x20, 0);
5950   __ Fccmp(d18, d19, ZCVFlag, ls);
5951   __ Mrs(x6, NZCV);
5952 
5953   __ Cmp(x20, 0);
5954   __ Fccmp(d18, d19, NFlag, hi);
5955   __ Mrs(x7, NZCV);
5956 
5957   __ fccmp(s16, s16, NFlag, al);
5958   __ Mrs(x8, NZCV);
5959 
5960   __ fccmp(d18, d18, NFlag, nv);
5961   __ Mrs(x9, NZCV);
5962 
5963   END();
5964 
5965   RUN();
5966 
5967   CHECK_EQUAL_32(ZCFlag, w0);
5968   CHECK_EQUAL_32(VFlag, w1);
5969   CHECK_EQUAL_32(NFlag, w2);
5970   CHECK_EQUAL_32(CVFlag, w3);
5971   CHECK_EQUAL_32(ZCFlag, w4);
5972   CHECK_EQUAL_32(ZVFlag, w5);
5973   CHECK_EQUAL_32(CFlag, w6);
5974   CHECK_EQUAL_32(NFlag, w7);
5975   CHECK_EQUAL_32(ZCFlag, w8);
5976   CHECK_EQUAL_32(ZCFlag, w9);
5977 
5978   TEARDOWN();
5979 }
5980 
5981 
5982 TEST(fcmp) {
5983   INIT_V8();
5984   SETUP();
5985 
5986   START();
5987 
5988   // Some of these tests require a floating-point scratch register assigned to
5989   // the macro assembler, but most do not.
5990   {
5991     // We're going to mess around with the available scratch registers in this
5992     // test. A UseScratchRegisterScope will make sure that they are restored to
5993     // the default values once we're finished.
5994     UseScratchRegisterScope temps(&masm);
5995     masm.FPTmpList()->set_list(0);
5996 
5997     __ Fmov(s8, 0.0);
5998     __ Fmov(s9, 0.5);
5999     __ Mov(w18, 0x7f800001);  // Single precision NaN.
6000     __ Fmov(s18, w18);
6001 
6002     __ Fcmp(s8, s8);
6003     __ Mrs(x0, NZCV);
6004     __ Fcmp(s8, s9);
6005     __ Mrs(x1, NZCV);
6006     __ Fcmp(s9, s8);
6007     __ Mrs(x2, NZCV);
6008     __ Fcmp(s8, s18);
6009     __ Mrs(x3, NZCV);
6010     __ Fcmp(s18, s18);
6011     __ Mrs(x4, NZCV);
6012     __ Fcmp(s8, 0.0);
6013     __ Mrs(x5, NZCV);
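    // d0 is handed back to the macro assembler only around the comparisons
    // against non-zero immediates, which need a floating-point scratch
    // register to hold the immediate; comparisons against 0.0 encode the zero
    // directly and need no scratch register.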
6014     masm.FPTmpList()->set_list(d0.Bit());
6015     __ Fcmp(s8, 255.0);
6016     masm.FPTmpList()->set_list(0);
6017     __ Mrs(x6, NZCV);
6018 
6019     __ Fmov(d19, 0.0);
6020     __ Fmov(d20, 0.5);
6021     __ Mov(x21, 0x7ff0000000000001UL);   // Double precision NaN.
6022     __ Fmov(d21, x21);
6023 
6024     __ Fcmp(d19, d19);
6025     __ Mrs(x10, NZCV);
6026     __ Fcmp(d19, d20);
6027     __ Mrs(x11, NZCV);
6028     __ Fcmp(d20, d19);
6029     __ Mrs(x12, NZCV);
6030     __ Fcmp(d19, d21);
6031     __ Mrs(x13, NZCV);
6032     __ Fcmp(d21, d21);
6033     __ Mrs(x14, NZCV);
6034     __ Fcmp(d19, 0.0);
6035     __ Mrs(x15, NZCV);
6036     masm.FPTmpList()->set_list(d0.Bit());
6037     __ Fcmp(d19, 12.3456);
6038     masm.FPTmpList()->set_list(0);
6039     __ Mrs(x16, NZCV);
6040   }
6041 
6042   END();
6043 
6044   RUN();
6045 
6046   CHECK_EQUAL_32(ZCFlag, w0);
6047   CHECK_EQUAL_32(NFlag, w1);
6048   CHECK_EQUAL_32(CFlag, w2);
6049   CHECK_EQUAL_32(CVFlag, w3);
6050   CHECK_EQUAL_32(CVFlag, w4);
6051   CHECK_EQUAL_32(ZCFlag, w5);
6052   CHECK_EQUAL_32(NFlag, w6);
6053   CHECK_EQUAL_32(ZCFlag, w10);
6054   CHECK_EQUAL_32(NFlag, w11);
6055   CHECK_EQUAL_32(CFlag, w12);
6056   CHECK_EQUAL_32(CVFlag, w13);
6057   CHECK_EQUAL_32(CVFlag, w14);
6058   CHECK_EQUAL_32(ZCFlag, w15);
6059   CHECK_EQUAL_32(NFlag, w16);
6060 
6061   TEARDOWN();
6062 }
6063 
6064 
6065 TEST(fcsel) {
6066   INIT_V8();
6067   SETUP();
6068 
6069   START();
6070   __ Mov(x16, 0);
6071   __ Fmov(s16, 1.0);
6072   __ Fmov(s17, 2.0);
6073   __ Fmov(d18, 3.0);
6074   __ Fmov(d19, 4.0);
6075 
6076   __ Cmp(x16, 0);
6077   __ Fcsel(s0, s16, s17, eq);
6078   __ Fcsel(s1, s16, s17, ne);
6079   __ Fcsel(d2, d18, d19, eq);
6080   __ Fcsel(d3, d18, d19, ne);
6081   __ fcsel(s4, s16, s17, al);
6082   __ fcsel(d5, d18, d19, nv);
6083   END();
6084 
6085   RUN();
6086 
6087   CHECK_EQUAL_FP32(1.0, s0);
6088   CHECK_EQUAL_FP32(2.0, s1);
6089   CHECK_EQUAL_FP64(3.0, d2);
6090   CHECK_EQUAL_FP64(4.0, d3);
6091   CHECK_EQUAL_FP32(1.0, s4);
6092   CHECK_EQUAL_FP64(3.0, d5);
6093 
6094   TEARDOWN();
6095 }
6096 
6097 
6098 TEST(fneg) {
6099   INIT_V8();
6100   SETUP();
6101 
6102   START();
6103   __ Fmov(s16, 1.0);
6104   __ Fmov(s17, 0.0);
6105   __ Fmov(s18, kFP32PositiveInfinity);
6106   __ Fmov(d19, 1.0);
6107   __ Fmov(d20, 0.0);
6108   __ Fmov(d21, kFP64PositiveInfinity);
6109 
6110   __ Fneg(s0, s16);
6111   __ Fneg(s1, s0);
6112   __ Fneg(s2, s17);
6113   __ Fneg(s3, s2);
6114   __ Fneg(s4, s18);
6115   __ Fneg(s5, s4);
6116   __ Fneg(d6, d19);
6117   __ Fneg(d7, d6);
6118   __ Fneg(d8, d20);
6119   __ Fneg(d9, d8);
6120   __ Fneg(d10, d21);
6121   __ Fneg(d11, d10);
6122   END();
6123 
6124   RUN();
6125 
6126   CHECK_EQUAL_FP32(-1.0, s0);
6127   CHECK_EQUAL_FP32(1.0, s1);
6128   CHECK_EQUAL_FP32(-0.0, s2);
6129   CHECK_EQUAL_FP32(0.0, s3);
6130   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s4);
6131   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
6132   CHECK_EQUAL_FP64(-1.0, d6);
6133   CHECK_EQUAL_FP64(1.0, d7);
6134   CHECK_EQUAL_FP64(-0.0, d8);
6135   CHECK_EQUAL_FP64(0.0, d9);
6136   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
6137   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
6138 
6139   TEARDOWN();
6140 }
6141 
6142 
6143 TEST(fabs) {
6144   INIT_V8();
6145   SETUP();
6146 
6147   START();
6148   __ Fmov(s16, -1.0);
6149   __ Fmov(s17, -0.0);
6150   __ Fmov(s18, kFP32NegativeInfinity);
6151   __ Fmov(d19, -1.0);
6152   __ Fmov(d20, -0.0);
6153   __ Fmov(d21, kFP64NegativeInfinity);
6154 
6155   __ Fabs(s0, s16);
6156   __ Fabs(s1, s0);
6157   __ Fabs(s2, s17);
6158   __ Fabs(s3, s18);
6159   __ Fabs(d4, d19);
6160   __ Fabs(d5, d4);
6161   __ Fabs(d6, d20);
6162   __ Fabs(d7, d21);
6163   END();
6164 
6165   RUN();
6166 
6167   CHECK_EQUAL_FP32(1.0, s0);
6168   CHECK_EQUAL_FP32(1.0, s1);
6169   CHECK_EQUAL_FP32(0.0, s2);
6170   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s3);
6171   CHECK_EQUAL_FP64(1.0, d4);
6172   CHECK_EQUAL_FP64(1.0, d5);
6173   CHECK_EQUAL_FP64(0.0, d6);
6174   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);
6175 
6176   TEARDOWN();
6177 }
6178 
6179 
6180 TEST(fsqrt) {
6181   INIT_V8();
6182   SETUP();
6183 
6184   START();
6185   __ Fmov(s16, 0.0);
6186   __ Fmov(s17, 1.0);
6187   __ Fmov(s18, 0.25);
6188   __ Fmov(s19, 65536.0);
6189   __ Fmov(s20, -0.0);
6190   __ Fmov(s21, kFP32PositiveInfinity);
6191   __ Fmov(s22, -1.0);
6192   __ Fmov(d23, 0.0);
6193   __ Fmov(d24, 1.0);
6194   __ Fmov(d25, 0.25);
6195   __ Fmov(d26, 4294967296.0);
6196   __ Fmov(d27, -0.0);
6197   __ Fmov(d28, kFP64PositiveInfinity);
6198   __ Fmov(d29, -1.0);
6199 
6200   __ Fsqrt(s0, s16);
6201   __ Fsqrt(s1, s17);
6202   __ Fsqrt(s2, s18);
6203   __ Fsqrt(s3, s19);
6204   __ Fsqrt(s4, s20);
6205   __ Fsqrt(s5, s21);
6206   __ Fsqrt(s6, s22);
6207   __ Fsqrt(d7, d23);
6208   __ Fsqrt(d8, d24);
6209   __ Fsqrt(d9, d25);
6210   __ Fsqrt(d10, d26);
6211   __ Fsqrt(d11, d27);
6212   __ Fsqrt(d12, d28);
6213   __ Fsqrt(d13, d29);
6214   END();
6215 
6216   RUN();
6217 
6218   CHECK_EQUAL_FP32(0.0, s0);
6219   CHECK_EQUAL_FP32(1.0, s1);
6220   CHECK_EQUAL_FP32(0.5, s2);
6221   CHECK_EQUAL_FP32(256.0, s3);
6222   CHECK_EQUAL_FP32(-0.0, s4);
6223   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
6224   CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
6225   CHECK_EQUAL_FP64(0.0, d7);
6226   CHECK_EQUAL_FP64(1.0, d8);
6227   CHECK_EQUAL_FP64(0.5, d9);
6228   CHECK_EQUAL_FP64(65536.0, d10);
6229   CHECK_EQUAL_FP64(-0.0, d11);
6230   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d12);
6231   CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
6232 
6233   TEARDOWN();
6234 }
6235 
6236 
6237 TEST(frinta) {
6238   INIT_V8();
6239   SETUP();
6240 
6241   START();
6242   __ Fmov(s16, 1.0);
6243   __ Fmov(s17, 1.1);
6244   __ Fmov(s18, 1.5);
6245   __ Fmov(s19, 1.9);
6246   __ Fmov(s20, 2.5);
6247   __ Fmov(s21, -1.5);
6248   __ Fmov(s22, -2.5);
6249   __ Fmov(s23, kFP32PositiveInfinity);
6250   __ Fmov(s24, kFP32NegativeInfinity);
6251   __ Fmov(s25, 0.0);
6252   __ Fmov(s26, -0.0);
6253   __ Fmov(s27, -0.2);
6254 
6255   __ Frinta(s0, s16);
6256   __ Frinta(s1, s17);
6257   __ Frinta(s2, s18);
6258   __ Frinta(s3, s19);
6259   __ Frinta(s4, s20);
6260   __ Frinta(s5, s21);
6261   __ Frinta(s6, s22);
6262   __ Frinta(s7, s23);
6263   __ Frinta(s8, s24);
6264   __ Frinta(s9, s25);
6265   __ Frinta(s10, s26);
6266   __ Frinta(s11, s27);
6267 
6268   __ Fmov(d16, 1.0);
6269   __ Fmov(d17, 1.1);
6270   __ Fmov(d18, 1.5);
6271   __ Fmov(d19, 1.9);
6272   __ Fmov(d20, 2.5);
6273   __ Fmov(d21, -1.5);
6274   __ Fmov(d22, -2.5);
6275   __ Fmov(d23, kFP32PositiveInfinity);
6276   __ Fmov(d24, kFP32NegativeInfinity);
6277   __ Fmov(d25, 0.0);
6278   __ Fmov(d26, -0.0);
6279   __ Fmov(d27, -0.2);
6280 
6281   __ Frinta(d12, d16);
6282   __ Frinta(d13, d17);
6283   __ Frinta(d14, d18);
6284   __ Frinta(d15, d19);
6285   __ Frinta(d16, d20);
6286   __ Frinta(d17, d21);
6287   __ Frinta(d18, d22);
6288   __ Frinta(d19, d23);
6289   __ Frinta(d20, d24);
6290   __ Frinta(d21, d25);
6291   __ Frinta(d22, d26);
6292   __ Frinta(d23, d27);
6293   END();
6294 
6295   RUN();
6296 
6297   CHECK_EQUAL_FP32(1.0, s0);
6298   CHECK_EQUAL_FP32(1.0, s1);
6299   CHECK_EQUAL_FP32(2.0, s2);
6300   CHECK_EQUAL_FP32(2.0, s3);
6301   CHECK_EQUAL_FP32(3.0, s4);
6302   CHECK_EQUAL_FP32(-2.0, s5);
6303   CHECK_EQUAL_FP32(-3.0, s6);
6304   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6305   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6306   CHECK_EQUAL_FP32(0.0, s9);
6307   CHECK_EQUAL_FP32(-0.0, s10);
6308   CHECK_EQUAL_FP32(-0.0, s11);
6309   CHECK_EQUAL_FP64(1.0, d12);
6310   CHECK_EQUAL_FP64(1.0, d13);
6311   CHECK_EQUAL_FP64(2.0, d14);
6312   CHECK_EQUAL_FP64(2.0, d15);
6313   CHECK_EQUAL_FP64(3.0, d16);
6314   CHECK_EQUAL_FP64(-2.0, d17);
6315   CHECK_EQUAL_FP64(-3.0, d18);
6316   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
6317   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
6318   CHECK_EQUAL_FP64(0.0, d21);
6319   CHECK_EQUAL_FP64(-0.0, d22);
6320   CHECK_EQUAL_FP64(-0.0, d23);
6321 
6322   TEARDOWN();
6323 }
6324 
6325 
6326 TEST(frintm) {
6327   INIT_V8();
6328   SETUP();
6329 
6330   START();
6331   __ Fmov(s16, 1.0);
6332   __ Fmov(s17, 1.1);
6333   __ Fmov(s18, 1.5);
6334   __ Fmov(s19, 1.9);
6335   __ Fmov(s20, 2.5);
6336   __ Fmov(s21, -1.5);
6337   __ Fmov(s22, -2.5);
6338   __ Fmov(s23, kFP32PositiveInfinity);
6339   __ Fmov(s24, kFP32NegativeInfinity);
6340   __ Fmov(s25, 0.0);
6341   __ Fmov(s26, -0.0);
6342   __ Fmov(s27, -0.2);
6343 
6344   __ Frintm(s0, s16);
6345   __ Frintm(s1, s17);
6346   __ Frintm(s2, s18);
6347   __ Frintm(s3, s19);
6348   __ Frintm(s4, s20);
6349   __ Frintm(s5, s21);
6350   __ Frintm(s6, s22);
6351   __ Frintm(s7, s23);
6352   __ Frintm(s8, s24);
6353   __ Frintm(s9, s25);
6354   __ Frintm(s10, s26);
6355   __ Frintm(s11, s27);
6356 
6357   __ Fmov(d16, 1.0);
6358   __ Fmov(d17, 1.1);
6359   __ Fmov(d18, 1.5);
6360   __ Fmov(d19, 1.9);
6361   __ Fmov(d20, 2.5);
6362   __ Fmov(d21, -1.5);
6363   __ Fmov(d22, -2.5);
6364   __ Fmov(d23, kFP32PositiveInfinity);
6365   __ Fmov(d24, kFP32NegativeInfinity);
6366   __ Fmov(d25, 0.0);
6367   __ Fmov(d26, -0.0);
6368   __ Fmov(d27, -0.2);
6369 
6370   __ Frintm(d12, d16);
6371   __ Frintm(d13, d17);
6372   __ Frintm(d14, d18);
6373   __ Frintm(d15, d19);
6374   __ Frintm(d16, d20);
6375   __ Frintm(d17, d21);
6376   __ Frintm(d18, d22);
6377   __ Frintm(d19, d23);
6378   __ Frintm(d20, d24);
6379   __ Frintm(d21, d25);
6380   __ Frintm(d22, d26);
6381   __ Frintm(d23, d27);
6382   END();
6383 
6384   RUN();
6385 
6386   CHECK_EQUAL_FP32(1.0, s0);
6387   CHECK_EQUAL_FP32(1.0, s1);
6388   CHECK_EQUAL_FP32(1.0, s2);
6389   CHECK_EQUAL_FP32(1.0, s3);
6390   CHECK_EQUAL_FP32(2.0, s4);
6391   CHECK_EQUAL_FP32(-2.0, s5);
6392   CHECK_EQUAL_FP32(-3.0, s6);
6393   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6394   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6395   CHECK_EQUAL_FP32(0.0, s9);
6396   CHECK_EQUAL_FP32(-0.0, s10);
6397   CHECK_EQUAL_FP32(-1.0, s11);
6398   CHECK_EQUAL_FP64(1.0, d12);
6399   CHECK_EQUAL_FP64(1.0, d13);
6400   CHECK_EQUAL_FP64(1.0, d14);
6401   CHECK_EQUAL_FP64(1.0, d15);
6402   CHECK_EQUAL_FP64(2.0, d16);
6403   CHECK_EQUAL_FP64(-2.0, d17);
6404   CHECK_EQUAL_FP64(-3.0, d18);
6405   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
6406   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
6407   CHECK_EQUAL_FP64(0.0, d21);
6408   CHECK_EQUAL_FP64(-0.0, d22);
6409   CHECK_EQUAL_FP64(-1.0, d23);
6410 
6411   TEARDOWN();
6412 }
6413 
6414 
6415 TEST(frintn) {
6416   INIT_V8();
6417   SETUP();
6418 
6419   START();
6420   __ Fmov(s16, 1.0);
6421   __ Fmov(s17, 1.1);
6422   __ Fmov(s18, 1.5);
6423   __ Fmov(s19, 1.9);
6424   __ Fmov(s20, 2.5);
6425   __ Fmov(s21, -1.5);
6426   __ Fmov(s22, -2.5);
6427   __ Fmov(s23, kFP32PositiveInfinity);
6428   __ Fmov(s24, kFP32NegativeInfinity);
6429   __ Fmov(s25, 0.0);
6430   __ Fmov(s26, -0.0);
6431   __ Fmov(s27, -0.2);
6432 
6433   __ Frintn(s0, s16);
6434   __ Frintn(s1, s17);
6435   __ Frintn(s2, s18);
6436   __ Frintn(s3, s19);
6437   __ Frintn(s4, s20);
6438   __ Frintn(s5, s21);
6439   __ Frintn(s6, s22);
6440   __ Frintn(s7, s23);
6441   __ Frintn(s8, s24);
6442   __ Frintn(s9, s25);
6443   __ Frintn(s10, s26);
6444   __ Frintn(s11, s27);
6445 
6446   __ Fmov(d16, 1.0);
6447   __ Fmov(d17, 1.1);
6448   __ Fmov(d18, 1.5);
6449   __ Fmov(d19, 1.9);
6450   __ Fmov(d20, 2.5);
6451   __ Fmov(d21, -1.5);
6452   __ Fmov(d22, -2.5);
6453   __ Fmov(d23, kFP32PositiveInfinity);
6454   __ Fmov(d24, kFP32NegativeInfinity);
6455   __ Fmov(d25, 0.0);
6456   __ Fmov(d26, -0.0);
6457   __ Fmov(d27, -0.2);
6458 
6459   __ Frintn(d12, d16);
6460   __ Frintn(d13, d17);
6461   __ Frintn(d14, d18);
6462   __ Frintn(d15, d19);
6463   __ Frintn(d16, d20);
6464   __ Frintn(d17, d21);
6465   __ Frintn(d18, d22);
6466   __ Frintn(d19, d23);
6467   __ Frintn(d20, d24);
6468   __ Frintn(d21, d25);
6469   __ Frintn(d22, d26);
6470   __ Frintn(d23, d27);
6471   END();
6472 
6473   RUN();
6474 
6475   CHECK_EQUAL_FP32(1.0, s0);
6476   CHECK_EQUAL_FP32(1.0, s1);
6477   CHECK_EQUAL_FP32(2.0, s2);
6478   CHECK_EQUAL_FP32(2.0, s3);
6479   CHECK_EQUAL_FP32(2.0, s4);
6480   CHECK_EQUAL_FP32(-2.0, s5);
6481   CHECK_EQUAL_FP32(-2.0, s6);
6482   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6483   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6484   CHECK_EQUAL_FP32(0.0, s9);
6485   CHECK_EQUAL_FP32(-0.0, s10);
6486   CHECK_EQUAL_FP32(-0.0, s11);
6487   CHECK_EQUAL_FP64(1.0, d12);
6488   CHECK_EQUAL_FP64(1.0, d13);
6489   CHECK_EQUAL_FP64(2.0, d14);
6490   CHECK_EQUAL_FP64(2.0, d15);
6491   CHECK_EQUAL_FP64(2.0, d16);
6492   CHECK_EQUAL_FP64(-2.0, d17);
6493   CHECK_EQUAL_FP64(-2.0, d18);
6494   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
6495   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
6496   CHECK_EQUAL_FP64(0.0, d21);
6497   CHECK_EQUAL_FP64(-0.0, d22);
6498   CHECK_EQUAL_FP64(-0.0, d23);
6499 
6500   TEARDOWN();
6501 }
6502 
6503 
6504 TEST(frintp) {
6505   INIT_V8();
6506   SETUP();
6507 
6508   START();
6509   __ Fmov(s16, 1.0);
6510   __ Fmov(s17, 1.1);
6511   __ Fmov(s18, 1.5);
6512   __ Fmov(s19, 1.9);
6513   __ Fmov(s20, 2.5);
6514   __ Fmov(s21, -1.5);
6515   __ Fmov(s22, -2.5);
6516   __ Fmov(s23, kFP32PositiveInfinity);
6517   __ Fmov(s24, kFP32NegativeInfinity);
6518   __ Fmov(s25, 0.0);
6519   __ Fmov(s26, -0.0);
6520   __ Fmov(s27, -0.2);
6521 
6522   __ Frintp(s0, s16);
6523   __ Frintp(s1, s17);
6524   __ Frintp(s2, s18);
6525   __ Frintp(s3, s19);
6526   __ Frintp(s4, s20);
6527   __ Frintp(s5, s21);
6528   __ Frintp(s6, s22);
6529   __ Frintp(s7, s23);
6530   __ Frintp(s8, s24);
6531   __ Frintp(s9, s25);
6532   __ Frintp(s10, s26);
6533   __ Frintp(s11, s27);
6534 
6535   __ Fmov(d16, -0.5);
6536   __ Fmov(d17, -0.8);
6537   __ Fmov(d18, 1.5);
6538   __ Fmov(d19, 1.9);
6539   __ Fmov(d20, 2.5);
6540   __ Fmov(d21, -1.5);
6541   __ Fmov(d22, -2.5);
6542   __ Fmov(d23, kFP32PositiveInfinity);
6543   __ Fmov(d24, kFP32NegativeInfinity);
6544   __ Fmov(d25, 0.0);
6545   __ Fmov(d26, -0.0);
6546   __ Fmov(d27, -0.2);
6547 
6548   __ Frintp(d12, d16);
6549   __ Frintp(d13, d17);
6550   __ Frintp(d14, d18);
6551   __ Frintp(d15, d19);
6552   __ Frintp(d16, d20);
6553   __ Frintp(d17, d21);
6554   __ Frintp(d18, d22);
6555   __ Frintp(d19, d23);
6556   __ Frintp(d20, d24);
6557   __ Frintp(d21, d25);
6558   __ Frintp(d22, d26);
6559   __ Frintp(d23, d27);
6560   END();
6561 
6562   RUN();
6563 
6564   CHECK_EQUAL_FP32(1.0, s0);
6565   CHECK_EQUAL_FP32(2.0, s1);
6566   CHECK_EQUAL_FP32(2.0, s2);
6567   CHECK_EQUAL_FP32(2.0, s3);
6568   CHECK_EQUAL_FP32(3.0, s4);
6569   CHECK_EQUAL_FP32(-1.0, s5);
6570   CHECK_EQUAL_FP32(-2.0, s6);
6571   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6572   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6573   CHECK_EQUAL_FP32(0.0, s9);
6574   CHECK_EQUAL_FP32(-0.0, s10);
6575   CHECK_EQUAL_FP32(-0.0, s11);
6576   CHECK_EQUAL_FP64(-0.0, d12);
6577   CHECK_EQUAL_FP64(-0.0, d13);
6578   CHECK_EQUAL_FP64(2.0, d14);
6579   CHECK_EQUAL_FP64(2.0, d15);
6580   CHECK_EQUAL_FP64(3.0, d16);
6581   CHECK_EQUAL_FP64(-1.0, d17);
6582   CHECK_EQUAL_FP64(-2.0, d18);
6583   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
6584   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
6585   CHECK_EQUAL_FP64(0.0, d21);
6586   CHECK_EQUAL_FP64(-0.0, d22);
6587   CHECK_EQUAL_FP64(-0.0, d23);
6588 
6589   TEARDOWN();
6590 }
6591 
6592 
6593 TEST(frintz) {
6594   INIT_V8();
6595   SETUP();
6596 
6597   START();
6598   __ Fmov(s16, 1.0);
6599   __ Fmov(s17, 1.1);
6600   __ Fmov(s18, 1.5);
6601   __ Fmov(s19, 1.9);
6602   __ Fmov(s20, 2.5);
6603   __ Fmov(s21, -1.5);
6604   __ Fmov(s22, -2.5);
6605   __ Fmov(s23, kFP32PositiveInfinity);
6606   __ Fmov(s24, kFP32NegativeInfinity);
6607   __ Fmov(s25, 0.0);
6608   __ Fmov(s26, -0.0);
6609 
6610   __ Frintz(s0, s16);
6611   __ Frintz(s1, s17);
6612   __ Frintz(s2, s18);
6613   __ Frintz(s3, s19);
6614   __ Frintz(s4, s20);
6615   __ Frintz(s5, s21);
6616   __ Frintz(s6, s22);
6617   __ Frintz(s7, s23);
6618   __ Frintz(s8, s24);
6619   __ Frintz(s9, s25);
6620   __ Frintz(s10, s26);
6621 
6622   __ Fmov(d16, 1.0);
6623   __ Fmov(d17, 1.1);
6624   __ Fmov(d18, 1.5);
6625   __ Fmov(d19, 1.9);
6626   __ Fmov(d20, 2.5);
6627   __ Fmov(d21, -1.5);
6628   __ Fmov(d22, -2.5);
6629   __ Fmov(d23, kFP32PositiveInfinity);
6630   __ Fmov(d24, kFP32NegativeInfinity);
6631   __ Fmov(d25, 0.0);
6632   __ Fmov(d26, -0.0);
6633 
6634   __ Frintz(d11, d16);
6635   __ Frintz(d12, d17);
6636   __ Frintz(d13, d18);
6637   __ Frintz(d14, d19);
6638   __ Frintz(d15, d20);
6639   __ Frintz(d16, d21);
6640   __ Frintz(d17, d22);
6641   __ Frintz(d18, d23);
6642   __ Frintz(d19, d24);
6643   __ Frintz(d20, d25);
6644   __ Frintz(d21, d26);
6645   END();
6646 
6647   RUN();
6648 
6649   CHECK_EQUAL_FP32(1.0, s0);
6650   CHECK_EQUAL_FP32(1.0, s1);
6651   CHECK_EQUAL_FP32(1.0, s2);
6652   CHECK_EQUAL_FP32(1.0, s3);
6653   CHECK_EQUAL_FP32(2.0, s4);
6654   CHECK_EQUAL_FP32(-1.0, s5);
6655   CHECK_EQUAL_FP32(-2.0, s6);
6656   CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
6657   CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
6658   CHECK_EQUAL_FP32(0.0, s9);
6659   CHECK_EQUAL_FP32(-0.0, s10);
6660   CHECK_EQUAL_FP64(1.0, d11);
6661   CHECK_EQUAL_FP64(1.0, d12);
6662   CHECK_EQUAL_FP64(1.0, d13);
6663   CHECK_EQUAL_FP64(1.0, d14);
6664   CHECK_EQUAL_FP64(2.0, d15);
6665   CHECK_EQUAL_FP64(-1.0, d16);
6666   CHECK_EQUAL_FP64(-2.0, d17);
6667   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d18);
6668   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d19);
6669   CHECK_EQUAL_FP64(0.0, d20);
6670   CHECK_EQUAL_FP64(-0.0, d21);
6671 
6672   TEARDOWN();
6673 }
6674 
6675 
6676 TEST(fcvt_ds) {
6677   INIT_V8();
6678   SETUP();
6679 
6680   START();
6681   __ Fmov(s16, 1.0);
6682   __ Fmov(s17, 1.1);
6683   __ Fmov(s18, 1.5);
6684   __ Fmov(s19, 1.9);
6685   __ Fmov(s20, 2.5);
6686   __ Fmov(s21, -1.5);
6687   __ Fmov(s22, -2.5);
6688   __ Fmov(s23, kFP32PositiveInfinity);
6689   __ Fmov(s24, kFP32NegativeInfinity);
6690   __ Fmov(s25, 0.0);
6691   __ Fmov(s26, -0.0);
6692   __ Fmov(s27, FLT_MAX);
6693   __ Fmov(s28, FLT_MIN);
6694   __ Fmov(s29, rawbits_to_float(0x7fc12345));   // Quiet NaN.
6695   __ Fmov(s30, rawbits_to_float(0x7f812345));   // Signalling NaN.
6696 
6697   __ Fcvt(d0, s16);
6698   __ Fcvt(d1, s17);
6699   __ Fcvt(d2, s18);
6700   __ Fcvt(d3, s19);
6701   __ Fcvt(d4, s20);
6702   __ Fcvt(d5, s21);
6703   __ Fcvt(d6, s22);
6704   __ Fcvt(d7, s23);
6705   __ Fcvt(d8, s24);
6706   __ Fcvt(d9, s25);
6707   __ Fcvt(d10, s26);
6708   __ Fcvt(d11, s27);
6709   __ Fcvt(d12, s28);
6710   __ Fcvt(d13, s29);
6711   __ Fcvt(d14, s30);
6712   END();
6713 
6714   RUN();
6715 
6716   CHECK_EQUAL_FP64(1.0f, d0);
6717   CHECK_EQUAL_FP64(1.1f, d1);
6718   CHECK_EQUAL_FP64(1.5f, d2);
6719   CHECK_EQUAL_FP64(1.9f, d3);
6720   CHECK_EQUAL_FP64(2.5f, d4);
6721   CHECK_EQUAL_FP64(-1.5f, d5);
6722   CHECK_EQUAL_FP64(-2.5f, d6);
6723   CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);
6724   CHECK_EQUAL_FP64(kFP64NegativeInfinity, d8);
6725   CHECK_EQUAL_FP64(0.0f, d9);
6726   CHECK_EQUAL_FP64(-0.0f, d10);
6727   CHECK_EQUAL_FP64(FLT_MAX, d11);
6728   CHECK_EQUAL_FP64(FLT_MIN, d12);
6729 
6730   // Check that the NaN payload is preserved according to ARM64 conversion
6731   // rules:
6732   //  - The sign bit is preserved.
6733   //  - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
6734   //  - The remaining mantissa bits are copied until they run out.
6735   //  - The low-order bits that haven't already been assigned are set to 0.
6736   CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
6737   CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
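  // Worked example of the rules above: the quiet input 0x7fc12345 has sign 0,
  // exponent 0xff and mantissa 0x412345. Widening keeps the sign, forces the
  // quiet bit and copies the 23 mantissa bits into the top of the 52-bit field
  // (0x412345 << 29 == 0x82468a0000000), giving 0x7ff82468a0000000. The
  // signalling input 0x7f812345 differs only in the quiet bit, which is forced
  // to 1 anyway, so it converts to the same double.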
6738 
6739   TEARDOWN();
6740 }
6741 
6742 
6743 TEST(fcvt_sd) {
6744   INIT_V8();
6745   // There are a huge number of corner-cases to check, so this test iterates
6746   // through a list. The list is then negated and checked again (since the sign
6747   // is irrelevant in ties-to-even rounding), so the list shouldn't include any
6748   // negative values.
6749   //
6750   // Note that this test only checks ties-to-even rounding, because that is all
6751   // that the simulator supports.
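  // For the normalized entries below, bit 29 of the double is the last bit
  // that survives the narrowing, so a dropped remainder of exactly 0x10000000
  // is the halfway case: smaller remainders round down, larger ones round up,
  // and ties round to the float whose low mantissa bit is zero. That is why
  // 0x3ff0000010000000 stays at 0x3f800000 while 0x3ff0000030000000 rounds up
  // to 0x3f800002.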
6752   struct {double in; float expected;} test[] = {
6753     // Check some simple conversions.
6754     {0.0, 0.0f},
6755     {1.0, 1.0f},
6756     {1.5, 1.5f},
6757     {2.0, 2.0f},
6758     {FLT_MAX, FLT_MAX},
6759     //  - The smallest normalized float.
6760     {pow(2.0, -126), powf(2, -126)},
6761     //  - Normal floats that need (ties-to-even) rounding.
6762     //    For normalized numbers:
6763     //         bit 29 (0x0000000020000000) is the lowest-order bit which will
6764     //                                     fit in the float's mantissa.
6765     {rawbits_to_double(0x3ff0000000000000), rawbits_to_float(0x3f800000)},
6766     {rawbits_to_double(0x3ff0000000000001), rawbits_to_float(0x3f800000)},
6767     {rawbits_to_double(0x3ff0000010000000), rawbits_to_float(0x3f800000)},
6768     {rawbits_to_double(0x3ff0000010000001), rawbits_to_float(0x3f800001)},
6769     {rawbits_to_double(0x3ff0000020000000), rawbits_to_float(0x3f800001)},
6770     {rawbits_to_double(0x3ff0000020000001), rawbits_to_float(0x3f800001)},
6771     {rawbits_to_double(0x3ff0000030000000), rawbits_to_float(0x3f800002)},
6772     {rawbits_to_double(0x3ff0000030000001), rawbits_to_float(0x3f800002)},
6773     {rawbits_to_double(0x3ff0000040000000), rawbits_to_float(0x3f800002)},
6774     {rawbits_to_double(0x3ff0000040000001), rawbits_to_float(0x3f800002)},
6775     {rawbits_to_double(0x3ff0000050000000), rawbits_to_float(0x3f800002)},
6776     {rawbits_to_double(0x3ff0000050000001), rawbits_to_float(0x3f800003)},
6777     {rawbits_to_double(0x3ff0000060000000), rawbits_to_float(0x3f800003)},
6778     //  - A mantissa that overflows into the exponent during rounding.
6779     {rawbits_to_double(0x3feffffff0000000), rawbits_to_float(0x3f800000)},
6780     //  - The largest double that rounds to a normal float.
6781     {rawbits_to_double(0x47efffffefffffff), rawbits_to_float(0x7f7fffff)},
6782 
6783     // Doubles that are too big for a float.
6784     {kFP64PositiveInfinity, kFP32PositiveInfinity},
6785     {DBL_MAX, kFP32PositiveInfinity},
6786     //  - The smallest exponent that's too big for a float.
6787     {pow(2.0, 128), kFP32PositiveInfinity},
6788     //  - This exponent is in range, but the value rounds to infinity.
6789     {rawbits_to_double(0x47effffff0000000), kFP32PositiveInfinity},
6790 
6791     // Doubles that are too small for a float.
6792     //  - The smallest (subnormal) double.
6793     {DBL_MIN, 0.0},
6794     //  - The largest double which is too small for a subnormal float.
6795     {rawbits_to_double(0x3690000000000000), rawbits_to_float(0x00000000)},
6796 
6797     // Normal doubles that become subnormal floats.
6798     //  - The largest subnormal float.
6799     {rawbits_to_double(0x380fffffc0000000), rawbits_to_float(0x007fffff)},
6800     //  - The smallest subnormal float.
6801     {rawbits_to_double(0x36a0000000000000), rawbits_to_float(0x00000001)},
6802     //  - Subnormal floats that need (ties-to-even) rounding.
6803     //    For these subnormals:
6804     //         bit 34 (0x0000000400000000) is the lowest-order bit which will
6805     //                                     fit in the float's mantissa.
6806     {rawbits_to_double(0x37c159e000000000), rawbits_to_float(0x00045678)},
6807     {rawbits_to_double(0x37c159e000000001), rawbits_to_float(0x00045678)},
6808     {rawbits_to_double(0x37c159e200000000), rawbits_to_float(0x00045678)},
6809     {rawbits_to_double(0x37c159e200000001), rawbits_to_float(0x00045679)},
6810     {rawbits_to_double(0x37c159e400000000), rawbits_to_float(0x00045679)},
6811     {rawbits_to_double(0x37c159e400000001), rawbits_to_float(0x00045679)},
6812     {rawbits_to_double(0x37c159e600000000), rawbits_to_float(0x0004567a)},
6813     {rawbits_to_double(0x37c159e600000001), rawbits_to_float(0x0004567a)},
6814     {rawbits_to_double(0x37c159e800000000), rawbits_to_float(0x0004567a)},
6815     {rawbits_to_double(0x37c159e800000001), rawbits_to_float(0x0004567a)},
6816     {rawbits_to_double(0x37c159ea00000000), rawbits_to_float(0x0004567a)},
6817     {rawbits_to_double(0x37c159ea00000001), rawbits_to_float(0x0004567b)},
6818     {rawbits_to_double(0x37c159ec00000000), rawbits_to_float(0x0004567b)},
6819     //  - The smallest double which rounds up to become a subnormal float.
6820     {rawbits_to_double(0x3690000000000001), rawbits_to_float(0x00000001)},
6821 
6822     // Check NaN payload preservation.
6823     {rawbits_to_double(0x7ff82468a0000000), rawbits_to_float(0x7fc12345)},
6824     {rawbits_to_double(0x7ff82468bfffffff), rawbits_to_float(0x7fc12345)},
6825     //  - Signalling NaNs become quiet NaNs.
6826     {rawbits_to_double(0x7ff02468a0000000), rawbits_to_float(0x7fc12345)},
6827     {rawbits_to_double(0x7ff02468bfffffff), rawbits_to_float(0x7fc12345)},
6828     {rawbits_to_double(0x7ff000001fffffff), rawbits_to_float(0x7fc00000)},
6829   };
6830   int count = sizeof(test) / sizeof(test[0]);
6831 
6832   for (int i = 0; i < count; i++) {
6833     double in = test[i].in;
6834     float expected = test[i].expected;
6835 
6836     // We only expect positive input.
6837     CHECK(std::signbit(in) == 0);
6838     CHECK(std::signbit(expected) == 0);
6839 
6840     SETUP();
6841     START();
6842 
6843     __ Fmov(d10, in);
6844     __ Fcvt(s20, d10);
6845 
6846     __ Fmov(d11, -in);
6847     __ Fcvt(s21, d11);
6848 
6849     END();
6850     RUN();
6851     CHECK_EQUAL_FP32(expected, s20);
6852     CHECK_EQUAL_FP32(-expected, s21);
6853     TEARDOWN();
6854   }
6855 }
6856 
6857 
6858 TEST(fcvtas) {
6859   INIT_V8();
6860   SETUP();
6861 
6862   START();
6863   __ Fmov(s0, 1.0);
6864   __ Fmov(s1, 1.1);
6865   __ Fmov(s2, 2.5);
6866   __ Fmov(s3, -2.5);
6867   __ Fmov(s4, kFP32PositiveInfinity);
6868   __ Fmov(s5, kFP32NegativeInfinity);
6869   __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
6870   __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
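  // Near 2^31 the float ULP is 2^7, so 0x7fffff80 (2^31 - 128) is the largest
  // float below INT32_MAX. Likewise 0x7fffff8000000000 (2^63 - 2^39) is the
  // largest float below INT64_MAX, and 0x7ffffffffffffc00 (2^63 - 2^10) is the
  // largest double below INT64_MAX, used further down.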
6871   __ Fmov(d8, 1.0);
6872   __ Fmov(d9, 1.1);
6873   __ Fmov(d10, 2.5);
6874   __ Fmov(d11, -2.5);
6875   __ Fmov(d12, kFP64PositiveInfinity);
6876   __ Fmov(d13, kFP64NegativeInfinity);
6877   __ Fmov(d14, kWMaxInt - 1);
6878   __ Fmov(d15, kWMinInt + 1);
6879   __ Fmov(s17, 1.1);
6880   __ Fmov(s18, 2.5);
6881   __ Fmov(s19, -2.5);
6882   __ Fmov(s20, kFP32PositiveInfinity);
6883   __ Fmov(s21, kFP32NegativeInfinity);
6884   __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
6885   __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
6886   __ Fmov(d24, 1.1);
6887   __ Fmov(d25, 2.5);
6888   __ Fmov(d26, -2.5);
6889   __ Fmov(d27, kFP64PositiveInfinity);
6890   __ Fmov(d28, kFP64NegativeInfinity);
6891   __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
6892   __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
6893 
6894   __ Fcvtas(w0, s0);
6895   __ Fcvtas(w1, s1);
6896   __ Fcvtas(w2, s2);
6897   __ Fcvtas(w3, s3);
6898   __ Fcvtas(w4, s4);
6899   __ Fcvtas(w5, s5);
6900   __ Fcvtas(w6, s6);
6901   __ Fcvtas(w7, s7);
6902   __ Fcvtas(w8, d8);
6903   __ Fcvtas(w9, d9);
6904   __ Fcvtas(w10, d10);
6905   __ Fcvtas(w11, d11);
6906   __ Fcvtas(w12, d12);
6907   __ Fcvtas(w13, d13);
6908   __ Fcvtas(w14, d14);
6909   __ Fcvtas(w15, d15);
6910   __ Fcvtas(x17, s17);
6911   __ Fcvtas(x18, s18);
6912   __ Fcvtas(x19, s19);
6913   __ Fcvtas(x20, s20);
6914   __ Fcvtas(x21, s21);
6915   __ Fcvtas(x22, s22);
6916   __ Fcvtas(x23, s23);
6917   __ Fcvtas(x24, d24);
6918   __ Fcvtas(x25, d25);
6919   __ Fcvtas(x26, d26);
6920   __ Fcvtas(x27, d27);
6921   __ Fcvtas(x28, d28);
6922   __ Fcvtas(x29, d29);
6923   __ Fcvtas(x30, d30);
6924   END();
6925 
6926   RUN();
6927 
6928   CHECK_EQUAL_64(1, x0);
6929   CHECK_EQUAL_64(1, x1);
6930   CHECK_EQUAL_64(3, x2);
6931   CHECK_EQUAL_64(0xfffffffd, x3);
6932   CHECK_EQUAL_64(0x7fffffff, x4);
6933   CHECK_EQUAL_64(0x80000000, x5);
6934   CHECK_EQUAL_64(0x7fffff80, x6);
6935   CHECK_EQUAL_64(0x80000080, x7);
6936   CHECK_EQUAL_64(1, x8);
6937   CHECK_EQUAL_64(1, x9);
6938   CHECK_EQUAL_64(3, x10);
6939   CHECK_EQUAL_64(0xfffffffd, x11);
6940   CHECK_EQUAL_64(0x7fffffff, x12);
6941   CHECK_EQUAL_64(0x80000000, x13);
6942   CHECK_EQUAL_64(0x7ffffffe, x14);
6943   CHECK_EQUAL_64(0x80000001, x15);
6944   CHECK_EQUAL_64(1, x17);
6945   CHECK_EQUAL_64(3, x18);
6946   CHECK_EQUAL_64(0xfffffffffffffffdUL, x19);
6947   CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
6948   CHECK_EQUAL_64(0x8000000000000000UL, x21);
6949   CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
6950   CHECK_EQUAL_64(0x8000008000000000UL, x23);
6951   CHECK_EQUAL_64(1, x24);
6952   CHECK_EQUAL_64(3, x25);
6953   CHECK_EQUAL_64(0xfffffffffffffffdUL, x26);
6954   CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
6955   CHECK_EQUAL_64(0x8000000000000000UL, x28);
6956   CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
6957   CHECK_EQUAL_64(0x8000000000000400UL, x30);
6958 
6959   TEARDOWN();
6960 }
6961 
6962 
6963 TEST(fcvtau) {
6964   INIT_V8();
6965   SETUP();
6966 
6967   START();
6968   __ Fmov(s0, 1.0);
6969   __ Fmov(s1, 1.1);
6970   __ Fmov(s2, 2.5);
6971   __ Fmov(s3, -2.5);
6972   __ Fmov(s4, kFP32PositiveInfinity);
6973   __ Fmov(s5, kFP32NegativeInfinity);
6974   __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
6975   __ Fmov(d8, 1.0);
6976   __ Fmov(d9, 1.1);
6977   __ Fmov(d10, 2.5);
6978   __ Fmov(d11, -2.5);
6979   __ Fmov(d12, kFP64PositiveInfinity);
6980   __ Fmov(d13, kFP64NegativeInfinity);
6981   __ Fmov(d14, 0xfffffffe);
6982   __ Fmov(s16, 1.0);
6983   __ Fmov(s17, 1.1);
6984   __ Fmov(s18, 2.5);
6985   __ Fmov(s19, -2.5);
6986   __ Fmov(s20, kFP32PositiveInfinity);
6987   __ Fmov(s21, kFP32NegativeInfinity);
6988   __ Fmov(s22, 0xffffff0000000000UL);  // Largest float < UINT64_MAX.
6989   __ Fmov(d24, 1.1);
6990   __ Fmov(d25, 2.5);
6991   __ Fmov(d26, -2.5);
6992   __ Fmov(d27, kFP64PositiveInfinity);
6993   __ Fmov(d28, kFP64NegativeInfinity);
6994   __ Fmov(d29, 0xfffffffffffff800UL);  // Largest double < UINT64_MAX.
6995   __ Fmov(s30, 0x100000000UL);
6996 
6997   __ Fcvtau(w0, s0);
6998   __ Fcvtau(w1, s1);
6999   __ Fcvtau(w2, s2);
7000   __ Fcvtau(w3, s3);
7001   __ Fcvtau(w4, s4);
7002   __ Fcvtau(w5, s5);
7003   __ Fcvtau(w6, s6);
7004   __ Fcvtau(w8, d8);
7005   __ Fcvtau(w9, d9);
7006   __ Fcvtau(w10, d10);
7007   __ Fcvtau(w11, d11);
7008   __ Fcvtau(w12, d12);
7009   __ Fcvtau(w13, d13);
7010   __ Fcvtau(w14, d14);
7011   __ Fcvtau(w15, d15);
7012   __ Fcvtau(x16, s16);
7013   __ Fcvtau(x17, s17);
7014   __ Fcvtau(x18, s18);
7015   __ Fcvtau(x19, s19);
7016   __ Fcvtau(x20, s20);
7017   __ Fcvtau(x21, s21);
7018   __ Fcvtau(x22, s22);
7019   __ Fcvtau(x24, d24);
7020   __ Fcvtau(x25, d25);
7021   __ Fcvtau(x26, d26);
7022   __ Fcvtau(x27, d27);
7023   __ Fcvtau(x28, d28);
7024   __ Fcvtau(x29, d29);
7025   __ Fcvtau(w30, s30);
7026   END();
7027 
7028   RUN();
7029 
7030   CHECK_EQUAL_64(1, x0);
7031   CHECK_EQUAL_64(1, x1);
7032   CHECK_EQUAL_64(3, x2);
7033   CHECK_EQUAL_64(0, x3);
7034   CHECK_EQUAL_64(0xffffffff, x4);
7035   CHECK_EQUAL_64(0, x5);
7036   CHECK_EQUAL_64(0xffffff00, x6);
7037   CHECK_EQUAL_64(1, x8);
7038   CHECK_EQUAL_64(1, x9);
7039   CHECK_EQUAL_64(3, x10);
7040   CHECK_EQUAL_64(0, x11);
7041   CHECK_EQUAL_64(0xffffffff, x12);
7042   CHECK_EQUAL_64(0, x13);
7043   CHECK_EQUAL_64(0xfffffffe, x14);
7044   CHECK_EQUAL_64(1, x16);
7045   CHECK_EQUAL_64(1, x17);
7046   CHECK_EQUAL_64(3, x18);
7047   CHECK_EQUAL_64(0, x19);
7048   CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
7049   CHECK_EQUAL_64(0, x21);
7050   CHECK_EQUAL_64(0xffffff0000000000UL, x22);
7051   CHECK_EQUAL_64(1, x24);
7052   CHECK_EQUAL_64(3, x25);
7053   CHECK_EQUAL_64(0, x26);
7054   CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
7055   CHECK_EQUAL_64(0, x28);
7056   CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
7057   CHECK_EQUAL_64(0xffffffff, x30);
7058 
7059   TEARDOWN();
7060 }
7061 
7062 
7063 TEST(fcvtms) {
7064   INIT_V8();
7065   SETUP();
7066 
7067   START();
7068   __ Fmov(s0, 1.0);
7069   __ Fmov(s1, 1.1);
7070   __ Fmov(s2, 1.5);
7071   __ Fmov(s3, -1.5);
7072   __ Fmov(s4, kFP32PositiveInfinity);
7073   __ Fmov(s5, kFP32NegativeInfinity);
7074   __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
7075   __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
7076   __ Fmov(d8, 1.0);
7077   __ Fmov(d9, 1.1);
7078   __ Fmov(d10, 1.5);
7079   __ Fmov(d11, -1.5);
7080   __ Fmov(d12, kFP64PositiveInfinity);
7081   __ Fmov(d13, kFP64NegativeInfinity);
7082   __ Fmov(d14, kWMaxInt - 1);
7083   __ Fmov(d15, kWMinInt + 1);
7084   __ Fmov(s17, 1.1);
7085   __ Fmov(s18, 1.5);
7086   __ Fmov(s19, -1.5);
7087   __ Fmov(s20, kFP32PositiveInfinity);
7088   __ Fmov(s21, kFP32NegativeInfinity);
7089   __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
7090   __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
7091   __ Fmov(d24, 1.1);
7092   __ Fmov(d25, 1.5);
7093   __ Fmov(d26, -1.5);
7094   __ Fmov(d27, kFP64PositiveInfinity);
7095   __ Fmov(d28, kFP64NegativeInfinity);
7096   __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
7097   __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
7098 
7099   __ Fcvtms(w0, s0);
7100   __ Fcvtms(w1, s1);
7101   __ Fcvtms(w2, s2);
7102   __ Fcvtms(w3, s3);
7103   __ Fcvtms(w4, s4);
7104   __ Fcvtms(w5, s5);
7105   __ Fcvtms(w6, s6);
7106   __ Fcvtms(w7, s7);
7107   __ Fcvtms(w8, d8);
7108   __ Fcvtms(w9, d9);
7109   __ Fcvtms(w10, d10);
7110   __ Fcvtms(w11, d11);
7111   __ Fcvtms(w12, d12);
7112   __ Fcvtms(w13, d13);
7113   __ Fcvtms(w14, d14);
7114   __ Fcvtms(w15, d15);
7115   __ Fcvtms(x17, s17);
7116   __ Fcvtms(x18, s18);
7117   __ Fcvtms(x19, s19);
7118   __ Fcvtms(x20, s20);
7119   __ Fcvtms(x21, s21);
7120   __ Fcvtms(x22, s22);
7121   __ Fcvtms(x23, s23);
7122   __ Fcvtms(x24, d24);
7123   __ Fcvtms(x25, d25);
7124   __ Fcvtms(x26, d26);
7125   __ Fcvtms(x27, d27);
7126   __ Fcvtms(x28, d28);
7127   __ Fcvtms(x29, d29);
7128   __ Fcvtms(x30, d30);
7129   END();
7130 
7131   RUN();
7132 
7133   CHECK_EQUAL_64(1, x0);
7134   CHECK_EQUAL_64(1, x1);
7135   CHECK_EQUAL_64(1, x2);
7136   CHECK_EQUAL_64(0xfffffffe, x3);
7137   CHECK_EQUAL_64(0x7fffffff, x4);
7138   CHECK_EQUAL_64(0x80000000, x5);
7139   CHECK_EQUAL_64(0x7fffff80, x6);
7140   CHECK_EQUAL_64(0x80000080, x7);
7141   CHECK_EQUAL_64(1, x8);
7142   CHECK_EQUAL_64(1, x9);
7143   CHECK_EQUAL_64(1, x10);
7144   CHECK_EQUAL_64(0xfffffffe, x11);
7145   CHECK_EQUAL_64(0x7fffffff, x12);
7146   CHECK_EQUAL_64(0x80000000, x13);
7147   CHECK_EQUAL_64(0x7ffffffe, x14);
7148   CHECK_EQUAL_64(0x80000001, x15);
7149   CHECK_EQUAL_64(1, x17);
7150   CHECK_EQUAL_64(1, x18);
7151   CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
7152   CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
7153   CHECK_EQUAL_64(0x8000000000000000UL, x21);
7154   CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
7155   CHECK_EQUAL_64(0x8000008000000000UL, x23);
7156   CHECK_EQUAL_64(1, x24);
7157   CHECK_EQUAL_64(1, x25);
7158   CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
7159   CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
7160   CHECK_EQUAL_64(0x8000000000000000UL, x28);
7161   CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
7162   CHECK_EQUAL_64(0x8000000000000400UL, x30);
7163 
7164   TEARDOWN();
7165 }
7166 
7167 
7168 TEST(fcvtmu) {
7169   INIT_V8();
7170   SETUP();
7171 
7172   START();
7173   __ Fmov(s0, 1.0);
7174   __ Fmov(s1, 1.1);
7175   __ Fmov(s2, 1.5);
7176   __ Fmov(s3, -1.5);
7177   __ Fmov(s4, kFP32PositiveInfinity);
7178   __ Fmov(s5, kFP32NegativeInfinity);
7179   __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
7180   __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
7181   __ Fmov(d8, 1.0);
7182   __ Fmov(d9, 1.1);
7183   __ Fmov(d10, 1.5);
7184   __ Fmov(d11, -1.5);
7185   __ Fmov(d12, kFP64PositiveInfinity);
7186   __ Fmov(d13, kFP64NegativeInfinity);
7187   __ Fmov(d14, kWMaxInt - 1);
7188   __ Fmov(d15, kWMinInt + 1);
7189   __ Fmov(s17, 1.1);
7190   __ Fmov(s18, 1.5);
7191   __ Fmov(s19, -1.5);
7192   __ Fmov(s20, kFP32PositiveInfinity);
7193   __ Fmov(s21, kFP32NegativeInfinity);
7194   __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
7195   __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
7196   __ Fmov(d24, 1.1);
7197   __ Fmov(d25, 1.5);
7198   __ Fmov(d26, -1.5);
7199   __ Fmov(d27, kFP64PositiveInfinity);
7200   __ Fmov(d28, kFP64NegativeInfinity);
7201   __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
7202   __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
7203 
7204   __ Fcvtmu(w0, s0);
7205   __ Fcvtmu(w1, s1);
7206   __ Fcvtmu(w2, s2);
7207   __ Fcvtmu(w3, s3);
7208   __ Fcvtmu(w4, s4);
7209   __ Fcvtmu(w5, s5);
7210   __ Fcvtmu(w6, s6);
7211   __ Fcvtmu(w7, s7);
7212   __ Fcvtmu(w8, d8);
7213   __ Fcvtmu(w9, d9);
7214   __ Fcvtmu(w10, d10);
7215   __ Fcvtmu(w11, d11);
7216   __ Fcvtmu(w12, d12);
7217   __ Fcvtmu(w13, d13);
7218   __ Fcvtmu(w14, d14);
7219   __ Fcvtmu(x17, s17);
7220   __ Fcvtmu(x18, s18);
7221   __ Fcvtmu(x19, s19);
7222   __ Fcvtmu(x20, s20);
7223   __ Fcvtmu(x21, s21);
7224   __ Fcvtmu(x22, s22);
7225   __ Fcvtmu(x23, s23);
7226   __ Fcvtmu(x24, d24);
7227   __ Fcvtmu(x25, d25);
7228   __ Fcvtmu(x26, d26);
7229   __ Fcvtmu(x27, d27);
7230   __ Fcvtmu(x28, d28);
7231   __ Fcvtmu(x29, d29);
7232   __ Fcvtmu(x30, d30);
7233   END();
7234 
7235   RUN();
7236 
7237   CHECK_EQUAL_64(1, x0);
7238   CHECK_EQUAL_64(1, x1);
7239   CHECK_EQUAL_64(1, x2);
7240   CHECK_EQUAL_64(0, x3);
7241   CHECK_EQUAL_64(0xffffffff, x4);
7242   CHECK_EQUAL_64(0, x5);
7243   CHECK_EQUAL_64(0x7fffff80, x6);
7244   CHECK_EQUAL_64(0, x7);
7245   CHECK_EQUAL_64(1, x8);
7246   CHECK_EQUAL_64(1, x9);
7247   CHECK_EQUAL_64(1, x10);
7248   CHECK_EQUAL_64(0, x11);
7249   CHECK_EQUAL_64(0xffffffff, x12);
7250   CHECK_EQUAL_64(0, x13);
7251   CHECK_EQUAL_64(0x7ffffffe, x14);
7252   CHECK_EQUAL_64(1, x17);
7253   CHECK_EQUAL_64(1, x18);
7254   CHECK_EQUAL_64(0x0UL, x19);
7255   CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
7256   CHECK_EQUAL_64(0x0UL, x21);
7257   CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
7258   CHECK_EQUAL_64(0x0UL, x23);
7259   CHECK_EQUAL_64(1, x24);
7260   CHECK_EQUAL_64(1, x25);
7261   CHECK_EQUAL_64(0x0UL, x26);
7262   CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
7263   CHECK_EQUAL_64(0x0UL, x28);
7264   CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
7265   CHECK_EQUAL_64(0x0UL, x30);
7266 
7267   TEARDOWN();
7268 }
7269 
7270 
7271 TEST(fcvtns) {
7272   INIT_V8();
7273   SETUP();
7274 
7275   START();
7276   __ Fmov(s0, 1.0);
7277   __ Fmov(s1, 1.1);
7278   __ Fmov(s2, 1.5);
7279   __ Fmov(s3, -1.5);
7280   __ Fmov(s4, kFP32PositiveInfinity);
7281   __ Fmov(s5, kFP32NegativeInfinity);
7282   __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
7283   __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
7284   __ Fmov(d8, 1.0);
7285   __ Fmov(d9, 1.1);
7286   __ Fmov(d10, 1.5);
7287   __ Fmov(d11, -1.5);
7288   __ Fmov(d12, kFP64PositiveInfinity);
7289   __ Fmov(d13, kFP64NegativeInfinity);
7290   __ Fmov(d14, kWMaxInt - 1);
7291   __ Fmov(d15, kWMinInt + 1);
7292   __ Fmov(s17, 1.1);
7293   __ Fmov(s18, 1.5);
7294   __ Fmov(s19, -1.5);
7295   __ Fmov(s20, kFP32PositiveInfinity);
7296   __ Fmov(s21, kFP32NegativeInfinity);
7297   __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
7298   __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
7299   __ Fmov(d24, 1.1);
7300   __ Fmov(d25, 1.5);
7301   __ Fmov(d26, -1.5);
7302   __ Fmov(d27, kFP64PositiveInfinity);
7303   __ Fmov(d28, kFP64NegativeInfinity);
7304   __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
7305   __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
7306 
7307   __ Fcvtns(w0, s0);
7308   __ Fcvtns(w1, s1);
7309   __ Fcvtns(w2, s2);
7310   __ Fcvtns(w3, s3);
7311   __ Fcvtns(w4, s4);
7312   __ Fcvtns(w5, s5);
7313   __ Fcvtns(w6, s6);
7314   __ Fcvtns(w7, s7);
7315   __ Fcvtns(w8, d8);
7316   __ Fcvtns(w9, d9);
7317   __ Fcvtns(w10, d10);
7318   __ Fcvtns(w11, d11);
7319   __ Fcvtns(w12, d12);
7320   __ Fcvtns(w13, d13);
7321   __ Fcvtns(w14, d14);
7322   __ Fcvtns(w15, d15);
7323   __ Fcvtns(x17, s17);
7324   __ Fcvtns(x18, s18);
7325   __ Fcvtns(x19, s19);
7326   __ Fcvtns(x20, s20);
7327   __ Fcvtns(x21, s21);
7328   __ Fcvtns(x22, s22);
7329   __ Fcvtns(x23, s23);
7330   __ Fcvtns(x24, d24);
7331   __ Fcvtns(x25, d25);
7332   __ Fcvtns(x26, d26);
7333   __ Fcvtns(x27, d27);
7334 //  __ Fcvtns(x28, d28);
7335   __ Fcvtns(x29, d29);
7336   __ Fcvtns(x30, d30);
7337   END();
7338 
7339   RUN();
7340 
7341   CHECK_EQUAL_64(1, x0);
7342   CHECK_EQUAL_64(1, x1);
7343   CHECK_EQUAL_64(2, x2);
7344   CHECK_EQUAL_64(0xfffffffe, x3);
7345   CHECK_EQUAL_64(0x7fffffff, x4);
7346   CHECK_EQUAL_64(0x80000000, x5);
7347   CHECK_EQUAL_64(0x7fffff80, x6);
7348   CHECK_EQUAL_64(0x80000080, x7);
7349   CHECK_EQUAL_64(1, x8);
7350   CHECK_EQUAL_64(1, x9);
7351   CHECK_EQUAL_64(2, x10);
7352   CHECK_EQUAL_64(0xfffffffe, x11);
7353   CHECK_EQUAL_64(0x7fffffff, x12);
7354   CHECK_EQUAL_64(0x80000000, x13);
7355   CHECK_EQUAL_64(0x7ffffffe, x14);
7356   CHECK_EQUAL_64(0x80000001, x15);
7357   CHECK_EQUAL_64(1, x17);
7358   CHECK_EQUAL_64(2, x18);
7359   CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
7360   CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
7361   CHECK_EQUAL_64(0x8000000000000000UL, x21);
7362   CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
7363   CHECK_EQUAL_64(0x8000008000000000UL, x23);
7364   CHECK_EQUAL_64(1, x24);
7365   CHECK_EQUAL_64(2, x25);
7366   CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
7367   CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
7368 //  CHECK_EQUAL_64(0x8000000000000000UL, x28);
7369   CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
7370   CHECK_EQUAL_64(0x8000000000000400UL, x30);
7371 
7372   TEARDOWN();
7373 }
7374 
7375 
7376 TEST(fcvtnu) {
7377   INIT_V8();
7378   SETUP();
7379 
7380   START();
7381   __ Fmov(s0, 1.0);
7382   __ Fmov(s1, 1.1);
7383   __ Fmov(s2, 1.5);
7384   __ Fmov(s3, -1.5);
7385   __ Fmov(s4, kFP32PositiveInfinity);
7386   __ Fmov(s5, kFP32NegativeInfinity);
7387   __ Fmov(s6, 0xffffff00);  // Largest float < UINT32_MAX.
7388   __ Fmov(d8, 1.0);
7389   __ Fmov(d9, 1.1);
7390   __ Fmov(d10, 1.5);
7391   __ Fmov(d11, -1.5);
7392   __ Fmov(d12, kFP64PositiveInfinity);
7393   __ Fmov(d13, kFP64NegativeInfinity);
7394   __ Fmov(d14, 0xfffffffe);
7395   __ Fmov(s16, 1.0);
7396   __ Fmov(s17, 1.1);
7397   __ Fmov(s18, 1.5);
7398   __ Fmov(s19, -1.5);
7399   __ Fmov(s20, kFP32PositiveInfinity);
7400   __ Fmov(s21, kFP32NegativeInfinity);
7401   __ Fmov(s22, 0xffffff0000000000UL);   // Largest float < UINT64_MAX.
7402   __ Fmov(d24, 1.1);
7403   __ Fmov(d25, 1.5);
7404   __ Fmov(d26, -1.5);
7405   __ Fmov(d27, kFP64PositiveInfinity);
7406   __ Fmov(d28, kFP64NegativeInfinity);
7407   __ Fmov(d29, 0xfffffffffffff800UL);   // Largest double < UINT64_MAX.
7408   __ Fmov(s30, 0x100000000UL);
7409 
7410   __ Fcvtnu(w0, s0);
7411   __ Fcvtnu(w1, s1);
7412   __ Fcvtnu(w2, s2);
7413   __ Fcvtnu(w3, s3);
7414   __ Fcvtnu(w4, s4);
7415   __ Fcvtnu(w5, s5);
7416   __ Fcvtnu(w6, s6);
7417   __ Fcvtnu(w8, d8);
7418   __ Fcvtnu(w9, d9);
7419   __ Fcvtnu(w10, d10);
7420   __ Fcvtnu(w11, d11);
7421   __ Fcvtnu(w12, d12);
7422   __ Fcvtnu(w13, d13);
7423   __ Fcvtnu(w14, d14);
7424   __ Fcvtnu(w15, d15);
7425   __ Fcvtnu(x16, s16);
7426   __ Fcvtnu(x17, s17);
7427   __ Fcvtnu(x18, s18);
7428   __ Fcvtnu(x19, s19);
7429   __ Fcvtnu(x20, s20);
7430   __ Fcvtnu(x21, s21);
7431   __ Fcvtnu(x22, s22);
7432   __ Fcvtnu(x24, d24);
7433   __ Fcvtnu(x25, d25);
7434   __ Fcvtnu(x26, d26);
7435   __ Fcvtnu(x27, d27);
7436 //  __ Fcvtnu(x28, d28);
7437   __ Fcvtnu(x29, d29);
7438   __ Fcvtnu(w30, s30);
7439   END();
7440 
7441   RUN();
7442 
7443   CHECK_EQUAL_64(1, x0);
7444   CHECK_EQUAL_64(1, x1);
7445   CHECK_EQUAL_64(2, x2);
7446   CHECK_EQUAL_64(0, x3);
7447   CHECK_EQUAL_64(0xffffffff, x4);
7448   CHECK_EQUAL_64(0, x5);
7449   CHECK_EQUAL_64(0xffffff00, x6);
7450   CHECK_EQUAL_64(1, x8);
7451   CHECK_EQUAL_64(1, x9);
7452   CHECK_EQUAL_64(2, x10);
7453   CHECK_EQUAL_64(0, x11);
7454   CHECK_EQUAL_64(0xffffffff, x12);
7455   CHECK_EQUAL_64(0, x13);
7456   CHECK_EQUAL_64(0xfffffffe, x14);
7457   CHECK_EQUAL_64(1, x16);
7458   CHECK_EQUAL_64(1, x17);
7459   CHECK_EQUAL_64(2, x18);
7460   CHECK_EQUAL_64(0, x19);
7461   CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
7462   CHECK_EQUAL_64(0, x21);
7463   CHECK_EQUAL_64(0xffffff0000000000UL, x22);
7464   CHECK_EQUAL_64(1, x24);
7465   CHECK_EQUAL_64(2, x25);
7466   CHECK_EQUAL_64(0, x26);
7467   CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
7468 //  CHECK_EQUAL_64(0, x28);
7469   CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
7470   CHECK_EQUAL_64(0xffffffff, x30);
7471 
7472   TEARDOWN();
7473 }
7474 
7475 
7476 TEST(fcvtzs) {
7477   INIT_V8();
7478   SETUP();
7479 
7480   START();
7481   __ Fmov(s0, 1.0);
7482   __ Fmov(s1, 1.1);
7483   __ Fmov(s2, 1.5);
7484   __ Fmov(s3, -1.5);
7485   __ Fmov(s4, kFP32PositiveInfinity);
7486   __ Fmov(s5, kFP32NegativeInfinity);
7487   __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
7488   __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
7489   __ Fmov(d8, 1.0);
7490   __ Fmov(d9, 1.1);
7491   __ Fmov(d10, 1.5);
7492   __ Fmov(d11, -1.5);
7493   __ Fmov(d12, kFP64PositiveInfinity);
7494   __ Fmov(d13, kFP64NegativeInfinity);
7495   __ Fmov(d14, kWMaxInt - 1);
7496   __ Fmov(d15, kWMinInt + 1);
7497   __ Fmov(s17, 1.1);
7498   __ Fmov(s18, 1.5);
7499   __ Fmov(s19, -1.5);
7500   __ Fmov(s20, kFP32PositiveInfinity);
7501   __ Fmov(s21, kFP32NegativeInfinity);
7502   __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
7503   __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
7504   __ Fmov(d24, 1.1);
7505   __ Fmov(d25, 1.5);
7506   __ Fmov(d26, -1.5);
7507   __ Fmov(d27, kFP64PositiveInfinity);
7508   __ Fmov(d28, kFP64NegativeInfinity);
7509   __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
7510   __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
7511 
7512   __ Fcvtzs(w0, s0);
7513   __ Fcvtzs(w1, s1);
7514   __ Fcvtzs(w2, s2);
7515   __ Fcvtzs(w3, s3);
7516   __ Fcvtzs(w4, s4);
7517   __ Fcvtzs(w5, s5);
7518   __ Fcvtzs(w6, s6);
7519   __ Fcvtzs(w7, s7);
7520   __ Fcvtzs(w8, d8);
7521   __ Fcvtzs(w9, d9);
7522   __ Fcvtzs(w10, d10);
7523   __ Fcvtzs(w11, d11);
7524   __ Fcvtzs(w12, d12);
7525   __ Fcvtzs(w13, d13);
7526   __ Fcvtzs(w14, d14);
7527   __ Fcvtzs(w15, d15);
7528   __ Fcvtzs(x17, s17);
7529   __ Fcvtzs(x18, s18);
7530   __ Fcvtzs(x19, s19);
7531   __ Fcvtzs(x20, s20);
7532   __ Fcvtzs(x21, s21);
7533   __ Fcvtzs(x22, s22);
7534   __ Fcvtzs(x23, s23);
7535   __ Fcvtzs(x24, d24);
7536   __ Fcvtzs(x25, d25);
7537   __ Fcvtzs(x26, d26);
7538   __ Fcvtzs(x27, d27);
7539   __ Fcvtzs(x28, d28);
7540   __ Fcvtzs(x29, d29);
7541   __ Fcvtzs(x30, d30);
7542   END();
7543 
7544   RUN();
7545 
7546   CHECK_EQUAL_64(1, x0);
7547   CHECK_EQUAL_64(1, x1);
7548   CHECK_EQUAL_64(1, x2);
7549   CHECK_EQUAL_64(0xffffffff, x3);
7550   CHECK_EQUAL_64(0x7fffffff, x4);
7551   CHECK_EQUAL_64(0x80000000, x5);
7552   CHECK_EQUAL_64(0x7fffff80, x6);
7553   CHECK_EQUAL_64(0x80000080, x7);
7554   CHECK_EQUAL_64(1, x8);
7555   CHECK_EQUAL_64(1, x9);
7556   CHECK_EQUAL_64(1, x10);
7557   CHECK_EQUAL_64(0xffffffff, x11);
7558   CHECK_EQUAL_64(0x7fffffff, x12);
7559   CHECK_EQUAL_64(0x80000000, x13);
7560   CHECK_EQUAL_64(0x7ffffffe, x14);
7561   CHECK_EQUAL_64(0x80000001, x15);
7562   CHECK_EQUAL_64(1, x17);
7563   CHECK_EQUAL_64(1, x18);
7564   CHECK_EQUAL_64(0xffffffffffffffffUL, x19);
7565   CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
7566   CHECK_EQUAL_64(0x8000000000000000UL, x21);
7567   CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
7568   CHECK_EQUAL_64(0x8000008000000000UL, x23);
7569   CHECK_EQUAL_64(1, x24);
7570   CHECK_EQUAL_64(1, x25);
7571   CHECK_EQUAL_64(0xffffffffffffffffUL, x26);
7572   CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
7573   CHECK_EQUAL_64(0x8000000000000000UL, x28);
7574   CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
7575   CHECK_EQUAL_64(0x8000000000000400UL, x30);
7576 
7577   TEARDOWN();
7578 }
7579 
7580 
7581 TEST(fcvtzu) {
7582   INIT_V8();
7583   SETUP();
7584 
7585   START();
7586   __ Fmov(s0, 1.0);
7587   __ Fmov(s1, 1.1);
7588   __ Fmov(s2, 1.5);
7589   __ Fmov(s3, -1.5);
7590   __ Fmov(s4, kFP32PositiveInfinity);
7591   __ Fmov(s5, kFP32NegativeInfinity);
7592   __ Fmov(s6, 0x7fffff80);  // Largest float < INT32_MAX.
7593   __ Fneg(s7, s6);          // Smallest float > INT32_MIN.
7594   __ Fmov(d8, 1.0);
7595   __ Fmov(d9, 1.1);
7596   __ Fmov(d10, 1.5);
7597   __ Fmov(d11, -1.5);
7598   __ Fmov(d12, kFP64PositiveInfinity);
7599   __ Fmov(d13, kFP64NegativeInfinity);
7600   __ Fmov(d14, kWMaxInt - 1);
7601   __ Fmov(d15, kWMinInt + 1);
7602   __ Fmov(s17, 1.1);
7603   __ Fmov(s18, 1.5);
7604   __ Fmov(s19, -1.5);
7605   __ Fmov(s20, kFP32PositiveInfinity);
7606   __ Fmov(s21, kFP32NegativeInfinity);
7607   __ Fmov(s22, 0x7fffff8000000000UL);   // Largest float < INT64_MAX.
7608   __ Fneg(s23, s22);                    // Smallest float > INT64_MIN.
7609   __ Fmov(d24, 1.1);
7610   __ Fmov(d25, 1.5);
7611   __ Fmov(d26, -1.5);
7612   __ Fmov(d27, kFP64PositiveInfinity);
7613   __ Fmov(d28, kFP64NegativeInfinity);
7614   __ Fmov(d29, 0x7ffffffffffffc00UL);   // Largest double < INT64_MAX.
7615   __ Fneg(d30, d29);                    // Smallest double > INT64_MIN.
7616 
7617   __ Fcvtzu(w0, s0);
7618   __ Fcvtzu(w1, s1);
7619   __ Fcvtzu(w2, s2);
7620   __ Fcvtzu(w3, s3);
7621   __ Fcvtzu(w4, s4);
7622   __ Fcvtzu(w5, s5);
7623   __ Fcvtzu(w6, s6);
7624   __ Fcvtzu(w7, s7);
7625   __ Fcvtzu(w8, d8);
7626   __ Fcvtzu(w9, d9);
7627   __ Fcvtzu(w10, d10);
7628   __ Fcvtzu(w11, d11);
7629   __ Fcvtzu(w12, d12);
7630   __ Fcvtzu(w13, d13);
7631   __ Fcvtzu(w14, d14);
7632   __ Fcvtzu(x17, s17);
7633   __ Fcvtzu(x18, s18);
7634   __ Fcvtzu(x19, s19);
7635   __ Fcvtzu(x20, s20);
7636   __ Fcvtzu(x21, s21);
7637   __ Fcvtzu(x22, s22);
7638   __ Fcvtzu(x23, s23);
7639   __ Fcvtzu(x24, d24);
7640   __ Fcvtzu(x25, d25);
7641   __ Fcvtzu(x26, d26);
7642   __ Fcvtzu(x27, d27);
7643   __ Fcvtzu(x28, d28);
7644   __ Fcvtzu(x29, d29);
7645   __ Fcvtzu(x30, d30);
7646   END();
7647 
7648   RUN();
7649 
7650   CHECK_EQUAL_64(1, x0);
7651   CHECK_EQUAL_64(1, x1);
7652   CHECK_EQUAL_64(1, x2);
7653   CHECK_EQUAL_64(0, x3);
7654   CHECK_EQUAL_64(0xffffffff, x4);
7655   CHECK_EQUAL_64(0, x5);
7656   CHECK_EQUAL_64(0x7fffff80, x6);
7657   CHECK_EQUAL_64(0, x7);
7658   CHECK_EQUAL_64(1, x8);
7659   CHECK_EQUAL_64(1, x9);
7660   CHECK_EQUAL_64(1, x10);
7661   CHECK_EQUAL_64(0, x11);
7662   CHECK_EQUAL_64(0xffffffff, x12);
7663   CHECK_EQUAL_64(0, x13);
7664   CHECK_EQUAL_64(0x7ffffffe, x14);
7665   CHECK_EQUAL_64(1, x17);
7666   CHECK_EQUAL_64(1, x18);
7667   CHECK_EQUAL_64(0x0UL, x19);
7668   CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
7669   CHECK_EQUAL_64(0x0UL, x21);
7670   CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
7671   CHECK_EQUAL_64(0x0UL, x23);
7672   CHECK_EQUAL_64(1, x24);
7673   CHECK_EQUAL_64(1, x25);
7674   CHECK_EQUAL_64(0x0UL, x26);
7675   CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
7676   CHECK_EQUAL_64(0x0UL, x28);
7677   CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
7678   CHECK_EQUAL_64(0x0UL, x30);
7679 
7680   TEARDOWN();
7681 }
7682 
7683 
7684 // Test that scvtf and ucvtf can convert the 64-bit input into the expected
7685 // value. All possible values of 'fbits' are tested. The expected value is
7686 // modified accordingly in each case.
7687 //
7688 // The expected value is specified as the bit encoding of the expected double
7689 // produced by scvtf (expected_scvtf_bits) as well as ucvtf
7690 // (expected_ucvtf_bits).
7691 //
7692 // Where the input value is representable by int32_t or uint32_t, conversions
7693 // from W registers will also be tested.
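// For example, an input of 1 converted with fbits == 4 is treated as the
// fixed-point value 1 / 2^4 = 0.0625, which is why the checks below divide
// the fbits == 0 expectation by pow(2.0, fbits).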
7694 static void TestUScvtfHelper(uint64_t in,
7695                              uint64_t expected_scvtf_bits,
7696                              uint64_t expected_ucvtf_bits) {
7697   uint64_t u64 = in;
7698   uint32_t u32 = u64 & 0xffffffff;
7699   int64_t s64 = static_cast<int64_t>(in);
7700   int32_t s32 = s64 & 0x7fffffff;
7701 
7702   bool cvtf_s32 = (s64 == s32);
7703   bool cvtf_u32 = (u64 == u32);
7704 
7705   double results_scvtf_x[65];
7706   double results_ucvtf_x[65];
7707   double results_scvtf_w[33];
7708   double results_ucvtf_w[33];
7709 
7710   SETUP();
7711   START();
7712 
7713   __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7714   __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7715   __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7716   __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7717 
7718   __ Mov(x10, s64);
7719 
7720   // Corrupt the top word, in case it is accidentally used during W-register
7721   // conversions.
7722   __ Mov(x11, 0x5555555555555555);
7723   __ Bfi(x11, x10, 0, kWRegSizeInBits);
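  // x11 now holds 0x55555555 in its top word and the low word of x10 in its
  // low word, so a conversion that wrongly reads the whole X register will
  // produce a detectably different result.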
7724 
7725   // Test integer conversions.
7726   __ Scvtf(d0, x10);
7727   __ Ucvtf(d1, x10);
7728   __ Scvtf(d2, w11);
7729   __ Ucvtf(d3, w11);
7730   __ Str(d0, MemOperand(x0));
7731   __ Str(d1, MemOperand(x1));
7732   __ Str(d2, MemOperand(x2));
7733   __ Str(d3, MemOperand(x3));
7734 
7735   // Test all possible values of fbits.
7736   for (int fbits = 1; fbits <= 32; fbits++) {
7737     __ Scvtf(d0, x10, fbits);
7738     __ Ucvtf(d1, x10, fbits);
7739     __ Scvtf(d2, w11, fbits);
7740     __ Ucvtf(d3, w11, fbits);
7741     __ Str(d0, MemOperand(x0, fbits * kDRegSize));
7742     __ Str(d1, MemOperand(x1, fbits * kDRegSize));
7743     __ Str(d2, MemOperand(x2, fbits * kDRegSize));
7744     __ Str(d3, MemOperand(x3, fbits * kDRegSize));
7745   }
7746 
7747   // Conversions from W registers can only handle fbits values <= 32, so just
7748   // test conversions from X registers for 32 < fbits <= 64.
7749   for (int fbits = 33; fbits <= 64; fbits++) {
7750     __ Scvtf(d0, x10, fbits);
7751     __ Ucvtf(d1, x10, fbits);
7752     __ Str(d0, MemOperand(x0, fbits * kDRegSize));
7753     __ Str(d1, MemOperand(x1, fbits * kDRegSize));
7754   }
7755 
7756   END();
7757   RUN();
7758 
7759   // Check the results.
7760   double expected_scvtf_base = rawbits_to_double(expected_scvtf_bits);
7761   double expected_ucvtf_base = rawbits_to_double(expected_ucvtf_bits);
7762 
7763   for (int fbits = 0; fbits <= 32; fbits++) {
7764     double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7765     double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7766     CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7767     CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7768     if (cvtf_s32) CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
7769     if (cvtf_u32) CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
7770   }
7771   for (int fbits = 33; fbits <= 64; fbits++) {
7772     double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
7773     double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
7774     CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
7775     CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
7776   }
7777 
7778   TEARDOWN();
7779 }
7780 
7781 
7782 TEST(scvtf_ucvtf_double) {
7783   INIT_V8();
7784   // Simple conversions of positive numbers which require no rounding; the
7785   // results should not depend on the rounding mode, and ucvtf and scvtf should
7786   // produce the same result.
7787   TestUScvtfHelper(0x0000000000000000, 0x0000000000000000, 0x0000000000000000);
7788   TestUScvtfHelper(0x0000000000000001, 0x3ff0000000000000, 0x3ff0000000000000);
7789   TestUScvtfHelper(0x0000000040000000, 0x41d0000000000000, 0x41d0000000000000);
7790   TestUScvtfHelper(0x0000000100000000, 0x41f0000000000000, 0x41f0000000000000);
7791   TestUScvtfHelper(0x4000000000000000, 0x43d0000000000000, 0x43d0000000000000);
7792   // Test mantissa extremities.
7793   TestUScvtfHelper(0x4000000000000400, 0x43d0000000000001, 0x43d0000000000001);
7794   // The largest int32_t that fits in a double.
7795   TestUScvtfHelper(0x000000007fffffff, 0x41dfffffffc00000, 0x41dfffffffc00000);
7796   // Values that would be negative if treated as an int32_t.
7797   TestUScvtfHelper(0x00000000ffffffff, 0x41efffffffe00000, 0x41efffffffe00000);
7798   TestUScvtfHelper(0x0000000080000000, 0x41e0000000000000, 0x41e0000000000000);
7799   TestUScvtfHelper(0x0000000080000001, 0x41e0000000200000, 0x41e0000000200000);
7800   // The largest int64_t that fits in a double.
7801   TestUScvtfHelper(0x7ffffffffffffc00, 0x43dfffffffffffff, 0x43dfffffffffffff);
7802   // Check for bit pattern reproduction.
7803   TestUScvtfHelper(0x0123456789abcde0, 0x43723456789abcde, 0x43723456789abcde);
7804   TestUScvtfHelper(0x0000000012345678, 0x41b2345678000000, 0x41b2345678000000);
7805 
7806   // Simple conversions of negative int64_t values. These require no rounding,
7807   // and the results should not depend on the rounding mode.
7808   TestUScvtfHelper(0xffffffffc0000000, 0xc1d0000000000000, 0x43effffffff80000);
7809   TestUScvtfHelper(0xffffffff00000000, 0xc1f0000000000000, 0x43efffffffe00000);
7810   TestUScvtfHelper(0xc000000000000000, 0xc3d0000000000000, 0x43e8000000000000);
7811 
7812   // Conversions which require rounding.
7813   TestUScvtfHelper(0x1000000000000000, 0x43b0000000000000, 0x43b0000000000000);
7814   TestUScvtfHelper(0x1000000000000001, 0x43b0000000000000, 0x43b0000000000000);
7815   TestUScvtfHelper(0x1000000000000080, 0x43b0000000000000, 0x43b0000000000000);
7816   TestUScvtfHelper(0x1000000000000081, 0x43b0000000000001, 0x43b0000000000001);
7817   TestUScvtfHelper(0x1000000000000100, 0x43b0000000000001, 0x43b0000000000001);
7818   TestUScvtfHelper(0x1000000000000101, 0x43b0000000000001, 0x43b0000000000001);
7819   TestUScvtfHelper(0x1000000000000180, 0x43b0000000000002, 0x43b0000000000002);
7820   TestUScvtfHelper(0x1000000000000181, 0x43b0000000000002, 0x43b0000000000002);
7821   TestUScvtfHelper(0x1000000000000200, 0x43b0000000000002, 0x43b0000000000002);
7822   TestUScvtfHelper(0x1000000000000201, 0x43b0000000000002, 0x43b0000000000002);
7823   TestUScvtfHelper(0x1000000000000280, 0x43b0000000000002, 0x43b0000000000002);
7824   TestUScvtfHelper(0x1000000000000281, 0x43b0000000000003, 0x43b0000000000003);
7825   TestUScvtfHelper(0x1000000000000300, 0x43b0000000000003, 0x43b0000000000003);
7826   // Check rounding of negative int64_t values (and large uint64_t values).
7827   TestUScvtfHelper(0x8000000000000000, 0xc3e0000000000000, 0x43e0000000000000);
7828   TestUScvtfHelper(0x8000000000000001, 0xc3e0000000000000, 0x43e0000000000000);
7829   TestUScvtfHelper(0x8000000000000200, 0xc3e0000000000000, 0x43e0000000000000);
7830   TestUScvtfHelper(0x8000000000000201, 0xc3dfffffffffffff, 0x43e0000000000000);
7831   TestUScvtfHelper(0x8000000000000400, 0xc3dfffffffffffff, 0x43e0000000000000);
7832   TestUScvtfHelper(0x8000000000000401, 0xc3dfffffffffffff, 0x43e0000000000001);
7833   TestUScvtfHelper(0x8000000000000600, 0xc3dffffffffffffe, 0x43e0000000000001);
7834   TestUScvtfHelper(0x8000000000000601, 0xc3dffffffffffffe, 0x43e0000000000001);
7835   TestUScvtfHelper(0x8000000000000800, 0xc3dffffffffffffe, 0x43e0000000000001);
7836   TestUScvtfHelper(0x8000000000000801, 0xc3dffffffffffffe, 0x43e0000000000001);
7837   TestUScvtfHelper(0x8000000000000a00, 0xc3dffffffffffffe, 0x43e0000000000001);
7838   TestUScvtfHelper(0x8000000000000a01, 0xc3dffffffffffffd, 0x43e0000000000001);
7839   TestUScvtfHelper(0x8000000000000c00, 0xc3dffffffffffffd, 0x43e0000000000002);
7840   // Round up to produce a result that's too big for the input to represent.
7841   TestUScvtfHelper(0x7ffffffffffffe00, 0x43e0000000000000, 0x43e0000000000000);
7842   TestUScvtfHelper(0x7fffffffffffffff, 0x43e0000000000000, 0x43e0000000000000);
7843   TestUScvtfHelper(0xfffffffffffffc00, 0xc090000000000000, 0x43f0000000000000);
7844   TestUScvtfHelper(0xffffffffffffffff, 0xbff0000000000000, 0x43f0000000000000);
7845 }
7846 
7847 
7848 // The same as TestUScvtfHelper, but convert to floats.
7849 static void TestUScvtf32Helper(uint64_t in,
7850                                uint32_t expected_scvtf_bits,
7851                                uint32_t expected_ucvtf_bits) {
7852   uint64_t u64 = in;
7853   uint32_t u32 = u64 & 0xffffffff;
7854   int64_t s64 = static_cast<int64_t>(in);
7855   int32_t s32 = s64 & 0x7fffffff;
7856 
7857   bool cvtf_s32 = (s64 == s32);
7858   bool cvtf_u32 = (u64 == u32);
7859 
7860   float results_scvtf_x[65];
7861   float results_ucvtf_x[65];
7862   float results_scvtf_w[33];
7863   float results_ucvtf_w[33];
7864 
7865   SETUP();
7866   START();
7867 
7868   __ Mov(x0, reinterpret_cast<int64_t>(results_scvtf_x));
7869   __ Mov(x1, reinterpret_cast<int64_t>(results_ucvtf_x));
7870   __ Mov(x2, reinterpret_cast<int64_t>(results_scvtf_w));
7871   __ Mov(x3, reinterpret_cast<int64_t>(results_ucvtf_w));
7872 
7873   __ Mov(x10, s64);
7874 
7875   // Corrupt the top word, in case it is accidentally used during W-register
7876   // conversions.
7877   __ Mov(x11, 0x5555555555555555);
7878   __ Bfi(x11, x10, 0, kWRegSizeInBits);
7879 
7880   // Test integer conversions.
7881   __ Scvtf(s0, x10);
7882   __ Ucvtf(s1, x10);
7883   __ Scvtf(s2, w11);
7884   __ Ucvtf(s3, w11);
7885   __ Str(s0, MemOperand(x0));
7886   __ Str(s1, MemOperand(x1));
7887   __ Str(s2, MemOperand(x2));
7888   __ Str(s3, MemOperand(x3));
7889 
7890   // Test all possible values of fbits.
7891   for (int fbits = 1; fbits <= 32; fbits++) {
7892     __ Scvtf(s0, x10, fbits);
7893     __ Ucvtf(s1, x10, fbits);
7894     __ Scvtf(s2, w11, fbits);
7895     __ Ucvtf(s3, w11, fbits);
7896     __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7897     __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7898     __ Str(s2, MemOperand(x2, fbits * kSRegSize));
7899     __ Str(s3, MemOperand(x3, fbits * kSRegSize));
7900   }
7901 
7902   // Conversions from W registers can only handle fbits values <= 32, so just
7903   // test conversions from X registers for 32 < fbits <= 64.
7904   for (int fbits = 33; fbits <= 64; fbits++) {
7905     __ Scvtf(s0, x10, fbits);
7906     __ Ucvtf(s1, x10, fbits);
7907     __ Str(s0, MemOperand(x0, fbits * kSRegSize));
7908     __ Str(s1, MemOperand(x1, fbits * kSRegSize));
7909   }
7910 
7911   END();
7912   RUN();
7913 
7914   // Check the results.
7915   float expected_scvtf_base = rawbits_to_float(expected_scvtf_bits);
7916   float expected_ucvtf_base = rawbits_to_float(expected_ucvtf_bits);
7917 
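  // Note that both loops below break out of their first iteration, so only
  // the fbits == 0 results are actually verified here; the checks for
  // non-zero fbits are skipped.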
7918   for (int fbits = 0; fbits <= 32; fbits++) {
7919     float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7920     float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7921     CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7922     CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7923     if (cvtf_s32) CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
7924     if (cvtf_u32) CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
7925     break;
7926   }
7927   for (int fbits = 33; fbits <= 64; fbits++) {
7928     break;
7929     float expected_scvtf = expected_scvtf_base / powf(2, fbits);
7930     float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
7931     CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
7932     CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
7933   }
7934 
7935   TEARDOWN();
7936 }
7937 
7938 
7939 TEST(scvtf_ucvtf_float) {
7940   INIT_V8();
7941   // Simple conversions of positive numbers which require no rounding; the
7942   // results should not depend on the rounding mode, and ucvtf and scvtf should
7943   // produce the same result.
7944   TestUScvtf32Helper(0x0000000000000000, 0x00000000, 0x00000000);
7945   TestUScvtf32Helper(0x0000000000000001, 0x3f800000, 0x3f800000);
7946   TestUScvtf32Helper(0x0000000040000000, 0x4e800000, 0x4e800000);
7947   TestUScvtf32Helper(0x0000000100000000, 0x4f800000, 0x4f800000);
7948   TestUScvtf32Helper(0x4000000000000000, 0x5e800000, 0x5e800000);
7949   // Test mantissa extremities.
7950   TestUScvtf32Helper(0x0000000000800001, 0x4b000001, 0x4b000001);
7951   TestUScvtf32Helper(0x4000008000000000, 0x5e800001, 0x5e800001);
7952   // The largest int32_t that fits in a float.
7953   TestUScvtf32Helper(0x000000007fffff80, 0x4effffff, 0x4effffff);
7954   // Values that would be negative if treated as an int32_t.
7955   TestUScvtf32Helper(0x00000000ffffff00, 0x4f7fffff, 0x4f7fffff);
7956   TestUScvtf32Helper(0x0000000080000000, 0x4f000000, 0x4f000000);
7957   TestUScvtf32Helper(0x0000000080000100, 0x4f000001, 0x4f000001);
7958   // The largest int64_t that fits in a float.
7959   TestUScvtf32Helper(0x7fffff8000000000, 0x5effffff, 0x5effffff);
7960   // Check for bit pattern reproduction.
7961   TestUScvtf32Helper(0x0000000000876543, 0x4b076543, 0x4b076543);
7962 
7963   // Simple conversions of negative int64_t values. These require no rounding,
7964   // and the results should not depend on the rounding mode.
7965   TestUScvtf32Helper(0xfffffc0000000000, 0xd4800000, 0x5f7ffffc);
7966   TestUScvtf32Helper(0xc000000000000000, 0xde800000, 0x5f400000);
7967 
7968   // Conversions which require rounding.
7969   TestUScvtf32Helper(0x0000800000000000, 0x57000000, 0x57000000);
7970   TestUScvtf32Helper(0x0000800000000001, 0x57000000, 0x57000000);
7971   TestUScvtf32Helper(0x0000800000800000, 0x57000000, 0x57000000);
7972   TestUScvtf32Helper(0x0000800000800001, 0x57000001, 0x57000001);
7973   TestUScvtf32Helper(0x0000800001000000, 0x57000001, 0x57000001);
7974   TestUScvtf32Helper(0x0000800001000001, 0x57000001, 0x57000001);
7975   TestUScvtf32Helper(0x0000800001800000, 0x57000002, 0x57000002);
7976   TestUScvtf32Helper(0x0000800001800001, 0x57000002, 0x57000002);
7977   TestUScvtf32Helper(0x0000800002000000, 0x57000002, 0x57000002);
7978   TestUScvtf32Helper(0x0000800002000001, 0x57000002, 0x57000002);
7979   TestUScvtf32Helper(0x0000800002800000, 0x57000002, 0x57000002);
7980   TestUScvtf32Helper(0x0000800002800001, 0x57000003, 0x57000003);
7981   TestUScvtf32Helper(0x0000800003000000, 0x57000003, 0x57000003);
7982   // Check rounding of negative int64_t values (and large uint64_t values).
7983   TestUScvtf32Helper(0x8000000000000000, 0xdf000000, 0x5f000000);
7984   TestUScvtf32Helper(0x8000000000000001, 0xdf000000, 0x5f000000);
7985   TestUScvtf32Helper(0x8000004000000000, 0xdf000000, 0x5f000000);
7986   TestUScvtf32Helper(0x8000004000000001, 0xdeffffff, 0x5f000000);
7987   TestUScvtf32Helper(0x8000008000000000, 0xdeffffff, 0x5f000000);
7988   TestUScvtf32Helper(0x8000008000000001, 0xdeffffff, 0x5f000001);
7989   TestUScvtf32Helper(0x800000c000000000, 0xdefffffe, 0x5f000001);
7990   TestUScvtf32Helper(0x800000c000000001, 0xdefffffe, 0x5f000001);
7991   TestUScvtf32Helper(0x8000010000000000, 0xdefffffe, 0x5f000001);
7992   TestUScvtf32Helper(0x8000010000000001, 0xdefffffe, 0x5f000001);
7993   TestUScvtf32Helper(0x8000014000000000, 0xdefffffe, 0x5f000001);
7994   TestUScvtf32Helper(0x8000014000000001, 0xdefffffd, 0x5f000001);
7995   TestUScvtf32Helper(0x8000018000000000, 0xdefffffd, 0x5f000002);
7996   // Round up to produce a result that's too big for the input to represent.
7997   TestUScvtf32Helper(0x000000007fffffc0, 0x4f000000, 0x4f000000);
7998   TestUScvtf32Helper(0x000000007fffffff, 0x4f000000, 0x4f000000);
7999   TestUScvtf32Helper(0x00000000ffffff80, 0x4f800000, 0x4f800000);
8000   TestUScvtf32Helper(0x00000000ffffffff, 0x4f800000, 0x4f800000);
8001   TestUScvtf32Helper(0x7fffffc000000000, 0x5f000000, 0x5f000000);
8002   TestUScvtf32Helper(0x7fffffffffffffff, 0x5f000000, 0x5f000000);
8003   TestUScvtf32Helper(0xffffff8000000000, 0xd3000000, 0x5f800000);
8004   TestUScvtf32Helper(0xffffffffffffffff, 0xbf800000, 0x5f800000);
8005 }
8006 
8007 
8008 TEST(system_mrs) {
8009   INIT_V8();
8010   SETUP();
8011 
8012   START();
8013   __ Mov(w0, 0);
8014   __ Mov(w1, 1);
8015   __ Mov(w2, 0x80000000);
8016 
8017   // Set the Z and C flags.
8018   __ Cmp(w0, w0);
8019   __ Mrs(x3, NZCV);
8020 
8021   // Set the N flag.
8022   __ Cmp(w0, w1);
8023   __ Mrs(x4, NZCV);
8024 
8025   // Set the Z, C and V flags.
8026   __ Adds(w0, w2, w2);
8027   __ Mrs(x5, NZCV);
8028 
8029   // Read the default FPCR.
8030   __ Mrs(x6, FPCR);
8031   END();
8032 
8033   RUN();
8034 
8035   // NZCV
8036   CHECK_EQUAL_32(ZCFlag, w3);
8037   CHECK_EQUAL_32(NFlag, w4);
8038   CHECK_EQUAL_32(ZCVFlag, w5);
8039 
8040   // FPCR
8041   // The default FPCR on Linux-based platforms is 0.
8042   CHECK_EQUAL_32(0, w6);
8043 
8044   TEARDOWN();
8045 }
8046 
8047 
8048 TEST(system_msr) {
8049   INIT_V8();
8050   // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
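  // (In the AArch64 FPCR layout these are AHP = bit 26, DN = bit 25,
  // FZ = bit 24 and RMode = bits [23:22], which together form 0x07c00000.)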
8051   const uint64_t fpcr_core = 0x07c00000;
8052 
8053   // All FPCR fields (including fields which may be read-as-zero):
8054   //  Stride, Len
8055   //  IDE, IXE, UFE, OFE, DZE, IOE
8056   const uint64_t fpcr_all = fpcr_core | 0x00379f00;
8057 
8058   SETUP();
8059 
8060   START();
8061   __ Mov(w0, 0);
8062   __ Mov(w1, 0x7fffffff);
8063 
8064   __ Mov(x7, 0);
8065 
8066   __ Mov(x10, NVFlag);
8067   __ Cmp(w0, w0);     // Set Z and C.
8068   __ Msr(NZCV, x10);  // Set N and V.
8069   // The Msr should have overwritten every flag set by the Cmp.
8070   __ Cinc(x7, x7, mi);  // N
8071   __ Cinc(x7, x7, ne);  // !Z
8072   __ Cinc(x7, x7, lo);  // !C
8073   __ Cinc(x7, x7, vs);  // V
8074 
8075   __ Mov(x10, ZCFlag);
8076   __ Cmn(w1, w1);     // Set N and V.
8077   __ Msr(NZCV, x10);  // Set Z and C.
8078   // The Msr should have overwritten every flag set by the Cmn.
8079   __ Cinc(x7, x7, pl);  // !N
8080   __ Cinc(x7, x7, eq);  // Z
8081   __ Cinc(x7, x7, hs);  // C
8082   __ Cinc(x7, x7, vc);  // !V
8083 
8084   // All core FPCR fields must be writable.
8085   __ Mov(x8, fpcr_core);
8086   __ Msr(FPCR, x8);
8087   __ Mrs(x8, FPCR);
8088 
8089   // All FPCR fields, including optional ones. This part of the test doesn't
8090   // achieve much other than ensuring that supported fields can be cleared by
8091   // the next test.
8092   __ Mov(x9, fpcr_all);
8093   __ Msr(FPCR, x9);
8094   __ Mrs(x9, FPCR);
8095   __ And(x9, x9, fpcr_core);
8096 
8097   // The undefined bits must ignore writes.
8098   // It's conceivable that a future version of the architecture could use these
8099   // fields (making this test fail), but in the meantime this is a useful test
8100   // for the simulator.
8101   __ Mov(x10, ~fpcr_all);
8102   __ Msr(FPCR, x10);
8103   __ Mrs(x10, FPCR);
8104 
8105   END();
8106 
8107   RUN();
8108 
8109   // We should have incremented x7 (from 0) exactly 8 times.
8110   CHECK_EQUAL_64(8, x7);
8111 
8112   CHECK_EQUAL_64(fpcr_core, x8);
8113   CHECK_EQUAL_64(fpcr_core, x9);
8114   CHECK_EQUAL_64(0, x10);
8115 
8116   TEARDOWN();
8117 }
8118 
8119 
8120 TEST(system_nop) {
8121   INIT_V8();
8122   SETUP();
8123   RegisterDump before;
8124 
8125   START();
8126   before.Dump(&masm);
8127   __ Nop();
8128   END();
8129 
8130   RUN();
8131 
8132   CHECK_EQUAL_REGISTERS(before);
8133   CHECK_EQUAL_NZCV(before.flags_nzcv());
8134 
8135   TEARDOWN();
8136 }
8137 
8138 
8139 TEST(zero_dest) {
8140   INIT_V8();
8141   SETUP();
8142   RegisterDump before;
8143 
8144   START();
8145   // Preserve the system stack pointer, in case we clobber it.
8146   __ Mov(x30, csp);
8147   // Initialize the other registers used in this test.
8148   uint64_t literal_base = 0x0100001000100101UL;
8149   __ Mov(x0, 0);
8150   __ Mov(x1, literal_base);
8151   for (int i = 2; i < x30.code(); i++) {
8152     __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
8153   }
8154   before.Dump(&masm);
8155 
8156   // All of these instructions should be NOPs in these forms, but have
8157   // alternate forms which can write into the stack pointer.
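  // (For example, the shifted-register form of 'add' treats register 31 as
  // xzr and discards the result, while the extended-register form treats it
  // as the stack pointer; these tests check that csp is left untouched.)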
8158   __ add(xzr, x0, x1);
8159   __ add(xzr, x1, xzr);
8160   __ add(xzr, xzr, x1);
8161 
8162   __ and_(xzr, x0, x2);
8163   __ and_(xzr, x2, xzr);
8164   __ and_(xzr, xzr, x2);
8165 
8166   __ bic(xzr, x0, x3);
8167   __ bic(xzr, x3, xzr);
8168   __ bic(xzr, xzr, x3);
8169 
8170   __ eon(xzr, x0, x4);
8171   __ eon(xzr, x4, xzr);
8172   __ eon(xzr, xzr, x4);
8173 
8174   __ eor(xzr, x0, x5);
8175   __ eor(xzr, x5, xzr);
8176   __ eor(xzr, xzr, x5);
8177 
8178   __ orr(xzr, x0, x6);
8179   __ orr(xzr, x6, xzr);
8180   __ orr(xzr, xzr, x6);
8181 
8182   __ sub(xzr, x0, x7);
8183   __ sub(xzr, x7, xzr);
8184   __ sub(xzr, xzr, x7);
8185 
8186   // Swap the saved system stack pointer with the real one. If csp was written
8187   // during the test, it will show up in x30. This is done because the test
8188   // framework assumes that csp will be valid at the end of the test.
8189   __ Mov(x29, x30);
8190   __ Mov(x30, csp);
8191   __ Mov(csp, x29);
8192   // We used x29 as a scratch register, so reset it to make sure it doesn't
8193   // trigger a test failure.
8194   __ Add(x29, x28, x1);
8195   END();
8196 
8197   RUN();
8198 
8199   CHECK_EQUAL_REGISTERS(before);
8200   CHECK_EQUAL_NZCV(before.flags_nzcv());
8201 
8202   TEARDOWN();
8203 }
8204 
8205 
8206 TEST(zero_dest_setflags) {
8207   INIT_V8();
8208   SETUP();
8209   RegisterDump before;
8210 
8211   START();
8212   // Preserve the system stack pointer, in case we clobber it.
8213   __ Mov(x30, csp);
8214   // Initialize the other registers used in this test.
8215   uint64_t literal_base = 0x0100001000100101UL;
8216   __ Mov(x0, 0);
8217   __ Mov(x1, literal_base);
8218   for (int i = 2; i < 30; i++) {
8219     __ Add(Register::XRegFromCode(i), Register::XRegFromCode(i-1), x1);
8220   }
8221   before.Dump(&masm);
8222 
8223   // All of these instructions should only write to the flags in these forms,
8224   // but have alternate forms which can write into the stack pointer.
8225   __ adds(xzr, x0, Operand(x1, UXTX));
8226   __ adds(xzr, x1, Operand(xzr, UXTX));
8227   __ adds(xzr, x1, 1234);
8228   __ adds(xzr, x0, x1);
8229   __ adds(xzr, x1, xzr);
8230   __ adds(xzr, xzr, x1);
8231 
8232   __ ands(xzr, x2, ~0xf);
8233   __ ands(xzr, xzr, ~0xf);
8234   __ ands(xzr, x0, x2);
8235   __ ands(xzr, x2, xzr);
8236   __ ands(xzr, xzr, x2);
8237 
8238   __ bics(xzr, x3, ~0xf);
8239   __ bics(xzr, xzr, ~0xf);
8240   __ bics(xzr, x0, x3);
8241   __ bics(xzr, x3, xzr);
8242   __ bics(xzr, xzr, x3);
8243 
8244   __ subs(xzr, x0, Operand(x3, UXTX));
8245   __ subs(xzr, x3, Operand(xzr, UXTX));
8246   __ subs(xzr, x3, 1234);
8247   __ subs(xzr, x0, x3);
8248   __ subs(xzr, x3, xzr);
8249   __ subs(xzr, xzr, x3);
8250 
8251   // Swap the saved system stack pointer with the real one. If csp was written
8252   // during the test, it will show up in x30. This is done because the test
8253   // framework assumes that csp will be valid at the end of the test.
8254   __ Mov(x29, x30);
8255   __ Mov(x30, csp);
8256   __ Mov(csp, x29);
8257   // We used x29 as a scratch register, so reset it to make sure it doesn't
8258   // trigger a test failure.
8259   __ Add(x29, x28, x1);
8260   END();
8261 
8262   RUN();
8263 
8264   CHECK_EQUAL_REGISTERS(before);
8265 
8266   TEARDOWN();
8267 }
8268 
8269 
8270 TEST(register_bit) {
8271   // No code generation takes place in this test, so there is no need to
8272   // set up and tear down.
8273 
8274   // Simple tests.
8275   CHECK(x0.Bit() == (1UL << 0));
8276   CHECK(x1.Bit() == (1UL << 1));
8277   CHECK(x10.Bit() == (1UL << 10));
8278 
8279   // AAPCS64 definitions.
8280   CHECK(fp.Bit() == (1UL << kFramePointerRegCode));
8281   CHECK(lr.Bit() == (1UL << kLinkRegCode));
8282 
8283   // Fixed (hardware) definitions.
8284   CHECK(xzr.Bit() == (1UL << kZeroRegCode));
8285 
8286   // Internal ABI definitions.
8287   CHECK(jssp.Bit() == (1UL << kJSSPCode));
8288   CHECK(csp.Bit() == (1UL << kSPRegInternalCode));
8289   CHECK(csp.Bit() != xzr.Bit());
8290 
8291   // xn.Bit() == wn.Bit() at all times, for the same n.
8292   CHECK(x0.Bit() == w0.Bit());
8293   CHECK(x1.Bit() == w1.Bit());
8294   CHECK(x10.Bit() == w10.Bit());
8295   CHECK(jssp.Bit() == wjssp.Bit());
8296   CHECK(xzr.Bit() == wzr.Bit());
8297   CHECK(csp.Bit() == wcsp.Bit());
8298 }
8299 
8300 
8301 TEST(stack_pointer_override) {
8302   // This test generates some stack maintenance code, but the test only checks
8303   // the reported state.
8304   INIT_V8();
8305   SETUP();
8306   START();
8307 
8308   // The default stack pointer in V8 is jssp, but for compatibility with W16,
8309   // the test framework sets it to csp before calling the test.
8310   CHECK(csp.Is(__ StackPointer()));
8311   __ SetStackPointer(x0);
8312   CHECK(x0.Is(__ StackPointer()));
8313   __ SetStackPointer(jssp);
8314   CHECK(jssp.Is(__ StackPointer()));
8315   __ SetStackPointer(csp);
8316   CHECK(csp.Is(__ StackPointer()));
8317 
8318   END();
8319   RUN();
8320   TEARDOWN();
8321 }
8322 
8323 
8324 TEST(peek_poke_simple) {
8325   INIT_V8();
8326   SETUP();
8327   START();
8328 
8329   static const RegList x0_to_x3 = x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit();
8330   static const RegList x10_to_x13 = x10.Bit() | x11.Bit() |
8331                                     x12.Bit() | x13.Bit();
8332 
8333   // The literal base is chosen to have two useful properties:
8334   //  * When multiplied by small values (such as a register index), this value
8335   //    is clearly readable in the result.
8336   //  * The value is not formed from repeating fixed-size smaller values, so it
8337   //    can be used to detect endianness-related errors.
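  // For example, literal_base * 2 == 0x0200002000200202 and
  // literal_base * 3 == 0x0300003000300303, so small multiples stay easy to
  // pick out in a hex dump.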
8338   uint64_t literal_base = 0x0100001000100101UL;
8339 
8340   // Initialize the registers.
8341   __ Mov(x0, literal_base);
8342   __ Add(x1, x0, x0);
8343   __ Add(x2, x1, x0);
8344   __ Add(x3, x2, x0);
8345 
8346   __ Claim(4);
8347 
8348   // Simple exchange.
8349   //  After this test:
8350   //    x0-x3 should be unchanged.
8351   //    w10-w13 should contain the lower words of x0-x3.
8352   __ Poke(x0, 0);
8353   __ Poke(x1, 8);
8354   __ Poke(x2, 16);
8355   __ Poke(x3, 24);
8356   Clobber(&masm, x0_to_x3);
8357   __ Peek(x0, 0);
8358   __ Peek(x1, 8);
8359   __ Peek(x2, 16);
8360   __ Peek(x3, 24);
8361 
8362   __ Poke(w0, 0);
8363   __ Poke(w1, 4);
8364   __ Poke(w2, 8);
8365   __ Poke(w3, 12);
8366   Clobber(&masm, x10_to_x13);
8367   __ Peek(w10, 0);
8368   __ Peek(w11, 4);
8369   __ Peek(w12, 8);
8370   __ Peek(w13, 12);
8371 
8372   __ Drop(4);
8373 
8374   END();
8375   RUN();
8376 
8377   CHECK_EQUAL_64(literal_base * 1, x0);
8378   CHECK_EQUAL_64(literal_base * 2, x1);
8379   CHECK_EQUAL_64(literal_base * 3, x2);
8380   CHECK_EQUAL_64(literal_base * 4, x3);
8381 
8382   CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8383   CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8384   CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8385   CHECK_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
8386 
8387   TEARDOWN();
8388 }
8389 
8390 
8391 TEST(peek_poke_unaligned) {
8392   INIT_V8();
8393   SETUP();
8394   START();
8395 
8396   // The literal base is chosen to have two useful properties:
8397   //  * When multiplied by small values (such as a register index), this value
8398   //    is clearly readable in the result.
8399   //  * The value is not formed from repeating fixed-size smaller values, so it
8400   //    can be used to detect endianness-related errors.
8401   uint64_t literal_base = 0x0100001000100101UL;
8402 
8403   // Initialize the registers.
8404   __ Mov(x0, literal_base);
8405   __ Add(x1, x0, x0);
8406   __ Add(x2, x1, x0);
8407   __ Add(x3, x2, x0);
8408   __ Add(x4, x3, x0);
8409   __ Add(x5, x4, x0);
8410   __ Add(x6, x5, x0);
8411 
8412   __ Claim(4);
8413 
8414   // Unaligned exchanges.
8415   //  After this test:
8416   //    x0-x6 should be unchanged.
8417   //    w10-w12 should contain the lower words of x0-x2.
8418   __ Poke(x0, 1);
8419   Clobber(&masm, x0.Bit());
8420   __ Peek(x0, 1);
8421   __ Poke(x1, 2);
8422   Clobber(&masm, x1.Bit());
8423   __ Peek(x1, 2);
8424   __ Poke(x2, 3);
8425   Clobber(&masm, x2.Bit());
8426   __ Peek(x2, 3);
8427   __ Poke(x3, 4);
8428   Clobber(&masm, x3.Bit());
8429   __ Peek(x3, 4);
8430   __ Poke(x4, 5);
8431   Clobber(&masm, x4.Bit());
8432   __ Peek(x4, 5);
8433   __ Poke(x5, 6);
8434   Clobber(&masm, x5.Bit());
8435   __ Peek(x5, 6);
8436   __ Poke(x6, 7);
8437   Clobber(&masm, x6.Bit());
8438   __ Peek(x6, 7);
8439 
8440   __ Poke(w0, 1);
8441   Clobber(&masm, w10.Bit());
8442   __ Peek(w10, 1);
8443   __ Poke(w1, 2);
8444   Clobber(&masm, w11.Bit());
8445   __ Peek(w11, 2);
8446   __ Poke(w2, 3);
8447   Clobber(&masm, w12.Bit());
8448   __ Peek(w12, 3);
8449 
8450   __ Drop(4);
8451 
8452   END();
8453   RUN();
8454 
8455   CHECK_EQUAL_64(literal_base * 1, x0);
8456   CHECK_EQUAL_64(literal_base * 2, x1);
8457   CHECK_EQUAL_64(literal_base * 3, x2);
8458   CHECK_EQUAL_64(literal_base * 4, x3);
8459   CHECK_EQUAL_64(literal_base * 5, x4);
8460   CHECK_EQUAL_64(literal_base * 6, x5);
8461   CHECK_EQUAL_64(literal_base * 7, x6);
8462 
8463   CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
8464   CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
8465   CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
8466 
8467   TEARDOWN();
8468 }
8469 
8470 
8471 TEST(peek_poke_endianness) {
8472   INIT_V8();
8473   SETUP();
8474   START();
8475 
8476   // The literal base is chosen to have two useful properties:
8477   //  * When multiplied by small values (such as a register index), this value
8478   //    is clearly readable in the result.
8479   //  * The value is not formed from repeating fixed-size smaller values, so it
8480   //    can be used to detect endianness-related errors.
8481   uint64_t literal_base = 0x0100001000100101UL;
8482 
8483   // Initialize the registers.
8484   __ Mov(x0, literal_base);
8485   __ Add(x1, x0, x0);
8486 
8487   __ Claim(4);
8488 
8489   // Endianness tests.
8490   //  After this section:
8491   //    x4 should match x0[31:0]:x0[63:32]
8492   //    w5 should match w1[15:0]:w1[31:16]
8493   __ Poke(x0, 0);
8494   __ Poke(x0, 8);
8495   __ Peek(x4, 4);
8496 
8497   __ Poke(w1, 0);
8498   __ Poke(w1, 4);
8499   __ Peek(w5, 2);
8500 
8501   __ Drop(4);
8502 
8503   END();
8504   RUN();
8505 
8506   uint64_t x0_expected = literal_base * 1;
8507   uint64_t x1_expected = literal_base * 2;
8508   uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
8509   uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
8510                          ((x1_expected >> 16) & 0x0000ffff);
8511 
8512   CHECK_EQUAL_64(x0_expected, x0);
8513   CHECK_EQUAL_64(x1_expected, x1);
8514   CHECK_EQUAL_64(x4_expected, x4);
8515   CHECK_EQUAL_64(x5_expected, x5);
8516 
8517   TEARDOWN();
8518 }
8519 
8520 
8521 TEST(peek_poke_mixed) {
8522   INIT_V8();
8523   SETUP();
8524   START();
8525 
8526   // The literal base is chosen to have two useful properties:
8527   //  * When multiplied by small values (such as a register index), this value
8528   //    is clearly readable in the result.
8529   //  * The value is not formed from repeating fixed-size smaller values, so it
8530   //    can be used to detect endianness-related errors.
8531   uint64_t literal_base = 0x0100001000100101UL;
8532 
8533   // Initialize the registers.
8534   __ Mov(x0, literal_base);
8535   __ Add(x1, x0, x0);
8536   __ Add(x2, x1, x0);
8537   __ Add(x3, x2, x0);
8538 
8539   __ Claim(4);
8540 
8541   // Mix with other stack operations.
8542   //  After this section:
8543   //    x0-x3 should be unchanged.
8544   //    x6 should match x1[31:0]:x0[63:32]
8545   //    w7 should match x1[15:0]:x0[63:48]
8546   __ Poke(x1, 8);
8547   __ Poke(x0, 0);
8548   {
8549     CHECK(__ StackPointer().Is(csp));
8550     __ Mov(x4, __ StackPointer());
8551     __ SetStackPointer(x4);
8552 
8553     __ Poke(wzr, 0);    // Clobber the space we're about to drop.
8554     __ Drop(1, kWRegSize);
8555     __ Peek(x6, 0);
8556     __ Claim(1);
8557     __ Peek(w7, 10);
8558     __ Poke(x3, 28);
8559     __ Poke(xzr, 0);    // Clobber the space we're about to drop.
8560     __ Drop(1);
8561     __ Poke(x2, 12);
8562     __ Push(w0);
8563 
8564     __ Mov(csp, __ StackPointer());
8565     __ SetStackPointer(csp);
8566   }
8567 
8568   __ Pop(x0, x1, x2, x3);
8569 
8570   END();
8571   RUN();
8572 
8573   uint64_t x0_expected = literal_base * 1;
8574   uint64_t x1_expected = literal_base * 2;
8575   uint64_t x2_expected = literal_base * 3;
8576   uint64_t x3_expected = literal_base * 4;
8577   uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
8578   uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
8579                          ((x0_expected >> 48) & 0x0000ffff);
8580 
8581   CHECK_EQUAL_64(x0_expected, x0);
8582   CHECK_EQUAL_64(x1_expected, x1);
8583   CHECK_EQUAL_64(x2_expected, x2);
8584   CHECK_EQUAL_64(x3_expected, x3);
8585   CHECK_EQUAL_64(x6_expected, x6);
8586   CHECK_EQUAL_64(x7_expected, x7);
8587 
8588   TEARDOWN();
8589 }
8590 
8591 
8592 // This enum is used only as an argument to the push-pop test helpers.
8593 enum PushPopMethod {
8594   // Push or Pop using the Push and Pop methods, with blocks of up to four
8595   // registers. (Smaller blocks will be used if necessary.)
8596   PushPopByFour,
8597 
8598   // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
8599   PushPopRegList
8600 };
8601 
8602 
8603 // The maximum number of registers that can be used by the PushPopJssp* tests,
8604 // where a reg_count field is provided.
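// Passing this value (-1) as reg_count is a sentinel meaning "use every
// register that the helper's 'allowed' set permits".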
8605 static int const kPushPopJsspMaxRegCount = -1;
8606 
8607 // Test a simple push-pop pattern:
8608 //  * Claim <claim> bytes to set the stack alignment.
8609 //  * Push <reg_count> registers with size <reg_size>.
8610 //  * Clobber the register contents.
8611 //  * Pop <reg_count> registers to restore the original contents.
8612 //  * Drop <claim> bytes to restore the original stack pointer.
8613 //
8614 // Different push and pop methods can be specified independently to test for
8615 // proper word-endian behaviour.
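// For example, with reg_count == 6, PushPopByFour performs
//   __ Push(r[5], r[4], r[3], r[2]); __ Push(r[1], r[0]);
// while PushPopRegList performs a single __ PushSizeRegList(list, reg_size).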
8616 static void PushPopJsspSimpleHelper(int reg_count,
8617                                     int claim,
8618                                     int reg_size,
8619                                     PushPopMethod push_method,
8620                                     PushPopMethod pop_method) {
8621   SETUP();
8622 
8623   START();
8624 
8625   // Registers in the TmpList can be used by the macro assembler for debug code
8626   // (for example in 'Pop'), so we can't use them here. We can't use jssp
8627   // because it will be the stack pointer for this test.
8628   static RegList const allowed = ~(masm.TmpList()->list() | jssp.Bit());
8629   if (reg_count == kPushPopJsspMaxRegCount) {
8630     reg_count = CountSetBits(allowed, kNumberOfRegisters);
8631   }
8632   // Work out which registers to use, based on reg_size.
8633   Register r[kNumberOfRegisters];
8634   Register x[kNumberOfRegisters];
8635   RegList list = PopulateRegisterArray(NULL, x, r, reg_size, reg_count,
8636                                        allowed);
8637 
8638   // The literal base is chosen to have two useful properties:
8639   //  * When multiplied by small values (such as a register index), this value
8640   //    is clearly readable in the result.
8641   //  * The value is not formed from repeating fixed-size smaller values, so it
8642   //    can be used to detect endianness-related errors.
8643   uint64_t literal_base = 0x0100001000100101UL;
8644 
8645   {
8646     CHECK(__ StackPointer().Is(csp));
8647     __ Mov(jssp, __ StackPointer());
8648     __ SetStackPointer(jssp);
8649 
8650     int i;
8651 
8652     // Initialize the registers.
8653     for (i = 0; i < reg_count; i++) {
8654       // Always write into the X register, to ensure that the upper word is
8655       // properly ignored by Push when testing W registers.
8656       if (!x[i].IsZero()) {
8657         __ Mov(x[i], literal_base * i);
8658       }
8659     }
8660 
8661     // Claim memory first, as requested.
8662     __ Claim(claim, kByteSizeInBytes);
8663 
8664     switch (push_method) {
8665       case PushPopByFour:
8666         // Push high-numbered registers first (to the highest addresses).
8667         for (i = reg_count; i >= 4; i -= 4) {
8668           __ Push(r[i-1], r[i-2], r[i-3], r[i-4]);
8669         }
8670         // Finish off the leftovers.
8671         switch (i) {
8672           case 3:  __ Push(r[2], r[1], r[0]); break;
8673           case 2:  __ Push(r[1], r[0]);       break;
8674           case 1:  __ Push(r[0]);             break;
8675           default:
8676             CHECK(i == 0);
8677             break;
8678         }
8679         break;
8680       case PushPopRegList:
8681         __ PushSizeRegList(list, reg_size);
8682         break;
8683     }
8684 
8685     // Clobber all the registers, to ensure that they get repopulated by Pop.
8686     Clobber(&masm, list);
8687 
8688     switch (pop_method) {
8689       case PushPopByFour:
8690         // Pop low-numbered registers first (from the lowest addresses).
8691         for (i = 0; i <= (reg_count-4); i += 4) {
8692           __ Pop(r[i], r[i+1], r[i+2], r[i+3]);
8693         }
8694         // Finish off the leftovers.
8695         switch (reg_count - i) {
8696           case 3:  __ Pop(r[i], r[i+1], r[i+2]); break;
8697           case 2:  __ Pop(r[i], r[i+1]);         break;
8698           case 1:  __ Pop(r[i]);                 break;
8699           default:
8700             CHECK(i == reg_count);
8701             break;
8702         }
8703         break;
8704       case PushPopRegList:
8705         __ PopSizeRegList(list, reg_size);
8706         break;
8707     }
8708 
8709     // Drop memory to restore jssp.
8710     __ Drop(claim, kByteSizeInBytes);
8711 
8712     __ Mov(csp, __ StackPointer());
8713     __ SetStackPointer(csp);
8714   }
8715 
8716   END();
8717 
8718   RUN();
8719 
8720   // Check that the register contents were preserved.
8721   // Always use CHECK_EQUAL_64, even when testing W registers, so we can test
8722   // that the upper word was properly cleared by Pop.
8723   literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8724   for (int i = 0; i < reg_count; i++) {
8725     if (x[i].IsZero()) {
8726       CHECK_EQUAL_64(0, x[i]);
8727     } else {
8728       CHECK_EQUAL_64(literal_base * i, x[i]);
8729     }
8730   }
8731 
8732   TEARDOWN();
8733 }
8734 
8735 
8736 TEST(push_pop_jssp_simple_32) {
8737   INIT_V8();
8738   for (int claim = 0; claim <= 8; claim++) {
8739     for (int count = 0; count <= 8; count++) {
8740       PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8741                               PushPopByFour, PushPopByFour);
8742       PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8743                               PushPopByFour, PushPopRegList);
8744       PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8745                               PushPopRegList, PushPopByFour);
8746       PushPopJsspSimpleHelper(count, claim, kWRegSizeInBits,
8747                               PushPopRegList, PushPopRegList);
8748     }
8749     // Test with the maximum number of registers.
8750     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8751                             PushPopByFour, PushPopByFour);
8752     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8753                             PushPopByFour, PushPopRegList);
8754     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8755                             PushPopRegList, PushPopByFour);
8756     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kWRegSizeInBits,
8757                             PushPopRegList, PushPopRegList);
8758   }
8759 }
8760 
8761 
8762 TEST(push_pop_jssp_simple_64) {
8763   INIT_V8();
8764   for (int claim = 0; claim <= 8; claim++) {
8765     for (int count = 0; count <= 8; count++) {
8766       PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8767                               PushPopByFour, PushPopByFour);
8768       PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8769                               PushPopByFour, PushPopRegList);
8770       PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8771                               PushPopRegList, PushPopByFour);
8772       PushPopJsspSimpleHelper(count, claim, kXRegSizeInBits,
8773                               PushPopRegList, PushPopRegList);
8774     }
8775     // Test with the maximum number of registers.
8776     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8777                             PushPopByFour, PushPopByFour);
8778     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8779                             PushPopByFour, PushPopRegList);
8780     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8781                             PushPopRegList, PushPopByFour);
8782     PushPopJsspSimpleHelper(kPushPopJsspMaxRegCount, claim, kXRegSizeInBits,
8783                             PushPopRegList, PushPopRegList);
8784   }
8785 }
8786 
8787 
8788 // The maximum number of registers that can be used by the PushPopFPJssp* tests,
8789 // where a reg_count field is provided.
8790 static int const kPushPopFPJsspMaxRegCount = -1;
8791 
8792 // Test a simple push-pop pattern:
8793 //  * Claim <claim> bytes to set the stack alignment.
8794 //  * Push <reg_count> FP registers with size <reg_size>.
8795 //  * Clobber the register contents.
8796 //  * Pop <reg_count> FP registers to restore the original contents.
8797 //  * Drop <claim> bytes to restore the original stack pointer.
8798 //
8799 // Different push and pop methods can be specified independently to test for
8800 // proper word-endian behaviour.
8801 static void PushPopFPJsspSimpleHelper(int reg_count,
8802                                       int claim,
8803                                       int reg_size,
8804                                       PushPopMethod push_method,
8805                                       PushPopMethod pop_method) {
8806   SETUP();
8807 
8808   START();
8809 
8810   // We can use any floating-point register. None of them are reserved for
8811   // debug code, for example.
8812   static RegList const allowed = ~0;
8813   if (reg_count == kPushPopFPJsspMaxRegCount) {
8814     reg_count = CountSetBits(allowed, kNumberOfFPRegisters);
8815   }
8816   // Work out which registers to use, based on reg_size.
8817   FPRegister v[kNumberOfRegisters];
8818   FPRegister d[kNumberOfRegisters];
8819   RegList list = PopulateFPRegisterArray(NULL, d, v, reg_size, reg_count,
8820                                          allowed);
8821 
8822   // The literal base is chosen to have two useful properties:
8823   //  * When multiplied (using an integer) by small values (such as a register
8824   //    index), this value is clearly readable in the result.
8825   //  * The value is not formed from repeating fixed-size smaller values, so it
8826   //    can be used to detect endianness-related errors.
8827   //  * It is never a floating-point NaN, and will therefore always compare
8828   //    equal to itself.
8829   uint64_t literal_base = 0x0100001000100101UL;
8830 
8831   {
8832     CHECK(__ StackPointer().Is(csp));
8833     __ Mov(jssp, __ StackPointer());
8834     __ SetStackPointer(jssp);
8835 
8836     int i;
8837 
8838     // Initialize the registers, using X registers to load the literal.
8839     __ Mov(x0, 0);
8840     __ Mov(x1, literal_base);
8841     for (i = 0; i < reg_count; i++) {
8842       // Always write into the D register, to ensure that the upper word is
8843       // properly ignored by Push when testing S registers.
8844       __ Fmov(d[i], x0);
8845       // Calculate the next literal.
8846       __ Add(x0, x0, x1);
8847     }
8848 
8849     // Claim memory first, as requested.
8850     __ Claim(claim, kByteSizeInBytes);
8851 
8852     switch (push_method) {
8853       case PushPopByFour:
8854         // Push high-numbered registers first (to the highest addresses).
8855         for (i = reg_count; i >= 4; i -= 4) {
8856           __ Push(v[i-1], v[i-2], v[i-3], v[i-4]);
8857         }
8858         // Finish off the leftovers.
8859         switch (i) {
8860           case 3:  __ Push(v[2], v[1], v[0]); break;
8861           case 2:  __ Push(v[1], v[0]);       break;
8862           case 1:  __ Push(v[0]);             break;
8863           default:
8864             CHECK(i == 0);
8865             break;
8866         }
8867         break;
8868       case PushPopRegList:
8869         __ PushSizeRegList(list, reg_size, CPURegister::kFPRegister);
8870         break;
8871     }
8872 
8873     // Clobber all the registers, to ensure that they get repopulated by Pop.
8874     ClobberFP(&masm, list);
8875 
8876     switch (pop_method) {
8877       case PushPopByFour:
8878         // Pop low-numbered registers first (from the lowest addresses).
8879         for (i = 0; i <= (reg_count-4); i += 4) {
8880           __ Pop(v[i], v[i+1], v[i+2], v[i+3]);
8881         }
8882         // Finish off the leftovers.
8883         switch (reg_count - i) {
8884           case 3:  __ Pop(v[i], v[i+1], v[i+2]); break;
8885           case 2:  __ Pop(v[i], v[i+1]);         break;
8886           case 1:  __ Pop(v[i]);                 break;
8887           default:
8888             CHECK(i == reg_count);
8889             break;
8890         }
8891         break;
8892       case PushPopRegList:
8893         __ PopSizeRegList(list, reg_size, CPURegister::kFPRegister);
8894         break;
8895     }
8896 
8897     // Drop memory to restore jssp.
8898     __ Drop(claim, kByteSizeInBytes);
8899 
8900     __ Mov(csp, __ StackPointer());
8901     __ SetStackPointer(csp);
8902   }
8903 
8904   END();
8905 
8906   RUN();
8907 
8908   // Check that the register contents were preserved.
8909   // Always use CHECK_EQUAL_FP64, even when testing S registers, so we can
8910   // test that the upper word was properly cleared by Pop.
8911   literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
8912   for (int i = 0; i < reg_count; i++) {
8913     uint64_t literal = literal_base * i;
8914     double expected;
8915     memcpy(&expected, &literal, sizeof(expected));
8916     CHECK_EQUAL_FP64(expected, d[i]);
8917   }
8918 
8919   TEARDOWN();
8920 }
8921 
8922 
8923 TEST(push_pop_fp_jssp_simple_32) {
8924   INIT_V8();
8925   for (int claim = 0; claim <= 8; claim++) {
8926     for (int count = 0; count <= 8; count++) {
8927       PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8928                                 PushPopByFour, PushPopByFour);
8929       PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8930                                 PushPopByFour, PushPopRegList);
8931       PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8932                                 PushPopRegList, PushPopByFour);
8933       PushPopFPJsspSimpleHelper(count, claim, kSRegSizeInBits,
8934                                 PushPopRegList, PushPopRegList);
8935     }
8936     // Test with the maximum number of registers.
8937     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8938                               PushPopByFour, PushPopByFour);
8939     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8940                               PushPopByFour, PushPopRegList);
8941     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8942                               PushPopRegList, PushPopByFour);
8943     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kSRegSizeInBits,
8944                               PushPopRegList, PushPopRegList);
8945   }
8946 }
8947 
8948 
8949 TEST(push_pop_fp_jssp_simple_64) {
8950   INIT_V8();
8951   for (int claim = 0; claim <= 8; claim++) {
8952     for (int count = 0; count <= 8; count++) {
8953       PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8954                                 PushPopByFour, PushPopByFour);
8955       PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8956                                 PushPopByFour, PushPopRegList);
8957       PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8958                                 PushPopRegList, PushPopByFour);
8959       PushPopFPJsspSimpleHelper(count, claim, kDRegSizeInBits,
8960                                 PushPopRegList, PushPopRegList);
8961     }
8962     // Test with the maximum number of registers.
8963     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8964                               PushPopByFour, PushPopByFour);
8965     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8966                               PushPopByFour, PushPopRegList);
8967     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8968                               PushPopRegList, PushPopByFour);
8969     PushPopFPJsspSimpleHelper(kPushPopFPJsspMaxRegCount, claim, kDRegSizeInBits,
8970                               PushPopRegList, PushPopRegList);
8971   }
8972 }
8973 
8974 
8975 // Push and pop data using an overlapping combination of Push/Pop and
8976 // RegList-based methods.
8977 static void PushPopJsspMixedMethodsHelper(int claim, int reg_size) {
8978   SETUP();
8979 
8980   // Registers x8 and x9 are used by the macro assembler for debug code (for
8981   // example in 'Pop'), so we can't use them here. We can't use jssp because it
8982   // will be the stack pointer for this test.
8983   static RegList const allowed =
8984       ~(x8.Bit() | x9.Bit() | jssp.Bit() | xzr.Bit());
8985   // Work out which registers to use, based on reg_size.
8986   Register r[10];
8987   Register x[10];
8988   PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
8989 
8990   // Calculate some handy register lists.
8991   RegList r0_to_r3 = 0;
8992   for (int i = 0; i <= 3; i++) {
8993     r0_to_r3 |= x[i].Bit();
8994   }
8995   RegList r4_to_r5 = 0;
8996   for (int i = 4; i <= 5; i++) {
8997     r4_to_r5 |= x[i].Bit();
8998   }
8999   RegList r6_to_r9 = 0;
9000   for (int i = 6; i <= 9; i++) {
9001     r6_to_r9 |= x[i].Bit();
9002   }
9003 
9004   // The literal base is chosen to have two useful properties:
9005   //  * When multiplied by small values (such as a register index), this value
9006   //    is clearly readable in the result.
9007   //  * The value is not formed from repeating fixed-size smaller values, so it
9008   //    can be used to detect endianness-related errors.
9009   uint64_t literal_base = 0x0100001000100101UL;
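  // As a quick illustration (plain arithmetic added here, not taken from the
  // original test):
  //   literal_base * 1 = 0x0100001000100101
  //   literal_base * 2 = 0x0200002000200202
  //   literal_base * 3 = 0x0300003000300303
  // Each multiple keeps the register index visible in every populated nibble,
  // so a byte-swapped or partially clobbered value is easy to spot in a dump.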
9010 
9011   START();
9012   {
9013     CHECK(__ StackPointer().Is(csp));
9014     __ Mov(jssp, __ StackPointer());
9015     __ SetStackPointer(jssp);
9016 
9017     // Claim memory first, as requested.
9018     __ Claim(claim, kByteSizeInBytes);
9019 
9020     __ Mov(x[3], literal_base * 3);
9021     __ Mov(x[2], literal_base * 2);
9022     __ Mov(x[1], literal_base * 1);
9023     __ Mov(x[0], literal_base * 0);
9024 
9025     __ PushSizeRegList(r0_to_r3, reg_size);
9026     __ Push(r[3], r[2]);
9027 
9028     Clobber(&masm, r0_to_r3);
9029     __ PopSizeRegList(r0_to_r3, reg_size);
9030 
9031     __ Push(r[2], r[1], r[3], r[0]);
9032 
9033     Clobber(&masm, r4_to_r5);
9034     __ Pop(r[4], r[5]);
9035     Clobber(&masm, r6_to_r9);
9036     __ Pop(r[6], r[7], r[8], r[9]);
9037 
9038     // Drop memory to restore jssp.
9039     __ Drop(claim, kByteSizeInBytes);
9040 
9041     __ Mov(csp, __ StackPointer());
9042     __ SetStackPointer(csp);
9043   }
9044 
9045   END();
9046 
9047   RUN();
9048 
9049   // Always use CHECK_EQUAL_64, even when testing W registers, so we can test
9050   // that the upper word was properly cleared by Pop.
9051   literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
9052 
9053   CHECK_EQUAL_64(literal_base * 3, x[9]);
9054   CHECK_EQUAL_64(literal_base * 2, x[8]);
9055   CHECK_EQUAL_64(literal_base * 0, x[7]);
9056   CHECK_EQUAL_64(literal_base * 3, x[6]);
9057   CHECK_EQUAL_64(literal_base * 1, x[5]);
9058   CHECK_EQUAL_64(literal_base * 2, x[4]);
9059 
9060   TEARDOWN();
9061 }
9062 
9063 
9064 TEST(push_pop_jssp_mixed_methods_64) {
9065   INIT_V8();
9066   for (int claim = 0; claim <= 8; claim++) {
9067     PushPopJsspMixedMethodsHelper(claim, kXRegSizeInBits);
9068   }
9069 }
9070 
9071 
9072 TEST(push_pop_jssp_mixed_methods_32) {
9073   INIT_V8();
9074   for (int claim = 0; claim <= 8; claim++) {
9075     PushPopJsspMixedMethodsHelper(claim, kWRegSizeInBits);
9076   }
9077 }
9078 
9079 
9080 // Push and pop data using overlapping X- and W-sized quantities.
9081 static void PushPopJsspWXOverlapHelper(int reg_count, int claim) {
9082   // This test emits rather a lot of code.
9083   SETUP_SIZE(BUF_SIZE * 2);
9084 
9085   // Work out which registers to use, based on reg_size.
9086   Register tmp = x8;
9087   static RegList const allowed = ~(tmp.Bit() | jssp.Bit());
9088   if (reg_count == kPushPopJsspMaxRegCount) {
9089     reg_count = CountSetBits(allowed, kNumberOfRegisters);
9090   }
9091   Register w[kNumberOfRegisters];
9092   Register x[kNumberOfRegisters];
9093   RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
9094 
9095   // The number of W-sized slots we expect to pop. When we pop, we alternate
9096   // between W and X registers, so we need reg_count*1.5 W-sized slots.
9097   int const requested_w_slots = reg_count + reg_count / 2;
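  // (With integer division this rounds down: for example, reg_count = 7
  //  requests 7 + 7 / 2 = 10 W-sized slots.)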
9098 
9099   // Track what _should_ be on the stack, using W-sized slots.
9100   static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
9101   uint32_t stack[kMaxWSlots];
9102   for (int i = 0; i < kMaxWSlots; i++) {
9103     stack[i] = 0xdeadbeef;
9104   }
9105 
9106   // The literal base is chosen to have two useful properties:
9107   //  * When multiplied by small values (such as a register index), this value
9108   //    is clearly readable in the result.
9109   //  * The value is not formed from repeating fixed-size smaller values, so it
9110   //    can be used to detect endianness-related errors.
9111   static uint64_t const literal_base = 0x0100001000100101UL;
9112   static uint64_t const literal_base_hi = literal_base >> 32;
9113   static uint64_t const literal_base_lo = literal_base & 0xffffffff;
9114   static uint64_t const literal_base_w = literal_base & 0xffffffff;
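  // For reference, with the literal base above these constants work out to
  // (simple arithmetic, shown here as an illustration):
  //   literal_base_hi = 0x01000010
  //   literal_base_lo = literal_base_w = 0x00100101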
9115 
9116   START();
9117   {
9118     CHECK(__ StackPointer().Is(csp));
9119     __ Mov(jssp, __ StackPointer());
9120     __ SetStackPointer(jssp);
9121 
9122     // Initialize the registers.
9123     for (int i = 0; i < reg_count; i++) {
9124       // Always write into the X register, to ensure that the upper word is
9125       // properly ignored by Push when testing W registers.
9126       if (!x[i].IsZero()) {
9127         __ Mov(x[i], literal_base * i);
9128       }
9129     }
9130 
9131     // Claim memory first, as requested.
9132     __ Claim(claim, kByteSizeInBytes);
9133 
9134     // The push-pop pattern is as follows:
9135     // Push:           Pop:
9136     //  x[0](hi)   ->   w[0]
9137     //  x[0](lo)   ->   x[1](hi)
9138     //  w[1]       ->   x[1](lo)
9139     //  w[1]       ->   w[2]
9140     //  x[2](hi)   ->   x[2](hi)
9141     //  x[2](lo)   ->   x[2](lo)
9142     //  x[2](hi)   ->   w[3]
9143     //  x[2](lo)   ->   x[4](hi)
9144     //  x[2](hi)   ->   x[4](lo)
9145     //  x[2](lo)   ->   w[5]
9146     //  w[3]       ->   x[5](hi)
9147     //  w[3]       ->   x[6](lo)
9148     //  w[3]       ->   w[7]
9149     //  w[3]       ->   x[8](hi)
9150     //  x[4](hi)   ->   x[8](lo)
9151     //  x[4](lo)   ->   w[9]
9152     // ... pattern continues ...
9153     //
9154     // That is, registers are pushed starting with the lower numbers,
9155     // alternating between x and w registers, and pushing i%4+1 copies of each,
9156     // where i is the register number.
9157     // Registers are popped one at a time, starting with the higher numbers
9158     // and alternating between x and w registers.
9159     //
9160     // This pattern provides a wide variety of alignment effects and overlaps.
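    // As a worked example of the push side (the numbers follow directly from
    // the i%4+1 rule and are illustrative only): register i contributes
    // (i%4+1) copies, and each X copy occupies two W-sized slots, so
    //   i=0: x[0] pushed once        -> 2 slots (running total 2)
    //   i=1: w[1] pushed twice       -> 2 slots (running total 4)
    //   i=2: x[2] pushed three times -> 6 slots (running total 10)
    //   i=3: w[3] pushed four times  -> 4 slots (running total 14)
    //   i=4: x[4] pushed once        -> 2 slots (running total 16)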
9161 
9162     // ---- Push ----
9163 
9164     int active_w_slots = 0;
9165     for (int i = 0; active_w_slots < requested_w_slots; i++) {
9166       CHECK(i < reg_count);
9167       // In order to test various arguments to PushMultipleTimes, and to try to
9168       // exercise different alignment and overlap effects, we push each
9169       // register a different number of times.
9170       int times = i % 4 + 1;
9171       if (i & 1) {
9172         // Push odd-numbered registers as W registers.
9173         if (i & 2) {
9174           __ PushMultipleTimes(w[i], times);
9175         } else {
9176           // Use a register to specify the count.
9177           __ Mov(tmp.W(), times);
9178           __ PushMultipleTimes(w[i], tmp.W());
9179         }
9180         // Fill in the expected stack slots.
9181         for (int j = 0; j < times; j++) {
9182           if (w[i].Is(wzr)) {
9183             // The zero register always writes zeroes.
9184             stack[active_w_slots++] = 0;
9185           } else {
9186             stack[active_w_slots++] = literal_base_w * i;
9187           }
9188         }
9189       } else {
9190         // Push even-numbered registers as X registers.
9191         if (i & 2) {
9192           __ PushMultipleTimes(x[i], times);
9193         } else {
9194           // Use a register to specify the count.
9195           __ Mov(tmp, times);
9196           __ PushMultipleTimes(x[i], tmp);
9197         }
9198         // Fill in the expected stack slots.
9199         for (int j = 0; j < times; j++) {
9200           if (x[i].IsZero()) {
9201             // The zero register always writes zeroes.
9202             stack[active_w_slots++] = 0;
9203             stack[active_w_slots++] = 0;
9204           } else {
9205             stack[active_w_slots++] = literal_base_hi * i;
9206             stack[active_w_slots++] = literal_base_lo * i;
9207           }
9208         }
9209       }
9210     }
9211     // Because we were pushing several registers at a time, we probably pushed
9212     // more than we needed to.
9213     if (active_w_slots > requested_w_slots) {
9214       __ Drop(active_w_slots - requested_w_slots, kWRegSize);
9215       // Bump the number of active W-sized slots back to where it should be,
9216       // and fill the empty space with a dummy value.
9217       do {
9218         stack[active_w_slots--] = 0xdeadbeef;
9219       } while (active_w_slots > requested_w_slots);
9220     }
9221 
9222     // ---- Pop ----
9223 
9224     Clobber(&masm, list);
9225 
9226     // If popping an even number of registers, the first one will be X-sized.
9227     // Otherwise, the first one will be W-sized.
9228     bool next_is_64 = !(reg_count & 1);
9229     for (int i = reg_count-1; i >= 0; i--) {
9230       if (next_is_64) {
9231         __ Pop(x[i]);
9232         active_w_slots -= 2;
9233       } else {
9234         __ Pop(w[i]);
9235         active_w_slots -= 1;
9236       }
9237       next_is_64 = !next_is_64;
9238     }
9239     CHECK(active_w_slots == 0);
9240 
9241     // Drop memory to restore jssp.
9242     __ Drop(claim, kByteSizeInBytes);
9243 
9244     __ Mov(csp, __ StackPointer());
9245     __ SetStackPointer(csp);
9246   }
9247 
9248   END();
9249 
9250   RUN();
9251 
9252   int slot = 0;
9253   for (int i = 0; i < reg_count; i++) {
9254     // Even-numbered registers were written as W registers.
9255     // Odd-numbered registers were written as X registers.
9256     bool expect_64 = (i & 1);
9257     uint64_t expected;
9258 
9259     if (expect_64) {
9260       uint64_t hi = stack[slot++];
9261       uint64_t lo = stack[slot++];
9262       expected = (hi << 32) | lo;
9263     } else {
9264       expected = stack[slot++];
9265     }
9266 
9267     // Always use CHECK_EQUAL_64, even when testing W registers, so we can
9268     // test that the upper word was properly cleared by Pop.
9269     if (x[i].IsZero()) {
9270       CHECK_EQUAL_64(0, x[i]);
9271     } else {
9272       CHECK_EQUAL_64(expected, x[i]);
9273     }
9274   }
9275   CHECK(slot == requested_w_slots);
9276 
9277   TEARDOWN();
9278 }
9279 
9280 
9281 TEST(push_pop_jssp_wx_overlap) {
9282   INIT_V8();
9283   for (int claim = 0; claim <= 8; claim++) {
9284     for (int count = 1; count <= 8; count++) {
9285       PushPopJsspWXOverlapHelper(count, claim);
9286       PushPopJsspWXOverlapHelper(count, claim);
9287       PushPopJsspWXOverlapHelper(count, claim);
9288       PushPopJsspWXOverlapHelper(count, claim);
9289     }
9290     // Test with the maximum number of registers.
9291     PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
9292     PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
9293     PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
9294     PushPopJsspWXOverlapHelper(kPushPopJsspMaxRegCount, claim);
9295   }
9296 }
9297 
9298 
9299 TEST(push_pop_csp) {
9300   INIT_V8();
9301   SETUP();
9302 
9303   START();
9304 
9305   CHECK(csp.Is(__ StackPointer()));
9306 
9307   __ Mov(x3, 0x3333333333333333UL);
9308   __ Mov(x2, 0x2222222222222222UL);
9309   __ Mov(x1, 0x1111111111111111UL);
9310   __ Mov(x0, 0x0000000000000000UL);
9311   __ Claim(2);
9312   __ PushXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
9313   __ Push(x3, x2);
9314   __ PopXRegList(x0.Bit() | x1.Bit() | x2.Bit() | x3.Bit());
9315   __ Push(x2, x1, x3, x0);
9316   __ Pop(x4, x5);
9317   __ Pop(x6, x7, x8, x9);
9318 
9319   __ Claim(2);
9320   __ PushWRegList(w0.Bit() | w1.Bit() | w2.Bit() | w3.Bit());
9321   __ Push(w3, w1, w2, w0);
9322   __ PopWRegList(w10.Bit() | w11.Bit() | w12.Bit() | w13.Bit());
9323   __ Pop(w14, w15, w16, w17);
9324 
9325   __ Claim(2);
9326   __ Push(w2, w2, w1, w1);
9327   __ Push(x3, x3);
9328   __ Pop(w18, w19, w20, w21);
9329   __ Pop(x22, x23);
9330 
9331   __ Claim(2);
9332   __ PushXRegList(x1.Bit() | x22.Bit());
9333   __ PopXRegList(x24.Bit() | x26.Bit());
9334 
9335   __ Claim(2);
9336   __ PushWRegList(w1.Bit() | w2.Bit() | w4.Bit() | w22.Bit());
9337   __ PopWRegList(w25.Bit() | w27.Bit() | w28.Bit() | w29.Bit());
9338 
9339   __ Claim(2);
9340   __ PushXRegList(0);
9341   __ PopXRegList(0);
9342   __ PushXRegList(0xffffffff);
9343   __ PopXRegList(0xffffffff);
9344   __ Drop(12);
9345 
9346   END();
9347 
9348   RUN();
9349 
9350   CHECK_EQUAL_64(0x1111111111111111UL, x3);
9351   CHECK_EQUAL_64(0x0000000000000000UL, x2);
9352   CHECK_EQUAL_64(0x3333333333333333UL, x1);
9353   CHECK_EQUAL_64(0x2222222222222222UL, x0);
9354   CHECK_EQUAL_64(0x3333333333333333UL, x9);
9355   CHECK_EQUAL_64(0x2222222222222222UL, x8);
9356   CHECK_EQUAL_64(0x0000000000000000UL, x7);
9357   CHECK_EQUAL_64(0x3333333333333333UL, x6);
9358   CHECK_EQUAL_64(0x1111111111111111UL, x5);
9359   CHECK_EQUAL_64(0x2222222222222222UL, x4);
9360 
9361   CHECK_EQUAL_32(0x11111111U, w13);
9362   CHECK_EQUAL_32(0x33333333U, w12);
9363   CHECK_EQUAL_32(0x00000000U, w11);
9364   CHECK_EQUAL_32(0x22222222U, w10);
9365   CHECK_EQUAL_32(0x11111111U, w17);
9366   CHECK_EQUAL_32(0x00000000U, w16);
9367   CHECK_EQUAL_32(0x33333333U, w15);
9368   CHECK_EQUAL_32(0x22222222U, w14);
9369 
9370   CHECK_EQUAL_32(0x11111111U, w18);
9371   CHECK_EQUAL_32(0x11111111U, w19);
9372   CHECK_EQUAL_32(0x11111111U, w20);
9373   CHECK_EQUAL_32(0x11111111U, w21);
9374   CHECK_EQUAL_64(0x3333333333333333UL, x22);
9375   CHECK_EQUAL_64(0x0000000000000000UL, x23);
9376 
9377   CHECK_EQUAL_64(0x3333333333333333UL, x24);
9378   CHECK_EQUAL_64(0x3333333333333333UL, x26);
9379 
9380   CHECK_EQUAL_32(0x33333333U, w25);
9381   CHECK_EQUAL_32(0x00000000U, w27);
9382   CHECK_EQUAL_32(0x22222222U, w28);
9383   CHECK_EQUAL_32(0x33333333U, w29);
9384   TEARDOWN();
9385 }
9386 
9387 
9388 TEST(push_queued) {
9389   INIT_V8();
9390   SETUP();
9391 
9392   START();
9393 
9394   CHECK(__ StackPointer().Is(csp));
9395   __ Mov(jssp, __ StackPointer());
9396   __ SetStackPointer(jssp);
9397 
9398   MacroAssembler::PushPopQueue queue(&masm);
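  // Queued registers are not stored immediately; PushQueued() below emits the
  // whole batch at once (exactly how the MacroAssembler groups the stores is
  // an implementation detail and is not assumed here).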
9399 
9400   // Queue up registers.
9401   queue.Queue(x0);
9402   queue.Queue(x1);
9403   queue.Queue(x2);
9404   queue.Queue(x3);
9405 
9406   queue.Queue(w4);
9407   queue.Queue(w5);
9408   queue.Queue(w6);
9409 
9410   queue.Queue(d0);
9411   queue.Queue(d1);
9412 
9413   queue.Queue(s2);
9414 
9415   __ Mov(x0, 0x1234000000000000);
9416   __ Mov(x1, 0x1234000100010001);
9417   __ Mov(x2, 0x1234000200020002);
9418   __ Mov(x3, 0x1234000300030003);
9419   __ Mov(w4, 0x12340004);
9420   __ Mov(w5, 0x12340005);
9421   __ Mov(w6, 0x12340006);
9422   __ Fmov(d0, 123400.0);
9423   __ Fmov(d1, 123401.0);
9424   __ Fmov(s2, 123402.0);
9425 
9426   // Actually push them.
9427   queue.PushQueued();
9428 
9429   Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
9430   Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
9431 
9432   // Pop them conventionally.
9433   __ Pop(s2);
9434   __ Pop(d1, d0);
9435   __ Pop(w6, w5, w4);
9436   __ Pop(x3, x2, x1, x0);
9437 
9438   __ Mov(csp, __ StackPointer());
9439   __ SetStackPointer(csp);
9440 
9441   END();
9442 
9443   RUN();
9444 
9445   CHECK_EQUAL_64(0x1234000000000000, x0);
9446   CHECK_EQUAL_64(0x1234000100010001, x1);
9447   CHECK_EQUAL_64(0x1234000200020002, x2);
9448   CHECK_EQUAL_64(0x1234000300030003, x3);
9449 
9450   CHECK_EQUAL_32(0x12340004, w4);
9451   CHECK_EQUAL_32(0x12340005, w5);
9452   CHECK_EQUAL_32(0x12340006, w6);
9453 
9454   CHECK_EQUAL_FP64(123400.0, d0);
9455   CHECK_EQUAL_FP64(123401.0, d1);
9456 
9457   CHECK_EQUAL_FP32(123402.0, s2);
9458 
9459   TEARDOWN();
9460 }
9461 
9462 
9463 TEST(pop_queued) {
9464   INIT_V8();
9465   SETUP();
9466 
9467   START();
9468 
9469   CHECK(__ StackPointer().Is(csp));
9470   __ Mov(jssp, __ StackPointer());
9471   __ SetStackPointer(jssp);
9472 
9473   MacroAssembler::PushPopQueue queue(&masm);
9474 
9475   __ Mov(x0, 0x1234000000000000);
9476   __ Mov(x1, 0x1234000100010001);
9477   __ Mov(x2, 0x1234000200020002);
9478   __ Mov(x3, 0x1234000300030003);
9479   __ Mov(w4, 0x12340004);
9480   __ Mov(w5, 0x12340005);
9481   __ Mov(w6, 0x12340006);
9482   __ Fmov(d0, 123400.0);
9483   __ Fmov(d1, 123401.0);
9484   __ Fmov(s2, 123402.0);
9485 
9486   // Push registers conventionally.
9487   __ Push(x0, x1, x2, x3);
9488   __ Push(w4, w5, w6);
9489   __ Push(d0, d1);
9490   __ Push(s2);
9491 
9492   // Queue up the registers to pop.
9493   queue.Queue(s2);
9494 
9495   queue.Queue(d1);
9496   queue.Queue(d0);
9497 
9498   queue.Queue(w6);
9499   queue.Queue(w5);
9500   queue.Queue(w4);
9501 
9502   queue.Queue(x3);
9503   queue.Queue(x2);
9504   queue.Queue(x1);
9505   queue.Queue(x0);
9506 
9507   Clobber(&masm, CPURegList(CPURegister::kRegister, kXRegSizeInBits, 0, 6));
9508   Clobber(&masm, CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, 2));
9509 
9510   // Actually pop them.
9511   queue.PopQueued();
9512 
9513   __ Mov(csp, __ StackPointer());
9514   __ SetStackPointer(csp);
9515 
9516   END();
9517 
9518   RUN();
9519 
9520   CHECK_EQUAL_64(0x1234000000000000, x0);
9521   CHECK_EQUAL_64(0x1234000100010001, x1);
9522   CHECK_EQUAL_64(0x1234000200020002, x2);
9523   CHECK_EQUAL_64(0x1234000300030003, x3);
9524 
9525   CHECK_EQUAL_64(0x0000000012340004, x4);
9526   CHECK_EQUAL_64(0x0000000012340005, x5);
9527   CHECK_EQUAL_64(0x0000000012340006, x6);
9528 
9529   CHECK_EQUAL_FP64(123400.0, d0);
9530   CHECK_EQUAL_FP64(123401.0, d1);
9531 
9532   CHECK_EQUAL_FP32(123402.0, s2);
9533 
9534   TEARDOWN();
9535 }
9536 
9537 
9538 TEST(jump_both_smi) {
9539   INIT_V8();
9540   SETUP();
9541 
9542   Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
9543   Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
9544   Label return1, return2, return3, done;
9545 
9546   START();
9547 
9548   __ Mov(x0, 0x5555555500000001UL);  // A pointer.
9549   __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
9550   __ Mov(x2, 0x1234567800000000UL);  // A smi.
9551   __ Mov(x3, 0x8765432100000000UL);  // A smi.
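  // A reminder of the tagging convention these constants rely on: a value with
  // the low bit set is treated as a heap-object pointer, while a smi has a
  // clear tag bit and (on arm64) keeps its 32-bit payload in the upper word,
  // which is why the smi test values have a zero lower word.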
9552   __ Mov(x4, 0xdead);
9553   __ Mov(x5, 0xdead);
9554   __ Mov(x6, 0xdead);
9555   __ Mov(x7, 0xdead);
9556 
9557   __ JumpIfBothSmi(x0, x1, &cond_pass_00, &cond_fail_00);
9558   __ Bind(&return1);
9559   __ JumpIfBothSmi(x0, x2, &cond_pass_01, &cond_fail_01);
9560   __ Bind(&return2);
9561   __ JumpIfBothSmi(x2, x1, &cond_pass_10, &cond_fail_10);
9562   __ Bind(&return3);
9563   __ JumpIfBothSmi(x2, x3, &cond_pass_11, &cond_fail_11);
9564 
9565   __ Bind(&cond_fail_00);
9566   __ Mov(x4, 0);
9567   __ B(&return1);
9568   __ Bind(&cond_pass_00);
9569   __ Mov(x4, 1);
9570   __ B(&return1);
9571 
9572   __ Bind(&cond_fail_01);
9573   __ Mov(x5, 0);
9574   __ B(&return2);
9575   __ Bind(&cond_pass_01);
9576   __ Mov(x5, 1);
9577   __ B(&return2);
9578 
9579   __ Bind(&cond_fail_10);
9580   __ Mov(x6, 0);
9581   __ B(&return3);
9582   __ Bind(&cond_pass_10);
9583   __ Mov(x6, 1);
9584   __ B(&return3);
9585 
9586   __ Bind(&cond_fail_11);
9587   __ Mov(x7, 0);
9588   __ B(&done);
9589   __ Bind(&cond_pass_11);
9590   __ Mov(x7, 1);
9591 
9592   __ Bind(&done);
9593 
9594   END();
9595 
9596   RUN();
9597 
9598   CHECK_EQUAL_64(0x5555555500000001UL, x0);
9599   CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
9600   CHECK_EQUAL_64(0x1234567800000000UL, x2);
9601   CHECK_EQUAL_64(0x8765432100000000UL, x3);
9602   CHECK_EQUAL_64(0, x4);
9603   CHECK_EQUAL_64(0, x5);
9604   CHECK_EQUAL_64(0, x6);
9605   CHECK_EQUAL_64(1, x7);
9606 
9607   TEARDOWN();
9608 }
9609 
9610 
9611 TEST(jump_either_smi) {
9612   INIT_V8();
9613   SETUP();
9614 
9615   Label cond_pass_00, cond_pass_01, cond_pass_10, cond_pass_11;
9616   Label cond_fail_00, cond_fail_01, cond_fail_10, cond_fail_11;
9617   Label return1, return2, return3, done;
9618 
9619   START();
9620 
9621   __ Mov(x0, 0x5555555500000001UL);  // A pointer.
9622   __ Mov(x1, 0xaaaaaaaa00000001UL);  // A pointer.
9623   __ Mov(x2, 0x1234567800000000UL);  // A smi.
9624   __ Mov(x3, 0x8765432100000000UL);  // A smi.
9625   __ Mov(x4, 0xdead);
9626   __ Mov(x5, 0xdead);
9627   __ Mov(x6, 0xdead);
9628   __ Mov(x7, 0xdead);
9629 
9630   __ JumpIfEitherSmi(x0, x1, &cond_pass_00, &cond_fail_00);
9631   __ Bind(&return1);
9632   __ JumpIfEitherSmi(x0, x2, &cond_pass_01, &cond_fail_01);
9633   __ Bind(&return2);
9634   __ JumpIfEitherSmi(x2, x1, &cond_pass_10, &cond_fail_10);
9635   __ Bind(&return3);
9636   __ JumpIfEitherSmi(x2, x3, &cond_pass_11, &cond_fail_11);
9637 
9638   __ Bind(&cond_fail_00);
9639   __ Mov(x4, 0);
9640   __ B(&return1);
9641   __ Bind(&cond_pass_00);
9642   __ Mov(x4, 1);
9643   __ B(&return1);
9644 
9645   __ Bind(&cond_fail_01);
9646   __ Mov(x5, 0);
9647   __ B(&return2);
9648   __ Bind(&cond_pass_01);
9649   __ Mov(x5, 1);
9650   __ B(&return2);
9651 
9652   __ Bind(&cond_fail_10);
9653   __ Mov(x6, 0);
9654   __ B(&return3);
9655   __ Bind(&cond_pass_10);
9656   __ Mov(x6, 1);
9657   __ B(&return3);
9658 
9659   __ Bind(&cond_fail_11);
9660   __ Mov(x7, 0);
9661   __ B(&done);
9662   __ Bind(&cond_pass_11);
9663   __ Mov(x7, 1);
9664 
9665   __ Bind(&done);
9666 
9667   END();
9668 
9669   RUN();
9670 
9671   CHECK_EQUAL_64(0x5555555500000001UL, x0);
9672   CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
9673   CHECK_EQUAL_64(0x1234567800000000UL, x2);
9674   CHECK_EQUAL_64(0x8765432100000000UL, x3);
9675   CHECK_EQUAL_64(0, x4);
9676   CHECK_EQUAL_64(1, x5);
9677   CHECK_EQUAL_64(1, x6);
9678   CHECK_EQUAL_64(1, x7);
9679 
9680   TEARDOWN();
9681 }
9682 
9683 
9684 TEST(noreg) {
9685   // This test doesn't generate any code, but it verifies some invariants
9686   // related to NoReg.
9687   CHECK(NoReg.Is(NoFPReg));
9688   CHECK(NoFPReg.Is(NoReg));
9689   CHECK(NoReg.Is(NoCPUReg));
9690   CHECK(NoCPUReg.Is(NoReg));
9691   CHECK(NoFPReg.Is(NoCPUReg));
9692   CHECK(NoCPUReg.Is(NoFPReg));
9693 
9694   CHECK(NoReg.IsNone());
9695   CHECK(NoFPReg.IsNone());
9696   CHECK(NoCPUReg.IsNone());
9697 }
9698 
9699 
9700 TEST(isvalid) {
9701   // This test doesn't generate any code, but it verifies some invariants
9702   // related to IsValid().
9703   CHECK(!NoReg.IsValid());
9704   CHECK(!NoFPReg.IsValid());
9705   CHECK(!NoCPUReg.IsValid());
9706 
9707   CHECK(x0.IsValid());
9708   CHECK(w0.IsValid());
9709   CHECK(x30.IsValid());
9710   CHECK(w30.IsValid());
9711   CHECK(xzr.IsValid());
9712   CHECK(wzr.IsValid());
9713 
9714   CHECK(csp.IsValid());
9715   CHECK(wcsp.IsValid());
9716 
9717   CHECK(d0.IsValid());
9718   CHECK(s0.IsValid());
9719   CHECK(d31.IsValid());
9720   CHECK(s31.IsValid());
9721 
9722   CHECK(x0.IsValidRegister());
9723   CHECK(w0.IsValidRegister());
9724   CHECK(xzr.IsValidRegister());
9725   CHECK(wzr.IsValidRegister());
9726   CHECK(csp.IsValidRegister());
9727   CHECK(wcsp.IsValidRegister());
9728   CHECK(!x0.IsValidFPRegister());
9729   CHECK(!w0.IsValidFPRegister());
9730   CHECK(!xzr.IsValidFPRegister());
9731   CHECK(!wzr.IsValidFPRegister());
9732   CHECK(!csp.IsValidFPRegister());
9733   CHECK(!wcsp.IsValidFPRegister());
9734 
9735   CHECK(d0.IsValidFPRegister());
9736   CHECK(s0.IsValidFPRegister());
9737   CHECK(!d0.IsValidRegister());
9738   CHECK(!s0.IsValidRegister());
9739 
9740   // Test the same as before, but using CPURegister types. This shouldn't make
9741   // any difference.
9742   CHECK(static_cast<CPURegister>(x0).IsValid());
9743   CHECK(static_cast<CPURegister>(w0).IsValid());
9744   CHECK(static_cast<CPURegister>(x30).IsValid());
9745   CHECK(static_cast<CPURegister>(w30).IsValid());
9746   CHECK(static_cast<CPURegister>(xzr).IsValid());
9747   CHECK(static_cast<CPURegister>(wzr).IsValid());
9748 
9749   CHECK(static_cast<CPURegister>(csp).IsValid());
9750   CHECK(static_cast<CPURegister>(wcsp).IsValid());
9751 
9752   CHECK(static_cast<CPURegister>(d0).IsValid());
9753   CHECK(static_cast<CPURegister>(s0).IsValid());
9754   CHECK(static_cast<CPURegister>(d31).IsValid());
9755   CHECK(static_cast<CPURegister>(s31).IsValid());
9756 
9757   CHECK(static_cast<CPURegister>(x0).IsValidRegister());
9758   CHECK(static_cast<CPURegister>(w0).IsValidRegister());
9759   CHECK(static_cast<CPURegister>(xzr).IsValidRegister());
9760   CHECK(static_cast<CPURegister>(wzr).IsValidRegister());
9761   CHECK(static_cast<CPURegister>(csp).IsValidRegister());
9762   CHECK(static_cast<CPURegister>(wcsp).IsValidRegister());
9763   CHECK(!static_cast<CPURegister>(x0).IsValidFPRegister());
9764   CHECK(!static_cast<CPURegister>(w0).IsValidFPRegister());
9765   CHECK(!static_cast<CPURegister>(xzr).IsValidFPRegister());
9766   CHECK(!static_cast<CPURegister>(wzr).IsValidFPRegister());
9767   CHECK(!static_cast<CPURegister>(csp).IsValidFPRegister());
9768   CHECK(!static_cast<CPURegister>(wcsp).IsValidFPRegister());
9769 
9770   CHECK(static_cast<CPURegister>(d0).IsValidFPRegister());
9771   CHECK(static_cast<CPURegister>(s0).IsValidFPRegister());
9772   CHECK(!static_cast<CPURegister>(d0).IsValidRegister());
9773   CHECK(!static_cast<CPURegister>(s0).IsValidRegister());
9774 }
9775 
9776 
9777 TEST(cpureglist_utils_x) {
9778   // This test doesn't generate any code, but it verifies the behaviour of
9779   // the CPURegList utility methods.
9780 
9781   // Test a list of X registers.
9782   CPURegList test(x0, x1, x2, x3);
9783 
9784   CHECK(test.IncludesAliasOf(x0));
9785   CHECK(test.IncludesAliasOf(x1));
9786   CHECK(test.IncludesAliasOf(x2));
9787   CHECK(test.IncludesAliasOf(x3));
9788   CHECK(test.IncludesAliasOf(w0));
9789   CHECK(test.IncludesAliasOf(w1));
9790   CHECK(test.IncludesAliasOf(w2));
9791   CHECK(test.IncludesAliasOf(w3));
9792 
9793   CHECK(!test.IncludesAliasOf(x4));
9794   CHECK(!test.IncludesAliasOf(x30));
9795   CHECK(!test.IncludesAliasOf(xzr));
9796   CHECK(!test.IncludesAliasOf(csp));
9797   CHECK(!test.IncludesAliasOf(w4));
9798   CHECK(!test.IncludesAliasOf(w30));
9799   CHECK(!test.IncludesAliasOf(wzr));
9800   CHECK(!test.IncludesAliasOf(wcsp));
9801 
9802   CHECK(!test.IncludesAliasOf(d0));
9803   CHECK(!test.IncludesAliasOf(d1));
9804   CHECK(!test.IncludesAliasOf(d2));
9805   CHECK(!test.IncludesAliasOf(d3));
9806   CHECK(!test.IncludesAliasOf(s0));
9807   CHECK(!test.IncludesAliasOf(s1));
9808   CHECK(!test.IncludesAliasOf(s2));
9809   CHECK(!test.IncludesAliasOf(s3));
9810 
9811   CHECK(!test.IsEmpty());
9812 
9813   CHECK(test.type() == x0.type());
9814 
9815   CHECK(test.PopHighestIndex().Is(x3));
9816   CHECK(test.PopLowestIndex().Is(x0));
9817 
9818   CHECK(test.IncludesAliasOf(x1));
9819   CHECK(test.IncludesAliasOf(x2));
9820   CHECK(test.IncludesAliasOf(w1));
9821   CHECK(test.IncludesAliasOf(w2));
9822   CHECK(!test.IncludesAliasOf(x0));
9823   CHECK(!test.IncludesAliasOf(x3));
9824   CHECK(!test.IncludesAliasOf(w0));
9825   CHECK(!test.IncludesAliasOf(w3));
9826 
9827   CHECK(test.PopHighestIndex().Is(x2));
9828   CHECK(test.PopLowestIndex().Is(x1));
9829 
9830   CHECK(!test.IncludesAliasOf(x1));
9831   CHECK(!test.IncludesAliasOf(x2));
9832   CHECK(!test.IncludesAliasOf(w1));
9833   CHECK(!test.IncludesAliasOf(w2));
9834 
9835   CHECK(test.IsEmpty());
9836 }
9837 
9838 
9839 TEST(cpureglist_utils_w) {
9840   // This test doesn't generate any code, but it verifies the behaviour of
9841   // the CPURegList utility methods.
9842 
9843   // Test a list of W registers.
9844   CPURegList test(w10, w11, w12, w13);
9845 
9846   CHECK(test.IncludesAliasOf(x10));
9847   CHECK(test.IncludesAliasOf(x11));
9848   CHECK(test.IncludesAliasOf(x12));
9849   CHECK(test.IncludesAliasOf(x13));
9850   CHECK(test.IncludesAliasOf(w10));
9851   CHECK(test.IncludesAliasOf(w11));
9852   CHECK(test.IncludesAliasOf(w12));
9853   CHECK(test.IncludesAliasOf(w13));
9854 
9855   CHECK(!test.IncludesAliasOf(x0));
9856   CHECK(!test.IncludesAliasOf(x9));
9857   CHECK(!test.IncludesAliasOf(x14));
9858   CHECK(!test.IncludesAliasOf(x30));
9859   CHECK(!test.IncludesAliasOf(xzr));
9860   CHECK(!test.IncludesAliasOf(csp));
9861   CHECK(!test.IncludesAliasOf(w0));
9862   CHECK(!test.IncludesAliasOf(w9));
9863   CHECK(!test.IncludesAliasOf(w14));
9864   CHECK(!test.IncludesAliasOf(w30));
9865   CHECK(!test.IncludesAliasOf(wzr));
9866   CHECK(!test.IncludesAliasOf(wcsp));
9867 
9868   CHECK(!test.IncludesAliasOf(d10));
9869   CHECK(!test.IncludesAliasOf(d11));
9870   CHECK(!test.IncludesAliasOf(d12));
9871   CHECK(!test.IncludesAliasOf(d13));
9872   CHECK(!test.IncludesAliasOf(s10));
9873   CHECK(!test.IncludesAliasOf(s11));
9874   CHECK(!test.IncludesAliasOf(s12));
9875   CHECK(!test.IncludesAliasOf(s13));
9876 
9877   CHECK(!test.IsEmpty());
9878 
9879   CHECK(test.type() == w10.type());
9880 
9881   CHECK(test.PopHighestIndex().Is(w13));
9882   CHECK(test.PopLowestIndex().Is(w10));
9883 
9884   CHECK(test.IncludesAliasOf(x11));
9885   CHECK(test.IncludesAliasOf(x12));
9886   CHECK(test.IncludesAliasOf(w11));
9887   CHECK(test.IncludesAliasOf(w12));
9888   CHECK(!test.IncludesAliasOf(x10));
9889   CHECK(!test.IncludesAliasOf(x13));
9890   CHECK(!test.IncludesAliasOf(w10));
9891   CHECK(!test.IncludesAliasOf(w13));
9892 
9893   CHECK(test.PopHighestIndex().Is(w12));
9894   CHECK(test.PopLowestIndex().Is(w11));
9895 
9896   CHECK(!test.IncludesAliasOf(x11));
9897   CHECK(!test.IncludesAliasOf(x12));
9898   CHECK(!test.IncludesAliasOf(w11));
9899   CHECK(!test.IncludesAliasOf(w12));
9900 
9901   CHECK(test.IsEmpty());
9902 }
9903 
9904 
9905 TEST(cpureglist_utils_d) {
9906   // This test doesn't generate any code, but it verifies the behaviour of
9907   // the CPURegList utility methods.
9908 
9909   // Test a list of D registers.
9910   CPURegList test(d20, d21, d22, d23);
9911 
9912   CHECK(test.IncludesAliasOf(d20));
9913   CHECK(test.IncludesAliasOf(d21));
9914   CHECK(test.IncludesAliasOf(d22));
9915   CHECK(test.IncludesAliasOf(d23));
9916   CHECK(test.IncludesAliasOf(s20));
9917   CHECK(test.IncludesAliasOf(s21));
9918   CHECK(test.IncludesAliasOf(s22));
9919   CHECK(test.IncludesAliasOf(s23));
9920 
9921   CHECK(!test.IncludesAliasOf(d0));
9922   CHECK(!test.IncludesAliasOf(d19));
9923   CHECK(!test.IncludesAliasOf(d24));
9924   CHECK(!test.IncludesAliasOf(d31));
9925   CHECK(!test.IncludesAliasOf(s0));
9926   CHECK(!test.IncludesAliasOf(s19));
9927   CHECK(!test.IncludesAliasOf(s24));
9928   CHECK(!test.IncludesAliasOf(s31));
9929 
9930   CHECK(!test.IncludesAliasOf(x20));
9931   CHECK(!test.IncludesAliasOf(x21));
9932   CHECK(!test.IncludesAliasOf(x22));
9933   CHECK(!test.IncludesAliasOf(x23));
9934   CHECK(!test.IncludesAliasOf(w20));
9935   CHECK(!test.IncludesAliasOf(w21));
9936   CHECK(!test.IncludesAliasOf(w22));
9937   CHECK(!test.IncludesAliasOf(w23));
9938 
9939   CHECK(!test.IncludesAliasOf(xzr));
9940   CHECK(!test.IncludesAliasOf(wzr));
9941   CHECK(!test.IncludesAliasOf(csp));
9942   CHECK(!test.IncludesAliasOf(wcsp));
9943 
9944   CHECK(!test.IsEmpty());
9945 
9946   CHECK(test.type() == d20.type());
9947 
9948   CHECK(test.PopHighestIndex().Is(d23));
9949   CHECK(test.PopLowestIndex().Is(d20));
9950 
9951   CHECK(test.IncludesAliasOf(d21));
9952   CHECK(test.IncludesAliasOf(d22));
9953   CHECK(test.IncludesAliasOf(s21));
9954   CHECK(test.IncludesAliasOf(s22));
9955   CHECK(!test.IncludesAliasOf(d20));
9956   CHECK(!test.IncludesAliasOf(d23));
9957   CHECK(!test.IncludesAliasOf(s20));
9958   CHECK(!test.IncludesAliasOf(s23));
9959 
9960   CHECK(test.PopHighestIndex().Is(d22));
9961   CHECK(test.PopLowestIndex().Is(d21));
9962 
9963   CHECK(!test.IncludesAliasOf(d21));
9964   CHECK(!test.IncludesAliasOf(d22));
9965   CHECK(!test.IncludesAliasOf(s21));
9966   CHECK(!test.IncludesAliasOf(s22));
9967 
9968   CHECK(test.IsEmpty());
9969 }
9970 
9971 
9972 TEST(cpureglist_utils_s) {
9973   // This test doesn't generate any code, but it verifies the behaviour of
9974   // the CPURegList utility methods.
9975 
9976   // Test a list of S registers.
9977   CPURegList test(s20, s21, s22, s23);
9978 
9979   // The type and size mechanisms are already covered, so here we just test
9980   // that lists of S registers alias individual D registers.
9981 
9982   CHECK(test.IncludesAliasOf(d20));
9983   CHECK(test.IncludesAliasOf(d21));
9984   CHECK(test.IncludesAliasOf(d22));
9985   CHECK(test.IncludesAliasOf(d23));
9986   CHECK(test.IncludesAliasOf(s20));
9987   CHECK(test.IncludesAliasOf(s21));
9988   CHECK(test.IncludesAliasOf(s22));
9989   CHECK(test.IncludesAliasOf(s23));
9990 }
9991 
9992 
9993 TEST(cpureglist_utils_empty) {
9994   // This test doesn't generate any code, but it verifies the behaviour of
9995   // the CPURegList utility methods.
9996 
9997   // Test an empty list.
9998   // Empty lists can have type and size properties. Check that we can create
9999   // them, and that they are empty.
10000   CPURegList reg32(CPURegister::kRegister, kWRegSizeInBits, 0);
10001   CPURegList reg64(CPURegister::kRegister, kXRegSizeInBits, 0);
10002   CPURegList fpreg32(CPURegister::kFPRegister, kSRegSizeInBits, 0);
10003   CPURegList fpreg64(CPURegister::kFPRegister, kDRegSizeInBits, 0);
10004 
10005   CHECK(reg32.IsEmpty());
10006   CHECK(reg64.IsEmpty());
10007   CHECK(fpreg32.IsEmpty());
10008   CHECK(fpreg64.IsEmpty());
10009 
10010   CHECK(reg32.PopLowestIndex().IsNone());
10011   CHECK(reg64.PopLowestIndex().IsNone());
10012   CHECK(fpreg32.PopLowestIndex().IsNone());
10013   CHECK(fpreg64.PopLowestIndex().IsNone());
10014 
10015   CHECK(reg32.PopHighestIndex().IsNone());
10016   CHECK(reg64.PopHighestIndex().IsNone());
10017   CHECK(fpreg32.PopHighestIndex().IsNone());
10018   CHECK(fpreg64.PopHighestIndex().IsNone());
10019 
10020   CHECK(reg32.IsEmpty());
10021   CHECK(reg64.IsEmpty());
10022   CHECK(fpreg32.IsEmpty());
10023   CHECK(fpreg64.IsEmpty());
10024 }
10025 
10026 
10027 TEST(printf) {
10028   INIT_V8();
10029   SETUP_SIZE(BUF_SIZE * 2);
10030   START();
10031 
10032   char const * test_plain_string = "Printf with no arguments.\n";
10033   char const * test_substring = "'This is a substring.'";
10034   RegisterDump before;
10035 
10036   // Initialize x29 to the value of the stack pointer. We will use x29 as a
10037   // temporary stack pointer later, and initializing it in this way allows the
10038   // RegisterDump check to pass.
10039   __ Mov(x29, __ StackPointer());
10040 
10041   // Test simple integer arguments.
10042   __ Mov(x0, 1234);
10043   __ Mov(x1, 0x1234);
10044 
10045   // Test simple floating-point arguments.
10046   __ Fmov(d0, 1.234);
10047 
10048   // Test pointer (string) arguments.
10049   __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
10050 
10051   // Test the maximum number of arguments, and sign extension.
10052   __ Mov(w3, 0xffffffff);
10053   __ Mov(w4, 0xffffffff);
10054   __ Mov(x5, 0xffffffffffffffff);
10055   __ Mov(x6, 0xffffffffffffffff);
10056   __ Fmov(s1, 1.234);
10057   __ Fmov(s2, 2.345);
10058   __ Fmov(d3, 3.456);
10059   __ Fmov(d4, 4.567);
10060 
10061   // Test printing callee-saved registers.
10062   __ Mov(x28, 0x123456789abcdef);
10063   __ Fmov(d10, 42.0);
10064 
10065   // Test with three arguments.
10066   __ Mov(x10, 3);
10067   __ Mov(x11, 40);
10068   __ Mov(x12, 500);
10069 
10070   // A single character.
10071   __ Mov(w13, 'x');
10072 
10073   // Check that we don't clobber any registers.
10074   before.Dump(&masm);
10075 
10076   __ Printf(test_plain_string);   // NOLINT(runtime/printf)
10077   __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
10078   __ Printf("w5: %" PRId32 ", x5: %" PRId64"\n", w5, x5);
10079   __ Printf("d0: %f\n", d0);
10080   __ Printf("Test %%s: %s\n", x2);
10081   __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
10082             "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
10083             w3, w4, x5, x6);
10084   __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
10085   __ Printf("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
10086   __ Printf("%g\n", d10);
10087   __ Printf("%%%%%s%%%c%%\n", x2, w13);
10088 
10089   // Print the stack pointer (csp).
10090   CHECK(csp.Is(__ StackPointer()));
10091   __ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
10092             __ StackPointer(), __ StackPointer().W());
10093 
10094   // Test with a different stack pointer.
10095   const Register old_stack_pointer = __ StackPointer();
10096   __ Mov(x29, old_stack_pointer);
10097   __ SetStackPointer(x29);
10098   // Print the stack pointer (not csp).
10099   __ Printf("StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
10100             __ StackPointer(), __ StackPointer().W());
10101   __ Mov(old_stack_pointer, __ StackPointer());
10102   __ SetStackPointer(old_stack_pointer);
10103 
10104   // Test with three arguments.
10105   __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
10106 
10107   // Mixed argument types.
10108   __ Printf("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
10109             w3, s1, x5, d3);
10110   __ Printf("s1: %f, d3: %f, w3: %" PRId32 ", x5: %" PRId64 "\n",
10111             s1, d3, w3, x5);
10112 
10113   END();
10114   RUN();
10115 
10116   // We cannot easily test the output of the Printf sequences, and because
10117   // Printf preserves all registers by default, we can't look at the number of
10118   // bytes that were printed. However, the printf_no_preserve test should check
10119   // that, and here we just test that we didn't clobber any registers.
10120   CHECK_EQUAL_REGISTERS(before);
10121 
10122   TEARDOWN();
10123 }
10124 
10125 
10126 TEST(printf_no_preserve) {
10127   INIT_V8();
10128   SETUP();
10129   START();
10130 
10131   char const * test_plain_string = "Printf with no arguments.\n";
10132   char const * test_substring = "'This is a substring.'";
10133 
10134   __ PrintfNoPreserve(test_plain_string);
10135   __ Mov(x19, x0);
10136 
10137   // Test simple integer arguments.
10138   __ Mov(x0, 1234);
10139   __ Mov(x1, 0x1234);
10140   __ PrintfNoPreserve("x0: %" PRId64", x1: 0x%08" PRIx64 "\n", x0, x1);
10141   __ Mov(x20, x0);
10142 
10143   // Test simple floating-point arguments.
10144   __ Fmov(d0, 1.234);
10145   __ PrintfNoPreserve("d0: %f\n", d0);
10146   __ Mov(x21, x0);
10147 
10148   // Test pointer (string) arguments.
10149   __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
10150   __ PrintfNoPreserve("Test %%s: %s\n", x2);
10151   __ Mov(x22, x0);
10152 
10153   // Test the maximum number of arguments, and sign extension.
10154   __ Mov(w3, 0xffffffff);
10155   __ Mov(w4, 0xffffffff);
10156   __ Mov(x5, 0xffffffffffffffff);
10157   __ Mov(x6, 0xffffffffffffffff);
10158   __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32 "\n"
10159                       "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
10160                       w3, w4, x5, x6);
10161   __ Mov(x23, x0);
10162 
10163   __ Fmov(s1, 1.234);
10164   __ Fmov(s2, 2.345);
10165   __ Fmov(d3, 3.456);
10166   __ Fmov(d4, 4.567);
10167   __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
10168   __ Mov(x24, x0);
10169 
10170   // Test printing callee-saved registers.
10171   __ Mov(x28, 0x123456789abcdef);
10172   __ PrintfNoPreserve("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
10173   __ Mov(x25, x0);
10174 
10175   __ Fmov(d10, 42.0);
10176   __ PrintfNoPreserve("%g\n", d10);
10177   __ Mov(x26, x0);
10178 
10179   // Test with a different stack pointer.
10180   const Register old_stack_pointer = __ StackPointer();
10181   __ Mov(x29, old_stack_pointer);
10182   __ SetStackPointer(x29);
10183   // Print the stack pointer (not csp).
10184   __ PrintfNoPreserve(
10185       "StackPointer(not csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
10186       __ StackPointer(), __ StackPointer().W());
10187   __ Mov(x27, x0);
10188   __ Mov(old_stack_pointer, __ StackPointer());
10189   __ SetStackPointer(old_stack_pointer);
10190 
10191   // Test with three arguments.
10192   __ Mov(x3, 3);
10193   __ Mov(x4, 40);
10194   __ Mov(x5, 500);
10195   __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
10196   __ Mov(x28, x0);
10197 
10198   // Mixed argument types.
10199   __ Mov(w3, 0xffffffff);
10200   __ Fmov(s1, 1.234);
10201   __ Mov(x5, 0xffffffffffffffff);
10202   __ Fmov(d3, 3.456);
10203   __ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
10204                       w3, s1, x5, d3);
10205   __ Mov(x29, x0);
10206 
10207   END();
10208   RUN();
10209 
10210   // We cannot easily test the exact output of the Printf sequences, but we can
10211   // use the return code to check that the string length was correct.
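  // (For example, "x0: 1234, x1: 0x00001234\n" is 25 characters, which is the
  //  value expected in x20 below.)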
10212 
10213   // Printf with no arguments.
10214   CHECK_EQUAL_64(strlen(test_plain_string), x19);
10215   // x0: 1234, x1: 0x00001234
10216   CHECK_EQUAL_64(25, x20);
10217   // d0: 1.234000
10218   CHECK_EQUAL_64(13, x21);
10219   // Test %s: 'This is a substring.'
10220   CHECK_EQUAL_64(32, x22);
10221   // w3(uint32): 4294967295
10222   // w4(int32): -1
10223   // x5(uint64): 18446744073709551615
10224   // x6(int64): -1
10225   CHECK_EQUAL_64(23 + 14 + 33 + 14, x23);
10226   // %f: 1.234000
10227   // %g: 2.345
10228   // %e: 3.456000e+00
10229   // %E: 4.567000E+00
10230   CHECK_EQUAL_64(13 + 10 + 17 + 17, x24);
10231   // 0x89abcdef, 0x123456789abcdef
10232   CHECK_EQUAL_64(30, x25);
10233   // 42
10234   CHECK_EQUAL_64(3, x26);
10235   // StackPointer(not csp): 0x00007fb037ae2370, 0x37ae2370
10236   // Note: This is an example value, but the field width is fixed here so the
10237   // string length is still predictable.
10238   CHECK_EQUAL_64(54, x27);
10239   // 3=3, 4=40, 5=500
10240   CHECK_EQUAL_64(17, x28);
10241   // w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
10242   CHECK_EQUAL_64(69, x29);
10243 
10244   TEARDOWN();
10245 }
10246 
10247 
10248 TEST(blr_lr) {
10249   // A simple test to check that the simulator correctly handles "blr lr".
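  // (Blr uses lr as both the branch target and the destination of the return
  //  address; the old value must be read before the new one is written, which
  //  is the corner case exercised here.)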
10250   INIT_V8();
10251   SETUP();
10252 
10253   START();
10254   Label target;
10255   Label end;
10256 
10257   __ Mov(x0, 0x0);
10258   __ Adr(lr, &target);
10259 
10260   __ Blr(lr);
10261   __ Mov(x0, 0xdeadbeef);
10262   __ B(&end);
10263 
10264   __ Bind(&target);
10265   __ Mov(x0, 0xc001c0de);
10266 
10267   __ Bind(&end);
10268   END();
10269 
10270   RUN();
10271 
10272   CHECK_EQUAL_64(0xc001c0de, x0);
10273 
10274   TEARDOWN();
10275 }
10276 
10277 
10278 TEST(barriers) {
10279   // Generate all supported barriers; this is just a smoke test.
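  // (For reference: DMB orders memory accesses, DSB additionally waits for
  //  outstanding accesses to complete, and ISB flushes the instruction
  //  pipeline. The first argument selects the shareability domain and the
  //  second the kinds of accesses affected.)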
10280   INIT_V8();
10281   SETUP();
10282 
10283   START();
10284 
10285   // DMB
10286   __ Dmb(FullSystem, BarrierAll);
10287   __ Dmb(FullSystem, BarrierReads);
10288   __ Dmb(FullSystem, BarrierWrites);
10289   __ Dmb(FullSystem, BarrierOther);
10290 
10291   __ Dmb(InnerShareable, BarrierAll);
10292   __ Dmb(InnerShareable, BarrierReads);
10293   __ Dmb(InnerShareable, BarrierWrites);
10294   __ Dmb(InnerShareable, BarrierOther);
10295 
10296   __ Dmb(NonShareable, BarrierAll);
10297   __ Dmb(NonShareable, BarrierReads);
10298   __ Dmb(NonShareable, BarrierWrites);
10299   __ Dmb(NonShareable, BarrierOther);
10300 
10301   __ Dmb(OuterShareable, BarrierAll);
10302   __ Dmb(OuterShareable, BarrierReads);
10303   __ Dmb(OuterShareable, BarrierWrites);
10304   __ Dmb(OuterShareable, BarrierOther);
10305 
10306   // DSB
10307   __ Dsb(FullSystem, BarrierAll);
10308   __ Dsb(FullSystem, BarrierReads);
10309   __ Dsb(FullSystem, BarrierWrites);
10310   __ Dsb(FullSystem, BarrierOther);
10311 
10312   __ Dsb(InnerShareable, BarrierAll);
10313   __ Dsb(InnerShareable, BarrierReads);
10314   __ Dsb(InnerShareable, BarrierWrites);
10315   __ Dsb(InnerShareable, BarrierOther);
10316 
10317   __ Dsb(NonShareable, BarrierAll);
10318   __ Dsb(NonShareable, BarrierReads);
10319   __ Dsb(NonShareable, BarrierWrites);
10320   __ Dsb(NonShareable, BarrierOther);
10321 
10322   __ Dsb(OuterShareable, BarrierAll);
10323   __ Dsb(OuterShareable, BarrierReads);
10324   __ Dsb(OuterShareable, BarrierWrites);
10325   __ Dsb(OuterShareable, BarrierOther);
10326 
10327   // ISB
10328   __ Isb();
10329 
10330   END();
10331 
10332   RUN();
10333 
10334   TEARDOWN();
10335 }
10336 
10337 
10338 TEST(process_nan_double) {
10339   INIT_V8();
10340   // Make sure that NaN propagation works correctly.
10341   double sn = rawbits_to_double(0x7ff5555511111111);
10342   double qn = rawbits_to_double(0x7ffaaaaa11111111);
10343   CHECK(IsSignallingNaN(sn));
10344   CHECK(IsQuietNaN(qn));
10345 
10346   // The input NaNs after passing through ProcessNaN.
10347   double sn_proc = rawbits_to_double(0x7ffd555511111111);
10348   double qn_proc = qn;
10349   CHECK(IsQuietNaN(sn_proc));
10350   CHECK(IsQuietNaN(qn_proc));
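  // Quieting a signalling NaN sets the most significant fraction bit
  // (0x0008000000000000 for doubles), which is how 0x7ff5555511111111 becomes
  // 0x7ffd555511111111 above; the rest of the payload is preserved.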
10351 
10352   SETUP();
10353   START();
10354 
10355   // Execute a number of instructions which all use ProcessNaN, and check that
10356   // they all handle the NaN correctly.
10357   __ Fmov(d0, sn);
10358   __ Fmov(d10, qn);
10359 
10360   // Operations that always propagate NaNs unchanged, even signalling NaNs.
10361   //   - Signalling NaN
10362   __ Fmov(d1, d0);
10363   __ Fabs(d2, d0);
10364   __ Fneg(d3, d0);
10365   //   - Quiet NaN
10366   __ Fmov(d11, d10);
10367   __ Fabs(d12, d10);
10368   __ Fneg(d13, d10);
10369 
10370   // Operations that use ProcessNaN.
10371   //   - Signalling NaN
10372   __ Fsqrt(d4, d0);
10373   __ Frinta(d5, d0);
10374   __ Frintn(d6, d0);
10375   __ Frintz(d7, d0);
10376   //   - Quiet NaN
10377   __ Fsqrt(d14, d10);
10378   __ Frinta(d15, d10);
10379   __ Frintn(d16, d10);
10380   __ Frintz(d17, d10);
10381 
10382   // The behaviour of fcvt is checked in TEST(fcvt_sd).
10383 
10384   END();
10385   RUN();
10386 
10387   uint64_t qn_raw = double_to_rawbits(qn);
10388   uint64_t sn_raw = double_to_rawbits(sn);
10389 
10390   //   - Signalling NaN
10391   CHECK_EQUAL_FP64(sn, d1);
10392   CHECK_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
10393   CHECK_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
10394   //   - Quiet NaN
10395   CHECK_EQUAL_FP64(qn, d11);
10396   CHECK_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
10397   CHECK_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
10398 
10399   //   - Signalling NaN
10400   CHECK_EQUAL_FP64(sn_proc, d4);
10401   CHECK_EQUAL_FP64(sn_proc, d5);
10402   CHECK_EQUAL_FP64(sn_proc, d6);
10403   CHECK_EQUAL_FP64(sn_proc, d7);
10404   //   - Quiet NaN
10405   CHECK_EQUAL_FP64(qn_proc, d14);
10406   CHECK_EQUAL_FP64(qn_proc, d15);
10407   CHECK_EQUAL_FP64(qn_proc, d16);
10408   CHECK_EQUAL_FP64(qn_proc, d17);
10409 
10410   TEARDOWN();
10411 }
10412 
10413 
10414 TEST(process_nan_float) {
10415   INIT_V8();
10416   // Make sure that NaN propagation works correctly.
10417   float sn = rawbits_to_float(0x7f951111);
10418   float qn = rawbits_to_float(0x7fea1111);
10419   CHECK(IsSignallingNaN(sn));
10420   CHECK(IsQuietNaN(qn));
10421 
10422   // The input NaNs after passing through ProcessNaN.
10423   float sn_proc = rawbits_to_float(0x7fd51111);
10424   float qn_proc = qn;
10425   CHECK(IsQuietNaN(sn_proc));
10426   CHECK(IsQuietNaN(qn_proc));
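  // As for doubles, quieting sets the top fraction bit (0x00400000 for
  // floats): 0x7f951111 | 0x00400000 == 0x7fd51111, matching sn_proc above.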
10427 
10428   SETUP();
10429   START();
10430 
10431   // Execute a number of instructions which all use ProcessNaN, and check that
10432   // they all handle the NaN correctly.
10433   __ Fmov(s0, sn);
10434   __ Fmov(s10, qn);
10435 
10436   // Operations that always propagate NaNs unchanged, even signalling NaNs.
10437   //   - Signalling NaN
10438   __ Fmov(s1, s0);
10439   __ Fabs(s2, s0);
10440   __ Fneg(s3, s0);
10441   //   - Quiet NaN
10442   __ Fmov(s11, s10);
10443   __ Fabs(s12, s10);
10444   __ Fneg(s13, s10);
10445 
10446   // Operations that use ProcessNaN.
10447   //   - Signalling NaN
10448   __ Fsqrt(s4, s0);
10449   __ Frinta(s5, s0);
10450   __ Frintn(s6, s0);
10451   __ Frintz(s7, s0);
10452   //   - Quiet NaN
10453   __ Fsqrt(s14, s10);
10454   __ Frinta(s15, s10);
10455   __ Frintn(s16, s10);
10456   __ Frintz(s17, s10);
10457 
10458   // The behaviour of fcvt is checked in TEST(fcvt_sd).
10459 
10460   END();
10461   RUN();
10462 
10463   uint32_t qn_raw = float_to_rawbits(qn);
10464   uint32_t sn_raw = float_to_rawbits(sn);
10465 
10466   //   - Signalling NaN
10467   CHECK_EQUAL_FP32(sn, s1);
10468   CHECK_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
10469   CHECK_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
10470   //   - Quiet NaN
10471   CHECK_EQUAL_FP32(qn, s11);
10472   CHECK_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
10473   CHECK_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
10474 
10475   //   - Signalling NaN
10476   CHECK_EQUAL_FP32(sn_proc, s4);
10477   CHECK_EQUAL_FP32(sn_proc, s5);
10478   CHECK_EQUAL_FP32(sn_proc, s6);
10479   CHECK_EQUAL_FP32(sn_proc, s7);
10480   //   - Quiet NaN
10481   CHECK_EQUAL_FP32(qn_proc, s14);
10482   CHECK_EQUAL_FP32(qn_proc, s15);
10483   CHECK_EQUAL_FP32(qn_proc, s16);
10484   CHECK_EQUAL_FP32(qn_proc, s17);
10485 
10486   TEARDOWN();
10487 }
10488 
10489 
10490 static void ProcessNaNsHelper(double n, double m, double expected) {
10491   CHECK(std::isnan(n) || std::isnan(m));
10492   CHECK(std::isnan(expected));
10493 
10494   SETUP();
10495   START();
10496 
10497   // Execute a number of instructions which all use ProcessNaNs, and check that
10498   // they all propagate NaNs correctly.
10499   __ Fmov(d0, n);
10500   __ Fmov(d1, m);
10501 
10502   __ Fadd(d2, d0, d1);
10503   __ Fsub(d3, d0, d1);
10504   __ Fmul(d4, d0, d1);
10505   __ Fdiv(d5, d0, d1);
10506   __ Fmax(d6, d0, d1);
10507   __ Fmin(d7, d0, d1);
10508 
10509   END();
10510   RUN();
10511 
10512   CHECK_EQUAL_FP64(expected, d2);
10513   CHECK_EQUAL_FP64(expected, d3);
10514   CHECK_EQUAL_FP64(expected, d4);
10515   CHECK_EQUAL_FP64(expected, d5);
10516   CHECK_EQUAL_FP64(expected, d6);
10517   CHECK_EQUAL_FP64(expected, d7);
10518 
10519   TEARDOWN();
10520 }
10521 
10522 
10523 TEST(process_nans_double) {
10524   INIT_V8();
10525   // Make sure that NaN propagation works correctly.
10526   double sn = rawbits_to_double(0x7ff5555511111111);
10527   double sm = rawbits_to_double(0x7ff5555522222222);
10528   double qn = rawbits_to_double(0x7ffaaaaa11111111);
10529   double qm = rawbits_to_double(0x7ffaaaaa22222222);
10530   CHECK(IsSignallingNaN(sn));
10531   CHECK(IsSignallingNaN(sm));
10532   CHECK(IsQuietNaN(qn));
10533   CHECK(IsQuietNaN(qm));
10534 
10535   // The input NaNs after passing through ProcessNaN.
10536   double sn_proc = rawbits_to_double(0x7ffd555511111111);
10537   double sm_proc = rawbits_to_double(0x7ffd555522222222);
10538   double qn_proc = qn;
10539   double qm_proc = qm;
10540   CHECK(IsQuietNaN(sn_proc));
10541   CHECK(IsQuietNaN(sm_proc));
10542   CHECK(IsQuietNaN(qn_proc));
10543   CHECK(IsQuietNaN(qm_proc));
10544 
10545   // Quiet NaNs are propagated.
10546   ProcessNaNsHelper(qn, 0, qn_proc);
10547   ProcessNaNsHelper(0, qm, qm_proc);
10548   ProcessNaNsHelper(qn, qm, qn_proc);
10549 
10550   // Signalling NaNs are propagated, and made quiet.
10551   ProcessNaNsHelper(sn, 0, sn_proc);
10552   ProcessNaNsHelper(0, sm, sm_proc);
10553   ProcessNaNsHelper(sn, sm, sn_proc);
10554 
10555   // Signalling NaNs take precedence over quiet NaNs.
10556   ProcessNaNsHelper(sn, qm, sn_proc);
10557   ProcessNaNsHelper(qn, sm, sm_proc);
10558   ProcessNaNsHelper(sn, sm, sn_proc);
10559 }
10560 
10561 
10562 static void ProcessNaNsHelper(float n, float m, float expected) {
10563   CHECK(std::isnan(n) || std::isnan(m));
10564   CHECK(std::isnan(expected));
10565 
10566   SETUP();
10567   START();
10568 
10569   // Execute a number of instructions which all use ProcessNaNs, and check that
10570   // they all propagate NaNs correctly.
10571   __ Fmov(s0, n);
10572   __ Fmov(s1, m);
10573 
10574   __ Fadd(s2, s0, s1);
10575   __ Fsub(s3, s0, s1);
10576   __ Fmul(s4, s0, s1);
10577   __ Fdiv(s5, s0, s1);
10578   __ Fmax(s6, s0, s1);
10579   __ Fmin(s7, s0, s1);
10580 
10581   END();
10582   RUN();
10583 
10584   CHECK_EQUAL_FP32(expected, s2);
10585   CHECK_EQUAL_FP32(expected, s3);
10586   CHECK_EQUAL_FP32(expected, s4);
10587   CHECK_EQUAL_FP32(expected, s5);
10588   CHECK_EQUAL_FP32(expected, s6);
10589   CHECK_EQUAL_FP32(expected, s7);
10590 
10591   TEARDOWN();
10592 }
10593 
10594 
10595 TEST(process_nans_float) {
10596   INIT_V8();
10597   // Make sure that NaN propagation works correctly.
10598   float sn = rawbits_to_float(0x7f951111);
10599   float sm = rawbits_to_float(0x7f952222);
10600   float qn = rawbits_to_float(0x7fea1111);
10601   float qm = rawbits_to_float(0x7fea2222);
10602   CHECK(IsSignallingNaN(sn));
10603   CHECK(IsSignallingNaN(sm));
10604   CHECK(IsQuietNaN(qn));
10605   CHECK(IsQuietNaN(qm));
10606 
10607   // The input NaNs after passing through ProcessNaN.
10608   float sn_proc = rawbits_to_float(0x7fd51111);
10609   float sm_proc = rawbits_to_float(0x7fd52222);
10610   float qn_proc = qn;
10611   float qm_proc = qm;
10612   CHECK(IsQuietNaN(sn_proc));
10613   CHECK(IsQuietNaN(sm_proc));
10614   CHECK(IsQuietNaN(qn_proc));
10615   CHECK(IsQuietNaN(qm_proc));
10616 
10617   // Quiet NaNs are propagated.
10618   ProcessNaNsHelper(qn, 0, qn_proc);
10619   ProcessNaNsHelper(0, qm, qm_proc);
10620   ProcessNaNsHelper(qn, qm, qn_proc);
10621 
10622   // Signalling NaNs are propagated, and made quiet.
10623   ProcessNaNsHelper(sn, 0, sn_proc);
10624   ProcessNaNsHelper(0, sm, sm_proc);
10625   ProcessNaNsHelper(sn, sm, sn_proc);
10626 
10627   // Signalling NaNs take precedence over quiet NaNs.
10628   ProcessNaNsHelper(sn, qm, sn_proc);
10629   ProcessNaNsHelper(qn, sm, sm_proc);
10630   ProcessNaNsHelper(sn, sm, sn_proc);
10631 }
10632 
10633 
10634 static void DefaultNaNHelper(float n, float m, float a) {
10635   CHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
10636 
10637   bool test_1op = std::isnan(n);
10638   bool test_2op = std::isnan(n) || std::isnan(m);
10639 
10640   SETUP();
10641   START();
10642 
10643   // Enable Default-NaN mode in the FPCR.
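  // (FPCR.DN is bit 25. With it set, NaN-producing arithmetic returns the
  //  default quiet NaN; Fmov, Fabs and Fneg still pass the input through, as
  //  the checks below confirm.)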
10644   __ Mrs(x0, FPCR);
10645   __ Orr(x1, x0, DN_mask);
10646   __ Msr(FPCR, x1);
10647 
10648   // Execute a number of instructions which all use ProcessNaNs, and check that
10649   // they all produce the default NaN.
10650   __ Fmov(s0, n);
10651   __ Fmov(s1, m);
10652   __ Fmov(s2, a);
10653 
10654   if (test_1op) {
10655     // Operations that always propagate NaNs unchanged, even signalling NaNs.
10656     __ Fmov(s10, s0);
10657     __ Fabs(s11, s0);
10658     __ Fneg(s12, s0);

    // Operations that use ProcessNaN.
    __ Fsqrt(s13, s0);
    __ Frinta(s14, s0);
    __ Frintn(s15, s0);
    __ Frintz(s16, s0);

    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
    __ Fcvt(d17, s0);
  }

  if (test_2op) {
    __ Fadd(s18, s0, s1);
    __ Fsub(s19, s0, s1);
    __ Fmul(s20, s0, s1);
    __ Fdiv(s21, s0, s1);
    __ Fmax(s22, s0, s1);
    __ Fmin(s23, s0, s1);
  }

  __ Fmadd(s24, s0, s1, s2);
  __ Fmsub(s25, s0, s1, s2);
  __ Fnmadd(s26, s0, s1, s2);
  __ Fnmsub(s27, s0, s1, s2);

  // Restore FPCR.
  __ Msr(FPCR, x0);

  END();
  RUN();

  if (test_1op) {
    uint32_t n_raw = float_to_rawbits(n);
    CHECK_EQUAL_FP32(n, s10);
    CHECK_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
    CHECK_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s13);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s14);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s15);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s16);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d17);
  }

  if (test_2op) {
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s18);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s19);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s20);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s21);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s22);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s23);
  }

  CHECK_EQUAL_FP32(kFP32DefaultNaN, s24);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s25);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s26);
  CHECK_EQUAL_FP32(kFP32DefaultNaN, s27);

  TEARDOWN();
}


TEST(default_nan_float) {
  INIT_V8();
  float sn = rawbits_to_float(0x7f951111);
  float sm = rawbits_to_float(0x7f952222);
  float sa = rawbits_to_float(0x7f95aaaa);
  float qn = rawbits_to_float(0x7fea1111);
  float qm = rawbits_to_float(0x7fea2222);
  float qa = rawbits_to_float(0x7feaaaaa);
  CHECK(IsSignallingNaN(sn));
  CHECK(IsSignallingNaN(sm));
  CHECK(IsSignallingNaN(sa));
  CHECK(IsQuietNaN(qn));
  CHECK(IsQuietNaN(qm));
  CHECK(IsQuietNaN(qa));

  //   - Signalling NaNs
  DefaultNaNHelper(sn, 0.0f, 0.0f);
  DefaultNaNHelper(0.0f, sm, 0.0f);
  DefaultNaNHelper(0.0f, 0.0f, sa);
  DefaultNaNHelper(sn, sm, 0.0f);
  DefaultNaNHelper(0.0f, sm, sa);
  DefaultNaNHelper(sn, 0.0f, sa);
  DefaultNaNHelper(sn, sm, sa);
  //   - Quiet NaNs
  DefaultNaNHelper(qn, 0.0f, 0.0f);
  DefaultNaNHelper(0.0f, qm, 0.0f);
  DefaultNaNHelper(0.0f, 0.0f, qa);
  DefaultNaNHelper(qn, qm, 0.0f);
  DefaultNaNHelper(0.0f, qm, qa);
  DefaultNaNHelper(qn, 0.0f, qa);
  DefaultNaNHelper(qn, qm, qa);
  //   - Mixed NaNs
  DefaultNaNHelper(qn, sm, sa);
  DefaultNaNHelper(sn, qm, sa);
  DefaultNaNHelper(sn, sm, qa);
  DefaultNaNHelper(qn, qm, sa);
  DefaultNaNHelper(sn, qm, qa);
  DefaultNaNHelper(qn, sm, qa);
  DefaultNaNHelper(qn, qm, qa);
}


static void DefaultNaNHelper(double n, double m, double a) {
  CHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));

  bool test_1op = std::isnan(n);
  bool test_2op = std::isnan(n) || std::isnan(m);

  SETUP();
  START();

  // Enable Default-NaN mode in the FPCR.
  __ Mrs(x0, FPCR);
  __ Orr(x1, x0, DN_mask);
  __ Msr(FPCR, x1);

  // Execute a number of instructions which all use ProcessNaNs, and check that
  // they all produce the default NaN.
  __ Fmov(d0, n);
  __ Fmov(d1, m);
  __ Fmov(d2, a);

  if (test_1op) {
    // Operations that always propagate NaNs unchanged, even signalling NaNs.
    __ Fmov(d10, d0);
    __ Fabs(d11, d0);
    __ Fneg(d12, d0);

    // Operations that use ProcessNaN.
    __ Fsqrt(d13, d0);
    __ Frinta(d14, d0);
    __ Frintn(d15, d0);
    __ Frintz(d16, d0);

    // Fcvt usually has special NaN handling, but it respects default-NaN mode.
    __ Fcvt(s17, d0);
  }

  if (test_2op) {
    __ Fadd(d18, d0, d1);
    __ Fsub(d19, d0, d1);
    __ Fmul(d20, d0, d1);
    __ Fdiv(d21, d0, d1);
    __ Fmax(d22, d0, d1);
    __ Fmin(d23, d0, d1);
  }

  __ Fmadd(d24, d0, d1, d2);
  __ Fmsub(d25, d0, d1, d2);
  __ Fnmadd(d26, d0, d1, d2);
  __ Fnmsub(d27, d0, d1, d2);

  // Restore FPCR.
  __ Msr(FPCR, x0);

  END();
  RUN();

  if (test_1op) {
    uint64_t n_raw = double_to_rawbits(n);
    CHECK_EQUAL_FP64(n, d10);
    CHECK_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
    CHECK_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d14);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d15);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d16);
    CHECK_EQUAL_FP32(kFP32DefaultNaN, s17);
  }

  if (test_2op) {
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d18);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d19);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d20);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d21);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d22);
    CHECK_EQUAL_FP64(kFP64DefaultNaN, d23);
  }

  CHECK_EQUAL_FP64(kFP64DefaultNaN, d24);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d25);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d26);
  CHECK_EQUAL_FP64(kFP64DefaultNaN, d27);

  TEARDOWN();
}


TEST(default_nan_double) {
  INIT_V8();
  double sn = rawbits_to_double(0x7ff5555511111111);
  double sm = rawbits_to_double(0x7ff5555522222222);
  double sa = rawbits_to_double(0x7ff55555aaaaaaaa);
  double qn = rawbits_to_double(0x7ffaaaaa11111111);
  double qm = rawbits_to_double(0x7ffaaaaa22222222);
  double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
  CHECK(IsSignallingNaN(sn));
  CHECK(IsSignallingNaN(sm));
  CHECK(IsSignallingNaN(sa));
  CHECK(IsQuietNaN(qn));
  CHECK(IsQuietNaN(qm));
  CHECK(IsQuietNaN(qa));

  //   - Signalling NaNs
  DefaultNaNHelper(sn, 0.0, 0.0);
  DefaultNaNHelper(0.0, sm, 0.0);
  DefaultNaNHelper(0.0, 0.0, sa);
  DefaultNaNHelper(sn, sm, 0.0);
  DefaultNaNHelper(0.0, sm, sa);
  DefaultNaNHelper(sn, 0.0, sa);
  DefaultNaNHelper(sn, sm, sa);
  //   - Quiet NaNs
  DefaultNaNHelper(qn, 0.0, 0.0);
  DefaultNaNHelper(0.0, qm, 0.0);
  DefaultNaNHelper(0.0, 0.0, qa);
  DefaultNaNHelper(qn, qm, 0.0);
  DefaultNaNHelper(0.0, qm, qa);
  DefaultNaNHelper(qn, 0.0, qa);
  DefaultNaNHelper(qn, qm, qa);
  //   - Mixed NaNs
  DefaultNaNHelper(qn, sm, sa);
  DefaultNaNHelper(sn, qm, sa);
  DefaultNaNHelper(sn, sm, qa);
  DefaultNaNHelper(qn, qm, sa);
  DefaultNaNHelper(sn, qm, qa);
  DefaultNaNHelper(qn, sm, qa);
  DefaultNaNHelper(qn, qm, qa);
}


TEST(call_no_relocation) {
  Address call_start;
  Address return_address;

  INIT_V8();
  SETUP();

  START();

  Label function;
  Label test;

  __ B(&test);

  __ Bind(&function);
  __ Mov(x0, 0x1);
  __ Ret();

  __ Bind(&test);
  __ Mov(x0, 0x0);
  __ Push(lr, xzr);
  {
    Assembler::BlockConstPoolScope scope(&masm);
    call_start = buf + __ pc_offset();
    __ Call(buf + function.pos(), RelocInfo::NONE64);
    return_address = buf + __ pc_offset();
  }
  __ Pop(xzr, lr);
  END();

  RUN();

  CHECK_EQUAL_64(1, x0);

  // The return_address_from_call_start function doesn't currently encounter
  // any non-relocatable sequences, so we check it here to make sure it works.
  // TODO(jbramley): Once Crankshaft is complete, decide if we need to support
  // non-relocatable calls at all.
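  // The check relies on return_address having been captured immediately after
  // the Call sequence inside the BlockConstPoolScope above.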
  CHECK(return_address ==
        Assembler::return_address_from_call_start(call_start));

  TEARDOWN();
}


static void AbsHelperX(int64_t value) {
  int64_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  __ Mov(x0, 0);
  __ Mov(x1, value);

  if (value != kXMinInt) {
    expected = labs(value);

    Label next;
    // The result is representable.
    __ Abs(x10, x1);
    __ Abs(x11, x1, &fail);
    __ Abs(x12, x1, &fail, &next);
    __ Bind(&next);
    __ Abs(x13, x1, NULL, &done);
  } else {
    // labs is undefined for kXMinInt but our implementation in the
    // MacroAssembler will return kXMinInt in such a case.
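    // (In two's complement, -kXMinInt overflows and is not representable as
    // an int64_t, so Abs leaves the value unchanged and, where a label for the
    // non-representable case is supplied, branches to it, as exercised below.)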
    expected = kXMinInt;

    Label next;
    // The result is not representable.
    __ Abs(x10, x1);
    __ Abs(x11, x1, NULL, &fail);
    __ Abs(x12, x1, &next, &fail);
    __ Bind(&next);
    __ Abs(x13, x1, &done);
  }

  __ Bind(&fail);
  __ Mov(x0, -1);

  __ Bind(&done);

  END();
  RUN();

  CHECK_EQUAL_64(0, x0);
  CHECK_EQUAL_64(value, x1);
  CHECK_EQUAL_64(expected, x10);
  CHECK_EQUAL_64(expected, x11);
  CHECK_EQUAL_64(expected, x12);
  CHECK_EQUAL_64(expected, x13);

  TEARDOWN();
}


static void AbsHelperW(int32_t value) {
  int32_t expected;

  SETUP();
  START();

  Label fail;
  Label done;

  __ Mov(w0, 0);
  // TODO(jbramley): The cast is needed to avoid a sign-extension bug in VIXL.
  // Once it is fixed, we should remove the cast.
  __ Mov(w1, static_cast<uint32_t>(value));

  if (value != kWMinInt) {
    expected = abs(value);

    Label next;
    // The result is representable.
    __ Abs(w10, w1);
    __ Abs(w11, w1, &fail);
    __ Abs(w12, w1, &fail, &next);
    __ Bind(&next);
    __ Abs(w13, w1, NULL, &done);
  } else {
    // abs is undefined for kWMinInt but our implementation in the
    // MacroAssembler will return kWMinInt in such a case.
    expected = kWMinInt;

    Label next;
    // The result is not representable.
    __ Abs(w10, w1);
    __ Abs(w11, w1, NULL, &fail);
    __ Abs(w12, w1, &next, &fail);
    __ Bind(&next);
    __ Abs(w13, w1, &done);
  }

  __ Bind(&fail);
  __ Mov(w0, -1);

  __ Bind(&done);

  END();
  RUN();

  CHECK_EQUAL_32(0, w0);
  CHECK_EQUAL_32(value, w1);
  CHECK_EQUAL_32(expected, w10);
  CHECK_EQUAL_32(expected, w11);
  CHECK_EQUAL_32(expected, w12);
  CHECK_EQUAL_32(expected, w13);

  TEARDOWN();
}


TEST(abs) {
  INIT_V8();
  AbsHelperX(0);
  AbsHelperX(42);
  AbsHelperX(-42);
  AbsHelperX(kXMinInt);
  AbsHelperX(kXMaxInt);

  AbsHelperW(0);
  AbsHelperW(42);
  AbsHelperW(-42);
  AbsHelperW(kWMinInt);
  AbsHelperW(kWMaxInt);
}


TEST(pool_size) {
  INIT_V8();
  SETUP();

  // This test does not execute any code. It only tests that the size of the
  // pools is read correctly from the RelocInfo.
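  // RecordConstPool and RecordVeneerPool emit CONST_POOL and VENEER_POOL
  // RelocInfo entries whose data() carries the pool size; the loop below reads
  // those entries back from the generated Code object and compares them.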

  Label exit;
  __ b(&exit);

  const unsigned constant_pool_size = 312;
  const unsigned veneer_pool_size = 184;

  __ RecordConstPool(constant_pool_size);
  for (unsigned i = 0; i < constant_pool_size / 4; ++i) {
    __ dc32(0);
  }

  __ RecordVeneerPool(masm.pc_offset(), veneer_pool_size);
  for (unsigned i = 0; i < veneer_pool_size / kInstructionSize; ++i) {
    __ nop();
  }

  __ bind(&exit);

  HandleScope handle_scope(isolate);
  CodeDesc desc;
  masm.GetCode(&desc);
  Handle<Code> code = isolate->factory()->NewCode(desc, 0, masm.CodeObject());

  unsigned pool_count = 0;
  int pool_mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
                  RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
  for (RelocIterator it(*code, pool_mask); !it.done(); it.next()) {
    RelocInfo* info = it.rinfo();
    if (RelocInfo::IsConstPool(info->rmode())) {
      CHECK(info->data() == constant_pool_size);
      ++pool_count;
    }
    if (RelocInfo::IsVeneerPool(info->rmode())) {
      CHECK(info->data() == veneer_pool_size);
      ++pool_count;
    }
  }

  CHECK(pool_count == 2);

  TEARDOWN();
}


TEST(jump_tables_forward) {
  // Test jump tables with forward jumps.
  const int kNumCases = 512;

  INIT_V8();
  SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
  START();

  int32_t values[kNumCases];
  isolate->random_number_generator()->NextBytes(values, sizeof(values));
  int32_t results[kNumCases];
  memset(results, 0, sizeof(results));
  uintptr_t results_ptr = reinterpret_cast<uintptr_t>(results);

  Label loop;
  Label labels[kNumCases];
  Label done;

  const Register& index = x0;
  STATIC_ASSERT(sizeof(results[0]) == 4);
  const Register& value = w1;
  const Register& target = x2;

  __ Mov(index, 0);
  __ Mov(target, results_ptr);
  __ Bind(&loop);

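  // Dispatch sequence: Adr materializes the address of the inline pointer
  // table, Ldr loads the current case's target (indexed by `index`), and Br
  // jumps to it.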
  {
    Assembler::BlockPoolsScope block_pools(&masm);
    Label base;

    __ Adr(x10, &base);
    __ Ldr(x11, MemOperand(x10, index, LSL, kPointerSizeLog2));
    __ Br(x11);
    __ Bind(&base);
    for (int i = 0; i < kNumCases; ++i) {
      __ dcptr(&labels[i]);
    }
  }

  for (int i = 0; i < kNumCases; ++i) {
    __ Bind(&labels[i]);
    __ Mov(value, values[i]);
    __ B(&done);
  }

  __ Bind(&done);
  __ Str(value, MemOperand(target, 4, PostIndex));
  __ Add(index, index, 1);
  __ Cmp(index, kNumCases);
  __ B(ne, &loop);

  END();

  RUN();

  for (int i = 0; i < kNumCases; ++i) {
    CHECK_EQ(values[i], results[i]);
  }

  TEARDOWN();
}


TEST(jump_tables_backward) {
  // Test jump tables with backward jumps.
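  // Unlike the forward case, the case labels are bound before the pointer
  // table is emitted, so each dcptr records an already-resolved (backward)
  // internal reference.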
  const int kNumCases = 512;

  INIT_V8();
  SETUP_SIZE(kNumCases * 5 * kInstructionSize + 8192);
  START();

  int32_t values[kNumCases];
  isolate->random_number_generator()->NextBytes(values, sizeof(values));
  int32_t results[kNumCases];
  memset(results, 0, sizeof(results));
  uintptr_t results_ptr = reinterpret_cast<uintptr_t>(results);

  Label loop;
  Label labels[kNumCases];
  Label done;

  const Register& index = x0;
  STATIC_ASSERT(sizeof(results[0]) == 4);
  const Register& value = w1;
  const Register& target = x2;

  __ Mov(index, 0);
  __ Mov(target, results_ptr);
  __ B(&loop);

  for (int i = 0; i < kNumCases; ++i) {
    __ Bind(&labels[i]);
    __ Mov(value, values[i]);
    __ B(&done);
  }

  __ Bind(&loop);
  {
    Assembler::BlockPoolsScope block_pools(&masm);
    Label base;

    __ Adr(x10, &base);
    __ Ldr(x11, MemOperand(x10, index, LSL, kPointerSizeLog2));
    __ Br(x11);
    __ Bind(&base);
    for (int i = 0; i < kNumCases; ++i) {
      __ dcptr(&labels[i]);
    }
  }

  __ Bind(&done);
  __ Str(value, MemOperand(target, 4, PostIndex));
  __ Add(index, index, 1);
  __ Cmp(index, kNumCases);
  __ B(ne, &loop);

  END();

  RUN();

  for (int i = 0; i < kNumCases; ++i) {
    CHECK_EQ(values[i], results[i]);
  }

  TEARDOWN();
}


TEST(internal_reference_linked) {
  // Test internal references when they are linked in a label chain.
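  // Both dcptr(&done) and the branches below link to the still-unbound &done
  // label, so binding it must patch the embedded pointers as well as the
  // branch instructions.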

  INIT_V8();
  SETUP();
  START();

  Label done;

  __ Mov(x0, 0);
  __ Cbnz(x0, &done);

  {
    Assembler::BlockPoolsScope block_pools(&masm);
    Label base;

    __ Adr(x10, &base);
    __ Ldr(x11, MemOperand(x10));
    __ Br(x11);
    __ Bind(&base);
    __ dcptr(&done);
  }

  // Dead code, just to extend the label chain.
  __ B(&done);
  __ dcptr(&done);
  __ Tbz(x0, 1, &done);

  __ Bind(&done);
  __ Mov(x0, 1);

  END();

  RUN();

  CHECK_EQUAL_64(0x1, x0);

  TEARDOWN();
}