1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <cstdio>
18 
19 #include "art_field-inl.h"
20 #include "art_method-inl.h"
21 #include "class_linker-inl.h"
22 #include "common_runtime_test.h"
23 #include "entrypoints/quick/quick_entrypoints_enum.h"
24 #include "mirror/class-inl.h"
25 #include "mirror/string-inl.h"
26 #include "scoped_thread_state_change.h"
27 
28 namespace art {
29 
30 
31 class StubTest : public CommonRuntimeTest {
32  protected:
33   // We need callee-save methods set up in the Runtime for exceptions.
34   void SetUp() OVERRIDE {
35     // Do the normal setup.
36     CommonRuntimeTest::SetUp();
37 
38     {
39       // Create callee-save methods
40       ScopedObjectAccess soa(Thread::Current());
41       runtime_->SetInstructionSet(kRuntimeISA);
42       for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
43         Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
44         if (!runtime_->HasCalleeSaveMethod(type)) {
45           runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
46         }
47       }
48     }
49   }
50 
51   void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
52     // Use a smaller heap
53     for (std::pair<std::string, const void*>& pair : *options) {
54       if (pair.first.find("-Xmx") == 0) {
55         pair.first = "-Xmx4M";  // Smallest we can go.
56       }
57     }
58     options->push_back(std::make_pair("-Xint", nullptr));
59   }
60 
61   // Helper function needed since TEST_F makes a new class.
62   Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
63     return &self->tlsPtr_;
64   }
65 
66  public:
67   size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
68     return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
69   }
70 
71   // TODO: Set up a frame according to referrer's specs.
72   size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
73                              ArtMethod* referrer) {
74     // Push a transition back into managed code onto the linked list in thread.
75     ManagedStack fragment;
76     self->PushManagedStackFragment(&fragment);
77 
78     size_t result;
79     size_t fpr_result = 0;
80 #if defined(__i386__)
81     // TODO: Set the thread?
82     __asm__ __volatile__(
83         "subl $12, %%esp\n\t"       // Align stack.
84         "pushl %[referrer]\n\t"     // Store referrer.
85         "call *%%edi\n\t"           // Call the stub
86         "addl $16, %%esp"           // Pop referrer
87         : "=a" (result)
88           // Use the result from eax
89         : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
90           // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
91         : "memory");  // clobber.
92     // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
93     //       but compilation fails when declaring that.
94 #elif defined(__arm__)
95     __asm__ __volatile__(
96         "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B
97         ".cfi_adjust_cfa_offset 52\n\t"
98         "push {r9}\n\t"
99         ".cfi_adjust_cfa_offset 4\n\t"
100         "mov r9, %[referrer]\n\n"
101         "str r9, [sp, #-8]!\n\t"   // Push referrer, +8B padding so 16B aligned
102         ".cfi_adjust_cfa_offset 8\n\t"
103         "ldr r9, [sp, #8]\n\t"
104 
105         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
106         "sub sp, sp, #20\n\t"
107         "str %[arg0], [sp]\n\t"
108         "str %[arg1], [sp, #4]\n\t"
109         "str %[arg2], [sp, #8]\n\t"
110         "str %[code], [sp, #12]\n\t"
111         "str %[self], [sp, #16]\n\t"
112         "ldr r0, [sp]\n\t"
113         "ldr r1, [sp, #4]\n\t"
114         "ldr r2, [sp, #8]\n\t"
115         "ldr r3, [sp, #12]\n\t"
116         "ldr r9, [sp, #16]\n\t"
117         "add sp, sp, #20\n\t"
118 
119         "blx r3\n\t"                // Call the stub
120         "add sp, sp, #12\n\t"       // Pop null and padding
121         ".cfi_adjust_cfa_offset -12\n\t"
122         "pop {r1-r12, lr}\n\t"      // Restore state
123         ".cfi_adjust_cfa_offset -52\n\t"
124         "mov %[result], r0\n\t"     // Save the result
125         : [result] "=r" (result)
126           // Use the result from r0
127         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
128           [referrer] "r"(referrer)
129         : "memory");  // clobber.
130 #elif defined(__aarch64__)
131     __asm__ __volatile__(
132         // Spill x0-x7 which we say we don't clobber. May contain args.
133         "sub sp, sp, #64\n\t"
134         ".cfi_adjust_cfa_offset 64\n\t"
135         "stp x0, x1, [sp]\n\t"
136         "stp x2, x3, [sp, #16]\n\t"
137         "stp x4, x5, [sp, #32]\n\t"
138         "stp x6, x7, [sp, #48]\n\t"
139 
140         "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned
141         ".cfi_adjust_cfa_offset 16\n\t"
142         "str %[referrer], [sp]\n\t"    // referrer
143 
144         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
145         "sub sp, sp, #48\n\t"
146         ".cfi_adjust_cfa_offset 48\n\t"
147         // All things are "r" constraints, so direct str/stp should work.
148         "stp %[arg0], %[arg1], [sp]\n\t"
149         "stp %[arg2], %[code], [sp, #16]\n\t"
150         "str %[self], [sp, #32]\n\t"
151 
152         // Now we definitely have x0-x3 free, use it to garble d8 - d15
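        // (Under AAPCS64, d8-d15 are callee-saved; the compare sequence after the call checks
        // that the stub preserved these values.)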
153         "movk x0, #0xfad0\n\t"
154         "movk x0, #0xebad, lsl #16\n\t"
155         "movk x0, #0xfad0, lsl #32\n\t"
156         "movk x0, #0xebad, lsl #48\n\t"
157         "fmov d8, x0\n\t"
158         "add x0, x0, 1\n\t"
159         "fmov d9, x0\n\t"
160         "add x0, x0, 1\n\t"
161         "fmov d10, x0\n\t"
162         "add x0, x0, 1\n\t"
163         "fmov d11, x0\n\t"
164         "add x0, x0, 1\n\t"
165         "fmov d12, x0\n\t"
166         "add x0, x0, 1\n\t"
167         "fmov d13, x0\n\t"
168         "add x0, x0, 1\n\t"
169         "fmov d14, x0\n\t"
170         "add x0, x0, 1\n\t"
171         "fmov d15, x0\n\t"
172 
173         // Load call params into the right registers.
174         "ldp x0, x1, [sp]\n\t"
175         "ldp x2, x3, [sp, #16]\n\t"
176         "ldr x18, [sp, #32]\n\t"
177         "add sp, sp, #48\n\t"
178         ".cfi_adjust_cfa_offset -48\n\t"
179 
180 
181         "blr x3\n\t"              // Call the stub
182         "mov x8, x0\n\t"          // Store result
183         "add sp, sp, #16\n\t"     // Drop the quick "frame"
184         ".cfi_adjust_cfa_offset -16\n\t"
185 
186         // Test d8 - d15. We can use x1 and x2.
187         "movk x1, #0xfad0\n\t"
188         "movk x1, #0xebad, lsl #16\n\t"
189         "movk x1, #0xfad0, lsl #32\n\t"
190         "movk x1, #0xebad, lsl #48\n\t"
191         "fmov x2, d8\n\t"
192         "cmp x1, x2\n\t"
193         "b.ne 1f\n\t"
194         "add x1, x1, 1\n\t"
195 
196         "fmov x2, d9\n\t"
197         "cmp x1, x2\n\t"
198         "b.ne 1f\n\t"
199         "add x1, x1, 1\n\t"
200 
201         "fmov x2, d10\n\t"
202         "cmp x1, x2\n\t"
203         "b.ne 1f\n\t"
204         "add x1, x1, 1\n\t"
205 
206         "fmov x2, d11\n\t"
207         "cmp x1, x2\n\t"
208         "b.ne 1f\n\t"
209         "add x1, x1, 1\n\t"
210 
211         "fmov x2, d12\n\t"
212         "cmp x1, x2\n\t"
213         "b.ne 1f\n\t"
214         "add x1, x1, 1\n\t"
215 
216         "fmov x2, d13\n\t"
217         "cmp x1, x2\n\t"
218         "b.ne 1f\n\t"
219         "add x1, x1, 1\n\t"
220 
221         "fmov x2, d14\n\t"
222         "cmp x1, x2\n\t"
223         "b.ne 1f\n\t"
224         "add x1, x1, 1\n\t"
225 
226         "fmov x2, d15\n\t"
227         "cmp x1, x2\n\t"
228         "b.ne 1f\n\t"
229 
230         "mov x9, #0\n\t"              // Use x9 as flag, in clobber list
231 
232         // Finish up.
233         "2:\n\t"
234         "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result
235         "ldp x2, x3, [sp, #16]\n\t"
236         "ldp x4, x5, [sp, #32]\n\t"
237         "ldp x6, x7, [sp, #48]\n\t"
238         "add sp, sp, #64\n\t"         // Free stack space, now sp as on entry
239         ".cfi_adjust_cfa_offset -64\n\t"
240 
241         "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result
242         "mov %[result], x8\n\t"              // Store the call result
243 
244         "b 3f\n\t"                     // Goto end
245 
246         // Failed fpr verification.
247         "1:\n\t"
248         "mov x9, #1\n\t"
249         "b 2b\n\t"                     // Goto finish-up
250 
251         // End
252         "3:\n\t"
253         : [result] "=r" (result)
254           // Use the result from x0
255         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
256           [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
257         : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
258           "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
259           "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
260           "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
261           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
262           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
263           "memory");  // clobber.
264 #elif defined(__mips__) && !defined(__LP64__)
265     __asm__ __volatile__ (
266         // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
267         "addiu $sp, $sp, -64\n\t"
268         "sw $a0, 0($sp)\n\t"
269         "sw $a1, 4($sp)\n\t"
270         "sw $a2, 8($sp)\n\t"
271         "sw $a3, 12($sp)\n\t"
272         "sw $t0, 16($sp)\n\t"
273         "sw $t1, 20($sp)\n\t"
274         "sw $t2, 24($sp)\n\t"
275         "sw $t3, 28($sp)\n\t"
276         "sw $t4, 32($sp)\n\t"
277         "sw $t5, 36($sp)\n\t"
278         "sw $t6, 40($sp)\n\t"
279         "sw $t7, 44($sp)\n\t"
280         // Spill gp register since it is caller save.
281         "sw $gp, 52($sp)\n\t"
282 
283         "addiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
284         "sw %[referrer], 0($sp)\n\t"
285 
286         // Push everything on the stack, so we don't rely on the order.
287         "addiu $sp, $sp, -20\n\t"
288         "sw %[arg0], 0($sp)\n\t"
289         "sw %[arg1], 4($sp)\n\t"
290         "sw %[arg2], 8($sp)\n\t"
291         "sw %[code], 12($sp)\n\t"
292         "sw %[self], 16($sp)\n\t"
293 
294         // Load call params into the right registers.
295         "lw $a0, 0($sp)\n\t"
296         "lw $a1, 4($sp)\n\t"
297         "lw $a2, 8($sp)\n\t"
298         "lw $t9, 12($sp)\n\t"
299         "lw $s1, 16($sp)\n\t"
300         "addiu $sp, $sp, 20\n\t"
301 
302         "jalr $t9\n\t"             // Call the stub.
303         "nop\n\t"
304         "addiu $sp, $sp, 16\n\t"   // Drop the quick "frame".
305 
306         // Restore stuff not named clobbered.
307         "lw $a0, 0($sp)\n\t"
308         "lw $a1, 4($sp)\n\t"
309         "lw $a2, 8($sp)\n\t"
310         "lw $a3, 12($sp)\n\t"
311         "lw $t0, 16($sp)\n\t"
312         "lw $t1, 20($sp)\n\t"
313         "lw $t2, 24($sp)\n\t"
314         "lw $t3, 28($sp)\n\t"
315         "lw $t4, 32($sp)\n\t"
316         "lw $t5, 36($sp)\n\t"
317         "lw $t6, 40($sp)\n\t"
318         "lw $t7, 44($sp)\n\t"
319         // Restore gp.
320         "lw $gp, 52($sp)\n\t"
321         "addiu $sp, $sp, 64\n\t"   // Free stack space, now sp as on entry.
322 
323         "move %[result], $v0\n\t"  // Store the call result.
324         : [result] "=r" (result)
325         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
326           [referrer] "r"(referrer)
327         : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
328           "fp", "ra",
329           "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
330           "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
331           "f27", "f28", "f29", "f30", "f31",
332           "memory");  // clobber.
333 #elif defined(__mips__) && defined(__LP64__)
334     __asm__ __volatile__ (
335         // Spill a0-a7 which we say we don't clobber. May contain args.
336         "daddiu $sp, $sp, -64\n\t"
337         "sd $a0, 0($sp)\n\t"
338         "sd $a1, 8($sp)\n\t"
339         "sd $a2, 16($sp)\n\t"
340         "sd $a3, 24($sp)\n\t"
341         "sd $a4, 32($sp)\n\t"
342         "sd $a5, 40($sp)\n\t"
343         "sd $a6, 48($sp)\n\t"
344         "sd $a7, 56($sp)\n\t"
345 
346         "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
347         "sd %[referrer], 0($sp)\n\t"
348 
349         // Push everything on the stack, so we don't rely on the order.
350         "daddiu $sp, $sp, -40\n\t"
351         "sd %[arg0], 0($sp)\n\t"
352         "sd %[arg1], 8($sp)\n\t"
353         "sd %[arg2], 16($sp)\n\t"
354         "sd %[code], 24($sp)\n\t"
355         "sd %[self], 32($sp)\n\t"
356 
357         // Load call params into the right registers.
358         "ld $a0, 0($sp)\n\t"
359         "ld $a1, 8($sp)\n\t"
360         "ld $a2, 16($sp)\n\t"
361         "ld $t9, 24($sp)\n\t"
362         "ld $s1, 32($sp)\n\t"
363         "daddiu $sp, $sp, 40\n\t"
364 
365         "jalr $t9\n\t"              // Call the stub.
366         "nop\n\t"
367         "daddiu $sp, $sp, 16\n\t"   // Drop the quick "frame".
368 
369         // Restore stuff not named clobbered.
370         "ld $a0, 0($sp)\n\t"
371         "ld $a1, 8($sp)\n\t"
372         "ld $a2, 16($sp)\n\t"
373         "ld $a3, 24($sp)\n\t"
374         "ld $a4, 32($sp)\n\t"
375         "ld $a5, 40($sp)\n\t"
376         "ld $a6, 48($sp)\n\t"
377         "ld $a7, 56($sp)\n\t"
378         "daddiu $sp, $sp, 64\n\t"
379 
380         "move %[result], $v0\n\t"   // Store the call result.
381         : [result] "=r" (result)
382         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
383           [referrer] "r"(referrer)
384         : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
385           "t8", "t9", "k0", "k1", "fp", "ra",
386           "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
387           "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
388           "f27", "f28", "f29", "f30", "f31",
389           "memory");  // clobber.
390 #elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
391     // Note: Uses the native convention
392     // TODO: Set the thread?
393     __asm__ __volatile__(
394         "pushq %[referrer]\n\t"        // Push referrer
395         "pushq (%%rsp)\n\t"             // & 16B alignment padding
396         ".cfi_adjust_cfa_offset 16\n\t"
397         "call *%%rax\n\t"              // Call the stub
398         "addq $16, %%rsp\n\t"          // Pop null and padding
399         ".cfi_adjust_cfa_offset -16\n\t"
400         : "=a" (result)
401           // Use the result from rax
402         : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "c"(referrer)
403           // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
404         : "rbx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
405           "memory");  // clobber all
406     // TODO: Should we clobber the other registers?
407 #else
408     UNUSED(arg0, arg1, arg2, code, referrer);
409     LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
410     result = 0;
411 #endif
412     // Pop transition.
413     self->PopManagedStackFragment(fragment);
414 
415     fp_result = fpr_result;
416     EXPECT_EQ(0U, fp_result);
417 
418     return result;
419   }
420 
421   // TODO: Set up a frame according to referrer's specs.
422   size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
423                                       Thread* self, ArtMethod* referrer, size_t hidden) {
424     // Push a transition back into managed code onto the linked list in thread.
425     ManagedStack fragment;
426     self->PushManagedStackFragment(&fragment);
427 
428     size_t result;
429     size_t fpr_result = 0;
430 #if defined(__i386__)
431     // TODO: Set the thread?
432     __asm__ __volatile__(
433         "movd %[hidden], %%xmm7\n\t"
434         "subl $12, %%esp\n\t"       // Align stack.
435         "pushl %[referrer]\n\t"     // Store referrer
436         "call *%%edi\n\t"           // Call the stub
437         "addl $16, %%esp"           // Pop referrer
438         : "=a" (result)
439           // Use the result from eax
440         : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer), [hidden]"m"(hidden)
441           // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
442         : "memory");  // clobber.
443     // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
444     //       but compilation fails when declaring that.
445 #elif defined(__arm__)
446     __asm__ __volatile__(
447         "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B
448         ".cfi_adjust_cfa_offset 52\n\t"
449         "push {r9}\n\t"
450         ".cfi_adjust_cfa_offset 4\n\t"
451         "mov r9, %[referrer]\n\n"
452         "str r9, [sp, #-8]!\n\t"   // Push referrer, +8B padding so 16B aligned
453         ".cfi_adjust_cfa_offset 8\n\t"
454         "ldr r9, [sp, #8]\n\t"
455 
456         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
457         "sub sp, sp, #24\n\t"
458         "str %[arg0], [sp]\n\t"
459         "str %[arg1], [sp, #4]\n\t"
460         "str %[arg2], [sp, #8]\n\t"
461         "str %[code], [sp, #12]\n\t"
462         "str %[self], [sp, #16]\n\t"
463         "str %[hidden], [sp, #20]\n\t"
464         "ldr r0, [sp]\n\t"
465         "ldr r1, [sp, #4]\n\t"
466         "ldr r2, [sp, #8]\n\t"
467         "ldr r3, [sp, #12]\n\t"
468         "ldr r9, [sp, #16]\n\t"
469         "ldr r12, [sp, #20]\n\t"
470         "add sp, sp, #24\n\t"
471 
472         "blx r3\n\t"                // Call the stub
473         "add sp, sp, #12\n\t"       // Pop null and padding
474         ".cfi_adjust_cfa_offset -12\n\t"
475         "pop {r1-r12, lr}\n\t"      // Restore state
476         ".cfi_adjust_cfa_offset -52\n\t"
477         "mov %[result], r0\n\t"     // Save the result
478         : [result] "=r" (result)
479           // Use the result from r0
480         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
481           [referrer] "r"(referrer), [hidden] "r"(hidden)
482         : "memory");  // clobber.
483 #elif defined(__aarch64__)
484     __asm__ __volatile__(
485         // Spill x0-x7 which we say we don't clobber. May contain args.
486         "sub sp, sp, #64\n\t"
487         ".cfi_adjust_cfa_offset 64\n\t"
488         "stp x0, x1, [sp]\n\t"
489         "stp x2, x3, [sp, #16]\n\t"
490         "stp x4, x5, [sp, #32]\n\t"
491         "stp x6, x7, [sp, #48]\n\t"
492 
493         "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned
494         ".cfi_adjust_cfa_offset 16\n\t"
495         "str %[referrer], [sp]\n\t"    // referrer
496 
497         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
498         "sub sp, sp, #48\n\t"
499         ".cfi_adjust_cfa_offset 48\n\t"
500         // All things are "r" constraints, so direct str/stp should work.
501         "stp %[arg0], %[arg1], [sp]\n\t"
502         "stp %[arg2], %[code], [sp, #16]\n\t"
503         "stp %[self], %[hidden], [sp, #32]\n\t"
504 
505         // Now we definitely have x0-x3 free, use it to garble d8 - d15
506         "movk x0, #0xfad0\n\t"
507         "movk x0, #0xebad, lsl #16\n\t"
508         "movk x0, #0xfad0, lsl #32\n\t"
509         "movk x0, #0xebad, lsl #48\n\t"
510         "fmov d8, x0\n\t"
511         "add x0, x0, 1\n\t"
512         "fmov d9, x0\n\t"
513         "add x0, x0, 1\n\t"
514         "fmov d10, x0\n\t"
515         "add x0, x0, 1\n\t"
516         "fmov d11, x0\n\t"
517         "add x0, x0, 1\n\t"
518         "fmov d12, x0\n\t"
519         "add x0, x0, 1\n\t"
520         "fmov d13, x0\n\t"
521         "add x0, x0, 1\n\t"
522         "fmov d14, x0\n\t"
523         "add x0, x0, 1\n\t"
524         "fmov d15, x0\n\t"
525 
526         // Load call params into the right registers.
527         "ldp x0, x1, [sp]\n\t"
528         "ldp x2, x3, [sp, #16]\n\t"
529         "ldp x18, x17, [sp, #32]\n\t"
530         "add sp, sp, #48\n\t"
531         ".cfi_adjust_cfa_offset -48\n\t"
532 
533         "blr x3\n\t"              // Call the stub
534         "mov x8, x0\n\t"          // Store result
535         "add sp, sp, #16\n\t"     // Drop the quick "frame"
536         ".cfi_adjust_cfa_offset -16\n\t"
537 
538         // Test d8 - d15. We can use x1 and x2.
539         "movk x1, #0xfad0\n\t"
540         "movk x1, #0xebad, lsl #16\n\t"
541         "movk x1, #0xfad0, lsl #32\n\t"
542         "movk x1, #0xebad, lsl #48\n\t"
543         "fmov x2, d8\n\t"
544         "cmp x1, x2\n\t"
545         "b.ne 1f\n\t"
546         "add x1, x1, 1\n\t"
547 
548         "fmov x2, d9\n\t"
549         "cmp x1, x2\n\t"
550         "b.ne 1f\n\t"
551         "add x1, x1, 1\n\t"
552 
553         "fmov x2, d10\n\t"
554         "cmp x1, x2\n\t"
555         "b.ne 1f\n\t"
556         "add x1, x1, 1\n\t"
557 
558         "fmov x2, d11\n\t"
559         "cmp x1, x2\n\t"
560         "b.ne 1f\n\t"
561         "add x1, x1, 1\n\t"
562 
563         "fmov x2, d12\n\t"
564         "cmp x1, x2\n\t"
565         "b.ne 1f\n\t"
566         "add x1, x1, 1\n\t"
567 
568         "fmov x2, d13\n\t"
569         "cmp x1, x2\n\t"
570         "b.ne 1f\n\t"
571         "add x1, x1, 1\n\t"
572 
573         "fmov x2, d14\n\t"
574         "cmp x1, x2\n\t"
575         "b.ne 1f\n\t"
576         "add x1, x1, 1\n\t"
577 
578         "fmov x2, d15\n\t"
579         "cmp x1, x2\n\t"
580         "b.ne 1f\n\t"
581 
582         "mov x9, #0\n\t"              // Use x9 as flag, in clobber list
583 
584         // Finish up.
585         "2:\n\t"
586         "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result
587         "ldp x2, x3, [sp, #16]\n\t"
588         "ldp x4, x5, [sp, #32]\n\t"
589         "ldp x6, x7, [sp, #48]\n\t"
590         "add sp, sp, #64\n\t"         // Free stack space, now sp as on entry
591         ".cfi_adjust_cfa_offset -64\n\t"
592 
593         "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result
594         "mov %[result], x8\n\t"              // Store the call result
595 
596         "b 3f\n\t"                     // Goto end
597 
598         // Failed fpr verification.
599         "1:\n\t"
600         "mov x9, #1\n\t"
601         "b 2b\n\t"                     // Goto finish-up
602 
603         // End
604         "3:\n\t"
605         : [result] "=r" (result)
606           // Use the result from x0
607         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
608           [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
609         : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
610           "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
611           "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
612           "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
613           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
614           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
615           "memory");  // clobber.
616 #elif defined(__mips__) && !defined(__LP64__)
617     __asm__ __volatile__ (
618         // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
619         "addiu $sp, $sp, -64\n\t"
620         "sw $a0, 0($sp)\n\t"
621         "sw $a1, 4($sp)\n\t"
622         "sw $a2, 8($sp)\n\t"
623         "sw $a3, 12($sp)\n\t"
624         "sw $t0, 16($sp)\n\t"
625         "sw $t1, 20($sp)\n\t"
626         "sw $t2, 24($sp)\n\t"
627         "sw $t3, 28($sp)\n\t"
628         "sw $t4, 32($sp)\n\t"
629         "sw $t5, 36($sp)\n\t"
630         "sw $t6, 40($sp)\n\t"
631         "sw $t7, 44($sp)\n\t"
632         // Spill gp register since it is caller save.
633         "sw $gp, 52($sp)\n\t"
634 
635         "addiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
636         "sw %[referrer], 0($sp)\n\t"
637 
638         // Push everything on the stack, so we don't rely on the order.
639         "addiu $sp, $sp, -24\n\t"
640         "sw %[arg0], 0($sp)\n\t"
641         "sw %[arg1], 4($sp)\n\t"
642         "sw %[arg2], 8($sp)\n\t"
643         "sw %[code], 12($sp)\n\t"
644         "sw %[self], 16($sp)\n\t"
645         "sw %[hidden], 20($sp)\n\t"
646 
647         // Load call params into the right registers.
648         "lw $a0, 0($sp)\n\t"
649         "lw $a1, 4($sp)\n\t"
650         "lw $a2, 8($sp)\n\t"
651         "lw $t9, 12($sp)\n\t"
652         "lw $s1, 16($sp)\n\t"
653         "lw $t0, 20($sp)\n\t"
654         "addiu $sp, $sp, 24\n\t"
655 
656         "jalr $t9\n\t"             // Call the stub.
657         "nop\n\t"
658         "addiu $sp, $sp, 16\n\t"   // Drop the quick "frame".
659 
660         // Restore stuff not named clobbered.
661         "lw $a0, 0($sp)\n\t"
662         "lw $a1, 4($sp)\n\t"
663         "lw $a2, 8($sp)\n\t"
664         "lw $a3, 12($sp)\n\t"
665         "lw $t0, 16($sp)\n\t"
666         "lw $t1, 20($sp)\n\t"
667         "lw $t2, 24($sp)\n\t"
668         "lw $t3, 28($sp)\n\t"
669         "lw $t4, 32($sp)\n\t"
670         "lw $t5, 36($sp)\n\t"
671         "lw $t6, 40($sp)\n\t"
672         "lw $t7, 44($sp)\n\t"
673         // Restore gp.
674         "lw $gp, 52($sp)\n\t"
675         "addiu $sp, $sp, 64\n\t"   // Free stack space, now sp as on entry.
676 
677         "move %[result], $v0\n\t"  // Store the call result.
678         : [result] "=r" (result)
679         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
680           [referrer] "r"(referrer), [hidden] "r"(hidden)
681         : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
682           "fp", "ra",
683           "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
684           "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
685           "f27", "f28", "f29", "f30", "f31",
686           "memory");  // clobber.
687 #elif defined(__mips__) && defined(__LP64__)
688     __asm__ __volatile__ (
689         // Spill a0-a7 which we say we don't clobber. May contain args.
690         "daddiu $sp, $sp, -64\n\t"
691         "sd $a0, 0($sp)\n\t"
692         "sd $a1, 8($sp)\n\t"
693         "sd $a2, 16($sp)\n\t"
694         "sd $a3, 24($sp)\n\t"
695         "sd $a4, 32($sp)\n\t"
696         "sd $a5, 40($sp)\n\t"
697         "sd $a6, 48($sp)\n\t"
698         "sd $a7, 56($sp)\n\t"
699 
700         "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
701         "sd %[referrer], 0($sp)\n\t"
702 
703         // Push everything on the stack, so we don't rely on the order.
704         "daddiu $sp, $sp, -48\n\t"
705         "sd %[arg0], 0($sp)\n\t"
706         "sd %[arg1], 8($sp)\n\t"
707         "sd %[arg2], 16($sp)\n\t"
708         "sd %[code], 24($sp)\n\t"
709         "sd %[self], 32($sp)\n\t"
710         "sd %[hidden], 40($sp)\n\t"
711 
712         // Load call params into the right registers.
713         "ld $a0, 0($sp)\n\t"
714         "ld $a1, 8($sp)\n\t"
715         "ld $a2, 16($sp)\n\t"
716         "ld $t9, 24($sp)\n\t"
717         "ld $s1, 32($sp)\n\t"
718         "ld $t0, 40($sp)\n\t"
719         "daddiu $sp, $sp, 48\n\t"
720 
721         "jalr $t9\n\t"              // Call the stub.
722         "nop\n\t"
723         "daddiu $sp, $sp, 16\n\t"   // Drop the quick "frame".
724 
725         // Restore stuff not named clobbered.
726         "ld $a0, 0($sp)\n\t"
727         "ld $a1, 8($sp)\n\t"
728         "ld $a2, 16($sp)\n\t"
729         "ld $a3, 24($sp)\n\t"
730         "ld $a4, 32($sp)\n\t"
731         "ld $a5, 40($sp)\n\t"
732         "ld $a6, 48($sp)\n\t"
733         "ld $a7, 56($sp)\n\t"
734         "daddiu $sp, $sp, 64\n\t"
735 
736         "move %[result], $v0\n\t"   // Store the call result.
737         : [result] "=r" (result)
738         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
739           [referrer] "r"(referrer), [hidden] "r"(hidden)
740         : "at", "v0", "v1", "t0", "t1", "t2", "t3", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
741           "t8", "t9", "k0", "k1", "fp", "ra",
742           "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13",
743           "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26",
744           "f27", "f28", "f29", "f30", "f31",
745           "memory");  // clobber.
746 #elif defined(__x86_64__) && !defined(__APPLE__) && defined(__clang__)
747     // Note: Uses the native convention
748     // TODO: Set the thread?
749     __asm__ __volatile__(
750         "pushq %[referrer]\n\t"        // Push referrer
751         "pushq (%%rsp)\n\t"            // & 16B alignment padding
752         ".cfi_adjust_cfa_offset 16\n\t"
753         "call *%%rbx\n\t"              // Call the stub
754         "addq $16, %%rsp\n\t"          // Pop null and padding
755         ".cfi_adjust_cfa_offset -16\n\t"
756         : "=a" (result)
757         // Use the result from rax
758         : "D"(arg0), "S"(arg1), "d"(arg2), "b"(code), [referrer] "c"(referrer), [hidden] "a"(hidden)
759         // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, code into rbx, and hidden into rax
760         : "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
761           "memory");  // clobber all
762     // TODO: Should we clobber the other registers?
763 #else
764     UNUSED(arg0, arg1, arg2, code, referrer, hidden);
765     LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
766     result = 0;
767 #endif
768     // Pop transition.
769     self->PopManagedStackFragment(fragment);
770 
771     fp_result = fpr_result;
772     EXPECT_EQ(0U, fp_result);
773 
774     return result;
775   }
776 
777   // Method with 32b arg0, 64b arg1
778   size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
779                               ArtMethod* referrer) {
780 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
781     defined(__aarch64__)
782     // Just pass through.
783     return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
784 #else
785     // Need to split up arguments.
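    // On 32-bit targets the 64-bit arg1 is passed as two 32-bit halves: the low word goes in
    // the arg1 slot and the high word in the arg2 slot.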
786     uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
787     uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);
788 
789     return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
790 #endif
791   }
792 
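  // Read the quick entrypoint pointer for |entrypoint| out of |self|'s entrypoint table; the
  // offset into Thread depends on whether the runtime uses 4-byte or 8-byte pointers.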
793   static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
794     int32_t offset;
795 #ifdef __LP64__
796     offset = GetThreadOffset<8>(entrypoint).Int32Value();
797 #else
798     offset = GetThreadOffset<4>(entrypoint).Int32Value();
799 #endif
800     return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
801   }
802 
803  protected:
804   size_t fp_result;
805 };
806 
807 
808 TEST_F(StubTest, Memcpy) {
809 #if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
810   Thread* self = Thread::Current();
811 
812   uint32_t orig[20];
813   uint32_t trg[20];
814   for (size_t i = 0; i < 20; ++i) {
815     orig[i] = i;
816     trg[i] = 0;
817   }
818 
819   Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
820           10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);
821 
822   EXPECT_EQ(orig[0], trg[0]);
823 
824   for (size_t i = 1; i < 4; ++i) {
825     EXPECT_NE(orig[i], trg[i]);
826   }
827 
828   for (size_t i = 4; i < 14; ++i) {
829     EXPECT_EQ(orig[i], trg[i]);
830   }
831 
832   for (size_t i = 14; i < 20; ++i) {
833     EXPECT_NE(orig[i], trg[i]);
834   }
835 
836   // TODO: Test overlapping?
837 
838 #else
839   LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
840   // Force-print to std::cout so it's also outside the logcat.
841   std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
842 #endif
843 }
844 
845 TEST_F(StubTest, LockObject) {
846 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
847     (defined(__x86_64__) && !defined(__APPLE__))
848   static constexpr size_t kThinLockLoops = 100;
849 
850   Thread* self = Thread::Current();
851 
852   const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
853 
854   // Create an object
855   ScopedObjectAccess soa(self);
856   // garbage is created during ClassLinker::Init
857 
858   StackHandleScope<2> hs(soa.Self());
859   Handle<mirror::String> obj(
860       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
861   LockWord lock = obj->GetLockWord(false);
862   LockWord::LockState old_state = lock.GetState();
863   EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
864 
865   Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
866 
867   LockWord lock_after = obj->GetLockWord(false);
868   LockWord::LockState new_state = lock_after.GetState();
869   EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
870   EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero
871 
872   for (size_t i = 1; i < kThinLockLoops; ++i) {
873     Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
874 
875     // Check we're at lock count i
876 
877     LockWord l_inc = obj->GetLockWord(false);
878     LockWord::LockState l_inc_state = l_inc.GetState();
879     EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
880     EXPECT_EQ(l_inc.ThinLockCount(), i);
881   }
882 
883   // Force a fat lock by running identity hashcode to fill up lock word.
884   Handle<mirror::String> obj2(hs.NewHandle(
885       mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
886 
887   obj2->IdentityHashCode();
888 
889   Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);
890 
891   LockWord lock_after2 = obj2->GetLockWord(false);
892   LockWord::LockState new_state2 = lock_after2.GetState();
893   EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
894   EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));
895 
896   // Test done.
897 #else
898   LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
899   // Force-print to std::cout so it's also outside the logcat.
900   std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
901 #endif
902 }
903 
904 
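// A small deterministic pseudo-random generator (multiplicative-congruential style) so that the
// lock/unlock stress test below is reproducible.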
905 class RandGen {
906  public:
907   explicit RandGen(uint32_t seed) : val_(seed) {}
908 
909   uint32_t next() {
910     val_ = val_ * 48271 % 2147483647 + 13;
911     return val_;
912   }
913 
914   uint32_t val_;
915 };
916 
917 
918 // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
919 static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
920 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
921     (defined(__x86_64__) && !defined(__APPLE__))
922   static constexpr size_t kThinLockLoops = 100;
923 
924   Thread* self = Thread::Current();
925 
926   const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
927   const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
928   // Create an object
929   ScopedObjectAccess soa(self);
930   // garbage is created during ClassLinker::Init
931   static constexpr size_t kNumberOfLocks = 10;  // Number of objects = number of locks.
932   StackHandleScope<kNumberOfLocks + 1> hs(self);
933   Handle<mirror::String> obj(
934       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
935   LockWord lock = obj->GetLockWord(false);
936   LockWord::LockState old_state = lock.GetState();
937   EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
938 
939   test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
940   // This should be an illegal monitor state.
941   EXPECT_TRUE(self->IsExceptionPending());
942   self->ClearException();
943 
944   LockWord lock_after = obj->GetLockWord(false);
945   LockWord::LockState new_state = lock_after.GetState();
946   EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
947 
948   test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
949 
950   LockWord lock_after2 = obj->GetLockWord(false);
951   LockWord::LockState new_state2 = lock_after2.GetState();
952   EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
953 
954   test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
955 
956   LockWord lock_after3 = obj->GetLockWord(false);
957   LockWord::LockState new_state3 = lock_after3.GetState();
958   EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);
959 
960   // Stress test:
961   // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
962   // each step.
963 
964   RandGen r(0x1234);
965 
966   constexpr size_t kIterations = 10000;  // Number of iterations
967   constexpr size_t kMoveToFat = 1000;     // Chance of 1:kMoveToFat to make a lock fat.
968 
969   size_t counts[kNumberOfLocks];
970   bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
971   Handle<mirror::String> objects[kNumberOfLocks];
972 
973   // Initialize = allocate.
974   for (size_t i = 0; i < kNumberOfLocks; ++i) {
975     counts[i] = 0;
976     fat[i] = false;
977     objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
978   }
979 
980   for (size_t i = 0; i < kIterations; ++i) {
981     // Select which lock to update.
982     size_t index = r.next() % kNumberOfLocks;
983 
984     // Make lock fat?
985     if (!fat[index] && (r.next() % kMoveToFat == 0)) {
986       fat[index] = true;
987       objects[index]->IdentityHashCode();
988 
989       LockWord lock_iter = objects[index]->GetLockWord(false);
990       LockWord::LockState iter_state = lock_iter.GetState();
991       if (counts[index] == 0) {
992         EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
993       } else {
994         EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
995       }
996     } else {
997       bool take_lock;  // Whether to lock or unlock in this step.
998       if (counts[index] == 0) {
999         take_lock = true;
1000       } else if (counts[index] == kThinLockLoops) {
1001         take_lock = false;
1002       } else {
1003         // Randomly.
1004         take_lock = r.next() % 2 == 0;
1005       }
1006 
1007       if (take_lock) {
1008         test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
1009                       self);
1010         counts[index]++;
1011       } else {
1012         test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
1013                       art_quick_unlock_object, self);
1014         counts[index]--;
1015       }
1016 
1017       EXPECT_FALSE(self->IsExceptionPending());
1018 
1019       // Check the new state.
1020       LockWord lock_iter = objects[index]->GetLockWord(true);
1021       LockWord::LockState iter_state = lock_iter.GetState();
1022       if (fat[index]) {
1023         // Abuse MonitorInfo.
1024         EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
1025         MonitorInfo info(objects[index].Get());
1026         EXPECT_EQ(counts[index], info.entry_count_) << index;
1027       } else {
1028         if (counts[index] > 0) {
1029           EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
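          // ThinLockCount() is zero-based, so holding the lock N times reads back as N - 1.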
1030           EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
1031         } else {
1032           EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
1033         }
1034       }
1035     }
1036   }
1037 
1038   // Unlock the remaining count times and then check it's unlocked. Then deallocate.
1039   // Go reverse order to correctly handle Handles.
1040   for (size_t i = 0; i < kNumberOfLocks; ++i) {
1041     size_t index = kNumberOfLocks - 1 - i;
1042     size_t count = counts[index];
1043     while (count > 0) {
1044       test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
1045                     self);
1046       count--;
1047     }
1048 
1049     LockWord lock_after4 = objects[index]->GetLockWord(false);
1050     LockWord::LockState new_state4 = lock_after4.GetState();
1051     EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
1052                 || LockWord::LockState::kFatLocked == new_state4);
1053   }
1054 
1055   // Test done.
1056 #else
1057   UNUSED(test);
1058   LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
1059   // Force-print to std::cout so it's also outside the logcat.
1060   std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
1061 #endif
1062 }
1063 
1064 TEST_F(StubTest, UnlockObject) {
1065   // This will lead to monitor error messages in the log.
1066   ScopedLogSeverity sls(LogSeverity::FATAL);
1067 
1068   TestUnlockObject(this);
1069 }
1070 
1071 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1072     (defined(__x86_64__) && !defined(__APPLE__))
1073 extern "C" void art_quick_check_cast(void);
1074 #endif
1075 
1076 TEST_F(StubTest, CheckCast) {
1077 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1078     (defined(__x86_64__) && !defined(__APPLE__))
1079   Thread* self = Thread::Current();
1080 
1081   const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
1082 
1083   // Find some classes.
1084   ScopedObjectAccess soa(self);
1085   // garbage is created during ClassLinker::Init
1086 
1087   StackHandleScope<2> hs(soa.Self());
1088   Handle<mirror::Class> c(
1089       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1090   Handle<mirror::Class> c2(
1091       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
1092 
1093   EXPECT_FALSE(self->IsExceptionPending());
1094 
1095   Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
1096           art_quick_check_cast, self);
1097 
1098   EXPECT_FALSE(self->IsExceptionPending());
1099 
1100   Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
1101           art_quick_check_cast, self);
1102 
1103   EXPECT_FALSE(self->IsExceptionPending());
1104 
1105   Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
1106           art_quick_check_cast, self);
1107 
1108   EXPECT_FALSE(self->IsExceptionPending());
1109 
1110   // TODO: Make the following work. But that would require correct managed frames.
1111 
1112   Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
1113           art_quick_check_cast, self);
1114 
1115   EXPECT_TRUE(self->IsExceptionPending());
1116   self->ClearException();
1117 
1118 #else
1119   LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
1120   // Force-print to std::cout so it's also outside the logcat.
1121   std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
1122 #endif
1123 }
1124 
1125 
1126 TEST_F(StubTest, APutObj) {
1127   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1128 
1129 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1130     (defined(__x86_64__) && !defined(__APPLE__))
1131   Thread* self = Thread::Current();
1132 
1133   // Do not check non-checked ones, we'd need handlers and stuff...
1134   const uintptr_t art_quick_aput_obj_with_null_and_bound_check =
1135       StubTest::GetEntrypoint(self, kQuickAputObjectWithNullAndBoundCheck);
1136 
1137   // Create an object
1138   ScopedObjectAccess soa(self);
1139   // garbage is created during ClassLinker::Init
1140 
1141   StackHandleScope<5> hs(soa.Self());
1142   Handle<mirror::Class> c(
1143       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1144   Handle<mirror::Class> ca(
1145       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
1146 
1147   // Build a string array of size 10
1148   Handle<mirror::ObjectArray<mirror::Object>> array(
1149       hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), 10)));
1150 
1151   // Build a string -> should be assignable
1152   Handle<mirror::String> str_obj(
1153       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
1154 
1155   // Build a generic object -> should fail assigning
1156   Handle<mirror::Object> obj_obj(hs.NewHandle(c->AllocObject(soa.Self())));
1157 
1158   // Play with it...
1159 
1160   // 1) Success cases
1161   // 1.1) Assign str_obj to array[0..3]
1162 
1163   EXPECT_FALSE(self->IsExceptionPending());
1164 
1165   Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
1166           art_quick_aput_obj_with_null_and_bound_check, self);
1167 
1168   EXPECT_FALSE(self->IsExceptionPending());
1169   EXPECT_EQ(str_obj.Get(), array->Get(0));
1170 
1171   Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
1172           art_quick_aput_obj_with_null_and_bound_check, self);
1173 
1174   EXPECT_FALSE(self->IsExceptionPending());
1175   EXPECT_EQ(str_obj.Get(), array->Get(1));
1176 
1177   Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
1178           art_quick_aput_obj_with_null_and_bound_check, self);
1179 
1180   EXPECT_FALSE(self->IsExceptionPending());
1181   EXPECT_EQ(str_obj.Get(), array->Get(2));
1182 
1183   Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
1184           art_quick_aput_obj_with_null_and_bound_check, self);
1185 
1186   EXPECT_FALSE(self->IsExceptionPending());
1187   EXPECT_EQ(str_obj.Get(), array->Get(3));
1188 
1189   // 1.2) Assign null to array[0..3]
1190 
1191   Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
1192           art_quick_aput_obj_with_null_and_bound_check, self);
1193 
1194   EXPECT_FALSE(self->IsExceptionPending());
1195   EXPECT_EQ(nullptr, array->Get(0));
1196 
1197   Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
1198           art_quick_aput_obj_with_null_and_bound_check, self);
1199 
1200   EXPECT_FALSE(self->IsExceptionPending());
1201   EXPECT_EQ(nullptr, array->Get(1));
1202 
1203   Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
1204           art_quick_aput_obj_with_null_and_bound_check, self);
1205 
1206   EXPECT_FALSE(self->IsExceptionPending());
1207   EXPECT_EQ(nullptr, array->Get(2));
1208 
1209   Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
1210           art_quick_aput_obj_with_null_and_bound_check, self);
1211 
1212   EXPECT_FALSE(self->IsExceptionPending());
1213   EXPECT_EQ(nullptr, array->Get(3));
1214 
1215   // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.
1216 
1217   // 2) Failure cases (str into str[])
1218   // 2.1) Array = null
1219   // TODO: Throwing NPE needs actual DEX code
1220 
1221 //  Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.Get()),
1222 //          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
1223 //
1224 //  EXPECT_TRUE(self->IsExceptionPending());
1225 //  self->ClearException();
1226 
1227   // 2.2) Index < 0
1228 
1229   Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
1230           reinterpret_cast<size_t>(str_obj.Get()),
1231           art_quick_aput_obj_with_null_and_bound_check, self);
1232 
1233   EXPECT_TRUE(self->IsExceptionPending());
1234   self->ClearException();
1235 
1236   // 2.3) Index >= array length
1237 
1238   Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
1239           art_quick_aput_obj_with_null_and_bound_check, self);
1240 
1241   EXPECT_TRUE(self->IsExceptionPending());
1242   self->ClearException();
1243 
1244   // 3) Failure cases (obj into str[])
1245 
1246   Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
1247           art_quick_aput_obj_with_null_and_bound_check, self);
1248 
1249   EXPECT_TRUE(self->IsExceptionPending());
1250   self->ClearException();
1251 
1252   // Tests done.
1253 #else
1254   LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
1255   // Force-print to std::cout so it's also outside the logcat.
1256   std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
1257 #endif
1258 }
1259 
1260 TEST_F(StubTest, AllocObject) {
1261   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1262 
1263 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1264     (defined(__x86_64__) && !defined(__APPLE__))
1265   // This will lead to OOM  error messages in the log.
1266   ScopedLogSeverity sls(LogSeverity::FATAL);
1267 
1268   // TODO: Check the "Unresolved" allocation stubs
1269 
1270   Thread* self = Thread::Current();
1271   // Create an object
1272   ScopedObjectAccess soa(self);
1273   // garbage is created during ClassLinker::Init
1274 
1275   StackHandleScope<2> hs(soa.Self());
1276   Handle<mirror::Class> c(
1277       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1278 
1279   // Play with it...
1280 
1281   EXPECT_FALSE(self->IsExceptionPending());
1282   {
1283     // Use an arbitrary method from c to use as referrer
1284     size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
1285                             // arbitrary
1286                             reinterpret_cast<size_t>(c->GetVirtualMethod(0, sizeof(void*))),
1287                             0U,
1288                             StubTest::GetEntrypoint(self, kQuickAllocObject),
1289                             self);
1290 
1291     EXPECT_FALSE(self->IsExceptionPending());
1292     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1293     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1294     EXPECT_EQ(c.Get(), obj->GetClass());
1295     VerifyObject(obj);
1296   }
1297 
1298   {
1299     // We can use null in the second argument as we do not need a method here (not used in
1300     // resolved/initialized cases)
1301     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
1302                             StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
1303                             self);
1304 
1305     EXPECT_FALSE(self->IsExceptionPending());
1306     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1307     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1308     EXPECT_EQ(c.Get(), obj->GetClass());
1309     VerifyObject(obj);
1310   }
1311 
1312   {
1313     // We can use null in the second argument as we do not need a method here (not used in
1314     // resolved/initialized cases)
1315     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
1316                             StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
1317                             self);
1318 
1319     EXPECT_FALSE(self->IsExceptionPending());
1320     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1321     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1322     EXPECT_EQ(c.Get(), obj->GetClass());
1323     VerifyObject(obj);
1324   }
1325 
1326   // Failure tests.
1327 
1328   // Out-of-memory.
1329   {
1330     Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
1331 
1332     // Array helps to fill memory faster.
1333     Handle<mirror::Class> ca(
1334         hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1335 
1336     // Use arbitrary large amount for now.
1337     static const size_t kMaxHandles = 1000000;
1338     std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));
1339 
1340     std::vector<Handle<mirror::Object>> handles;
1341     // Start allocating with 128K
1342     size_t length = 128 * KB / 4;
1343     while (length > 10) {
1344       Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
1345           mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
1346       if (self->IsExceptionPending() || h.Get() == nullptr) {
1347         self->ClearException();
1348 
1349         // Try a smaller length
1350         length = length / 8;
1351         // Use at most half the reported free space.
1352         size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
1353         if (length * 8 > mem) {
1354           length = mem / 8;
1355         }
1356       } else {
1357         handles.push_back(h);
1358       }
1359     }
1360     LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";
1361 
1362     // Allocate simple objects till it fails.
1363     while (!self->IsExceptionPending()) {
1364       Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
1365       if (!self->IsExceptionPending() && h.Get() != nullptr) {
1366         handles.push_back(h);
1367       }
1368     }
1369     self->ClearException();
1370 
1371     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
1372                             StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
1373                             self);
1374     EXPECT_TRUE(self->IsExceptionPending());
1375     self->ClearException();
1376     EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
1377   }
1378 
1379   // Tests done.
1380 #else
1381   LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
1382   // Force-print to std::cout so it's also outside the logcat.
1383   std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA << std::endl;
1384 #endif
1385 }
1386 
1387 TEST_F(StubTest, AllocObjectArray) {
1388   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1389 
1390 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1391     (defined(__x86_64__) && !defined(__APPLE__))
1392   // TODO: Check the "Unresolved" allocation stubs
1393 
1394   // This will lead to OOM error messages in the log.
1395   ScopedLogSeverity sls(LogSeverity::FATAL);
1396 
1397   Thread* self = Thread::Current();
1398   // Create an object
1399   ScopedObjectAccess soa(self);
1400   // garbage is created during ClassLinker::Init
1401 
1402   StackHandleScope<2> hs(self);
1403   Handle<mirror::Class> c(
1404       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1405 
1406   // Needed to have a linked method.
1407   Handle<mirror::Class> c_obj(
1408       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1409 
1410   // Play with it...
1411 
1412   EXPECT_FALSE(self->IsExceptionPending());
1413 
1414   // This does not work as written: the type_idx is artificial and outside the range of
1415   // resolved types of c_obj, so the stub cannot resolve it.
1416 
1417   if ((false)) {
1418     // Use an arbitrary method from c to use as referrer
1419     size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
1420                             10U,  // arbitrary
1422                             reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, sizeof(void*))),
1423                             StubTest::GetEntrypoint(self, kQuickAllocArray),
1424                             self);
1425 
1426     EXPECT_FALSE(self->IsExceptionPending());
1427     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1428     mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
1429     EXPECT_EQ(c.Get(), obj->GetClass());
1430     VerifyObject(obj);
1431     EXPECT_EQ(obj->GetLength(), 10);
1432   }
1433 
1434   {
1435     // We can pass null as the method (the third argument) as it is not used in the
1436     // resolved/initialized cases.
1437     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
1438                             reinterpret_cast<size_t>(nullptr),
1439                             StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
1440                             self);
1441     EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
1442     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1443     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1444     EXPECT_TRUE(obj->IsArrayInstance());
1445     EXPECT_TRUE(obj->IsObjectArray());
1446     EXPECT_EQ(c.Get(), obj->GetClass());
1447     VerifyObject(obj);
1448     mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
1449     EXPECT_EQ(array->GetLength(), 10);
1450   }
1451 
1452   // Failure tests.
1453 
1454   // Out-of-memory.
1455   {
1456     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
1457                             GB,  // A length this large should fail with an OOME.
1458                             reinterpret_cast<size_t>(nullptr),
1459                             StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
1460                             self);
1461 
1462     EXPECT_TRUE(self->IsExceptionPending());
1463     self->ClearException();
1464     EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
1465   }
1466 
1467   // Tests done.
1468 #else
1469   LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
1470   // Force-print to std::cout so it's also outside the logcat.
1471   std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA << std::endl;
1472 #endif
1473 }
1474 
1475 
1476 TEST_F(StubTest, StringCompareTo) {
1477   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1478 
1479 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1480   // TODO: Check the "Unresolved" allocation stubs
1481 
1482   Thread* self = Thread::Current();
1483 
1484   const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);
1485 
1486   ScopedObjectAccess soa(self);
1487   // garbage is created during ClassLinker::Init
1488 
1489   // Create some strings.
1490   // Use an array so we can index into it and build a matrix of expected results.
1492   // TODO: Shared backing arrays.
1493   const char* c[] = { "", "", "a", "aa", "ab",
1494       "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
1495       "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
1496       "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
1497                                                   // defeat object-equal optimizations.
1498   static constexpr size_t kStringCount = arraysize(c);
1499 
1500   StackHandleScope<kStringCount> hs(self);
1501   Handle<mirror::String> s[kStringCount];
1502 
1503   for (size_t i = 0; i < kStringCount; ++i) {
1504     s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
1505   }
1506 
1507   // TODO: wide characters
1508 
1509   // Matrix of expectations. The first component is the first parameter. Note that we only check
1510   // the sign of the result, not its exact value, and that we rely on String::CompareTo to
1511   // compute the expected values.
1512   int32_t expected[kStringCount][kStringCount];
1513   for (size_t x = 0; x < kStringCount; ++x) {
1514     for (size_t y = 0; y < kStringCount; ++y) {
1515       expected[x][y] = s[x]->CompareTo(s[y].Get());
1516     }
1517   }
1518 
1519   // Play with it...
1520 
1521   for (size_t x = 0; x < kStringCount; ++x) {
1522     for (size_t y = 0; y < kStringCount; ++y) {
1523       // Test string_compareto x y
1524       size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
1525                               reinterpret_cast<size_t>(s[y].Get()), 0U,
1526                               art_quick_string_compareto, self);
1527 
1528       EXPECT_FALSE(self->IsExceptionPending());
1529 
1530       // The result is a 32b signed integer
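      // (The union below reinterprets the low 32 bits of the size_t returned by the stub.)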
1531       union {
1532         size_t r;
1533         int32_t i;
1534       } conv;
1535       conv.r = result;
1536       int32_t e = expected[x][y];
1537       EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
1538           conv.r;
1539       EXPECT_TRUE(e < 0 ? conv.i < 0 : true)   << "x=" << c[x] << " y="  << c[y] << " res=" <<
1540           conv.r;
1541       EXPECT_TRUE(e > 0 ? conv.i > 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
1542           conv.r;
1543     }
1544   }
1545 
1546   // TODO: Deallocate things.
1547 
1548   // Tests done.
1549 #else
1550   LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
1551   // Force-print to std::cout so it's also outside the logcat.
1552   std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
1553       std::endl;
1554 #endif
1555 }
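
// A minimal, illustrative sketch (deliberately compiled out via "#if 0", since no test calls it;
// the helper name is hypothetical) of how a single pair could be checked through the compareto
// stub: invoke the stub with the two string pointers, reinterpret the low 32 bits of the returned
// size_t, and compare only the sign against String::CompareTo. It reuses only the helpers already
// used in StringCompareTo above.
#if 0
static void CheckCompareToSign(StubTest* test, Thread* self,
                               Handle<mirror::String> lhs, Handle<mirror::String> rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  size_t raw = test->Invoke3(reinterpret_cast<size_t>(lhs.Get()),
                             reinterpret_cast<size_t>(rhs.Get()), 0U,
                             StubTest::GetEntrypoint(self, kQuickStringCompareTo), self);
  EXPECT_FALSE(self->IsExceptionPending());
  int32_t via_stub = static_cast<int32_t>(raw);  // The stub returns a 32-bit signed value.
  int32_t expected = lhs->CompareTo(rhs.Get());  // Reference result from the runtime.
  // Only the sign is specified, so compare signs rather than exact values.
  EXPECT_EQ(expected == 0, via_stub == 0);
  EXPECT_EQ(expected < 0, via_stub < 0);
  EXPECT_EQ(expected > 0, via_stub > 0);
}
#endif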
1556 
1557 
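// The GetSet* helpers below all follow the same pattern: write a value through the quick 'set'
// entrypoint, read it back through the matching quick 'get' entrypoint (and, for instance fields,
// also directly through ArtField), and check that all views agree.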
1558 static void GetSetBooleanStatic(ArtField* f, Thread* self,
1559                                 ArtMethod* referrer, StubTest* test)
1560     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1561 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1562     (defined(__x86_64__) && !defined(__APPLE__))
1563   constexpr size_t num_values = 5;
1564   uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
1565 
1566   for (size_t i = 0; i < num_values; ++i) {
1567     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1568                               static_cast<size_t>(values[i]),
1569                               0U,
1570                               StubTest::GetEntrypoint(self, kQuickSet8Static),
1571                               self,
1572                               referrer);
1573 
1574     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1575                                            0U, 0U,
1576                                            StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
1577                                            self,
1578                                            referrer);
1579     // Booleans are currently stored as uint8_t; assert that the exact byte written is read back.
1580     EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
1581   }
1582 #else
1583   UNUSED(f, self, referrer, test);
1584   LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
1585   // Force-print to std::cout so it's also outside the logcat.
1586   std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1587 #endif
1588 }
1589 static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
1590                              StubTest* test)
1591     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1592 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1593     (defined(__x86_64__) && !defined(__APPLE__))
1594   int8_t values[] = { -128, -64, 0, 64, 127 };
1595 
1596   for (size_t i = 0; i < arraysize(values); ++i) {
1597     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1598                               static_cast<size_t>(values[i]),
1599                               0U,
1600                               StubTest::GetEntrypoint(self, kQuickSet8Static),
1601                               self,
1602                               referrer);
1603 
1604     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1605                                            0U, 0U,
1606                                            StubTest::GetEntrypoint(self, kQuickGetByteStatic),
1607                                            self,
1608                                            referrer);
1609     EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
1610   }
1611 #else
1612   UNUSED(f, self, referrer, test);
1613   LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
1614   // Force-print to std::cout so it's also outside the logcat.
1615   std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1616 #endif
1617 }
1618 
1619 
1620 static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
1621                                   ArtMethod* referrer, StubTest* test)
1622     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1623 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1624     (defined(__x86_64__) && !defined(__APPLE__))
1625   uint8_t values[] = { 0, true, 2, 128, 0xFF };
1626 
1627   for (size_t i = 0; i < arraysize(values); ++i) {
1628     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1629                               reinterpret_cast<size_t>(obj->Get()),
1630                               static_cast<size_t>(values[i]),
1631                               StubTest::GetEntrypoint(self, kQuickSet8Instance),
1632                               self,
1633                               referrer);
1634 
1635     uint8_t res = f->GetBoolean(obj->Get());
1636     EXPECT_EQ(values[i], res) << "Iteration " << i;
1637 
1638     f->SetBoolean<false>(obj->Get(), res);
1639 
1640     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1641                                             reinterpret_cast<size_t>(obj->Get()),
1642                                             0U,
1643                                             StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
1644                                             self,
1645                                             referrer);
1646     EXPECT_EQ(res, static_cast<uint8_t>(res2));
1647   }
1648 #else
1649   UNUSED(obj, f, self, referrer, test);
1650   LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
1651   // Force-print to std::cout so it's also outside the logcat.
1652   std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1653 #endif
1654 }
1655 static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
1656                              Thread* self, ArtMethod* referrer, StubTest* test)
1657     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1658 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1659     (defined(__x86_64__) && !defined(__APPLE__))
1660   int8_t values[] = { -128, -64, 0, 64, 127 };
1661 
1662   for (size_t i = 0; i < arraysize(values); ++i) {
1663     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1664                               reinterpret_cast<size_t>(obj->Get()),
1665                               static_cast<size_t>(values[i]),
1666                               StubTest::GetEntrypoint(self, kQuickSet8Instance),
1667                               self,
1668                               referrer);
1669 
1670     int8_t res = f->GetByte(obj->Get());
1671     EXPECT_EQ(res, values[i]) << "Iteration " << i;
1672     f->SetByte<false>(obj->Get(), ++res);
1673 
1674     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1675                                             reinterpret_cast<size_t>(obj->Get()),
1676                                             0U,
1677                                             StubTest::GetEntrypoint(self, kQuickGetByteInstance),
1678                                             self,
1679                                             referrer);
1680     EXPECT_EQ(res, static_cast<int8_t>(res2));
1681   }
1682 #else
1683   UNUSED(obj, f, self, referrer, test);
1684   LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
1685   // Force-print to std::cout so it's also outside the logcat.
1686   std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1687 #endif
1688 }
1689 
1690 static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
1691                              StubTest* test)
1692     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1693 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1694     (defined(__x86_64__) && !defined(__APPLE__))
1695   uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
1696 
1697   for (size_t i = 0; i < arraysize(values); ++i) {
1698     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1699                               static_cast<size_t>(values[i]),
1700                               0U,
1701                               StubTest::GetEntrypoint(self, kQuickSet16Static),
1702                               self,
1703                               referrer);
1704 
1705     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1706                                            0U, 0U,
1707                                            StubTest::GetEntrypoint(self, kQuickGetCharStatic),
1708                                            self,
1709                                            referrer);
1710 
1711     EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
1712   }
1713 #else
1714   UNUSED(f, self, referrer, test);
1715   LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
1716   // Force-print to std::cout so it's also outside the logcat.
1717   std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1718 #endif
1719 }
1720 static void GetSetShortStatic(ArtField* f, Thread* self,
1721                               ArtMethod* referrer, StubTest* test)
1722     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1723 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1724     (defined(__x86_64__) && !defined(__APPLE__))
1725   int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
1726 
1727   for (size_t i = 0; i < arraysize(values); ++i) {
1728     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1729                               static_cast<size_t>(values[i]),
1730                               0U,
1731                               StubTest::GetEntrypoint(self, kQuickSet16Static),
1732                               self,
1733                               referrer);
1734 
1735     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1736                                            0U, 0U,
1737                                            StubTest::GetEntrypoint(self, kQuickGetShortStatic),
1738                                            self,
1739                                            referrer);
1740 
1741     EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
1742   }
1743 #else
1744   UNUSED(f, self, referrer, test);
1745   LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
1746   // Force-print to std::cout so it's also outside the logcat.
1747   std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1748 #endif
1749 }
1750 
1751 static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
1752                                Thread* self, ArtMethod* referrer, StubTest* test)
1753     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1754 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1755     (defined(__x86_64__) && !defined(__APPLE__))
1756   uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
1757 
1758   for (size_t i = 0; i < arraysize(values); ++i) {
1759     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1760                               reinterpret_cast<size_t>(obj->Get()),
1761                               static_cast<size_t>(values[i]),
1762                               StubTest::GetEntrypoint(self, kQuickSet16Instance),
1763                               self,
1764                               referrer);
1765 
1766     uint16_t res = f->GetChar(obj->Get());
1767     EXPECT_EQ(res, values[i]) << "Iteration " << i;
1768     f->SetChar<false>(obj->Get(), ++res);
1769 
1770     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1771                                             reinterpret_cast<size_t>(obj->Get()),
1772                                             0U,
1773                                             StubTest::GetEntrypoint(self, kQuickGetCharInstance),
1774                                             self,
1775                                             referrer);
1776     EXPECT_EQ(res, static_cast<uint16_t>(res2));
1777   }
1778 #else
1779   UNUSED(obj, f, self, referrer, test);
1780   LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
1781   // Force-print to std::cout so it's also outside the logcat.
1782   std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1783 #endif
1784 }
1785 static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
1786                              Thread* self, ArtMethod* referrer, StubTest* test)
1787     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1788 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1789     (defined(__x86_64__) && !defined(__APPLE__))
1790   int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
1791 
1792   for (size_t i = 0; i < arraysize(values); ++i) {
1793     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1794                               reinterpret_cast<size_t>(obj->Get()),
1795                               static_cast<size_t>(values[i]),
1796                               StubTest::GetEntrypoint(self, kQuickSet16Instance),
1797                               self,
1798                               referrer);
1799 
1800     int16_t res = f->GetShort(obj->Get());
1801     EXPECT_EQ(res, values[i]) << "Iteration " << i;
1802     f->SetShort<false>(obj->Get(), ++res);
1803 
1804     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1805                                             reinterpret_cast<size_t>(obj->Get()),
1806                                             0U,
1807                                             StubTest::GetEntrypoint(self, kQuickGetShortInstance),
1808                                             self,
1809                                             referrer);
1810     EXPECT_EQ(res, static_cast<int16_t>(res2));
1811   }
1812 #else
1813   UNUSED(obj, f, self, referrer, test);
1814   LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
1815   // Force-print to std::cout so it's also outside the logcat.
1816   std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1817 #endif
1818 }
1819 
1820 static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
1821                            StubTest* test)
1822     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1823 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1824     (defined(__x86_64__) && !defined(__APPLE__))
1825   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
1826 
1827   for (size_t i = 0; i < arraysize(values); ++i) {
1828     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1829                               static_cast<size_t>(values[i]),
1830                               0U,
1831                               StubTest::GetEntrypoint(self, kQuickSet32Static),
1832                               self,
1833                               referrer);
1834 
1835     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1836                                            0U, 0U,
1837                                            StubTest::GetEntrypoint(self, kQuickGet32Static),
1838                                            self,
1839                                            referrer);
1840 
1841 #if defined(__mips__) && defined(__LP64__)
1842     EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
1843 #else
1844     EXPECT_EQ(res, values[i]) << "Iteration " << i;
1845 #endif
1846   }
1847 #else
1848   UNUSED(f, self, referrer, test);
1849   LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
1850   // Force-print to std::cout so it's also outside the logcat.
1851   std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
1852 #endif
1853 }
1854 
1855 
1856 static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
1857                              Thread* self, ArtMethod* referrer, StubTest* test)
1858     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1859 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1860     (defined(__x86_64__) && !defined(__APPLE__))
1861   uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
1862 
1863   for (size_t i = 0; i < arraysize(values); ++i) {
1864     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1865                               reinterpret_cast<size_t>(obj->Get()),
1866                               static_cast<size_t>(values[i]),
1867                               StubTest::GetEntrypoint(self, kQuickSet32Instance),
1868                               self,
1869                               referrer);
1870 
1871     int32_t res = f->GetInt(obj->Get());
1872     EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;
1873 
1874     res++;
1875     f->SetInt<false>(obj->Get(), res);
1876 
1877     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1878                                             reinterpret_cast<size_t>(obj->Get()),
1879                                             0U,
1880                                             StubTest::GetEntrypoint(self, kQuickGet32Instance),
1881                                             self,
1882                                             referrer);
1883     EXPECT_EQ(res, static_cast<int32_t>(res2));
1884   }
1885 #else
1886   UNUSED(obj, f, self, referrer, test);
1887   LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
1888   // Force-print to std::cout so it's also outside the logcat.
1889   std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1890 #endif
1891 }
1892 
1893 
1894 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1895     (defined(__x86_64__) && !defined(__APPLE__))
1896 
1897 static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
1898                                  ArtMethod* referrer, StubTest* test)
1899     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1900   test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
1901                             reinterpret_cast<size_t>(val),
1902                             0U,
1903                             StubTest::GetEntrypoint(self, kQuickSetObjStatic),
1904                             self,
1905                             referrer);
1906 
1907   size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
1908                                          0U, 0U,
1909                                          StubTest::GetEntrypoint(self, kQuickGetObjStatic),
1910                                          self,
1911                                          referrer);
1912 
1913   EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
1914 }
1915 #endif
1916 
1917 static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
1918                             StubTest* test)
1919     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1920 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1921     (defined(__x86_64__) && !defined(__APPLE__))
1922   set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
1923 
1924   // Allocate a string object for simplicity.
1925   mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
1926   set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);
1927 
1928   set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
1929 #else
1930   UNUSED(f, self, referrer, test);
1931   LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
1932   // Force-print to std::cout so it's also outside the logcat.
1933   std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
1934 #endif
1935 }
1936 
1937 
1938 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1939     (defined(__x86_64__) && !defined(__APPLE__))
1940 static void set_and_check_instance(ArtField* f, mirror::Object* trg,
1941                                    mirror::Object* val, Thread* self, ArtMethod* referrer,
1942                                    StubTest* test)
1943     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1944   test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1945                             reinterpret_cast<size_t>(trg),
1946                             reinterpret_cast<size_t>(val),
1947                             StubTest::GetEntrypoint(self, kQuickSetObjInstance),
1948                             self,
1949                             referrer);
1950 
1951   size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1952                                          reinterpret_cast<size_t>(trg),
1953                                          0U,
1954                                          StubTest::GetEntrypoint(self, kQuickGetObjInstance),
1955                                          self,
1956                                          referrer);
1957 
1958   EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
1959 
1960   EXPECT_EQ(val, f->GetObj(trg));
1961 }
1962 #endif
1963 
1964 static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
1965                               Thread* self, ArtMethod* referrer, StubTest* test)
1966     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1967 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1968     (defined(__x86_64__) && !defined(__APPLE__))
1969   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
1970 
1971   // Allocate a string object for simplicity.
1972   mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
1973   set_and_check_instance(f, obj->Get(), str, self, referrer, test);
1974 
1975   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
1976 #else
1977   UNUSED(obj, f, self, referrer, test);
1978   LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
1979   // Force-print to std::cout so it's also outside the logcat.
1980   std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
1981 #endif
1982 }
1983 
1984 
1985 // TODO: Complete these tests for 32b architectures.
1986 
1987 static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
1988                            StubTest* test)
1989     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1990 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
1991     defined(__aarch64__)
1992   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
1993 
1994   for (size_t i = 0; i < arraysize(values); ++i) {
1995     test->Invoke3UWithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1996                                values[i],
1997                                StubTest::GetEntrypoint(self, kQuickSet64Static),
1998                                self,
1999                                referrer);
2000 
2001     size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
2002                                            0U, 0U,
2003                                            StubTest::GetEntrypoint(self, kQuickGet64Static),
2004                                            self,
2005                                            referrer);
2006 
2007     EXPECT_EQ(res, values[i]) << "Iteration " << i;
2008   }
2009 #else
2010   UNUSED(f, self, referrer, test);
2011   LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
2012   // Force-print to std::cout so it's also outside the logcat.
2013   std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
2014 #endif
2015 }
2016 
2017 
2018 static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
2019                              Thread* self, ArtMethod* referrer, StubTest* test)
2020     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2021 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
2022     defined(__aarch64__)
2023   uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
2024 
2025   for (size_t i = 0; i < arraysize(values); ++i) {
2026     test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
2027                               reinterpret_cast<size_t>(obj->Get()),
2028                               static_cast<size_t>(values[i]),
2029                               StubTest::GetEntrypoint(self, kQuickSet64Instance),
2030                               self,
2031                               referrer);
2032 
2033     int64_t res = f->GetLong(obj->Get());
2034     EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;
2035 
2036     res++;
2037     f->SetLong<false>(obj->Get(), res);
2038 
2039     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
2040                                             reinterpret_cast<size_t>(obj->Get()),
2041                                             0U,
2042                                             StubTest::GetEntrypoint(self, kQuickGet64Instance),
2043                                             self,
2044                                             referrer);
2045     EXPECT_EQ(res, static_cast<int64_t>(res2));
2046   }
2047 #else
2048   UNUSED(obj, f, self, referrer, test);
2049   LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
2050   // Force-print to std::cout so it's also outside the logcat.
2051   std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
2052 #endif
2053 }
2054 
2055 static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
2056   // garbage is created during ClassLinker::Init
2057 
2058   JNIEnv* env = Thread::Current()->GetJniEnv();
2059   jclass jc = env->FindClass("AllFields");
2060   CHECK(jc != nullptr);
2061   jobject o = env->AllocObject(jc);
2062   CHECK(o != nullptr);
2063 
2064   ScopedObjectAccess soa(self);
2065   StackHandleScope<3> hs(self);
2066   Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
2067   Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
2068   // Need a method as a referrer
2069   ArtMethod* m = c->GetDirectMethod(0, sizeof(void*));
2070 
2071   // Play with it...
2072 
2073   // Static fields.
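  // Exercise the stubs for every static field whose type matches test_type.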
2074   ArtField* fields = c->GetSFields();
2075   size_t num_fields = c->NumStaticFields();
2076   for (size_t i = 0; i < num_fields; ++i) {
2077     ArtField* f = &fields[i];
2078     Primitive::Type type = f->GetTypeAsPrimitiveType();
2079     if (test_type != type) {
2080       continue;
2081     }
2082     switch (type) {
2083       case Primitive::Type::kPrimBoolean:
2084         GetSetBooleanStatic(f, self, m, test);
2085         break;
2086       case Primitive::Type::kPrimByte:
2087         GetSetByteStatic(f, self, m, test);
2088         break;
2089       case Primitive::Type::kPrimChar:
2090         GetSetCharStatic(f, self, m, test);
2091         break;
2092       case Primitive::Type::kPrimShort:
2093         GetSetShortStatic(f, self, m, test);
2094         break;
2095       case Primitive::Type::kPrimInt:
2096         GetSet32Static(f, self, m, test);
2097         break;
2098       case Primitive::Type::kPrimLong:
2099         GetSet64Static(f, self, m, test);
2100         break;
2101       case Primitive::Type::kPrimNot:
2102         // Don't try array.
2103         if (f->GetTypeDescriptor()[0] != '[') {
2104           GetSetObjStatic(f, self, m, test);
2105         }
2106         break;
2107       default:
2108         break;  // Skip.
2109     }
2110   }
2111 
2112   // Instance fields.
2113   fields = c->GetIFields();
2114   num_fields = c->NumInstanceFields();
2115   for (size_t i = 0; i < num_fields; ++i) {
2116     ArtField* f = &fields[i];
2117     Primitive::Type type = f->GetTypeAsPrimitiveType();
2118     if (test_type != type) {
2119       continue;
2120     }
2121     switch (type) {
2122       case Primitive::Type::kPrimBoolean:
2123         GetSetBooleanInstance(&obj, f, self, m, test);
2124         break;
2125       case Primitive::Type::kPrimByte:
2126         GetSetByteInstance(&obj, f, self, m, test);
2127         break;
2128       case Primitive::Type::kPrimChar:
2129         GetSetCharInstance(&obj, f, self, m, test);
2130         break;
2131       case Primitive::Type::kPrimShort:
2132         GetSetShortInstance(&obj, f, self, m, test);
2133         break;
2134       case Primitive::Type::kPrimInt:
2135         GetSet32Instance(&obj, f, self, m, test);
2136         break;
2137       case Primitive::Type::kPrimLong:
2138         GetSet64Instance(&obj, f, self, m, test);
2139         break;
2140       case Primitive::Type::kPrimNot:
2141         // Don't try array.
2142         if (f->GetTypeDescriptor()[0] != '[') {
2143           GetSetObjInstance(&obj, f, self, m, test);
2144         }
2145         break;
2146       default:
2147         break;  // Skip.
2148     }
2149   }
2150 
2151   // TODO: Deallocate things.
2152 }
2153 
2154 TEST_F(StubTest, Fields8) {
2155   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2156 
2157   Thread* self = Thread::Current();
2158 
2159   self->TransitionFromSuspendedToRunnable();
2160   LoadDex("AllFields");
2161   bool started = runtime_->Start();
2162   CHECK(started);
2163 
2164   TestFields(self, this, Primitive::Type::kPrimBoolean);
2165   TestFields(self, this, Primitive::Type::kPrimByte);
2166 }
2167 
2168 TEST_F(StubTest, Fields16) {
2169   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2170 
2171   Thread* self = Thread::Current();
2172 
2173   self->TransitionFromSuspendedToRunnable();
2174   LoadDex("AllFields");
2175   bool started = runtime_->Start();
2176   CHECK(started);
2177 
2178   TestFields(self, this, Primitive::Type::kPrimChar);
2179   TestFields(self, this, Primitive::Type::kPrimShort);
2180 }
2181 
2182 TEST_F(StubTest, Fields32) {
2183   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2184 
2185   Thread* self = Thread::Current();
2186 
2187   self->TransitionFromSuspendedToRunnable();
2188   LoadDex("AllFields");
2189   bool started = runtime_->Start();
2190   CHECK(started);
2191 
2192   TestFields(self, this, Primitive::Type::kPrimInt);
2193 }
2194 
2195 TEST_F(StubTest, FieldsObj) {
2196   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2197 
2198   Thread* self = Thread::Current();
2199 
2200   self->TransitionFromSuspendedToRunnable();
2201   LoadDex("AllFields");
2202   bool started = runtime_->Start();
2203   CHECK(started);
2204 
2205   TestFields(self, this, Primitive::Type::kPrimNot);
2206 }
2207 
2208 TEST_F(StubTest, Fields64) {
2209   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2210 
2211   Thread* self = Thread::Current();
2212 
2213   self->TransitionFromSuspendedToRunnable();
2214   LoadDex("AllFields");
2215   bool started = runtime_->Start();
2216   CHECK(started);
2217 
2218   TestFields(self, this, Primitive::Type::kPrimLong);
2219 }
2220 
2221 TEST_F(StubTest, IMT) {
2222 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
2223     (defined(__x86_64__) && !defined(__APPLE__))
2224   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2225 
2226   Thread* self = Thread::Current();
2227 
2228   ScopedObjectAccess soa(self);
2229   StackHandleScope<7> hs(self);
2230 
2231   JNIEnv* env = Thread::Current()->GetJniEnv();
2232 
2233   // ArrayList
2234 
2235   // Load ArrayList and used methods (JNI).
2236   jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
2237   ASSERT_NE(nullptr, arraylist_jclass);
2238   jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
2239   ASSERT_NE(nullptr, arraylist_constructor);
2240   jmethodID contains_jmethod = env->GetMethodID(
2241       arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
2242   ASSERT_NE(nullptr, contains_jmethod);
2243   jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
2244   ASSERT_NE(nullptr, add_jmethod);
2245 
2246   // Get representation.
2247   ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod);
2248 
2249   // Patch up ArrayList.contains.
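  // If contains has no compiled code yet, point its entrypoint at the quick-to-interpreter bridge
  // so the invocations below have something to call.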
2250   if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
2251     contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
2252         StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
2253   }
2254 
2255   // List
2256 
2257   // Load List and used methods (JNI).
2258   jclass list_jclass = env->FindClass("java/util/List");
2259   ASSERT_NE(nullptr, list_jclass);
2260   jmethodID inf_contains_jmethod = env->GetMethodID(
2261       list_jclass, "contains", "(Ljava/lang/Object;)Z");
2262   ASSERT_NE(nullptr, inf_contains_jmethod);
2263 
2264   // Get mirror representation.
2265   ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod);
2266 
2267   // Object
2268 
2269   jclass obj_jclass = env->FindClass("java/lang/Object");
2270   ASSERT_NE(nullptr, obj_jclass);
2271   jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
2272   ASSERT_NE(nullptr, obj_constructor);
2273 
2274   // Create instances.
2275 
2276   jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
2277   ASSERT_NE(nullptr, jarray_list);
2278   Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object*>(jarray_list)));
2279 
2280   jobject jobj = env->NewObject(obj_jclass, obj_constructor);
2281   ASSERT_NE(nullptr, jobj);
2282   Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(jobj)));
2283 
2284   // Invocation tests.
2285 
2286   // 1. imt_conflict
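  // The hidden argument passed below is the dex method index of the interface method
  // (List.contains), which the conflict trampoline uses to find the ArrayList implementation.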
2287 
2288   // Contains.
2289 
2290   size_t result =
2291       Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
2292                                    reinterpret_cast<size_t>(obj.Get()),
2293                                    StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
2294                                    self, contains_amethod,
2295                                    static_cast<size_t>(inf_contains->GetDexMethodIndex()));
2296 
2297   ASSERT_FALSE(self->IsExceptionPending());
2298   EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
2299 
2300   // Add object.
2301 
2302   env->CallBooleanMethod(jarray_list, add_jmethod, jobj);
2303 
2304   ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
2305 
2306   // Contains.
2307 
2308   result = Invoke3WithReferrerAndHidden(
2309       0U, reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(obj.Get()),
2310       StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline), self, contains_amethod,
2311       static_cast<size_t>(inf_contains->GetDexMethodIndex()));
2312 
2313   ASSERT_FALSE(self->IsExceptionPending());
2314   EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
2315 
2316   // 2. regular interface trampoline
2317 
2318   result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
2319                                reinterpret_cast<size_t>(array_list.Get()),
2320                                reinterpret_cast<size_t>(obj.Get()),
2321                                StubTest::GetEntrypoint(self,
2322                                    kQuickInvokeInterfaceTrampolineWithAccessCheck),
2323                                self, contains_amethod);
2324 
2325   ASSERT_FALSE(self->IsExceptionPending());
2326   EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
2327 
2328   result = Invoke3WithReferrer(
2329       static_cast<size_t>(inf_contains->GetDexMethodIndex()),
2330       reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
2331       StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
2332       contains_amethod);
2333 
2334   ASSERT_FALSE(self->IsExceptionPending());
2335   EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
2336 #else
2337   LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
2338   // Force-print to std::cout so it's also outside the logcat.
2339   std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
2340 #endif
2341 }
2342 
2343 TEST_F(StubTest, StringIndexOf) {
2344 #if defined(__arm__) || defined(__aarch64__)
2345   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
2346 
2347   Thread* self = Thread::Current();
2348   ScopedObjectAccess soa(self);
2349   // garbage is created during ClassLinker::Init
2350 
2351   // Create some strings and characters to search for.
2352   // Use arrays so we can index into them and build a matrix of expected results.
2354   // TODO: Shared backing arrays.
2355   const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
2356   static constexpr size_t kStringCount = arraysize(c_str);
2357   const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
2358   static constexpr size_t kCharCount = arraysize(c_char);
2359 
2360   StackHandleScope<kStringCount> hs(self);
2361   Handle<mirror::String> s[kStringCount];
2362 
2363   for (size_t i = 0; i < kStringCount; ++i) {
2364     s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
2365   }
2366 
2367   // Matrix of expectations. The first component is the string, the second the character and the
2368   // third the start index. Expected values are computed with String::FastIndexOf and the stub
2369   // results are checked against them exactly.
2370   static constexpr size_t kMaxLen = 9;
2371   DCHECK_LE(strlen(c_str[kStringCount-1]), kMaxLen) << "Please fix the indexof test.";
2372 
2373   // Last dimension: start, offset by 1.
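  // (Start values therefore run from -1 to kMaxLen + 1, covering a negative start as well as
  // starts past the end of the string.)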
2374   int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
2375   for (size_t x = 0; x < kStringCount; ++x) {
2376     for (size_t y = 0; y < kCharCount; ++y) {
2377       for (size_t z = 0; z <= kMaxLen + 2; ++z) {
2378         expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
2379       }
2380     }
2381   }
2382 
2383   // Play with it...
2384 
2385   for (size_t x = 0; x < kStringCount; ++x) {
2386     for (size_t y = 0; y < kCharCount; ++y) {
2387       for (size_t z = 0; z <= kMaxLen + 2; ++z) {
2388         int32_t start = static_cast<int32_t>(z) - 1;
2389 
2390         // Test indexof for string x, character y, starting at the given index.
2391         size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
2392                                 StubTest::GetEntrypoint(self, kQuickIndexOf), self);
2393 
2394         EXPECT_FALSE(self->IsExceptionPending());
2395 
2396         // The result is a 32b signed integer
2397         union {
2398           size_t r;
2399           int32_t i;
2400         } conv;
2401         conv.r = result;
2402 
2403         EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
2404             c_char[y] << " @ " << start;
2405       }
2406     }
2407   }
2408 
2409   // TODO: Deallocate things.
2410 
2411   // Tests done.
2412 #else
2413   LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
2414   // Force-print to std::cout so it's also outside the logcat.
2415   std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
2416 #endif
2417 }
2418 
2419 }  // namespace art
2420