/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cstdio>

#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "class_linker-inl.h"
#include "common_runtime_test.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "imt_conflict_table.h"
#include "jni_internal.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/string-inl.h"
#include "scoped_thread_state_change-inl.h"

namespace art {


class StubTest : public CommonRuntimeTest {
 protected:
  // We need callee-save methods set up in the Runtime for exceptions.
  void SetUp() OVERRIDE {
    // Do the normal setup.
    CommonRuntimeTest::SetUp();

    {
      // Create callee-save methods.
      ScopedObjectAccess soa(Thread::Current());
      runtime_->SetInstructionSet(kRuntimeISA);
      for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
        Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
        if (!runtime_->HasCalleeSaveMethod(type)) {
          runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
        }
      }
    }
  }

  void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
    // Use a smaller heap.
    for (std::pair<std::string, const void*>& pair : *options) {
      if (pair.first.find("-Xmx") == 0) {
        pair.first = "-Xmx4M";  // Smallest we can go.
      }
    }
    options->push_back(std::make_pair("-Xint", nullptr));
  }
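  // Note (explanatory, an inference rather than from the original authors): the 4M heap makes
  // the out-of-memory paths in the allocation tests below quick to reach, and -Xint keeps the
  // runtime interpreting, so the quick stubs under test are invoked directly rather than via
  // compiled code.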

  // Helper function needed since TEST_F makes a new class.
  Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
    return &self->tlsPtr_;
  }

 public:
  size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
    return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
                             ArtMethod* referrer) {
    return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
  }

  // TODO: Set up a frame according to referrer's specs.
  size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
                                      Thread* self, ArtMethod* referrer, size_t hidden) {
    // Push a transition back into managed code onto the linked list in thread.
    ManagedStack fragment;
    self->PushManagedStackFragment(&fragment);

    size_t result;
    size_t fpr_result = 0;
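    // Note (explanatory): fpr_result stays 0 on every architecture except arm64, where the
    // assembly below sets it to 1 if any of the callee-saved FP registers d8-d15 came back
    // clobbered from the stub; the EXPECT_EQ(0U, fp_result) at the end checks this.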
#if defined(__i386__)
    // TODO: Set the thread?
#define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
#define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
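    // Note (explanatory): each PUSH/POP pairs the stack operation with a .cfi_adjust_cfa_offset
    // directive so unwind information stays accurate inside the inline assembly. For example,
    // PUSH(%%ebx) expands to:
    //   "push %%ebx\n\t .cfi_adjust_cfa_offset 4\n\t"
    // i.e. a 4-byte push plus the matching CFI bookkeeping.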
    __asm__ __volatile__(
        "movd %[hidden], %%xmm7\n\t"  // This is a memory op, so do this early. If it is off of
                                      // esp, then we won't be able to access it after spilling.

        // Spill 6 registers.
        PUSH(%%ebx)
        PUSH(%%ecx)
        PUSH(%%edx)
        PUSH(%%esi)
        PUSH(%%edi)
        PUSH(%%ebp)

        // Store the inputs to the stack, but keep the referrer up top, less work.
        PUSH(%[referrer])           // Align stack.
        PUSH(%[referrer])           // Store referrer.

        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[code])
        // Now read them back into the required registers.
        POP(%%edi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%eax)
        // Call is prepared now.

        "call *%%edi\n\t"           // Call the stub.
        "addl $8, %%esp\n\t"        // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -8\n\t"

        // Restore 6 registers.
        POP(%%ebp)
        POP(%%edi)
        POP(%%esi)
        POP(%%edx)
        POP(%%ecx)
        POP(%%ebx)

        : "=a" (result)
          // Use the result from eax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "m"(hidden)
          // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx.
        : "memory", "xmm7");  // clobber.
#undef PUSH
#undef POP
#elif defined(__arm__)
    __asm__ __volatile__(
        "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B.
        ".cfi_adjust_cfa_offset 52\n\t"
        "push {r9}\n\t"
        ".cfi_adjust_cfa_offset 4\n\t"
        "mov r9, %[referrer]\n\t"
        "str r9, [sp, #-8]!\n\t"    // Push referrer, +4B padding so 16B aligned.
        ".cfi_adjust_cfa_offset 8\n\t"
        "ldr r9, [sp, #8]\n\t"

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #24\n\t"
        "str %[arg0], [sp]\n\t"
        "str %[arg1], [sp, #4]\n\t"
        "str %[arg2], [sp, #8]\n\t"
        "str %[code], [sp, #12]\n\t"
        "str %[self], [sp, #16]\n\t"
        "str %[hidden], [sp, #20]\n\t"
        "ldr r0, [sp]\n\t"
        "ldr r1, [sp, #4]\n\t"
        "ldr r2, [sp, #8]\n\t"
        "ldr r3, [sp, #12]\n\t"
        "ldr r9, [sp, #16]\n\t"
        "ldr r12, [sp, #20]\n\t"
        "add sp, sp, #24\n\t"

        "blx r3\n\t"                // Call the stub.
        "add sp, sp, #12\n\t"       // Pop referrer, padding, and the saved r9 slot.
        ".cfi_adjust_cfa_offset -12\n\t"
        "pop {r1-r12, lr}\n\t"      // Restore state.
        ".cfi_adjust_cfa_offset -52\n\t"
        "mov %[result], r0\n\t"     // Save the result.
        : [result] "=r" (result)
          // Use the result from r0.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "r0", "memory");  // clobber.
#elif defined(__aarch64__)
    __asm__ __volatile__(
        // Spill x0-x7 which we say we don't clobber. May contain args.
        "sub sp, sp, #80\n\t"
        ".cfi_adjust_cfa_offset 80\n\t"
        "stp x0, x1, [sp]\n\t"
        "stp x2, x3, [sp, #16]\n\t"
        "stp x4, x5, [sp, #32]\n\t"
        "stp x6, x7, [sp, #48]\n\t"
        // To be extra defensive, store x20. We do this because some of the stubs might make a
        // transition into the runtime via the blr instruction below and *not* save x20.
        "str x20, [sp, #64]\n\t"
        // 8 byte buffer.

        "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned.
        ".cfi_adjust_cfa_offset 16\n\t"
        "str %[referrer], [sp]\n\t"    // referrer

        // Push everything on the stack, so we don't rely on the order. What a mess. :-(
        "sub sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset 48\n\t"
        // All things are "r" constraints, so direct str/stp should work.
        "stp %[arg0], %[arg1], [sp]\n\t"
        "stp %[arg2], %[code], [sp, #16]\n\t"
        "stp %[self], %[hidden], [sp, #32]\n\t"

        // Now we definitely have x0-x3 free, use it to garble d8 - d15.
        "movk x0, #0xfad0\n\t"
        "movk x0, #0xebad, lsl #16\n\t"
        "movk x0, #0xfad0, lsl #32\n\t"
        "movk x0, #0xebad, lsl #48\n\t"
        "fmov d8, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d9, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d10, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d11, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d12, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d13, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d14, x0\n\t"
        "add x0, x0, 1\n\t"
        "fmov d15, x0\n\t"
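
        // Note (explanatory): d8-d15 are the callee-saved FP registers under AAPCS64 (only the
        // low 64 bits are required to be preserved). Seeding them with a recognizable pattern
        // here, and comparing after the blr below, checks that the stub restores them.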

        // Load call params into the right registers.
        "ldp x0, x1, [sp]\n\t"
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x19, x17, [sp, #32]\n\t"
        "add sp, sp, #48\n\t"
        ".cfi_adjust_cfa_offset -48\n\t"

        "blr x3\n\t"              // Call the stub.
        "mov x8, x0\n\t"          // Store result.
        "add sp, sp, #16\n\t"     // Drop the quick "frame".
        ".cfi_adjust_cfa_offset -16\n\t"

        // Test d8 - d15. We can use x1 and x2.
        "movk x1, #0xfad0\n\t"
        "movk x1, #0xebad, lsl #16\n\t"
        "movk x1, #0xfad0, lsl #32\n\t"
        "movk x1, #0xebad, lsl #48\n\t"
        "fmov x2, d8\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d9\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d10\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d11\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d12\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d13\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d14\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"
        "add x1, x1, 1\n\t"

        "fmov x2, d15\n\t"
        "cmp x1, x2\n\t"
        "b.ne 1f\n\t"

        "mov x9, #0\n\t"              // Use x9 as flag, in clobber list.

        // Finish up.
        "2:\n\t"
        "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result
        "ldp x2, x3, [sp, #16]\n\t"
        "ldp x4, x5, [sp, #32]\n\t"
        "ldp x6, x7, [sp, #48]\n\t"
        "ldr x20, [sp, #64]\n\t"
        "add sp, sp, #80\n\t"         // Free stack space, now sp as on entry.
        ".cfi_adjust_cfa_offset -80\n\t"

        "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result.
        "mov %[result], x8\n\t"       // Store the call result.

        "b 3f\n\t"                    // Go to end.

        // Failed fpr verification.
        "1:\n\t"
        "mov x9, #1\n\t"
        "b 2b\n\t"                    // Go to finish-up.

        // End.
        "3:\n\t"
        : [result] "=r" (result)
          // The call result came back in x0 and was parked in x8 across the restores above.
        : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
          // Leave one register unclobbered, which is needed for compiling with
          // -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
          // which means we should unclobber one of the callee-saved registers that are unused.
          // Here we use x20.
        : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
          "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
          "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
          "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
          "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
          "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
          "memory");
#elif defined(__mips__) && !defined(__LP64__)
    __asm__ __volatile__ (
        // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
        "addiu $sp, $sp, -64\n\t"
        "sw $a0, 0($sp)\n\t"
        "sw $a1, 4($sp)\n\t"
        "sw $a2, 8($sp)\n\t"
        "sw $a3, 12($sp)\n\t"
        "sw $t0, 16($sp)\n\t"
        "sw $t1, 20($sp)\n\t"
        "sw $t2, 24($sp)\n\t"
        "sw $t3, 28($sp)\n\t"
        "sw $t4, 32($sp)\n\t"
        "sw $t5, 36($sp)\n\t"
        "sw $t6, 40($sp)\n\t"
        "sw $t7, 44($sp)\n\t"
        // Spill gp register since it is caller save.
        "sw $gp, 52($sp)\n\t"

        "addiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
        "sw %[referrer], 0($sp)\n\t"

        // Push everything on the stack, so we don't rely on the order.
        "addiu $sp, $sp, -24\n\t"
        "sw %[arg0], 0($sp)\n\t"
        "sw %[arg1], 4($sp)\n\t"
        "sw %[arg2], 8($sp)\n\t"
        "sw %[code], 12($sp)\n\t"
        "sw %[self], 16($sp)\n\t"
        "sw %[hidden], 20($sp)\n\t"

        // Load call params into the right registers.
        "lw $a0, 0($sp)\n\t"
        "lw $a1, 4($sp)\n\t"
        "lw $a2, 8($sp)\n\t"
        "lw $t9, 12($sp)\n\t"
        "lw $s1, 16($sp)\n\t"
        "lw $t7, 20($sp)\n\t"
        "addiu $sp, $sp, 24\n\t"

        "jalr $t9\n\t"             // Call the stub.
        "nop\n\t"
        "addiu $sp, $sp, 16\n\t"   // Drop the quick "frame".

        // Restore stuff not named clobbered.
        "lw $a0, 0($sp)\n\t"
        "lw $a1, 4($sp)\n\t"
        "lw $a2, 8($sp)\n\t"
        "lw $a3, 12($sp)\n\t"
        "lw $t0, 16($sp)\n\t"
        "lw $t1, 20($sp)\n\t"
        "lw $t2, 24($sp)\n\t"
        "lw $t3, 28($sp)\n\t"
        "lw $t4, 32($sp)\n\t"
        "lw $t5, 36($sp)\n\t"
        "lw $t6, 40($sp)\n\t"
        "lw $t7, 44($sp)\n\t"
        // Restore gp.
        "lw $gp, 52($sp)\n\t"
        "addiu $sp, $sp, 64\n\t"   // Free stack space, now sp as on entry.

        "move %[result], $v0\n\t"  // Store the call result.
        : [result] "=r" (result)
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
          "fp", "ra",
          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
          "memory");  // clobber.
#elif defined(__mips__) && defined(__LP64__)
    __asm__ __volatile__ (
        // Spill a0-a7 which we say we don't clobber. May contain args.
        "daddiu $sp, $sp, -64\n\t"
        "sd $a0, 0($sp)\n\t"
        "sd $a1, 8($sp)\n\t"
        "sd $a2, 16($sp)\n\t"
        "sd $a3, 24($sp)\n\t"
        "sd $a4, 32($sp)\n\t"
        "sd $a5, 40($sp)\n\t"
        "sd $a6, 48($sp)\n\t"
        "sd $a7, 56($sp)\n\t"

        "daddiu $sp, $sp, -16\n\t"  // Reserve stack space, 16B aligned.
        "sd %[referrer], 0($sp)\n\t"

        // Push everything on the stack, so we don't rely on the order.
        "daddiu $sp, $sp, -48\n\t"
        "sd %[arg0], 0($sp)\n\t"
        "sd %[arg1], 8($sp)\n\t"
        "sd %[arg2], 16($sp)\n\t"
        "sd %[code], 24($sp)\n\t"
        "sd %[self], 32($sp)\n\t"
        "sd %[hidden], 40($sp)\n\t"

        // Load call params into the right registers.
        "ld $a0, 0($sp)\n\t"
        "ld $a1, 8($sp)\n\t"
        "ld $a2, 16($sp)\n\t"
        "ld $t9, 24($sp)\n\t"
        "ld $s1, 32($sp)\n\t"
        "ld $t0, 40($sp)\n\t"
        "daddiu $sp, $sp, 48\n\t"

        "jalr $t9\n\t"              // Call the stub.
        "nop\n\t"
        "daddiu $sp, $sp, 16\n\t"   // Drop the quick "frame".

        // Restore stuff not named clobbered.
        "ld $a0, 0($sp)\n\t"
        "ld $a1, 8($sp)\n\t"
        "ld $a2, 16($sp)\n\t"
        "ld $a3, 24($sp)\n\t"
        "ld $a4, 32($sp)\n\t"
        "ld $a5, 40($sp)\n\t"
        "ld $a6, 48($sp)\n\t"
        "ld $a7, 56($sp)\n\t"
        "daddiu $sp, $sp, 64\n\t"

        "move %[result], $v0\n\t"   // Store the call result.
        : [result] "=r" (result)
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // Instead of the aliases t0-t3, the register names $12-$15 are used in the clobber list
        // because t0-t3 are ambiguous.
        : "at", "v0", "v1", "$12", "$13", "$14", "$15", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
          "s7", "t8", "t9", "k0", "k1", "fp", "ra",
          "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
          "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
          "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
          "memory");  // clobber.
#elif defined(__x86_64__) && !defined(__APPLE__)
#define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
#define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
    // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
    //       restoring almost all registers.
    // TODO: Set the thread?
    __asm__ __volatile__(
        // Spill almost everything (except rax, rsp). 14 registers.
        PUSH(%%rbx)
        PUSH(%%rcx)
        PUSH(%%rdx)
        PUSH(%%rsi)
        PUSH(%%rdi)
        PUSH(%%rbp)
        PUSH(%%r8)
        PUSH(%%r9)
        PUSH(%%r10)
        PUSH(%%r11)
        PUSH(%%r12)
        PUSH(%%r13)
        PUSH(%%r14)
        PUSH(%%r15)

        PUSH(%[referrer])              // Push referrer & 16B alignment padding.
        PUSH(%[referrer])

        // Now juggle the input registers.
        PUSH(%[arg0])
        PUSH(%[arg1])
        PUSH(%[arg2])
        PUSH(%[hidden])
        PUSH(%[code])
        POP(%%r8)
        POP(%%rax)
        POP(%%rdx)
        POP(%%rsi)
        POP(%%rdi)

        "call *%%r8\n\t"                  // Call the stub.
        "addq $16, %%rsp\n\t"             // Pop referrer and padding.
        ".cfi_adjust_cfa_offset -16\n\t"

        POP(%%r15)
        POP(%%r14)
        POP(%%r13)
        POP(%%r12)
        POP(%%r11)
        POP(%%r10)
        POP(%%r9)
        POP(%%r8)
        POP(%%rbp)
        POP(%%rdi)
        POP(%%rsi)
        POP(%%rdx)
        POP(%%rcx)
        POP(%%rbx)

        : "=a" (result)
        // Use the result from rax.
        : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
          [referrer] "r"(referrer), [hidden] "r"(hidden)
        // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, hidden into rax, and code
        // into r8. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
        : "memory");  // We spill and restore (almost) all registers, so only mention memory here.
#undef PUSH
#undef POP
#else
    UNUSED(arg0, arg1, arg2, code, referrer, hidden);
    LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
    result = 0;
#endif
    // Pop transition.
    self->PopManagedStackFragment(fragment);

    fp_result = fpr_result;
    EXPECT_EQ(0U, fp_result);

    return result;
  }

  static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
    int32_t offset = GetThreadOffset<kRuntimePointerSize>(entrypoint).Int32Value();
    return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
  }
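  // Note (explanatory): the quick entrypoints live in a per-thread table hanging off tlsPtr_;
  // GetThreadOffset() maps an enumerator such as kQuickMemcpy to the byte offset of its slot,
  // and GetEntrypoint() loads the stub's code pointer from that slot. A minimal usage sketch,
  // mirroring the tests below (dst_addr/src_addr/num_bytes are placeholder names):
  //
  //   Thread* self = Thread::Current();
  //   uintptr_t stub = StubTest::GetEntrypoint(self, kQuickMemcpy);
  //   Invoke3(dst_addr, src_addr, num_bytes, stub, self);  // All arguments passed as size_t.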

 protected:
  size_t fp_result;
};


TEST_F(StubTest, Memcpy) {
#if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
  Thread* self = Thread::Current();

  uint32_t orig[20];
  uint32_t trg[20];
  for (size_t i = 0; i < 20; ++i) {
    orig[i] = i;
    trg[i] = 0;
  }

  Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
          10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);
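
  // Note (explanatory): the stub copied 10 uint32_t words from orig[4..13] to trg[4..13]. The
  // checks below therefore expect equality inside that window and inequality outside it (trg
  // was zero-initialized while orig[i] == i), except at index 0 where both arrays hold 0.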

  EXPECT_EQ(orig[0], trg[0]);

  for (size_t i = 1; i < 4; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  for (size_t i = 4; i < 14; ++i) {
    EXPECT_EQ(orig[i], trg[i]);
  }

  for (size_t i = 14; i < 20; ++i) {
    EXPECT_NE(orig[i], trg[i]);
  }

  // TODO: Test overlapping?

#else
  LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

TEST_F(StubTest, LockObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);

  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
  EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero.

  for (size_t i = 1; i < kThinLockLoops; ++i) {
    Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

    // Check we're at lock count i.

    LockWord l_inc = obj->GetLockWord(false);
    LockWord::LockState l_inc_state = l_inc.GetState();
    EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
    EXPECT_EQ(l_inc.ThinLockCount(), i);
  }
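
  // Note (explanatory): the thin lock word stores a recursion count that starts at zero for the
  // first acquisition, so after 1 + i lock calls ThinLockCount() reports i. The stress test
  // below relies on the same encoding when it compares counts[index] - 1 to the count.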

  // Force a fat lock by running identity hashcode to fill up lock word.
  Handle<mirror::String> obj2(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  obj2->IdentityHashCode();
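  // Note (explanatory): IdentityHashCode() stores the hash in the lock word (state kHashCode).
  // A lock word cannot hold a hash and a thin lock at the same time, so the next lock call must
  // inflate the lock into a full monitor, which the kFatLocked expectation below verifies.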

  Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj2->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
  EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));

  // Test done.
#else
  LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


class RandGen {
 public:
  explicit RandGen(uint32_t seed) : val_(seed) {}

  uint32_t next() {
    val_ = val_ * 48271 % 2147483647 + 13;
    return val_;
  }

  uint32_t val_;
};
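
// Note (explanatory): this is a Lehmer-style linear congruential generator using the MINSTD
// constants (multiplier 48271, modulus 2^31 - 1) plus an extra +13 offset. Statistical quality
// does not matter here; the tests only need a cheap, deterministic stream of choices.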


// NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  static constexpr size_t kThinLockLoops = 100;

  Thread* self = Thread::Current();

  const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
  const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init
  static constexpr size_t kNumberOfLocks = 10;  // Number of objects = number of locks.
  StackHandleScope<kNumberOfLocks + 1> hs(self);
  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
  LockWord lock = obj->GetLockWord(false);
  LockWord::LockState old_state = lock.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
  // This should be an illegal monitor state.
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  LockWord lock_after = obj->GetLockWord(false);
  LockWord::LockState new_state = lock_after.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);

  LockWord lock_after2 = obj->GetLockWord(false);
  LockWord::LockState new_state2 = lock_after2.GetState();
  EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);

  test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);

  LockWord lock_after3 = obj->GetLockWord(false);
  LockWord::LockState new_state3 = lock_after3.GetState();
  EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);

  // Stress test:
  // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
  // each step.

  RandGen r(0x1234);

  constexpr size_t kIterations = 10000;  // Number of iterations.
  constexpr size_t kMoveToFat = 1000;    // Chance of 1:kMoveToFat to make a lock fat.
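
  // Note (explanatory): each iteration either inflates a randomly chosen lock to a fat monitor
  // (with probability about 1/kMoveToFat) or performs one lock/unlock step, and then checks the
  // lock word state against the shadow bookkeeping kept in counts[] and fat[].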

  size_t counts[kNumberOfLocks];
  bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
  Handle<mirror::String> objects[kNumberOfLocks];

  // Initialize = allocate.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    counts[i] = 0;
    fat[i] = false;
    objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
  }

  for (size_t i = 0; i < kIterations; ++i) {
    // Select which lock to update.
    size_t index = r.next() % kNumberOfLocks;

    // Make lock fat?
    if (!fat[index] && (r.next() % kMoveToFat == 0)) {
      fat[index] = true;
      objects[index]->IdentityHashCode();

      LockWord lock_iter = objects[index]->GetLockWord(false);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (counts[index] == 0) {
        EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
      } else {
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
      }
    } else {
      bool take_lock;  // Whether to lock or unlock in this step.
      if (counts[index] == 0) {
        take_lock = true;
      } else if (counts[index] == kThinLockLoops) {
        take_lock = false;
      } else {
        // Randomly.
        take_lock = r.next() % 2 == 0;
      }

      if (take_lock) {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
                      self);
        counts[index]++;
      } else {
        test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
                      art_quick_unlock_object, self);
        counts[index]--;
      }

      EXPECT_FALSE(self->IsExceptionPending());

      // Check the new state.
      LockWord lock_iter = objects[index]->GetLockWord(true);
      LockWord::LockState iter_state = lock_iter.GetState();
      if (fat[index]) {
        // Abuse MonitorInfo.
        EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
        MonitorInfo info(objects[index].Get());
        EXPECT_EQ(counts[index], info.entry_count_) << index;
      } else {
        if (counts[index] > 0) {
          EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
          EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
        } else {
          EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
        }
      }
    }
  }

  // Unlock the remaining count times and then check it's unlocked. Then deallocate.
  // Go in reverse order to correctly handle Handles.
  for (size_t i = 0; i < kNumberOfLocks; ++i) {
    size_t index = kNumberOfLocks - 1 - i;
    size_t count = counts[index];
    while (count > 0) {
      test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
                    self);
      count--;
    }

    LockWord lock_after4 = objects[index]->GetLockWord(false);
    LockWord::LockState new_state4 = lock_after4.GetState();
    EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
                || LockWord::LockState::kFatLocked == new_state4);
  }

  // Test done.
#else
  UNUSED(test);
  LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, UnlockObject) {
  // This will lead to monitor error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  TestUnlockObject(this);
}

#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
extern "C" void art_quick_check_instance_of(void);
#endif

TEST_F(StubTest, CheckCast) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  const uintptr_t art_quick_check_instance_of =
      StubTest::GetEntrypoint(self, kQuickCheckInstanceOf);

  // Find some classes.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  VariableSizedHandleScope hs(soa.Self());
  Handle<mirror::Class> klass_obj(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
  Handle<mirror::Class> klass_str(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/String;")));
  Handle<mirror::Class> klass_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/List;")));
  Handle<mirror::Class> klass_cloneable(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Cloneable;")));
  Handle<mirror::Class> klass_array_list(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/util/ArrayList;")));
  Handle<mirror::Object> obj(hs.NewHandle(klass_obj->AllocObject(soa.Self())));
  Handle<mirror::String> string(hs.NewHandle(
      mirror::String::AllocFromModifiedUtf8(soa.Self(), "ABCD")));
  Handle<mirror::Object> array_list(hs.NewHandle(klass_array_list->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test object instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test string instance of java.lang.Object.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_obj.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test object instance of java.lang.String.
  Invoke3(reinterpret_cast<size_t>(obj.Get()),
          reinterpret_cast<size_t>(klass_str.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Expected true: Test ArrayList instance of java.util.List.
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected true: Test ArrayList instance of java.lang.Cloneable.
  Invoke3(reinterpret_cast<size_t>(array_list.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_FALSE(self->IsExceptionPending());

  // Expected false: Test string instance of java.util.ArrayList.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_array_list.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

  // Expected false: Test string instance of java.lang.Cloneable.
  Invoke3(reinterpret_cast<size_t>(string.Get()),
          reinterpret_cast<size_t>(klass_cloneable.Get()),
          0U,
          art_quick_check_instance_of,
          self);
  EXPECT_TRUE(self->IsExceptionPending());
  self->ClearException();

#else
  LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, AllocObject) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectWithChecks),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);

    EXPECT_FALSE(self->IsExceptionPending());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
  }

  // Failure tests.

  // Out-of-memory.
  {
    Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);

    // An array helps to fill memory faster.
    Handle<mirror::Class> ca(
        hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

    // Use an arbitrarily large amount for now.
    static const size_t kMaxHandles = 1000000;
    std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));

    std::vector<Handle<mirror::Object>> handles;
    // Start allocating with 128K.
    size_t length = 128 * KB / 4;
    while (length > 10) {
      Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
          mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
      if (self->IsExceptionPending() || h == nullptr) {
        self->ClearException();

        // Try a smaller length.
        length = length / 8;
        // Use at most half the reported free space.
        size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
        if (length * 8 > mem) {
          length = mem / 8;
        }
      } else {
        handles.push_back(h);
      }
    }
    LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";

    // Allocate simple objects until it fails.
    while (!self->IsExceptionPending()) {
      Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
      if (!self->IsExceptionPending() && h != nullptr) {
        handles.push_back(h);
      }
    }
    self->ClearException();

    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
                            StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
                            self);
    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

TEST_F(StubTest, AllocObjectArray) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs

  // This will lead to OOM error messages in the log.
  ScopedLogSeverity sls(LogSeverity::FATAL);

  Thread* self = Thread::Current();
  // Create an object.
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));

  // Play with it...

  EXPECT_FALSE(self->IsExceptionPending());

  {
    // We can use null in the second argument as we do not need a method here (not used in
    // resolved/initialized cases).
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);
    EXPECT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());
    EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
    mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
    EXPECT_TRUE(obj->IsArrayInstance());
    EXPECT_TRUE(obj->IsObjectArray());
    EXPECT_EQ(c.Get(), obj->GetClass());
    VerifyObject(obj);
    mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
    EXPECT_EQ(array->GetLength(), 10);
  }

  // Failure tests.

  // Out-of-memory.
  {
    size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
                            GB,  // that should fail...
                            reinterpret_cast<size_t>(nullptr),
                            StubTest::GetEntrypoint(self, kQuickAllocArrayResolved32),
                            self);

    EXPECT_TRUE(self->IsExceptionPending());
    self->ClearException();
    EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
  }

  // Tests done.
#else
  LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


TEST_F(StubTest, StringCompareTo) {
  TEST_DISABLED_FOR_STRING_COMPRESSION();
  // There is no StringCompareTo runtime entrypoint for __arm__ or __aarch64__.
#if defined(__i386__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  // TODO: Check the "Unresolved" allocation stubs

  Thread* self = Thread::Current();

  const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);

  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // Setup: The first half is standard. The second half uses a non-zero offset.
  // TODO: Shared backing arrays.
  const char* c[] = { "", "", "a", "aa", "ab",
      "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
      "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
      "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
                                                  // defeat object-equal optimizations.
  static constexpr size_t kStringCount = arraysize(c);
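
  // Note (explanatory, inferred from the comments above): the compareto stub compares short
  // strings inline and falls back to a __memcmp16 helper beyond a length cutoff, so the test
  // strings are chosen to land on both sides of that cutoff; the last two differ only so that
  // reference-equality shortcuts cannot short-circuit the comparison.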

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
  }

  // TODO: wide characters

  // Matrix of expectations. First component is first parameter. Note we only check against the
  // sign, not the value. As we are testing random offsets, we need to compute this and need to
  // rely on String::CompareTo being correct.
  int32_t expected[kStringCount][kStringCount];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      expected[x][y] = s[x]->CompareTo(s[y].Get());
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kStringCount; ++y) {
      // Test string_compareto x y.
      size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
                              reinterpret_cast<size_t>(s[y].Get()), 0U,
                              art_quick_string_compareto, self);

      EXPECT_FALSE(self->IsExceptionPending());

      // The result is a 32b signed integer.
      union {
        size_t r;
        int32_t i;
      } conv;
      conv.r = result;
      int32_t e = expected[x][y];
      EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e < 0 ? conv.i < 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
      EXPECT_TRUE(e > 0 ? conv.i > 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
          conv.r;
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


static void GetSetBooleanStatic(ArtField* f, Thread* self,
                                ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  constexpr size_t num_values = 5;
  uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };

  for (size_t i = 0; i < num_values; ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
                                           self,
                                           referrer);
    // Boolean currently stores bools as uint8_t, so be zealous about asserting correct
    // writes/gets.
    EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet8Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetByteStatic),
                                           self,
                                           referrer);
    EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
                                  ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint8_t values[] = { 0, true, 2, 128, 0xFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    uint8_t res = f->GetBoolean(obj->Get());
    EXPECT_EQ(values[i], res) << "Iteration " << i;

    f->SetBoolean<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int8_t values[] = { -128, -64, 0, 64, 127 };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
                              self,
                              referrer);

    int8_t res = f->GetByte(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetByte<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetByteInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int8_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                             StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetCharStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
static void GetSetShortStatic(ArtField* f, Thread* self,
                              ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet16Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGetShortStatic),
                                           self,
                                           referrer);

    EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
1366 
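// Instance variant: writes each char value through the Set16Instance stub, cross-checks with
// ArtField::GetChar, bumps the value directly via ArtField::SetChar, and verifies that the
// GetCharInstance stub sees the update.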
static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
                               Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    uint16_t res = f->GetChar(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetChar<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetCharInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<uint16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}
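
// As above, for signed 16-bit instance fields (Set16Instance / GetShortInstance).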
static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
                                Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
                              self,
                              referrer);

    int16_t res = f->GetShort(obj->Get());
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
    f->SetShort<false>(obj->Get(), ++res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGetShortInstance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int16_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

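// 32-bit static field round-trip through the Set32Static / Get32Static stubs. On MIPS64 only
// the low 32 bits of the returned word are compared, as the stub may sign-extend the value.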
static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              static_cast<size_t>(values[i]),
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet32Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet32Static),
                                           self,
                                           referrer);

#if defined(__mips__) && defined(__LP64__)
    EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
#else
    EXPECT_EQ(res, values[i]) << "Iteration " << i;
#endif
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


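// 32-bit instance field round-trip: stub write, direct ArtField::GetInt cross-check, direct
// ArtField::SetInt write, stub read-back.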
static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet32Instance),
                              self,
                              referrer);

    int32_t res = f->GetInt(obj->Get());
    EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetInt<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet32Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int32_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


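// Helper: stores an object reference into a static field through the SetObjStatic stub and
// reads it back through GetObjStatic, expecting the same reference.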
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))

static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
                                 ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                            reinterpret_cast<size_t>(val),
                            0U,
                            StubTest::GetEntrypoint(self, kQuickSetObjStatic),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
                                         0U, 0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjStatic),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
}
#endif

static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
                            StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);

  set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


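// Helper: same round-trip for an instance field (SetObjInstance / GetObjInstance),
// additionally cross-checked against ArtField::GetObj.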
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
static void set_and_check_instance(ArtField* f, mirror::Object* trg,
                                   mirror::Object* val, Thread* self, ArtMethod* referrer,
                                   StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                            reinterpret_cast<size_t>(trg),
                            reinterpret_cast<size_t>(val),
                            StubTest::GetEntrypoint(self, kQuickSetObjInstance),
                            self,
                            referrer);

  size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                         reinterpret_cast<size_t>(trg),
                                         0U,
                                         StubTest::GetEntrypoint(self, kQuickGetObjInstance),
                                         self,
                                         referrer);

  EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;

  EXPECT_OBJ_PTR_EQ(val, f->GetObj(trg));
}
#endif

static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
                              Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);

  // Allocate a string object for simplicity.
  mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
  set_and_check_instance(f, obj->Get(), str, self, referrer, test);

  set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


// TODO: Complete these tests for 32b architectures

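// 64-bit static field round-trip; only run on 64-bit targets, where the value fits in a
// single argument register of the stub.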
static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
                           StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
    defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    // 64 bit FieldSet stores the set value in the second register.
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              values[i],
                              0U,
                              StubTest::GetEntrypoint(self, kQuickSet64Static),
                              self,
                              referrer);

    size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                           0U, 0U,
                                           StubTest::GetEntrypoint(self, kQuickGet64Static),
                                           self,
                                           referrer);

    EXPECT_EQ(res, values[i]) << "Iteration " << i;
  }
#else
  UNUSED(f, self, referrer, test);
  LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}


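// 64-bit instance field round-trip, with the same direct ArtField cross-checks as the 32-bit
// version.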
static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
                             Thread* self, ArtMethod* referrer, StubTest* test)
    REQUIRES_SHARED(Locks::mutator_lock_) {
#if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
    defined(__aarch64__)
  uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };

  for (size_t i = 0; i < arraysize(values); ++i) {
    test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                              reinterpret_cast<size_t>(obj->Get()),
                              static_cast<size_t>(values[i]),
                              StubTest::GetEntrypoint(self, kQuickSet64Instance),
                              self,
                              referrer);

    int64_t res = f->GetLong(obj->Get());
    EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;

    res++;
    f->SetLong<false>(obj->Get(), res);

    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
                                            reinterpret_cast<size_t>(obj->Get()),
                                            0U,
                                            StubTest::GetEntrypoint(self, kQuickGet64Instance),
                                            self,
                                            referrer);
    EXPECT_EQ(res, static_cast<int64_t>(res2));
  }
#else
  UNUSED(obj, f, self, referrer, test);
  LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA
            << std::endl;
#endif
}

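// Driver: allocates an AllFields instance (from the test dex file) and funnels every static
// and instance field of the requested primitive type into the matching GetSet helper above.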
static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
  // garbage is created during ClassLinker::Init

  JNIEnv* env = Thread::Current()->GetJniEnv();
  jclass jc = env->FindClass("AllFields");
  CHECK(jc != nullptr);
  jobject o = env->AllocObject(jc);
  CHECK(o != nullptr);

  ScopedObjectAccess soa(self);
  StackHandleScope<3> hs(self);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(o)));
  Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
  // Need a method as a referrer
  ArtMethod* m = c->GetDirectMethod(0, kRuntimePointerSize);

  // Play with it...

  // Static fields.
  for (ArtField& f : c->GetSFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortStatic(&f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Static(&f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjStatic(&f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // Instance fields.
  for (ArtField& f : c->GetIFields()) {
    Primitive::Type type = f.GetTypeAsPrimitiveType();
    if (test_type != type) {
      continue;
    }
    switch (type) {
      case Primitive::Type::kPrimBoolean:
        GetSetBooleanInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimByte:
        GetSetByteInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimChar:
        GetSetCharInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimShort:
        GetSetShortInstance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimInt:
        GetSet32Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimLong:
        GetSet64Instance(&obj, &f, self, m, test);
        break;
      case Primitive::Type::kPrimNot:
        // Don't try array.
        if (f.GetTypeDescriptor()[0] != '[') {
          GetSetObjInstance(&obj, &f, self, m, test);
        }
        break;
      default:
        break;  // Skip.
    }
  }

  // TODO: Deallocate things.
}

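// The Fields* tests below transition the thread to runnable, start the runtime, and exercise
// one field width each via TestFields.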
TEST_F(StubTest, Fields8) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimBoolean);
  TestFields(self, this, Primitive::Type::kPrimByte);
}

TEST_F(StubTest, Fields16) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimChar);
  TestFields(self, this, Primitive::Type::kPrimShort);
}

TEST_F(StubTest, Fields32) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimInt);
}

TEST_F(StubTest, FieldsObj) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimNot);
}

TEST_F(StubTest, Fields64) {
  Thread* self = Thread::Current();

  self->TransitionFromSuspendedToRunnable();
  LoadDex("AllFields");
  bool started = runtime_->Start();
  CHECK(started);

  TestFields(self, this, Primitive::Type::kPrimLong);
}

// Disabled, b/27991555.
// FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
// The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header
// and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
// the bridge and uses that to check for inlined frames, crashing in the process.
TEST_F(StubTest, DISABLED_IMT) {
#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
    (defined(__x86_64__) && !defined(__APPLE__))
  Thread* self = Thread::Current();

  ScopedObjectAccess soa(self);
  StackHandleScope<7> hs(self);

  JNIEnv* env = Thread::Current()->GetJniEnv();

  // ArrayList

  // Load ArrayList and used methods (JNI).
  jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
  ASSERT_NE(nullptr, arraylist_jclass);
  jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, arraylist_constructor);
  jmethodID contains_jmethod = env->GetMethodID(
      arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, contains_jmethod);
  jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, add_jmethod);

  // Get representation.
  ArtMethod* contains_amethod = jni::DecodeArtMethod(contains_jmethod);

  // Patch up ArrayList.contains.
  if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
    contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
        StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
  }

  // List

  // Load List and used methods (JNI).
  jclass list_jclass = env->FindClass("java/util/List");
  ASSERT_NE(nullptr, list_jclass);
  jmethodID inf_contains_jmethod = env->GetMethodID(
      list_jclass, "contains", "(Ljava/lang/Object;)Z");
  ASSERT_NE(nullptr, inf_contains_jmethod);

  // Get mirror representation.
  ArtMethod* inf_contains = jni::DecodeArtMethod(inf_contains_jmethod);

  // Object

  jclass obj_jclass = env->FindClass("java/lang/Object");
  ASSERT_NE(nullptr, obj_jclass);
  jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
  ASSERT_NE(nullptr, obj_constructor);

  // Create instances.

  jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
  ASSERT_NE(nullptr, jarray_list);
  Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object>(jarray_list)));

  jobject jobj = env->NewObject(obj_jclass, obj_constructor);
  ASSERT_NE(nullptr, jobj);
  Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object>(jobj)));

  // Invocation tests.

  // 1. imt_conflict

  // Contains.

  // We construct the ImtConflictTable ourselves, as we cannot go into the runtime stub
  // that will create it: the runtime stub expects to be called by compiled code.
  LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
  ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
  ImtConflictTable* empty_conflict_table =
      Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
  void* data = linear_alloc->Alloc(
      self,
      ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, kRuntimePointerSize));
  ImtConflictTable* new_table = new (data) ImtConflictTable(
      empty_conflict_table, inf_contains, contains_amethod, kRuntimePointerSize);
  conflict_method->SetImtConflictTable(new_table, kRuntimePointerSize);

  size_t result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);

  // Add object.

  env->CallBooleanMethod(jarray_list, add_jmethod, jobj);

  ASSERT_FALSE(self->IsExceptionPending()) << mirror::Object::PrettyTypeOf(self->GetException());

  // Contains.

  result =
      Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
                                   reinterpret_cast<size_t>(array_list.Get()),
                                   reinterpret_cast<size_t>(obj.Get()),
                                   StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
                                   self,
                                   contains_amethod,
                                   static_cast<size_t>(inf_contains->GetDexMethodIndex()));

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  // 2. regular interface trampoline

  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
                               reinterpret_cast<size_t>(array_list.Get()),
                               reinterpret_cast<size_t>(obj.Get()),
                               StubTest::GetEntrypoint(self,
                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
                               self, contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);

  result = Invoke3WithReferrer(
      static_cast<size_t>(inf_contains->GetDexMethodIndex()),
      reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
      StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
      contains_amethod);

  ASSERT_FALSE(self->IsExceptionPending());
  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
#else
  LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

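// Compares the IndexOf stub against String::FastIndexOf for every (string, char, start)
// combination, including out-of-range start values.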
TEST_F(StubTest, StringIndexOf) {
#if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
  Thread* self = Thread::Current();
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  // Create some strings.
  // Use an array so we can index into it and use a matrix for expected results.
  // TODO: Shared backing arrays.
  const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
  static constexpr size_t kStringCount = arraysize(c_str);
  const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
  static constexpr size_t kCharCount = arraysize(c_char);

  StackHandleScope<kStringCount> hs(self);
  Handle<mirror::String> s[kStringCount];

  for (size_t i = 0; i < kStringCount; ++i) {
    s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
  }

  // Matrix of expectations. First component is the string, second the character, third the
  // start index (offset by one, so index 0 encodes a start of -1). As we test starts from -1
  // up to length + 1, we compute the expected values with String::FastIndexOf and rely on it
  // being correct.
  static constexpr size_t kMaxLen = 9;
  DCHECK_LE(strlen(c_str[kStringCount - 1]), kMaxLen) << "Please fix the indexof test.";

  // Last dimension: start, offset by 1.
  int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
      }
    }
  }

  // Play with it...

  for (size_t x = 0; x < kStringCount; ++x) {
    for (size_t y = 0; y < kCharCount; ++y) {
      for (size_t z = 0; z <= kMaxLen + 2; ++z) {
        int32_t start = static_cast<int32_t>(z) - 1;

        // Test indexof for string x, character y, from the given start.
        size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
                                StubTest::GetEntrypoint(self, kQuickIndexOf), self);

        EXPECT_FALSE(self->IsExceptionPending());

        // The result is a 32b signed integer
        union {
          size_t r;
          int32_t i;
        } conv;
        conv.r = result;

        EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
            c_char[y] << " @ " << start;
      }
    }
  }

  // TODO: Deallocate things.

  // Tests done.
#else
  LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
#endif
}

// TODO: Exercise the ReadBarrierMarkRegX entry points.

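// Invokes the read-barrier slow-path stub on an object's class field and checks that it
// returns the object's class.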
TEST_F(StubTest, ReadBarrier) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<2> hs(soa.Self());
  Handle<mirror::Class> c(
      hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));

  // Build an object instance
  Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));

  EXPECT_FALSE(self->IsExceptionPending());

  size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
                          mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_slow" << std::endl;
#endif
}

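// Same idea for GC roots: runs the read-barrier-for-root slow-path stub on the
// java_lang_String_ root and expects the String class back.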
TEST_F(StubTest, ReadBarrierForRoot) {
#if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
      defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
  Thread* self = Thread::Current();

  const uintptr_t readBarrierForRootSlow =
      StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);

  // Create an object
  ScopedObjectAccess soa(self);
  // garbage is created during ClassLinker::Init

  StackHandleScope<1> hs(soa.Self());

  Handle<mirror::String> obj(
      hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));

  EXPECT_FALSE(self->IsExceptionPending());

  GcRoot<mirror::Class>& root = mirror::String::java_lang_String_;
  size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);

  EXPECT_FALSE(self->IsExceptionPending());
  EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
  mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
  EXPECT_EQ(klass, obj->GetClass());

  // Tests done.
#else
  LOG(INFO) << "Skipping read_barrier_for_root_slow";
  // Force-print to std::cout so it's also outside the logcat.
  std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
#endif
}

}  // namespace art