1 /*
2  * Copyright (C) 2014 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include <cstdio>
18 
19 #include "common_runtime_test.h"
20 #include "entrypoints/quick/quick_entrypoints_enum.h"
21 #include "mirror/art_field-inl.h"
22 #include "mirror/art_method-inl.h"
23 #include "mirror/class-inl.h"
24 #include "mirror/string-inl.h"
25 #include "scoped_thread_state_change.h"
26 
27 namespace art {
28 
29 
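// Test fixture that exercises the quick entrypoint stubs directly. The Invoke3* helpers push a
// managed-stack transition and then jump to the stub through hand-written inline assembly for
// each supported architecture; on unsupported architectures the tests log a message and skip.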
30 class StubTest : public CommonRuntimeTest {
31  protected:
32   // We need callee-save methods set up in the Runtime for exceptions.
33   void SetUp() OVERRIDE {
34     // Do the normal setup.
35     CommonRuntimeTest::SetUp();
36 
37     {
38       // Create callee-save methods
39       ScopedObjectAccess soa(Thread::Current());
40       runtime_->SetInstructionSet(kRuntimeISA);
41       for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
42         Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
43         if (!runtime_->HasCalleeSaveMethod(type)) {
44           runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(type), type);
45         }
46       }
47     }
48   }
49 
50   void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
51     // Use a smaller heap
52     for (std::pair<std::string, const void*>& pair : *options) {
53       if (pair.first.find("-Xmx") == 0) {
54         pair.first = "-Xmx4M";  // Smallest we can go.
55       }
56     }
57     options->push_back(std::make_pair("-Xint", nullptr));
58   }
59 
60   // Helper function needed since TEST_F makes a new class.
61   Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
62     return &self->tlsPtr_;
63   }
64 
65  public:
66   size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
67     return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
68   }
69 
70   // TODO: Set up a frame according to referrer's specs.
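  // Calls the stub at |code| with up to three word-sized arguments, pushing |referrer| on the
  // stack where the stub expects the calling method. On arm64 the callee-saved FP registers
  // d8-d15 are filled with known values before the call and verified afterwards.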
71   size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
72                              mirror::ArtMethod* referrer) {
73     // Push a transition back into managed code onto the linked list in thread.
74     ManagedStack fragment;
75     self->PushManagedStackFragment(&fragment);
76 
77     size_t result;
78     size_t fpr_result = 0;
79 #if defined(__i386__)
80     // TODO: Set the thread?
81     __asm__ __volatile__(
82         "subl $12, %%esp\n\t"       // Align stack.
83         "pushl %[referrer]\n\t"     // Store referrer.
84         "call *%%edi\n\t"           // Call the stub
85         "addl $16, %%esp"           // Pop referrer
86         : "=a" (result)
87           // Use the result from eax
88         : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer)
89           // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
90         : "memory");  // clobber.
91     // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
92     //       but compilation fails when declaring that.
93 #elif defined(__arm__)
94     __asm__ __volatile__(
95         "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B
96         ".cfi_adjust_cfa_offset 52\n\t"
97         "push {r9}\n\t"
98         ".cfi_adjust_cfa_offset 4\n\t"
99         "mov r9, %[referrer]\n\n"
100         "str r9, [sp, #-8]!\n\t"   // Push referrer, +8B padding so 16B aligned
101         ".cfi_adjust_cfa_offset 8\n\t"
102         "ldr r9, [sp, #8]\n\t"
103 
104         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
105         "sub sp, sp, #20\n\t"
106         "str %[arg0], [sp]\n\t"
107         "str %[arg1], [sp, #4]\n\t"
108         "str %[arg2], [sp, #8]\n\t"
109         "str %[code], [sp, #12]\n\t"
110         "str %[self], [sp, #16]\n\t"
111         "ldr r0, [sp]\n\t"
112         "ldr r1, [sp, #4]\n\t"
113         "ldr r2, [sp, #8]\n\t"
114         "ldr r3, [sp, #12]\n\t"
115         "ldr r9, [sp, #16]\n\t"
116         "add sp, sp, #20\n\t"
117 
118         "blx r3\n\t"                // Call the stub
119         "add sp, sp, #12\n\t"       // Pop referrer, padding and saved r9
120         ".cfi_adjust_cfa_offset -12\n\t"
121         "pop {r1-r12, lr}\n\t"      // Restore state
122         ".cfi_adjust_cfa_offset -52\n\t"
123         "mov %[result], r0\n\t"     // Save the result
124         : [result] "=r" (result)
125           // Use the result from r0
126         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
127           [referrer] "r"(referrer)
128         : "memory");  // clobber.
129 #elif defined(__aarch64__)
130     __asm__ __volatile__(
131         // Spill x0-x7 which we say we don't clobber. May contain args.
132         "sub sp, sp, #64\n\t"
133         ".cfi_adjust_cfa_offset 64\n\t"
134         "stp x0, x1, [sp]\n\t"
135         "stp x2, x3, [sp, #16]\n\t"
136         "stp x4, x5, [sp, #32]\n\t"
137         "stp x6, x7, [sp, #48]\n\t"
138 
139         "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned
140         ".cfi_adjust_cfa_offset 16\n\t"
141         "str %[referrer], [sp]\n\t"    // referrer
142 
143         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
144         "sub sp, sp, #48\n\t"
145         ".cfi_adjust_cfa_offset 48\n\t"
146         // All things are "r" constraints, so direct str/stp should work.
147         "stp %[arg0], %[arg1], [sp]\n\t"
148         "stp %[arg2], %[code], [sp, #16]\n\t"
149         "str %[self], [sp, #32]\n\t"
150 
151         // Now we definitely have x0-x3 free, use it to garble d8 - d15
152         "movk x0, #0xfad0\n\t"
153         "movk x0, #0xebad, lsl #16\n\t"
154         "movk x0, #0xfad0, lsl #32\n\t"
155         "movk x0, #0xebad, lsl #48\n\t"
156         "fmov d8, x0\n\t"
157         "add x0, x0, 1\n\t"
158         "fmov d9, x0\n\t"
159         "add x0, x0, 1\n\t"
160         "fmov d10, x0\n\t"
161         "add x0, x0, 1\n\t"
162         "fmov d11, x0\n\t"
163         "add x0, x0, 1\n\t"
164         "fmov d12, x0\n\t"
165         "add x0, x0, 1\n\t"
166         "fmov d13, x0\n\t"
167         "add x0, x0, 1\n\t"
168         "fmov d14, x0\n\t"
169         "add x0, x0, 1\n\t"
170         "fmov d15, x0\n\t"
171 
172         // Load call params into the right registers.
173         "ldp x0, x1, [sp]\n\t"
174         "ldp x2, x3, [sp, #16]\n\t"
175         "ldr x18, [sp, #32]\n\t"
176         "add sp, sp, #48\n\t"
177         ".cfi_adjust_cfa_offset -48\n\t"
178 
179 
180         "blr x3\n\t"              // Call the stub
181         "mov x8, x0\n\t"          // Store result
182         "add sp, sp, #16\n\t"     // Drop the quick "frame"
183         ".cfi_adjust_cfa_offset -16\n\t"
184 
185         // Test d8 - d15. We can use x1 and x2.
186         "movk x1, #0xfad0\n\t"
187         "movk x1, #0xebad, lsl #16\n\t"
188         "movk x1, #0xfad0, lsl #32\n\t"
189         "movk x1, #0xebad, lsl #48\n\t"
190         "fmov x2, d8\n\t"
191         "cmp x1, x2\n\t"
192         "b.ne 1f\n\t"
193         "add x1, x1, 1\n\t"
194 
195         "fmov x2, d9\n\t"
196         "cmp x1, x2\n\t"
197         "b.ne 1f\n\t"
198         "add x1, x1, 1\n\t"
199 
200         "fmov x2, d10\n\t"
201         "cmp x1, x2\n\t"
202         "b.ne 1f\n\t"
203         "add x1, x1, 1\n\t"
204 
205         "fmov x2, d11\n\t"
206         "cmp x1, x2\n\t"
207         "b.ne 1f\n\t"
208         "add x1, x1, 1\n\t"
209 
210         "fmov x2, d12\n\t"
211         "cmp x1, x2\n\t"
212         "b.ne 1f\n\t"
213         "add x1, x1, 1\n\t"
214 
215         "fmov x2, d13\n\t"
216         "cmp x1, x2\n\t"
217         "b.ne 1f\n\t"
218         "add x1, x1, 1\n\t"
219 
220         "fmov x2, d14\n\t"
221         "cmp x1, x2\n\t"
222         "b.ne 1f\n\t"
223         "add x1, x1, 1\n\t"
224 
225         "fmov x2, d15\n\t"
226         "cmp x1, x2\n\t"
227         "b.ne 1f\n\t"
228 
229         "mov x9, #0\n\t"              // Use x9 as flag, in clobber list
230 
231         // Finish up.
232         "2:\n\t"
233         "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result
234         "ldp x2, x3, [sp, #16]\n\t"
235         "ldp x4, x5, [sp, #32]\n\t"
236         "ldp x6, x7, [sp, #48]\n\t"
237         "add sp, sp, #64\n\t"         // Free stack space, now sp as on entry
238         ".cfi_adjust_cfa_offset -64\n\t"
239 
240         "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result
241         "mov %[result], x8\n\t"              // Store the call result
242 
243         "b 3f\n\t"                     // Goto end
244 
245         // Failed fpr verification.
246         "1:\n\t"
247         "mov x9, #1\n\t"
248         "b 2b\n\t"                     // Goto finish-up
249 
250         // End
251         "3:\n\t"
252         : [result] "=r" (result)
253           // Use the result from x0
254         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
255           [referrer] "r"(referrer), [fpr_result] "m" (fpr_result)
256         : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
257           "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
258           "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
259           "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
260           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
261           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
262           "memory");  // clobber.
263 #elif defined(__x86_64__) && !defined(__APPLE__)
264     // Note: Uses the native convention
265     // TODO: Set the thread?
266     __asm__ __volatile__(
267         "pushq %[referrer]\n\t"        // Push referrer
268         "pushq (%%rsp)\n\t"             // & 16B alignment padding
269         ".cfi_adjust_cfa_offset 16\n\t"
270         "call *%%rax\n\t"              // Call the stub
271         "addq $16, %%rsp\n\t"          // Pop referrer and padding
272         ".cfi_adjust_cfa_offset -16\n\t"
273         : "=a" (result)
274           // Use the result from rax
275         : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "m"(referrer)
276           // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
277         : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
278           "memory");  // clobber all
279     // TODO: Should we clobber the other registers?
280 #else
281     LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
282     result = 0;
283 #endif
284     // Pop transition.
285     self->PopManagedStackFragment(fragment);
286 
287     fp_result = fpr_result;
288     EXPECT_EQ(0U, fp_result);
289 
290     return result;
291   }
292 
293   // TODO: Set up a frame according to referrer's specs.
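  // Like Invoke3WithReferrer, but additionally passes |hidden| in the hidden-argument register
  // the stub expects (xmm0 on x86/x86-64, r12 on arm, x12 on arm64).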
294   size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
295                                       Thread* self, mirror::ArtMethod* referrer, size_t hidden) {
296     // Push a transition back into managed code onto the linked list in thread.
297     ManagedStack fragment;
298     self->PushManagedStackFragment(&fragment);
299 
300     size_t result;
301     size_t fpr_result = 0;
302 #if defined(__i386__)
303     // TODO: Set the thread?
304     __asm__ __volatile__(
305         "movd %[hidden], %%xmm0\n\t"
306         "subl $12, %%esp\n\t"       // Align stack.
307         "pushl %[referrer]\n\t"     // Store referrer
308         "call *%%edi\n\t"           // Call the stub
309         "addl $16, %%esp"           // Pop referrer
310         : "=a" (result)
311           // Use the result from eax
312         : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"m"(referrer), [hidden]"r"(hidden)
313           // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
314         : "memory");  // clobber.
315     // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
316     //       but compilation fails when declaring that.
317 #elif defined(__arm__)
318     __asm__ __volatile__(
319         "push {r1-r12, lr}\n\t"     // Save state, 13*4B = 52B
320         ".cfi_adjust_cfa_offset 52\n\t"
321         "push {r9}\n\t"
322         ".cfi_adjust_cfa_offset 4\n\t"
323         "mov r9, %[referrer]\n\n"
324         "str r9, [sp, #-8]!\n\t"   // Push referrer, +8B padding so 16B aligned
325         ".cfi_adjust_cfa_offset 8\n\t"
326         "ldr r9, [sp, #8]\n\t"
327 
328         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
329         "sub sp, sp, #24\n\t"
330         "str %[arg0], [sp]\n\t"
331         "str %[arg1], [sp, #4]\n\t"
332         "str %[arg2], [sp, #8]\n\t"
333         "str %[code], [sp, #12]\n\t"
334         "str %[self], [sp, #16]\n\t"
335         "str %[hidden], [sp, #20]\n\t"
336         "ldr r0, [sp]\n\t"
337         "ldr r1, [sp, #4]\n\t"
338         "ldr r2, [sp, #8]\n\t"
339         "ldr r3, [sp, #12]\n\t"
340         "ldr r9, [sp, #16]\n\t"
341         "ldr r12, [sp, #20]\n\t"
342         "add sp, sp, #24\n\t"
343 
344         "blx r3\n\t"                // Call the stub
345         "add sp, sp, #12\n\t"       // Pop referrer, padding and saved r9
346         ".cfi_adjust_cfa_offset -12\n\t"
347         "pop {r1-r12, lr}\n\t"      // Restore state
348         ".cfi_adjust_cfa_offset -52\n\t"
349         "mov %[result], r0\n\t"     // Save the result
350         : [result] "=r" (result)
351           // Use the result from r0
352         : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
353           [referrer] "r"(referrer), [hidden] "r"(hidden)
354         : "memory");  // clobber.
355 #elif defined(__aarch64__)
356     __asm__ __volatile__(
357         // Spill x0-x7 which we say we don't clobber. May contain args.
358         "sub sp, sp, #64\n\t"
359         ".cfi_adjust_cfa_offset 64\n\t"
360         "stp x0, x1, [sp]\n\t"
361         "stp x2, x3, [sp, #16]\n\t"
362         "stp x4, x5, [sp, #32]\n\t"
363         "stp x6, x7, [sp, #48]\n\t"
364 
365         "sub sp, sp, #16\n\t"          // Reserve stack space, 16B aligned
366         ".cfi_adjust_cfa_offset 16\n\t"
367         "str %[referrer], [sp]\n\t"    // referrer
368 
369         // Push everything on the stack, so we don't rely on the order. What a mess. :-(
370         "sub sp, sp, #48\n\t"
371         ".cfi_adjust_cfa_offset 48\n\t"
372         // All things are "r" constraints, so direct str/stp should work.
373         "stp %[arg0], %[arg1], [sp]\n\t"
374         "stp %[arg2], %[code], [sp, #16]\n\t"
375         "stp %[self], %[hidden], [sp, #32]\n\t"
376 
377         // Now we definitely have x0-x3 free, use it to garble d8 - d15
378         "movk x0, #0xfad0\n\t"
379         "movk x0, #0xebad, lsl #16\n\t"
380         "movk x0, #0xfad0, lsl #32\n\t"
381         "movk x0, #0xebad, lsl #48\n\t"
382         "fmov d8, x0\n\t"
383         "add x0, x0, 1\n\t"
384         "fmov d9, x0\n\t"
385         "add x0, x0, 1\n\t"
386         "fmov d10, x0\n\t"
387         "add x0, x0, 1\n\t"
388         "fmov d11, x0\n\t"
389         "add x0, x0, 1\n\t"
390         "fmov d12, x0\n\t"
391         "add x0, x0, 1\n\t"
392         "fmov d13, x0\n\t"
393         "add x0, x0, 1\n\t"
394         "fmov d14, x0\n\t"
395         "add x0, x0, 1\n\t"
396         "fmov d15, x0\n\t"
397 
398         // Load call params into the right registers.
399         "ldp x0, x1, [sp]\n\t"
400         "ldp x2, x3, [sp, #16]\n\t"
401         "ldp x18, x12, [sp, #32]\n\t"
402         "add sp, sp, #48\n\t"
403         ".cfi_adjust_cfa_offset -48\n\t"
404 
405         "blr x3\n\t"              // Call the stub
406         "mov x8, x0\n\t"          // Store result
407         "add sp, sp, #16\n\t"     // Drop the quick "frame"
408         ".cfi_adjust_cfa_offset -16\n\t"
409 
410         // Test d8 - d15. We can use x1 and x2.
411         "movk x1, #0xfad0\n\t"
412         "movk x1, #0xebad, lsl #16\n\t"
413         "movk x1, #0xfad0, lsl #32\n\t"
414         "movk x1, #0xebad, lsl #48\n\t"
415         "fmov x2, d8\n\t"
416         "cmp x1, x2\n\t"
417         "b.ne 1f\n\t"
418         "add x1, x1, 1\n\t"
419 
420         "fmov x2, d9\n\t"
421         "cmp x1, x2\n\t"
422         "b.ne 1f\n\t"
423         "add x1, x1, 1\n\t"
424 
425         "fmov x2, d10\n\t"
426         "cmp x1, x2\n\t"
427         "b.ne 1f\n\t"
428         "add x1, x1, 1\n\t"
429 
430         "fmov x2, d11\n\t"
431         "cmp x1, x2\n\t"
432         "b.ne 1f\n\t"
433         "add x1, x1, 1\n\t"
434 
435         "fmov x2, d12\n\t"
436         "cmp x1, x2\n\t"
437         "b.ne 1f\n\t"
438         "add x1, x1, 1\n\t"
439 
440         "fmov x2, d13\n\t"
441         "cmp x1, x2\n\t"
442         "b.ne 1f\n\t"
443         "add x1, x1, 1\n\t"
444 
445         "fmov x2, d14\n\t"
446         "cmp x1, x2\n\t"
447         "b.ne 1f\n\t"
448         "add x1, x1, 1\n\t"
449 
450         "fmov x2, d15\n\t"
451         "cmp x1, x2\n\t"
452         "b.ne 1f\n\t"
453 
454         "mov x9, #0\n\t"              // Use x9 as flag, in clobber list
455 
456         // Finish up.
457         "2:\n\t"
458         "ldp x0, x1, [sp]\n\t"        // Restore stuff not named clobbered, may contain fpr_result
459         "ldp x2, x3, [sp, #16]\n\t"
460         "ldp x4, x5, [sp, #32]\n\t"
461         "ldp x6, x7, [sp, #48]\n\t"
462         "add sp, sp, #64\n\t"         // Free stack space, now sp as on entry
463         ".cfi_adjust_cfa_offset -64\n\t"
464 
465         "str x9, %[fpr_result]\n\t"   // Store the FPR comparison result
466         "mov %[result], x8\n\t"              // Store the call result
467 
468         "b 3f\n\t"                     // Goto end
469 
470         // Failed fpr verification.
471         "1:\n\t"
472         "mov x9, #1\n\t"
473         "b 2b\n\t"                     // Goto finish-up
474 
475         // End
476         "3:\n\t"
477         : [result] "=r" (result)
478           // Use the result from x0
479         : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
480           [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
481         : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19", "x20",
482           "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
483           "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
484           "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
485           "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
486           "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
487           "memory");  // clobber.
488 #elif defined(__x86_64__) && !defined(__APPLE__)
489     // Note: Uses the native convention
490     // TODO: Set the thread?
491     __asm__ __volatile__(
492         "movq %[hidden], %%r9\n\t"     // No need to save r9, listed as clobbered
493         "movd %%r9, %%xmm0\n\t"
494         "pushq %[referrer]\n\t"        // Push referrer
495         "pushq (%%rsp)\n\t"            // & 16B alignment padding
496         ".cfi_adjust_cfa_offset 16\n\t"
497         "call *%%rax\n\t"              // Call the stub
498         "addq $16, %%rsp\n\t"          // Pop referrer and padding
499         ".cfi_adjust_cfa_offset -16\n\t"
500         : "=a" (result)
501         // Use the result from rax
502         : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "m"(referrer), [hidden] "m"(hidden)
503         // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
504         : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
505           "memory");  // clobber all
506     // TODO: Should we clobber the other registers?
507 #else
508     LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
509     result = 0;
510 #endif
511     // Pop transition.
512     self->PopManagedStackFragment(fragment);
513 
514     fp_result = fpr_result;
515     EXPECT_EQ(0U, fp_result);
516 
517     return result;
518   }
519 
520   // Method with 32b arg0, 64b arg1
521   size_t Invoke3UWithReferrer(size_t arg0, uint64_t arg1, uintptr_t code, Thread* self,
522                               mirror::ArtMethod* referrer) {
523 #if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
524     // Just pass through.
525     return Invoke3WithReferrer(arg0, arg1, 0U, code, self, referrer);
526 #else
527     // Need to split up arguments.
528     uint32_t lower = static_cast<uint32_t>(arg1 & 0xFFFFFFFF);
529     uint32_t upper = static_cast<uint32_t>((arg1 >> 32) & 0xFFFFFFFF);
530 
531     return Invoke3WithReferrer(arg0, lower, upper, code, self, referrer);
532 #endif
533   }
534 
535   // Method with 32b arg0, 32b arg1, 64b arg2
536   size_t Invoke3UUWithReferrer(uint32_t arg0, uint32_t arg1, uint64_t arg2, uintptr_t code,
537                                Thread* self, mirror::ArtMethod* referrer) {
538 #if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
539     // Just pass through.
540     return Invoke3WithReferrer(arg0, arg1, arg2, code, self, referrer);
541 #else
542     // TODO: Needs 4-param invoke.
543     return 0;
544 #endif
545   }
546 
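  // Reads the code pointer for |entrypoint| out of the current Thread's entrypoint table, using
  // the 4-byte or 8-byte thread offset depending on the target word size.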
547   static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
548     int32_t offset;
549 #ifdef __LP64__
550     offset = GetThreadOffset<8>(entrypoint).Int32Value();
551 #else
552     offset = GetThreadOffset<4>(entrypoint).Int32Value();
553 #endif
554     return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
555   }
556 
557  protected:
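  // FPR-corruption flag written by the arm64 paths of the Invoke3* helpers; expected to stay 0.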
558   size_t fp_result;
559 };
560 
561 
562 TEST_F(StubTest, Memcpy) {
563 #if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__))
564   Thread* self = Thread::Current();
565 
566   uint32_t orig[20];
567   uint32_t trg[20];
568   for (size_t i = 0; i < 20; ++i) {
569     orig[i] = i;
570     trg[i] = 0;
571   }
572 
573   Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
574           10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);
575 
576   EXPECT_EQ(orig[0], trg[0]);
577 
578   for (size_t i = 1; i < 4; ++i) {
579     EXPECT_NE(orig[i], trg[i]);
580   }
581 
582   for (size_t i = 4; i < 14; ++i) {
583     EXPECT_EQ(orig[i], trg[i]);
584   }
585 
586   for (size_t i = 14; i < 20; ++i) {
587     EXPECT_NE(orig[i], trg[i]);
588   }
589 
590   // TODO: Test overlapping?
591 
592 #else
593   LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
594   // Force-print to std::cout so it's also outside the logcat.
595   std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
596 #endif
597 }
598 
599 TEST_F(StubTest, LockObject) {
600 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
601   static constexpr size_t kThinLockLoops = 100;
602 
603   Thread* self = Thread::Current();
604 
605   const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
606 
607   // Create an object
608   ScopedObjectAccess soa(self);
609   // garbage is created during ClassLinker::Init
610 
611   StackHandleScope<2> hs(soa.Self());
612   Handle<mirror::String> obj(
613       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
614   LockWord lock = obj->GetLockWord(false);
615   LockWord::LockState old_state = lock.GetState();
616   EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
617 
618   Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
619 
620   LockWord lock_after = obj->GetLockWord(false);
621   LockWord::LockState new_state = lock_after.GetState();
622   EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
623   EXPECT_EQ(lock_after.ThinLockCount(), 0U);  // Thin lock starts count at zero
624 
625   for (size_t i = 1; i < kThinLockLoops; ++i) {
626     Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
627 
628     // Check we're at lock count i
629 
630     LockWord l_inc = obj->GetLockWord(false);
631     LockWord::LockState l_inc_state = l_inc.GetState();
632     EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
633     EXPECT_EQ(l_inc.ThinLockCount(), i);
634   }
635 
636   // Force a fat lock by running identity hashcode to fill up lock word.
637   Handle<mirror::String> obj2(hs.NewHandle(
638       mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
639 
640   obj2->IdentityHashCode();
641 
642   Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);
643 
644   LockWord lock_after2 = obj2->GetLockWord(false);
645   LockWord::LockState new_state2 = lock_after2.GetState();
646   EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
647   EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));
648 
649   // Test done.
650 #else
651   LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
652   // Force-print to std::cout so it's also outside the logcat.
653   std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
654 #endif
655 }
656 
657 
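// Small deterministic pseudo-random generator (Lehmer-style constants) so that the stress tests
// below are reproducible.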
658 class RandGen {
659  public:
660   explicit RandGen(uint32_t seed) : val_(seed) {}
661 
662   uint32_t next() {
663     val_ = val_ * 48271 % 2147483647 + 13;
664     return val_;
665   }
666 
667   uint32_t val_;
668 };
669 
670 
671 // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
672 static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
673 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
674   static constexpr size_t kThinLockLoops = 100;
675 
676   Thread* self = Thread::Current();
677 
678   const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
679   const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
680   // Create an object
681   ScopedObjectAccess soa(self);
682   // garbage is created during ClassLinker::Init
683   static constexpr size_t kNumberOfLocks = 10;  // Number of objects (and locks) in flight.
684   StackHandleScope<kNumberOfLocks + 1> hs(self);
685   Handle<mirror::String> obj(
686       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
687   LockWord lock = obj->GetLockWord(false);
688   LockWord::LockState old_state = lock.GetState();
689   EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
690 
691   test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
692   // This should be an illegal monitor state.
693   EXPECT_TRUE(self->IsExceptionPending());
694   self->ClearException();
695 
696   LockWord lock_after = obj->GetLockWord(false);
697   LockWord::LockState new_state = lock_after.GetState();
698   EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
699 
700   test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
701 
702   LockWord lock_after2 = obj->GetLockWord(false);
703   LockWord::LockState new_state2 = lock_after2.GetState();
704   EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
705 
706   test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
707 
708   LockWord lock_after3 = obj->GetLockWord(false);
709   LockWord::LockState new_state3 = lock_after3.GetState();
710   EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);
711 
712   // Stress test:
713   // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
714   // each step.
715 
716   RandGen r(0x1234);
717 
718   constexpr size_t kIterations = 10000;  // Number of iterations
719   constexpr size_t kMoveToFat = 1000;     // Chance of 1:kMoveToFat to make a lock fat.
720 
721   size_t counts[kNumberOfLocks];
722   bool fat[kNumberOfLocks];  // Whether a lock should be thin or fat.
723   Handle<mirror::String> objects[kNumberOfLocks];
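  // counts[i] tracks how many times objects[i] is currently (recursively) locked; fat[i] records
  // whether its lock has been inflated to a fat lock.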
724 
725   // Initialize = allocate.
726   for (size_t i = 0; i < kNumberOfLocks; ++i) {
727     counts[i] = 0;
728     fat[i] = false;
729     objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
730   }
731 
732   for (size_t i = 0; i < kIterations; ++i) {
733     // Select which lock to update.
734     size_t index = r.next() % kNumberOfLocks;
735 
736     // Make lock fat?
737     if (!fat[index] && (r.next() % kMoveToFat == 0)) {
738       fat[index] = true;
739       objects[index]->IdentityHashCode();
740 
741       LockWord lock_iter = objects[index]->GetLockWord(false);
742       LockWord::LockState iter_state = lock_iter.GetState();
743       if (counts[index] == 0) {
744         EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
745       } else {
746         EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
747       }
748     } else {
749       bool lock;  // Whether to lock or unlock in this step.
750       if (counts[index] == 0) {
751         lock = true;
752       } else if (counts[index] == kThinLockLoops) {
753         lock = false;
754       } else {
755         // Randomly.
756         lock = r.next() % 2 == 0;
757       }
758 
759       if (lock) {
760         test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
761                       self);
762         counts[index]++;
763       } else {
764         test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
765                       art_quick_unlock_object, self);
766         counts[index]--;
767       }
768 
769       EXPECT_FALSE(self->IsExceptionPending());
770 
771       // Check the new state.
772       LockWord lock_iter = objects[index]->GetLockWord(true);
773       LockWord::LockState iter_state = lock_iter.GetState();
774       if (fat[index]) {
775         // Abuse MonitorInfo.
776         EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
777         MonitorInfo info(objects[index].Get());
778         EXPECT_EQ(counts[index], info.entry_count_) << index;
779       } else {
780         if (counts[index] > 0) {
781           EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
782           EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
783         } else {
784           EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
785         }
786       }
787     }
788   }
789 
790   // Unlock the remaining count times and then check it's unlocked. Then deallocate.
791   // Go reverse order to correctly handle Handles.
792   for (size_t i = 0; i < kNumberOfLocks; ++i) {
793     size_t index = kNumberOfLocks - 1 - i;
794     size_t count = counts[index];
795     while (count > 0) {
796       test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
797                     self);
798       count--;
799     }
800 
801     LockWord lock_after4 = objects[index]->GetLockWord(false);
802     LockWord::LockState new_state4 = lock_after4.GetState();
803     EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
804                 || LockWord::LockState::kFatLocked == new_state4);
805   }
806 
807   // Test done.
808 #else
809   LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
810   // Force-print to std::cout so it's also outside the logcat.
811   std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
812 #endif
813 }
814 
815 TEST_F(StubTest, UnlockObject) {
816   TestUnlockObject(this);
817 }
818 
819 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
820 extern "C" void art_quick_check_cast(void);
821 #endif
822 
823 TEST_F(StubTest, CheckCast) {
824 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
825   Thread* self = Thread::Current();
826 
827   const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
828 
829   // Find some classes.
830   ScopedObjectAccess soa(self);
831   // garbage is created during ClassLinker::Init
832 
833   StackHandleScope<2> hs(soa.Self());
834   Handle<mirror::Class> c(
835       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
836   Handle<mirror::Class> c2(
837       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
838 
839   EXPECT_FALSE(self->IsExceptionPending());
840 
841   Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
842           art_quick_check_cast, self);
843 
844   EXPECT_FALSE(self->IsExceptionPending());
845 
846   Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
847           art_quick_check_cast, self);
848 
849   EXPECT_FALSE(self->IsExceptionPending());
850 
851   Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
852           art_quick_check_cast, self);
853 
854   EXPECT_FALSE(self->IsExceptionPending());
855 
856   // TODO: Make the following work. But that would require correct managed frames.
857 
858   Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
859           art_quick_check_cast, self);
860 
861   EXPECT_TRUE(self->IsExceptionPending());
862   self->ClearException();
863 
864 #else
865   LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
866   // Force-print to std::cout so it's also outside the logcat.
867   std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
868 #endif
869 }
870 
871 
872 TEST_F(StubTest, APutObj) {
873   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
874 
875 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
876   Thread* self = Thread::Current();
877 
878   // Do not check non-checked ones, we'd need handlers and stuff...
879   const uintptr_t art_quick_aput_obj_with_null_and_bound_check =
880       StubTest::GetEntrypoint(self, kQuickAputObjectWithNullAndBoundCheck);
881 
882   // Create an object
883   ScopedObjectAccess soa(self);
884   // garbage is created during ClassLinker::Init
885 
886   StackHandleScope<5> hs(soa.Self());
887   Handle<mirror::Class> c(
888       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
889   Handle<mirror::Class> ca(
890       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
891 
892   // Build a string array of size 10
893   Handle<mirror::ObjectArray<mirror::Object>> array(
894       hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), 10)));
895 
896   // Build a string -> should be assignable
897   Handle<mirror::String> str_obj(
898       hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
899 
900   // Build a generic object -> should fail assigning
901   Handle<mirror::Object> obj_obj(hs.NewHandle(c->AllocObject(soa.Self())));
902 
903   // Play with it...
904 
905   // 1) Success cases
906   // 1.1) Assign str_obj to array[0..3]
907 
908   EXPECT_FALSE(self->IsExceptionPending());
909 
910   Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
911           art_quick_aput_obj_with_null_and_bound_check, self);
912 
913   EXPECT_FALSE(self->IsExceptionPending());
914   EXPECT_EQ(str_obj.Get(), array->Get(0));
915 
916   Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
917           art_quick_aput_obj_with_null_and_bound_check, self);
918 
919   EXPECT_FALSE(self->IsExceptionPending());
920   EXPECT_EQ(str_obj.Get(), array->Get(1));
921 
922   Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
923           art_quick_aput_obj_with_null_and_bound_check, self);
924 
925   EXPECT_FALSE(self->IsExceptionPending());
926   EXPECT_EQ(str_obj.Get(), array->Get(2));
927 
928   Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
929           art_quick_aput_obj_with_null_and_bound_check, self);
930 
931   EXPECT_FALSE(self->IsExceptionPending());
932   EXPECT_EQ(str_obj.Get(), array->Get(3));
933 
934   // 1.2) Assign null to array[0..3]
935 
936   Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
937           art_quick_aput_obj_with_null_and_bound_check, self);
938 
939   EXPECT_FALSE(self->IsExceptionPending());
940   EXPECT_EQ(nullptr, array->Get(0));
941 
942   Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
943           art_quick_aput_obj_with_null_and_bound_check, self);
944 
945   EXPECT_FALSE(self->IsExceptionPending());
946   EXPECT_EQ(nullptr, array->Get(1));
947 
948   Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
949           art_quick_aput_obj_with_null_and_bound_check, self);
950 
951   EXPECT_FALSE(self->IsExceptionPending());
952   EXPECT_EQ(nullptr, array->Get(2));
953 
954   Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
955           art_quick_aput_obj_with_null_and_bound_check, self);
956 
957   EXPECT_FALSE(self->IsExceptionPending());
958   EXPECT_EQ(nullptr, array->Get(3));
959 
960   // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.
961 
962   // 2) Failure cases (str into str[])
963   // 2.1) Array = null
964   // TODO: Throwing NPE needs actual DEX code
965 
966 //  Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.Get()),
967 //          reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
968 //
969 //  EXPECT_TRUE(self->IsExceptionPending());
970 //  self->ClearException();
971 
972   // 2.2) Index < 0
973 
974   Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
975           reinterpret_cast<size_t>(str_obj.Get()),
976           art_quick_aput_obj_with_null_and_bound_check, self);
977 
978   EXPECT_TRUE(self->IsExceptionPending());
979   self->ClearException();
980 
981   // 2.3) Index >= array length
982 
983   Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
984           art_quick_aput_obj_with_null_and_bound_check, self);
985 
986   EXPECT_TRUE(self->IsExceptionPending());
987   self->ClearException();
988 
989   // 3) Failure cases (obj into str[])
990 
991   Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
992           art_quick_aput_obj_with_null_and_bound_check, self);
993 
994   EXPECT_TRUE(self->IsExceptionPending());
995   self->ClearException();
996 
997   // Tests done.
998 #else
999   LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
1000   // Force-print to std::cout so it's also outside the logcat.
1001   std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
1002 #endif
1003 }
1004 
1005 TEST_F(StubTest, AllocObject) {
1006   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1007 
1008 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1009   // TODO: Check the "Unresolved" allocation stubs
1010 
1011   Thread* self = Thread::Current();
1012   // Create an object
1013   ScopedObjectAccess soa(self);
1014   // garbage is created during ClassLinker::Init
1015 
1016   StackHandleScope<2> hs(soa.Self());
1017   Handle<mirror::Class> c(
1018       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1019 
1020   // Play with it...
1021 
1022   EXPECT_FALSE(self->IsExceptionPending());
1023   {
1024     // Use an arbitrary method from c to use as referrer
1025     size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
1026                             reinterpret_cast<size_t>(c->GetVirtualMethod(0)),  // arbitrary
1027                             0U,
1028                             StubTest::GetEntrypoint(self, kQuickAllocObject),
1029                             self);
1030 
1031     EXPECT_FALSE(self->IsExceptionPending());
1032     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1033     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1034     EXPECT_EQ(c.Get(), obj->GetClass());
1035     VerifyObject(obj);
1036   }
1037 
1038   {
1039     // We can use nullptr in the second argument as we do not need a method here (not used in
1040     // resolved/initialized cases)
1041     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
1042                             StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
1043                             self);
1044 
1045     EXPECT_FALSE(self->IsExceptionPending());
1046     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1047     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1048     EXPECT_EQ(c.Get(), obj->GetClass());
1049     VerifyObject(obj);
1050   }
1051 
1052   {
1053     // We can use nullptr in the second argument as we do not need a method here (not used in
1054     // resolved/initialized cases)
1055     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
1056                             StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
1057                             self);
1058 
1059     EXPECT_FALSE(self->IsExceptionPending());
1060     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1061     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1062     EXPECT_EQ(c.Get(), obj->GetClass());
1063     VerifyObject(obj);
1064   }
1065 
1066   // Failure tests.
1067 
1068   // Out-of-memory.
1069   {
1070     Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
1071 
1072     // Array helps to fill memory faster.
1073     Handle<mirror::Class> ca(
1074         hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1075 
1076     // Use an arbitrarily large amount for now.
1077     static const size_t kMaxHandles = 1000000;
1078     std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));
1079 
1080     std::vector<Handle<mirror::Object>> handles;
1081     // Start allocating with 128K
1082     size_t length = 128 * KB / 4;
1083     while (length > 10) {
1084       Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
1085           mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
1086       if (self->IsExceptionPending() || h.Get() == nullptr) {
1087         self->ClearException();
1088 
1089         // Try a smaller length
1090         length = length / 8;
1091         // Use at most half the reported free space.
1092         size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
1093         if (length * 8 > mem) {
1094           length = mem / 8;
1095         }
1096       } else {
1097         handles.push_back(h);
1098       }
1099     }
1100     LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";
1101 
1102     // Allocate simple objects till it fails.
1103     while (!self->IsExceptionPending()) {
1104       Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
1105       if (!self->IsExceptionPending() && h.Get() != nullptr) {
1106         handles.push_back(h);
1107       }
1108     }
1109     self->ClearException();
1110 
1111     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 0U,
1112                             StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
1113                             self);
1114     EXPECT_TRUE(self->IsExceptionPending());
1115     self->ClearException();
1116     EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
1117   }
1118 
1119   // Tests done.
1120 #else
1121   LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
1122   // Force-print to std::cout so it's also outside the logcat.
1123   std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA << std::endl;
1124 #endif
1125 }
1126 
1127 TEST_F(StubTest, AllocObjectArray) {
1128   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1129 
1130 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1131   // TODO: Check the "Unresolved" allocation stubs
1132 
1133   Thread* self = Thread::Current();
1134   // Create an object
1135   ScopedObjectAccess soa(self);
1136   // garbage is created during ClassLinker::Init
1137 
1138   StackHandleScope<2> hs(self);
1139   Handle<mirror::Class> c(
1140       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1141 
1142   // Needed to have a linked method.
1143   Handle<mirror::Class> c_obj(
1144       hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1145 
1146   // Play with it...
1147 
1148   EXPECT_FALSE(self->IsExceptionPending());
1149 
1150   // For some reason this does not work, as the type_idx is artificial and outside what the
1151   // resolved types of c_obj allow...
1152 
1153   if (false) {
1154     // Use an arbitrary method from c to use as referrer
1155     size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()),    // type_idx
1156                             reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0)),  // arbitrary
1157                             10U,
1158                             StubTest::GetEntrypoint(self, kQuickAllocArray),
1159                             self);
1160 
1161     EXPECT_FALSE(self->IsExceptionPending());
1162     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1163     mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
1164     EXPECT_EQ(c.Get(), obj->GetClass());
1165     VerifyObject(obj);
1166     EXPECT_EQ(obj->GetLength(), 10);
1167   }
1168 
1169   {
1170     // We can use nullptr in the second argument as we do not need a method here (not used in
1171     // resolved/initialized cases)
1172     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr), 10U,
1173                             StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
1174                             self);
1175     EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
1176     EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1177     mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1178     EXPECT_TRUE(obj->IsArrayInstance());
1179     EXPECT_TRUE(obj->IsObjectArray());
1180     EXPECT_EQ(c.Get(), obj->GetClass());
1181     VerifyObject(obj);
1182     mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
1183     EXPECT_EQ(array->GetLength(), 10);
1184   }
1185 
1186   // Failure tests.
1187 
1188   // Out-of-memory.
1189   {
1190     size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(nullptr),
1191                             GB,  // that should fail...
1192                             StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
1193                             self);
1194 
1195     EXPECT_TRUE(self->IsExceptionPending());
1196     self->ClearException();
1197     EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
1198   }
1199 
1200   // Tests done.
1201 #else
1202   LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
1203   // Force-print to std::cout so it's also outside the logcat.
1204   std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA << std::endl;
1205 #endif
1206 }
1207 
1208 
1209 TEST_F(StubTest, StringCompareTo) {
1210   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1211 
1212 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1213   // TODO: Check the "Unresolved" allocation stubs
1214 
1215   Thread* self = Thread::Current();
1216 
1217   const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);
1218 
1219   ScopedObjectAccess soa(self);
1220   // garbage is created during ClassLinker::Init
1221 
1222   // Create some strings
1223   // Use array so we can index into it and use a matrix for expected results
1224   // Setup: The first half is standard. The second half uses a non-zero offset.
1225   // TODO: Shared backing arrays.
1226   static constexpr size_t kBaseStringCount  = 8;
1227   const char* c[kBaseStringCount] = { "", "", "a", "aa", "ab",
1228       "aacaacaacaacaacaac",  // This one's under the default limit to go to __memcmp16.
1229       "aacaacaacaacaacaacaacaacaacaacaacaac",     // This one's over.
1230       "aacaacaacaacaacaacaacaacaacaacaacaaca" };  // As is this one. We need a separate one to
1231                                                   // defeat object-equal optimizations.
1232 
1233   static constexpr size_t kStringCount = 2 * kBaseStringCount;
1234 
1235   StackHandleScope<kStringCount> hs(self);
1236   Handle<mirror::String> s[kStringCount];
1237 
1238   for (size_t i = 0; i < kBaseStringCount; ++i) {
1239     s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
1240   }
1241 
1242   RandGen r(0x1234);
1243 
1244   for (size_t i = kBaseStringCount; i < kStringCount; ++i) {
1245     s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i - kBaseStringCount]));
1246     int32_t length = s[i]->GetLength();
1247     if (length > 1) {
1248       // Set a random offset and length.
1249       int32_t new_offset = 1 + (r.next() % (length - 1));
1250       int32_t rest = length - new_offset - 1;
1251       int32_t new_length = 1 + (rest > 0 ? r.next() % rest : 0);
1252 
1253       s[i]->SetField32<false>(mirror::String::CountOffset(), new_length);
1254       s[i]->SetField32<false>(mirror::String::OffsetOffset(), new_offset);
1255     }
1256   }
1257 
1258   // TODO: wide characters
1259 
1260   // Matrix of expectations. First component is first parameter. Note we only check against the
1261   // sign, not the value. As we are testing random offsets, we need to compute this and need to
1262   // rely on String::CompareTo being correct.
1263   int32_t expected[kStringCount][kStringCount];
1264   for (size_t x = 0; x < kStringCount; ++x) {
1265     for (size_t y = 0; y < kStringCount; ++y) {
1266       expected[x][y] = s[x]->CompareTo(s[y].Get());
1267     }
1268   }
1269 
1270   // Play with it...
1271 
1272   for (size_t x = 0; x < kStringCount; ++x) {
1273     for (size_t y = 0; y < kStringCount; ++y) {
1274       // Test string_compareto x y
1275       size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
1276                               reinterpret_cast<size_t>(s[y].Get()), 0U,
1277                               art_quick_string_compareto, self);
1278 
1279       EXPECT_FALSE(self->IsExceptionPending());
1280 
1281       // The result is a 32b signed integer
1282       union {
1283         size_t r;
1284         int32_t i;
1285       } conv;
1286       conv.r = result;
1287       int32_t e = expected[x][y];
1288       EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
1289           conv.r;
1290       EXPECT_TRUE(e < 0 ? conv.i < 0 : true)   << "x=" << c[x] << " y="  << c[y] << " res=" <<
1291           conv.r;
1292       EXPECT_TRUE(e > 0 ? conv.i > 0 : true)   << "x=" << c[x] << " y=" << c[y] << " res=" <<
1293           conv.r;
1294     }
1295   }
1296 
1297   // TODO: Deallocate things.
1298 
1299   // Tests done.
1300 #else
1301   LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
1302   // Force-print to std::cout so it's also outside the logcat.
1303   std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
1304       std::endl;
1305 #endif
1306 }
1307 
1308 
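// The GetSet*/set_and_check_* helpers below drive the quick field set/get entrypoints with a set
// of test values, using |referrer| as the stand-in calling method, and check that every value
// round-trips through the stub and the field.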
1309 static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
1310                            mirror::ArtMethod* referrer, StubTest* test)
1311     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1312 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1313   constexpr size_t num_values = 7;
1314   uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
1315 
1316   for (size_t i = 0; i < num_values; ++i) {
1317     test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1318                               static_cast<size_t>(values[i]),
1319                               0U,
1320                               StubTest::GetEntrypoint(self, kQuickSet32Static),
1321                               self,
1322                               referrer);
1323 
1324     size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1325                                            0U, 0U,
1326                                            StubTest::GetEntrypoint(self, kQuickGet32Static),
1327                                            self,
1328                                            referrer);
1329 
1330     EXPECT_EQ(res, values[i]) << "Iteration " << i;
1331   }
1332 #else
1333   LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
1334   // Force-print to std::cout so it's also outside the logcat.
1335   std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
1336 #endif
1337 }
1338 
1339 
1340 static void GetSet32Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
1341                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
1342     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1343 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1344   constexpr size_t num_values = 7;
1345   uint32_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
1346 
1347   for (size_t i = 0; i < num_values; ++i) {
1348     test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1349                               reinterpret_cast<size_t>(obj->Get()),
1350                               static_cast<size_t>(values[i]),
1351                               StubTest::GetEntrypoint(self, kQuickSet32Instance),
1352                               self,
1353                               referrer);
1354 
1355     int32_t res = f->Get()->GetInt(obj->Get());
1356     EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;
1357 
1358     res++;
1359     f->Get()->SetInt<false>(obj->Get(), res);
1360 
1361     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1362                                             reinterpret_cast<size_t>(obj->Get()),
1363                                             0U,
1364                                             StubTest::GetEntrypoint(self, kQuickGet32Instance),
1365                                             self,
1366                                             referrer);
1367     EXPECT_EQ(res, static_cast<int32_t>(res2));
1368   }
1369 #else
1370   LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
1371   // Force-print to std::cout so it's also outside the logcat.
1372   std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1373 #endif
1374 }
1375 
1376 
1377 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1378 
1379 static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
1380                                  mirror::ArtMethod* referrer, StubTest* test)
1381     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1382   test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
1383                             reinterpret_cast<size_t>(val),
1384                             0U,
1385                             StubTest::GetEntrypoint(self, kQuickSetObjStatic),
1386                             self,
1387                             referrer);
1388 
1389   size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
1390                                          0U, 0U,
1391                                          StubTest::GetEntrypoint(self, kQuickGetObjStatic),
1392                                          self,
1393                                          referrer);
1394 
1395   EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
1396 }
1397 #endif
1398 
1399 static void GetSetObjStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
1400                             mirror::ArtMethod* referrer, StubTest* test)
1401     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1402 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1403   set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
1404 
1405   // Allocate a string object for simplicity.
1406   mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
1407   set_and_check_static((*f)->GetDexFieldIndex(), str, self, referrer, test);
1408 
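  // Store null again: this exercises clearing the field and avoids keeping the temporary
  // String reachable through the static field after the test.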
1409   set_and_check_static((*f)->GetDexFieldIndex(), nullptr, self, referrer, test);
1410 #else
1411   LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
1412   // Force-print to std::cout so it's also outside the logcat.
1413   std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
1414 #endif
1415 }
1416 
1417 
1418 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1419 static void set_and_check_instance(Handle<mirror::ArtField>* f, mirror::Object* trg,
1420                                    mirror::Object* val, Thread* self, mirror::ArtMethod* referrer,
1421                                    StubTest* test)
1422     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1423   test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1424                             reinterpret_cast<size_t>(trg),
1425                             reinterpret_cast<size_t>(val),
1426                             StubTest::GetEntrypoint(self, kQuickSetObjInstance),
1427                             self,
1428                             referrer);
1429 
1430   size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1431                                          reinterpret_cast<size_t>(trg),
1432                                          0U,
1433                                          StubTest::GetEntrypoint(self, kQuickGetObjInstance),
1434                                          self,
1435                                          referrer);
1436 
1437   EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
1438 
1439   EXPECT_EQ(val, f->Get()->GetObj(trg));
1440 }
1441 #endif
1442 
1443 static void GetSetObjInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
1444                               Thread* self, mirror::ArtMethod* referrer, StubTest* test)
1445     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1446 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1447   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
1448 
1449   // Allocate a string object for simplicity.
1450   mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
1451   set_and_check_instance(f, obj->Get(), str, self, referrer, test);
1452 
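  // Store null again: this exercises clearing the field and avoids keeping the temporary
  // String reachable through the instance field after the test.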
1453   set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
1454 #else
1455   LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
1456   // Force-print to std::cout so it's also outside the logcat.
1457   std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
1458 #endif
1459 }
1460 
1461 
1462 // TODO: Complete these tests for 32b architectures.
1463 
1464 static void GetSet64Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
1465                            mirror::ArtMethod* referrer, StubTest* test)
1466     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1467 #if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
1468   constexpr size_t num_values = 8;
1469   uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
1470 
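  // Only 64-bit targets are enabled above; the value is passed via Invoke3UWithReferrer
  // (defined earlier in this file), which forwards the payload as a full uint64_t.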
1471   for (size_t i = 0; i < num_values; ++i) {
1472     test->Invoke3UWithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1473                                values[i],
1474                                StubTest::GetEntrypoint(self, kQuickSet64Static),
1475                                self,
1476                                referrer);
1477 
1478     size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1479                                            0U, 0U,
1480                                            StubTest::GetEntrypoint(self, kQuickGet64Static),
1481                                            self,
1482                                            referrer);
1483 
1484     EXPECT_EQ(res, values[i]) << "Iteration " << i;
1485   }
1486 #else
1487   LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
1488   // Force-print to std::cout so it's also outside the logcat.
1489   std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
1490 #endif
1491 }
1492 
1493 
1494 static void GetSet64Instance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
1495                              Thread* self, mirror::ArtMethod* referrer, StubTest* test)
1496     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1497 #if (defined(__x86_64__) && !defined(__APPLE__)) || defined(__aarch64__)
1498   constexpr size_t num_values = 8;
1499   uint64_t values[num_values] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
1500 
1501   for (size_t i = 0; i < num_values; ++i) {
1502     test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1503                               reinterpret_cast<size_t>(obj->Get()),
1504                               static_cast<size_t>(values[i]),
1505                               StubTest::GetEntrypoint(self, kQuickSet64Instance),
1506                               self,
1507                               referrer);
1508 
1509     int64_t res = f->Get()->GetLong(obj->Get());
1510     EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;
1511 
1512     res++;
1513     f->Get()->SetLong<false>(obj->Get(), res);
1514 
1515     size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
1516                                             reinterpret_cast<size_t>(obj->Get()),
1517                                             0U,
1518                                             StubTest::GetEntrypoint(self, kQuickGet64Instance),
1519                                             self,
1520                                             referrer);
1521     EXPECT_EQ(res, static_cast<int64_t>(res2));
1522   }
1523 #else
1524   LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
1525   // Force-print to std::cout so it's also outside the logcat.
1526   std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1527 #endif
1528 }
1529 
1530 static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
1531   // garbage is created during ClassLinker::Init
1532 
1533   JNIEnv* env = Thread::Current()->GetJniEnv();
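  // AllFields comes from the test dex file loaded by the Fields* tests below. For the loops
  // further down to exercise anything, it needs static and instance fields of the tested
  // types; a hypothetical sketch of its shape (field names are illustrative only):
  //
  //   class AllFields {
  //     static int sI; static long sJ; static Object sObj;
  //     int iI; long iJ; Object iObj;
  //     // ... plus fields of the other primitive types, which this test skips.
  //   }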
1534   jclass jc = env->FindClass("AllFields");
1535   CHECK(jc != nullptr);
1536   jobject o = env->AllocObject(jc);
1537   CHECK(o != nullptr);
1538 
1539   ScopedObjectAccess soa(self);
1540   StackHandleScope<5> hs(self);
1541   Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
1542   Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
1543   // Need a method as a referrer; the field stubs resolve the dex field index through it.
1544   Handle<mirror::ArtMethod> m(hs.NewHandle(c->GetDirectMethod(0)));
1545 
1546   // Play with it...
1547 
1548   // Static fields.
1549   {
1550     Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetSFields()));
1551     int32_t num_fields = fields->GetLength();
1552     for (int32_t i = 0; i < num_fields; ++i) {
1553       StackHandleScope<1> hs(self);
1554       Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
1555 
1556       Primitive::Type type = f->GetTypeAsPrimitiveType();
1557       switch (type) {
1558         case Primitive::Type::kPrimInt:
1559           if (test_type == type) {
1560             GetSet32Static(&obj, &f, self, m.Get(), test);
1561           }
1562           break;
1563 
1564         case Primitive::Type::kPrimLong:
1565           if (test_type == type) {
1566             GetSet64Static(&obj, &f, self, m.Get(), test);
1567           }
1568           break;
1569 
1570         case Primitive::Type::kPrimNot:
1571           // Don't try array.
1572           if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
1573             GetSetObjStatic(&obj, &f, self, m.Get(), test);
1574           }
1575           break;
1576 
1577         default:
1578           break;  // Skip.
1579       }
1580     }
1581   }
1582 
1583   // Instance fields.
1584   {
1585     Handle<mirror::ObjectArray<mirror::ArtField>> fields(hs.NewHandle(c.Get()->GetIFields()));
1586     int32_t num_fields = fields->GetLength();
1587     for (int32_t i = 0; i < num_fields; ++i) {
1588       StackHandleScope<1> hs(self);
1589       Handle<mirror::ArtField> f(hs.NewHandle(fields->Get(i)));
1590 
1591       Primitive::Type type = f->GetTypeAsPrimitiveType();
1592       switch (type) {
1593         case Primitive::Type::kPrimInt:
1594           if (test_type == type) {
1595             GetSet32Instance(&obj, &f, self, m.Get(), test);
1596           }
1597           break;
1598 
1599         case Primitive::Type::kPrimLong:
1600           if (test_type == type) {
1601             GetSet64Instance(&obj, &f, self, m.Get(), test);
1602           }
1603           break;
1604 
1605         case Primitive::Type::kPrimNot:
1606           // Don't try array.
1607           if (test_type == type && f->GetTypeDescriptor()[0] != '[') {
1608             GetSetObjInstance(&obj, &f, self, m.Get(), test);
1609           }
1610           break;
1611 
1612         default:
1613           break;  // Skip.
1614       }
1615     }
1616   }
1617 
1618   // TODO: Deallocate things.
1619 }
1620 
1621 
1622 TEST_F(StubTest, Fields32) {
1623   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1624 
1625   Thread* self = Thread::Current();
1626 
1627   self->TransitionFromSuspendedToRunnable();
1628   LoadDex("AllFields");
1629   bool started = runtime_->Start();
1630   CHECK(started);
1631 
1632   TestFields(self, this, Primitive::Type::kPrimInt);
1633 }
1634 
1635 TEST_F(StubTest, FieldsObj) {
1636   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1637 
1638   Thread* self = Thread::Current();
1639 
1640   self->TransitionFromSuspendedToRunnable();
1641   LoadDex("AllFields");
1642   bool started = runtime_->Start();
1643   CHECK(started);
1644 
1645   TestFields(self, this, Primitive::Type::kPrimNot);
1646 }
1647 
1648 TEST_F(StubTest, Fields64) {
1649   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1650 
1651   Thread* self = Thread::Current();
1652 
1653   self->TransitionFromSuspendedToRunnable();
1654   LoadDex("AllFields");
1655   bool started = runtime_->Start();
1656   CHECK(started);
1657 
1658   TestFields(self, this, Primitive::Type::kPrimLong);
1659 }
1660 
1661 
1662 TEST_F(StubTest, IMT) {
1663 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
1664   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1665 
1666   Thread* self = Thread::Current();
1667 
1668   ScopedObjectAccess soa(self);
1669   StackHandleScope<7> hs(self);
1670 
1671   JNIEnv* env = Thread::Current()->GetJniEnv();
1672 
1673   // ArrayList
1674 
1675   // Load ArrayList and the methods we use (via JNI).
1676   jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
1677   ASSERT_NE(nullptr, arraylist_jclass);
1678   jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
1679   ASSERT_NE(nullptr, arraylist_constructor);
1680   jmethodID contains_jmethod = env->GetMethodID(arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
1681   ASSERT_NE(nullptr, contains_jmethod);
1682   jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
1683   ASSERT_NE(nullptr, add_jmethod);
1684 
1685   // Get mirror representation.
1686   Handle<mirror::ArtMethod> contains_amethod(hs.NewHandle(soa.DecodeMethod(contains_jmethod)));
1687 
1688   // Patch up ArrayList.contains.
1689   if (contains_amethod.Get()->GetEntryPointFromQuickCompiledCode() == nullptr) {
1690     contains_amethod.Get()->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
1691         StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
1692   }
1693 
1694   // List
1695 
1696   // Load List and the methods we use (via JNI).
1697   jclass list_jclass = env->FindClass("java/util/List");
1698   ASSERT_NE(nullptr, list_jclass);
1699   jmethodID inf_contains_jmethod = env->GetMethodID(list_jclass, "contains", "(Ljava/lang/Object;)Z");
1700   ASSERT_NE(nullptr, inf_contains_jmethod);
1701 
1702   // Get mirror representation.
1703   Handle<mirror::ArtMethod> inf_contains(hs.NewHandle(soa.DecodeMethod(inf_contains_jmethod)));
1704 
1705   // Object
1706 
1707   jclass obj_jclass = env->FindClass("java/lang/Object");
1708   ASSERT_NE(nullptr, obj_jclass);
1709   jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
1710   ASSERT_NE(nullptr, obj_constructor);
1711 
1712   // Sanity check: verify that there is an IMT conflict for List.contains in ArrayList.
1713 
1714   mirror::Class* arraylist_class = soa.Decode<mirror::Class*>(arraylist_jclass);
1715   mirror::ArtMethod* m = arraylist_class->GetEmbeddedImTableEntry(
1716       inf_contains->GetDexMethodIndex() % mirror::Class::kImtSize);
1717 
1718   if (!m->IsImtConflictMethod()) {
1719     LOG(WARNING) << "Test is meaningless, no IMT conflict in setup: " <<
1720         PrettyMethod(m, true);
1721     LOG(WARNING) << "Please update StubTest.IMT.";
1722     return;
1723   }
1724 
1725   // Create instances.
1726 
1727   jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
1728   ASSERT_NE(nullptr, jarray_list);
1729   Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object*>(jarray_list)));
1730 
1731   jobject jobj = env->NewObject(obj_jclass, obj_constructor);
1732   ASSERT_NE(nullptr, jobj);
1733   Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(jobj)));
1734 
1735   // Invoke.
1736 
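  // Call the IMT conflict trampoline directly: the interface method's dex method index is passed
  // as the hidden argument so the trampoline can resolve the call to the ArrayList implementation.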
1737   size_t result =
1738       Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
1739                                    reinterpret_cast<size_t>(obj.Get()),
1740                                    StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
1741                                    self, contains_amethod.Get(),
1742                                    static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
1743 
1744   ASSERT_FALSE(self->IsExceptionPending());
1745   EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
1746 
1747   // Add object.
1748 
1749   env->CallBooleanMethod(jarray_list, add_jmethod, jobj);
1750 
1751   ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
1752 
1753   // Invoke again.
1754 
1755   result = Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
1756                                         reinterpret_cast<size_t>(obj.Get()),
1757                                         StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
1758                                         self, contains_amethod.Get(),
1759                                         static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()));
1760 
1761   ASSERT_FALSE(self->IsExceptionPending());
1762   EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
1763 #else
1764   LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
1765   // Force-print to std::cout so it's also outside the logcat.
1766   std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
1767 #endif
1768 }
1769 
1770 TEST_F(StubTest, StringIndexOf) {
1771 #if defined(__arm__) || defined(__aarch64__)
1772   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
1773 
1774   Thread* self = Thread::Current();
1775   ScopedObjectAccess soa(self);
1776   // garbage is created during ClassLinker::Init
1777 
1778   // Create some strings.
1779   // Use an array so we can index into it and a matrix for the expected results.
1780   // Setup: strings of increasing length and a set of characters, covering hits and misses.
1781   // TODO: Shared backing arrays.
1782   static constexpr size_t kStringCount = 7;
1783   const char* c_str[kStringCount] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
1784   static constexpr size_t kCharCount = 5;
1785   const char c_char[kCharCount] = { 'a', 'b', 'c', 'd', 'e' };
1786 
1787   StackHandleScope<kStringCount> hs(self);
1788   Handle<mirror::String> s[kStringCount];
1789 
1790   for (size_t i = 0; i < kStringCount; ++i) {
1791     s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
1792   }
1793 
1794   // Matrix of expectations: [string][character][start index]. Note we check the exact result
1795   // value here; the expected values are computed with String::FastIndexOf, so we rely on that
1796   // being correct.
1797   static constexpr size_t kMaxLen = 9;
1798   DCHECK_LE(strlen(c_str[kStringCount-1]), kMaxLen) << "Please fix the indexof test.";
1799 
1800   // Last dimension: start index, offset by 1 (z == 0 corresponds to start == -1).
1801   int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
1802   for (size_t x = 0; x < kStringCount; ++x) {
1803     for (size_t y = 0; y < kCharCount; ++y) {
1804       for (size_t z = 0; z <= kMaxLen + 2; ++z) {
1805         expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
1806       }
1807     }
1808   }
1809 
1810   // Play with it...
1811 
1812   for (size_t x = 0; x < kStringCount; ++x) {
1813     for (size_t y = 0; y < kCharCount; ++y) {
1814       for (size_t z = 0; z <= kMaxLen + 2; ++z) {
1815         int32_t start = static_cast<int32_t>(z) - 1;
1816 
1817         // Test indexof of character y in string x, starting at the given index.
1818         size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
1819                                 StubTest::GetEntrypoint(self, kQuickIndexOf), self);
1820 
1821         EXPECT_FALSE(self->IsExceptionPending());
1822 
1823         // The result is a 32b signed integer
1824         union {
1825           size_t r;
1826           int32_t i;
1827         } conv;
1828         conv.r = result;
1829 
1830         EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
1831             c_char[y] << " @ " << start;
1832       }
1833     }
1834   }
1835 
1836   // TODO: Deallocate things.
1837 
1838   // Tests done.
1839 #else
1840   LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
1841   // Force-print to std::cout so it's also outside the logcat.
1842   std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
1843 #endif
1844 }
1845 
1846 }  // namespace art
1847