1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <cstdio>
18
19 #include "art_field-inl.h"
20 #include "art_method-inl.h"
21 #include "class_linker-inl.h"
22 #include "common_runtime_test.h"
23 #include "entrypoints/quick/quick_entrypoints_enum.h"
24 #include "linear_alloc.h"
25 #include "mirror/class-inl.h"
26 #include "mirror/string-inl.h"
27 #include "scoped_thread_state_change.h"
28
29 namespace art {
30
31
32 class StubTest : public CommonRuntimeTest {
33 protected:
34 // We need callee-save methods set up in the Runtime for exceptions.
35 void SetUp() OVERRIDE {
36 // Do the normal setup.
37 CommonRuntimeTest::SetUp();
38
39 {
40 // Create callee-save methods
41 ScopedObjectAccess soa(Thread::Current());
42 runtime_->SetInstructionSet(kRuntimeISA);
43 for (int i = 0; i < Runtime::kLastCalleeSaveType; i++) {
44 Runtime::CalleeSaveType type = Runtime::CalleeSaveType(i);
45 if (!runtime_->HasCalleeSaveMethod(type)) {
46 runtime_->SetCalleeSaveMethod(runtime_->CreateCalleeSaveMethod(), type);
47 }
48 }
49 }
50 }
51
52 void SetUpRuntimeOptions(RuntimeOptions *options) OVERRIDE {
53 // Use a smaller heap
54 for (std::pair<std::string, const void*>& pair : *options) {
55 if (pair.first.find("-Xmx") == 0) {
56 pair.first = "-Xmx4M"; // Smallest we can go.
57 }
58 }
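// Run in interpret-only mode; the stubs under test are invoked directly through Invoke3.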
59 options->push_back(std::make_pair("-Xint", nullptr));
60 }
61
62 // Helper function needed since TEST_F makes a new class.
63 Thread::tls_ptr_sized_values* GetTlsPtr(Thread* self) {
64 return &self->tlsPtr_;
65 }
66
67 public:
68 size_t Invoke3(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self) {
69 return Invoke3WithReferrer(arg0, arg1, arg2, code, self, nullptr);
70 }
71
72 // TODO: Set up a frame according to referrer's specs.
73 size_t Invoke3WithReferrer(size_t arg0, size_t arg1, size_t arg2, uintptr_t code, Thread* self,
74 ArtMethod* referrer) {
75 return Invoke3WithReferrerAndHidden(arg0, arg1, arg2, code, self, referrer, 0);
76 }
77
78 // TODO: Set up a frame according to referrer's specs.
79 size_t Invoke3WithReferrerAndHidden(size_t arg0, size_t arg1, size_t arg2, uintptr_t code,
80 Thread* self, ArtMethod* referrer, size_t hidden) {
81 // Push a transition back into managed code onto the linked list in thread.
82 ManagedStack fragment;
83 self->PushManagedStackFragment(&fragment);
84
85 size_t result;
86 size_t fpr_result = 0;
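// Each architecture block below emulates a call from managed code into a quick stub: it spills
// the registers we promise not to clobber, stores the referrer at the bottom of a small,
// 16-byte aligned fake "quick frame" (where a callee expects the calling ArtMethod*), loads
// arg0-arg2 (and, where the ABI defines them, the Thread* and the hidden argument) into the
// expected registers, and then calls the stub through the code pointer.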
87 #if defined(__i386__)
88 // TODO: Set the thread?
89 #define PUSH(reg) "push " # reg "\n\t .cfi_adjust_cfa_offset 4\n\t"
90 #define POP(reg) "pop " # reg "\n\t .cfi_adjust_cfa_offset -4\n\t"
91 __asm__ __volatile__(
92 "movd %[hidden], %%xmm7\n\t" // This is a memory op, so do this early. If it is off of
93 // esp, then we won't be able to access it after spilling.
94
95 // Spill 6 registers.
96 PUSH(%%ebx)
97 PUSH(%%ecx)
98 PUSH(%%edx)
99 PUSH(%%esi)
100 PUSH(%%edi)
101 PUSH(%%ebp)
102
103 // Store the inputs on the stack, but keep the referrer on top for less work.
104 PUSH(%[referrer]) // Align stack.
105 PUSH(%[referrer]) // Store referrer
106
107 PUSH(%[arg0])
108 PUSH(%[arg1])
109 PUSH(%[arg2])
110 PUSH(%[code])
111 // Now read them back into the required registers.
112 POP(%%edi)
113 POP(%%edx)
114 POP(%%ecx)
115 POP(%%eax)
116 // Call is prepared now.
117
118 "call *%%edi\n\t" // Call the stub
119 "addl $8, %%esp\n\t" // Pop referrer and padding.
120 ".cfi_adjust_cfa_offset -8\n\t"
121
122 // Restore 6 registers.
123 POP(%%ebp)
124 POP(%%edi)
125 POP(%%esi)
126 POP(%%edx)
127 POP(%%ecx)
128 POP(%%ebx)
129
130 : "=a" (result)
131 // Use the result from eax
132 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
133 [referrer]"r"(referrer), [hidden]"m"(hidden)
134 // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
135 : "memory", "xmm7"); // clobber.
136 #undef PUSH
137 #undef POP
138 #elif defined(__arm__)
139 __asm__ __volatile__(
140 "push {r1-r12, lr}\n\t" // Save state, 13*4B = 52B
141 ".cfi_adjust_cfa_offset 52\n\t"
142 "push {r9}\n\t"
143 ".cfi_adjust_cfa_offset 4\n\t"
144 "mov r9, %[referrer]\n\n"
145 "str r9, [sp, #-8]!\n\t" // Push referrer, +8B padding so 16B aligned
146 ".cfi_adjust_cfa_offset 8\n\t"
147 "ldr r9, [sp, #8]\n\t"
148
149 // Push everything on the stack, so we don't rely on the order. What a mess. :-(
150 "sub sp, sp, #24\n\t"
151 "str %[arg0], [sp]\n\t"
152 "str %[arg1], [sp, #4]\n\t"
153 "str %[arg2], [sp, #8]\n\t"
154 "str %[code], [sp, #12]\n\t"
155 "str %[self], [sp, #16]\n\t"
156 "str %[hidden], [sp, #20]\n\t"
157 "ldr r0, [sp]\n\t"
158 "ldr r1, [sp, #4]\n\t"
159 "ldr r2, [sp, #8]\n\t"
160 "ldr r3, [sp, #12]\n\t"
161 "ldr r9, [sp, #16]\n\t"
162 "ldr r12, [sp, #20]\n\t"
163 "add sp, sp, #24\n\t"
164
165 "blx r3\n\t" // Call the stub
166 "add sp, sp, #12\n\t" // Pop null and padding
167 ".cfi_adjust_cfa_offset -12\n\t"
168 "pop {r1-r12, lr}\n\t" // Restore state
169 ".cfi_adjust_cfa_offset -52\n\t"
170 "mov %[result], r0\n\t" // Save the result
171 : [result] "=r" (result)
172 // Use the result from r0
173 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
174 [referrer] "r"(referrer), [hidden] "r"(hidden)
175 : "r0", "memory"); // clobber.
176 #elif defined(__aarch64__)
177 __asm__ __volatile__(
178 // Spill x0-x7 which we say we don't clobber. May contain args.
179 "sub sp, sp, #80\n\t"
180 ".cfi_adjust_cfa_offset 80\n\t"
181 "stp x0, x1, [sp]\n\t"
182 "stp x2, x3, [sp, #16]\n\t"
183 "stp x4, x5, [sp, #32]\n\t"
184 "stp x6, x7, [sp, #48]\n\t"
185 // To be extra defensive, store x20. We do this because some of the stubs might make a
186 // transition into the runtime via the blr instruction below and *not* save x20.
187 "str x20, [sp, #64]\n\t"
188 // 8 byte buffer
189
190 "sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
191 ".cfi_adjust_cfa_offset 16\n\t"
192 "str %[referrer], [sp]\n\t" // referrer
193
194 // Push everything on the stack, so we don't rely on the order. What a mess. :-(
195 "sub sp, sp, #48\n\t"
196 ".cfi_adjust_cfa_offset 48\n\t"
197 // All things are "r" constraints, so direct str/stp should work.
198 "stp %[arg0], %[arg1], [sp]\n\t"
199 "stp %[arg2], %[code], [sp, #16]\n\t"
200 "stp %[self], %[hidden], [sp, #32]\n\t"
201
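        // d8-d15 are callee-saved under AAPCS64, so the stub must preserve them. Write known
        // patterns into them before the call and verify them afterwards; a mismatch sets x9,
        // which is reported through fpr_result and checked at the end of this function.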
202 // Now we definitely have x0-x3 free, use it to garble d8 - d15
203 "movk x0, #0xfad0\n\t"
204 "movk x0, #0xebad, lsl #16\n\t"
205 "movk x0, #0xfad0, lsl #32\n\t"
206 "movk x0, #0xebad, lsl #48\n\t"
207 "fmov d8, x0\n\t"
208 "add x0, x0, 1\n\t"
209 "fmov d9, x0\n\t"
210 "add x0, x0, 1\n\t"
211 "fmov d10, x0\n\t"
212 "add x0, x0, 1\n\t"
213 "fmov d11, x0\n\t"
214 "add x0, x0, 1\n\t"
215 "fmov d12, x0\n\t"
216 "add x0, x0, 1\n\t"
217 "fmov d13, x0\n\t"
218 "add x0, x0, 1\n\t"
219 "fmov d14, x0\n\t"
220 "add x0, x0, 1\n\t"
221 "fmov d15, x0\n\t"
222
223 // Load call params into the right registers.
224 "ldp x0, x1, [sp]\n\t"
225 "ldp x2, x3, [sp, #16]\n\t"
226 "ldp x19, x17, [sp, #32]\n\t"
227 "add sp, sp, #48\n\t"
228 ".cfi_adjust_cfa_offset -48\n\t"
229
230 "blr x3\n\t" // Call the stub
231 "mov x8, x0\n\t" // Store result
232 "add sp, sp, #16\n\t" // Drop the quick "frame"
233 ".cfi_adjust_cfa_offset -16\n\t"
234
235 // Test d8 - d15. We can use x1 and x2.
236 "movk x1, #0xfad0\n\t"
237 "movk x1, #0xebad, lsl #16\n\t"
238 "movk x1, #0xfad0, lsl #32\n\t"
239 "movk x1, #0xebad, lsl #48\n\t"
240 "fmov x2, d8\n\t"
241 "cmp x1, x2\n\t"
242 "b.ne 1f\n\t"
243 "add x1, x1, 1\n\t"
244
245 "fmov x2, d9\n\t"
246 "cmp x1, x2\n\t"
247 "b.ne 1f\n\t"
248 "add x1, x1, 1\n\t"
249
250 "fmov x2, d10\n\t"
251 "cmp x1, x2\n\t"
252 "b.ne 1f\n\t"
253 "add x1, x1, 1\n\t"
254
255 "fmov x2, d11\n\t"
256 "cmp x1, x2\n\t"
257 "b.ne 1f\n\t"
258 "add x1, x1, 1\n\t"
259
260 "fmov x2, d12\n\t"
261 "cmp x1, x2\n\t"
262 "b.ne 1f\n\t"
263 "add x1, x1, 1\n\t"
264
265 "fmov x2, d13\n\t"
266 "cmp x1, x2\n\t"
267 "b.ne 1f\n\t"
268 "add x1, x1, 1\n\t"
269
270 "fmov x2, d14\n\t"
271 "cmp x1, x2\n\t"
272 "b.ne 1f\n\t"
273 "add x1, x1, 1\n\t"
274
275 "fmov x2, d15\n\t"
276 "cmp x1, x2\n\t"
277 "b.ne 1f\n\t"
278
279 "mov x9, #0\n\t" // Use x9 as flag, in clobber list
280
281 // Finish up.
282 "2:\n\t"
283 "ldp x0, x1, [sp]\n\t" // Restore stuff not named clobbered, may contain fpr_result
284 "ldp x2, x3, [sp, #16]\n\t"
285 "ldp x4, x5, [sp, #32]\n\t"
286 "ldp x6, x7, [sp, #48]\n\t"
287 "ldr x20, [sp, #64]\n\t"
288 "add sp, sp, #80\n\t" // Free stack space, now sp as on entry
289 ".cfi_adjust_cfa_offset -80\n\t"
290
291 "str x9, %[fpr_result]\n\t" // Store the FPR comparison result
292 "mov %[result], x8\n\t" // Store the call result
293
294 "b 3f\n\t" // Goto end
295
296 // Failed fpr verification.
297 "1:\n\t"
298 "mov x9, #1\n\t"
299 "b 2b\n\t" // Goto finish-up
300
301 // End
302 "3:\n\t"
303 : [result] "=r" (result)
304 // Use the result from x0 (moved into x8 above)
305 : [arg0] "0"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
306 [referrer] "r"(referrer), [hidden] "r"(hidden), [fpr_result] "m" (fpr_result)
307 // Leave one register unclobbered, which is needed for compiling with
308 // -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
309 // which means we should unclobber one of the callee-saved registers that are unused.
310 // Here we use x20.
311 : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
312 "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
313 "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
314 "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
315 "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
316 "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
317 "memory");
318 #elif defined(__mips__) && !defined(__LP64__)
319 __asm__ __volatile__ (
320 // Spill a0-a3 and t0-t7 which we say we don't clobber. May contain args.
321 "addiu $sp, $sp, -64\n\t"
322 "sw $a0, 0($sp)\n\t"
323 "sw $a1, 4($sp)\n\t"
324 "sw $a2, 8($sp)\n\t"
325 "sw $a3, 12($sp)\n\t"
326 "sw $t0, 16($sp)\n\t"
327 "sw $t1, 20($sp)\n\t"
328 "sw $t2, 24($sp)\n\t"
329 "sw $t3, 28($sp)\n\t"
330 "sw $t4, 32($sp)\n\t"
331 "sw $t5, 36($sp)\n\t"
332 "sw $t6, 40($sp)\n\t"
333 "sw $t7, 44($sp)\n\t"
334 // Spill gp register since it is caller save.
335 "sw $gp, 52($sp)\n\t"
336
337 "addiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
338 "sw %[referrer], 0($sp)\n\t"
339
340 // Push everything on the stack, so we don't rely on the order.
341 "addiu $sp, $sp, -24\n\t"
342 "sw %[arg0], 0($sp)\n\t"
343 "sw %[arg1], 4($sp)\n\t"
344 "sw %[arg2], 8($sp)\n\t"
345 "sw %[code], 12($sp)\n\t"
346 "sw %[self], 16($sp)\n\t"
347 "sw %[hidden], 20($sp)\n\t"
348
349 // Load call params into the right registers.
350 "lw $a0, 0($sp)\n\t"
351 "lw $a1, 4($sp)\n\t"
352 "lw $a2, 8($sp)\n\t"
353 "lw $t9, 12($sp)\n\t"
354 "lw $s1, 16($sp)\n\t"
355 "lw $t0, 20($sp)\n\t"
356 "addiu $sp, $sp, 24\n\t"
357
358 "jalr $t9\n\t" // Call the stub.
359 "nop\n\t"
360 "addiu $sp, $sp, 16\n\t" // Drop the quick "frame".
361
362 // Restore stuff not named clobbered.
363 "lw $a0, 0($sp)\n\t"
364 "lw $a1, 4($sp)\n\t"
365 "lw $a2, 8($sp)\n\t"
366 "lw $a3, 12($sp)\n\t"
367 "lw $t0, 16($sp)\n\t"
368 "lw $t1, 20($sp)\n\t"
369 "lw $t2, 24($sp)\n\t"
370 "lw $t3, 28($sp)\n\t"
371 "lw $t4, 32($sp)\n\t"
372 "lw $t5, 36($sp)\n\t"
373 "lw $t6, 40($sp)\n\t"
374 "lw $t7, 44($sp)\n\t"
375 // Restore gp.
376 "lw $gp, 52($sp)\n\t"
377 "addiu $sp, $sp, 64\n\t" // Free stack space, now sp as on entry.
378
379 "move %[result], $v0\n\t" // Store the call result.
380 : [result] "=r" (result)
381 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
382 [referrer] "r"(referrer), [hidden] "r"(hidden)
383 : "at", "v0", "v1", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "t8", "t9", "k0", "k1",
384 "fp", "ra",
385 "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
386 "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
387 "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
388 "memory"); // clobber.
389 #elif defined(__mips__) && defined(__LP64__)
390 __asm__ __volatile__ (
391 // Spill a0-a7 which we say we don't clobber. May contain args.
392 "daddiu $sp, $sp, -64\n\t"
393 "sd $a0, 0($sp)\n\t"
394 "sd $a1, 8($sp)\n\t"
395 "sd $a2, 16($sp)\n\t"
396 "sd $a3, 24($sp)\n\t"
397 "sd $a4, 32($sp)\n\t"
398 "sd $a5, 40($sp)\n\t"
399 "sd $a6, 48($sp)\n\t"
400 "sd $a7, 56($sp)\n\t"
401
402 "daddiu $sp, $sp, -16\n\t" // Reserve stack space, 16B aligned.
403 "sd %[referrer], 0($sp)\n\t"
404
405 // Push everything on the stack, so we don't rely on the order.
406 "daddiu $sp, $sp, -48\n\t"
407 "sd %[arg0], 0($sp)\n\t"
408 "sd %[arg1], 8($sp)\n\t"
409 "sd %[arg2], 16($sp)\n\t"
410 "sd %[code], 24($sp)\n\t"
411 "sd %[self], 32($sp)\n\t"
412 "sd %[hidden], 40($sp)\n\t"
413
414 // Load call params into the right registers.
415 "ld $a0, 0($sp)\n\t"
416 "ld $a1, 8($sp)\n\t"
417 "ld $a2, 16($sp)\n\t"
418 "ld $t9, 24($sp)\n\t"
419 "ld $s1, 32($sp)\n\t"
420 "ld $t0, 40($sp)\n\t"
421 "daddiu $sp, $sp, 48\n\t"
422
423 "jalr $t9\n\t" // Call the stub.
424 "nop\n\t"
425 "daddiu $sp, $sp, 16\n\t" // Drop the quick "frame".
426
427 // Restore stuff not named clobbered.
428 "ld $a0, 0($sp)\n\t"
429 "ld $a1, 8($sp)\n\t"
430 "ld $a2, 16($sp)\n\t"
431 "ld $a3, 24($sp)\n\t"
432 "ld $a4, 32($sp)\n\t"
433 "ld $a5, 40($sp)\n\t"
434 "ld $a6, 48($sp)\n\t"
435 "ld $a7, 56($sp)\n\t"
436 "daddiu $sp, $sp, 64\n\t"
437
438 "move %[result], $v0\n\t" // Store the call result.
439 : [result] "=r" (result)
440 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code), [self] "r"(self),
441 [referrer] "r"(referrer), [hidden] "r"(hidden)
442 // Instead of the aliases t0-t3, the register names $12-$15 have been used in the clobber list
443 // because t0-t3 are ambiguous.
444 : "at", "v0", "v1", "$12", "$13", "$14", "$15", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
445 "s7", "t8", "t9", "k0", "k1", "fp", "ra",
446 "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", "$f10", "$f11",
447 "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22",
448 "$f23", "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
449 "memory"); // clobber.
450 #elif defined(__x86_64__) && !defined(__APPLE__)
451 #define PUSH(reg) "pushq " # reg "\n\t .cfi_adjust_cfa_offset 8\n\t"
452 #define POP(reg) "popq " # reg "\n\t .cfi_adjust_cfa_offset -8\n\t"
453 // Note: Uses the native convention. We do a callee-save regimen by manually spilling and
454 // restoring almost all registers.
455 // TODO: Set the thread?
456 __asm__ __volatile__(
457 // Spill almost everything (except rax, rsp). 14 registers.
458 PUSH(%%rbx)
459 PUSH(%%rcx)
460 PUSH(%%rdx)
461 PUSH(%%rsi)
462 PUSH(%%rdi)
463 PUSH(%%rbp)
464 PUSH(%%r8)
465 PUSH(%%r9)
466 PUSH(%%r10)
467 PUSH(%%r11)
468 PUSH(%%r12)
469 PUSH(%%r13)
470 PUSH(%%r14)
471 PUSH(%%r15)
472
473 PUSH(%[referrer]) // Push referrer & 16B alignment padding
474 PUSH(%[referrer])
475
476 // Now juggle the input registers.
477 PUSH(%[arg0])
478 PUSH(%[arg1])
479 PUSH(%[arg2])
480 PUSH(%[hidden])
481 PUSH(%[code])
482 POP(%%r8)
483 POP(%%rax)
484 POP(%%rdx)
485 POP(%%rsi)
486 POP(%%rdi)
487
488 "call *%%r8\n\t" // Call the stub
489 "addq $16, %%rsp\n\t" // Pop null and padding
490 ".cfi_adjust_cfa_offset -16\n\t"
491
492 POP(%%r15)
493 POP(%%r14)
494 POP(%%r13)
495 POP(%%r12)
496 POP(%%r11)
497 POP(%%r10)
498 POP(%%r9)
499 POP(%%r8)
500 POP(%%rbp)
501 POP(%%rdi)
502 POP(%%rsi)
503 POP(%%rdx)
504 POP(%%rcx)
505 POP(%%rbx)
506
507 : "=a" (result)
508 // Use the result from rax
509 : [arg0] "r"(arg0), [arg1] "r"(arg1), [arg2] "r"(arg2), [code] "r"(code),
510 [referrer] "r"(referrer), [hidden] "r"(hidden)
511 // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into some other
512 // register. We can't use "b" (rbx), as ASAN uses this for the frame pointer.
513 : "memory"); // We spill and restore (almost) all registers, so only mention memory here.
514 #undef PUSH
515 #undef POP
516 #else
517 UNUSED(arg0, arg1, arg2, code, referrer, hidden);
518 LOG(WARNING) << "Was asked to invoke for an architecture I do not understand.";
519 result = 0;
520 #endif
521 // Pop transition.
522 self->PopManagedStackFragment(fragment);
523
524 fp_result = fpr_result;
525 EXPECT_EQ(0U, fp_result);
526
527 return result;
528 }
529
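// Reads the address of a quick entrypoint out of the Thread object: GetThreadOffset<> gives the
// pointer-size-specific offset of the entrypoint slot, and the function pointer stored there is
// returned. E.g. GetEntrypoint(self, kQuickMemcpy) yields the address of the memcpy stub.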
530 static uintptr_t GetEntrypoint(Thread* self, QuickEntrypointEnum entrypoint) {
531 int32_t offset;
532 #ifdef __LP64__
533 offset = GetThreadOffset<8>(entrypoint).Int32Value();
534 #else
535 offset = GetThreadOffset<4>(entrypoint).Int32Value();
536 #endif
537 return *reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(self) + offset);
538 }
539
540 protected:
541 size_t fp_result;
542 };
543
544
545 TEST_F(StubTest, Memcpy) {
546 #if defined(__i386__) || (defined(__x86_64__) && !defined(__APPLE__)) || defined(__mips__)
547 Thread* self = Thread::Current();
548
549 uint32_t orig[20];
550 uint32_t trg[20];
551 for (size_t i = 0; i < 20; ++i) {
552 orig[i] = i;
553 trg[i] = 0;
554 }
555
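  // Copy 10 words from orig[4..13] into trg[4..13]; everything outside that range of trg must
  // stay zero. Note orig[0] == trg[0] == 0 by construction, so the first check below passes even
  // though index 0 is not part of the copied range.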
556 Invoke3(reinterpret_cast<size_t>(&trg[4]), reinterpret_cast<size_t>(&orig[4]),
557 10 * sizeof(uint32_t), StubTest::GetEntrypoint(self, kQuickMemcpy), self);
558
559 EXPECT_EQ(orig[0], trg[0]);
560
561 for (size_t i = 1; i < 4; ++i) {
562 EXPECT_NE(orig[i], trg[i]);
563 }
564
565 for (size_t i = 4; i < 14; ++i) {
566 EXPECT_EQ(orig[i], trg[i]);
567 }
568
569 for (size_t i = 14; i < 20; ++i) {
570 EXPECT_NE(orig[i], trg[i]);
571 }
572
573 // TODO: Test overlapping?
574
575 #else
576 LOG(INFO) << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA;
577 // Force-print to std::cout so it's also outside the logcat.
578 std::cout << "Skipping memcpy as I don't know how to do that on " << kRuntimeISA << std::endl;
579 #endif
580 }
581
582 TEST_F(StubTest, LockObject) {
583 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
584 (defined(__x86_64__) && !defined(__APPLE__))
585 static constexpr size_t kThinLockLoops = 100;
586
587 Thread* self = Thread::Current();
588
589 const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
590
591 // Create an object
592 ScopedObjectAccess soa(self);
593 // garbage is created during ClassLinker::Init
594
595 StackHandleScope<2> hs(soa.Self());
596 Handle<mirror::String> obj(
597 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
598 LockWord lock = obj->GetLockWord(false);
599 LockWord::LockState old_state = lock.GetState();
600 EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
601
602 Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
603
604 LockWord lock_after = obj->GetLockWord(false);
605 LockWord::LockState new_state = lock_after.GetState();
606 EXPECT_EQ(LockWord::LockState::kThinLocked, new_state);
607 EXPECT_EQ(lock_after.ThinLockCount(), 0U); // Thin lock starts count at zero
608
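  // Recursively lock the same object; each additional acquisition bumps the thin lock count by one.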
609 for (size_t i = 1; i < kThinLockLoops; ++i) {
610 Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
611
612 // Check we're at lock count i
613
614 LockWord l_inc = obj->GetLockWord(false);
615 LockWord::LockState l_inc_state = l_inc.GetState();
616 EXPECT_EQ(LockWord::LockState::kThinLocked, l_inc_state);
617 EXPECT_EQ(l_inc.ThinLockCount(), i);
618 }
619
620 // Force a fat lock by running identity hashcode to fill up lock word.
621 Handle<mirror::String> obj2(hs.NewHandle(
622 mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
623
624 obj2->IdentityHashCode();
625
626 Invoke3(reinterpret_cast<size_t>(obj2.Get()), 0U, 0U, art_quick_lock_object, self);
627
628 LockWord lock_after2 = obj2->GetLockWord(false);
629 LockWord::LockState new_state2 = lock_after2.GetState();
630 EXPECT_EQ(LockWord::LockState::kFatLocked, new_state2);
631 EXPECT_NE(lock_after2.FatLockMonitor(), static_cast<Monitor*>(nullptr));
632
633 // Test done.
634 #else
635 LOG(INFO) << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA;
636 // Force-print to std::cout so it's also outside the logcat.
637 std::cout << "Skipping lock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
638 #endif
639 }
640
641
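// Minimal deterministic pseudo-random number generator (a multiply-and-modulo step plus a small
// additive offset), used to drive the lock/unlock stress test below.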
642 class RandGen {
643 public:
644 explicit RandGen(uint32_t seed) : val_(seed) {}
645
646 uint32_t next() {
647 val_ = val_ * 48271 % 2147483647 + 13;
648 return val_;
649 }
650
651 uint32_t val_;
652 };
653
654
655 // NO_THREAD_SAFETY_ANALYSIS as we do not want to grab exclusive mutator lock for MonitorInfo.
656 static void TestUnlockObject(StubTest* test) NO_THREAD_SAFETY_ANALYSIS {
657 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
658 (defined(__x86_64__) && !defined(__APPLE__))
659 static constexpr size_t kThinLockLoops = 100;
660
661 Thread* self = Thread::Current();
662
663 const uintptr_t art_quick_lock_object = StubTest::GetEntrypoint(self, kQuickLockObject);
664 const uintptr_t art_quick_unlock_object = StubTest::GetEntrypoint(self, kQuickUnlockObject);
665 // Create an object
666 ScopedObjectAccess soa(self);
667 // garbage is created during ClassLinker::Init
668 static constexpr size_t kNumberOfLocks = 10; // Number of objects = number of locks.
669 StackHandleScope<kNumberOfLocks + 1> hs(self);
670 Handle<mirror::String> obj(
671 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
672 LockWord lock = obj->GetLockWord(false);
673 LockWord::LockState old_state = lock.GetState();
674 EXPECT_EQ(LockWord::LockState::kUnlocked, old_state);
675
676 test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
677 // This should be an illegal monitor state.
678 EXPECT_TRUE(self->IsExceptionPending());
679 self->ClearException();
680
681 LockWord lock_after = obj->GetLockWord(false);
682 LockWord::LockState new_state = lock_after.GetState();
683 EXPECT_EQ(LockWord::LockState::kUnlocked, new_state);
684
685 test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_lock_object, self);
686
687 LockWord lock_after2 = obj->GetLockWord(false);
688 LockWord::LockState new_state2 = lock_after2.GetState();
689 EXPECT_EQ(LockWord::LockState::kThinLocked, new_state2);
690
691 test->Invoke3(reinterpret_cast<size_t>(obj.Get()), 0U, 0U, art_quick_unlock_object, self);
692
693 LockWord lock_after3 = obj->GetLockWord(false);
694 LockWord::LockState new_state3 = lock_after3.GetState();
695 EXPECT_EQ(LockWord::LockState::kUnlocked, new_state3);
696
697 // Stress test:
698 // Keep a number of objects and their locks in flight. Randomly lock or unlock one of them in
699 // each step.
700
701 RandGen r(0x1234);
702
703 constexpr size_t kIterations = 10000; // Number of iterations
704 constexpr size_t kMoveToFat = 1000; // Chance of 1:kMoveToFat to make a lock fat.
705
706 size_t counts[kNumberOfLocks];
707 bool fat[kNumberOfLocks]; // Whether a lock should be thin or fat.
708 Handle<mirror::String> objects[kNumberOfLocks];
709
710 // Initialize = allocate.
711 for (size_t i = 0; i < kNumberOfLocks; ++i) {
712 counts[i] = 0;
713 fat[i] = false;
714 objects[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), ""));
715 }
716
717 for (size_t i = 0; i < kIterations; ++i) {
718 // Select which lock to update.
719 size_t index = r.next() % kNumberOfLocks;
720
721 // Make lock fat?
722 if (!fat[index] && (r.next() % kMoveToFat == 0)) {
723 fat[index] = true;
724 objects[index]->IdentityHashCode();
725
726 LockWord lock_iter = objects[index]->GetLockWord(false);
727 LockWord::LockState iter_state = lock_iter.GetState();
728 if (counts[index] == 0) {
729 EXPECT_EQ(LockWord::LockState::kHashCode, iter_state);
730 } else {
731 EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state);
732 }
733 } else {
734 bool take_lock; // Whether to lock or unlock in this step.
735 if (counts[index] == 0) {
736 take_lock = true;
737 } else if (counts[index] == kThinLockLoops) {
738 take_lock = false;
739 } else {
740 // Randomly.
741 take_lock = r.next() % 2 == 0;
742 }
743
744 if (take_lock) {
745 test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_lock_object,
746 self);
747 counts[index]++;
748 } else {
749 test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U,
750 art_quick_unlock_object, self);
751 counts[index]--;
752 }
753
754 EXPECT_FALSE(self->IsExceptionPending());
755
756 // Check the new state.
757 LockWord lock_iter = objects[index]->GetLockWord(true);
758 LockWord::LockState iter_state = lock_iter.GetState();
759 if (fat[index]) {
760 // Abuse MonitorInfo.
761 EXPECT_EQ(LockWord::LockState::kFatLocked, iter_state) << index;
762 MonitorInfo info(objects[index].Get());
763 EXPECT_EQ(counts[index], info.entry_count_) << index;
764 } else {
765 if (counts[index] > 0) {
766 EXPECT_EQ(LockWord::LockState::kThinLocked, iter_state);
767 EXPECT_EQ(counts[index] - 1, lock_iter.ThinLockCount());
768 } else {
769 EXPECT_EQ(LockWord::LockState::kUnlocked, iter_state);
770 }
771 }
772 }
773 }
774
775 // Unlock the remaining count times and then check it's unlocked. Then deallocate.
776 // Go in reverse order to correctly handle Handles.
777 for (size_t i = 0; i < kNumberOfLocks; ++i) {
778 size_t index = kNumberOfLocks - 1 - i;
779 size_t count = counts[index];
780 while (count > 0) {
781 test->Invoke3(reinterpret_cast<size_t>(objects[index].Get()), 0U, 0U, art_quick_unlock_object,
782 self);
783 count--;
784 }
785
786 LockWord lock_after4 = objects[index]->GetLockWord(false);
787 LockWord::LockState new_state4 = lock_after4.GetState();
788 EXPECT_TRUE(LockWord::LockState::kUnlocked == new_state4
789 || LockWord::LockState::kFatLocked == new_state4);
790 }
791
792 // Test done.
793 #else
794 UNUSED(test);
795 LOG(INFO) << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA;
796 // Force-print to std::cout so it's also outside the logcat.
797 std::cout << "Skipping unlock_object as I don't know how to do that on " << kRuntimeISA << std::endl;
798 #endif
799 }
800
801 TEST_F(StubTest, UnlockObject) {
802 // This will lead to monitor error messages in the log.
803 ScopedLogSeverity sls(LogSeverity::FATAL);
804
805 TestUnlockObject(this);
806 }
807
808 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
809 (defined(__x86_64__) && !defined(__APPLE__))
810 extern "C" void art_quick_check_cast(void);
811 #endif
812
813 TEST_F(StubTest, CheckCast) {
814 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
815 (defined(__x86_64__) && !defined(__APPLE__))
816 Thread* self = Thread::Current();
817
818 const uintptr_t art_quick_check_cast = StubTest::GetEntrypoint(self, kQuickCheckCast);
819
820 // Find some classes.
821 ScopedObjectAccess soa(self);
822 // garbage is created during ClassLinker::Init
823
824 StackHandleScope<2> hs(soa.Self());
825 Handle<mirror::Class> c(
826 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
827 Handle<mirror::Class> c2(
828 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
829
830 EXPECT_FALSE(self->IsExceptionPending());
831
832 Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
833 art_quick_check_cast, self);
834
835 EXPECT_FALSE(self->IsExceptionPending());
836
837 Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
838 art_quick_check_cast, self);
839
840 EXPECT_FALSE(self->IsExceptionPending());
841
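  // Object[] is assignable from String[], so this pairing must not throw; the reverse pairing
  // further below must throw.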
842 Invoke3(reinterpret_cast<size_t>(c.Get()), reinterpret_cast<size_t>(c2.Get()), 0U,
843 art_quick_check_cast, self);
844
845 EXPECT_FALSE(self->IsExceptionPending());
846
847 // TODO: Make the following work. But that would require correct managed frames.
848
849 Invoke3(reinterpret_cast<size_t>(c2.Get()), reinterpret_cast<size_t>(c.Get()), 0U,
850 art_quick_check_cast, self);
851
852 EXPECT_TRUE(self->IsExceptionPending());
853 self->ClearException();
854
855 #else
856 LOG(INFO) << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA;
857 // Force-print to std::cout so it's also outside the logcat.
858 std::cout << "Skipping check_cast as I don't know how to do that on " << kRuntimeISA << std::endl;
859 #endif
860 }
861
862
863 TEST_F(StubTest, APutObj) {
864 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
865 (defined(__x86_64__) && !defined(__APPLE__))
866 Thread* self = Thread::Current();
867
868 // Do not test the non-checked entrypoints; we'd need handlers and stuff...
869 const uintptr_t art_quick_aput_obj_with_null_and_bound_check =
870 StubTest::GetEntrypoint(self, kQuickAputObjectWithNullAndBoundCheck);
871
872 // Create an object
873 ScopedObjectAccess soa(self);
874 // garbage is created during ClassLinker::Init
875
876 StackHandleScope<5> hs(soa.Self());
877 Handle<mirror::Class> c(
878 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
879 Handle<mirror::Class> ca(
880 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/String;")));
881
882 // Build a string array of size 10
883 Handle<mirror::ObjectArray<mirror::Object>> array(
884 hs.NewHandle(mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), 10)));
885
886 // Build a string -> should be assignable
887 Handle<mirror::String> str_obj(
888 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
889
890 // Build a generic object -> should fail assigning
891 Handle<mirror::Object> obj_obj(hs.NewHandle(c->AllocObject(soa.Self())));
892
893 // Play with it...
894
895 // 1) Success cases
896 // 1.1) Assign str_obj to array[0..3]
897
898 EXPECT_FALSE(self->IsExceptionPending());
899
900 Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(str_obj.Get()),
901 art_quick_aput_obj_with_null_and_bound_check, self);
902
903 EXPECT_FALSE(self->IsExceptionPending());
904 EXPECT_EQ(str_obj.Get(), array->Get(0));
905
906 Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(str_obj.Get()),
907 art_quick_aput_obj_with_null_and_bound_check, self);
908
909 EXPECT_FALSE(self->IsExceptionPending());
910 EXPECT_EQ(str_obj.Get(), array->Get(1));
911
912 Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(str_obj.Get()),
913 art_quick_aput_obj_with_null_and_bound_check, self);
914
915 EXPECT_FALSE(self->IsExceptionPending());
916 EXPECT_EQ(str_obj.Get(), array->Get(2));
917
918 Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(str_obj.Get()),
919 art_quick_aput_obj_with_null_and_bound_check, self);
920
921 EXPECT_FALSE(self->IsExceptionPending());
922 EXPECT_EQ(str_obj.Get(), array->Get(3));
923
924 // 1.2) Assign null to array[0..3]
925
926 Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(nullptr),
927 art_quick_aput_obj_with_null_and_bound_check, self);
928
929 EXPECT_FALSE(self->IsExceptionPending());
930 EXPECT_EQ(nullptr, array->Get(0));
931
932 Invoke3(reinterpret_cast<size_t>(array.Get()), 1U, reinterpret_cast<size_t>(nullptr),
933 art_quick_aput_obj_with_null_and_bound_check, self);
934
935 EXPECT_FALSE(self->IsExceptionPending());
936 EXPECT_EQ(nullptr, array->Get(1));
937
938 Invoke3(reinterpret_cast<size_t>(array.Get()), 2U, reinterpret_cast<size_t>(nullptr),
939 art_quick_aput_obj_with_null_and_bound_check, self);
940
941 EXPECT_FALSE(self->IsExceptionPending());
942 EXPECT_EQ(nullptr, array->Get(2));
943
944 Invoke3(reinterpret_cast<size_t>(array.Get()), 3U, reinterpret_cast<size_t>(nullptr),
945 art_quick_aput_obj_with_null_and_bound_check, self);
946
947 EXPECT_FALSE(self->IsExceptionPending());
948 EXPECT_EQ(nullptr, array->Get(3));
949
950 // TODO: Check _which_ exception is thrown. Then make 3) check that it's the right check order.
951
952 // 2) Failure cases (str into str[])
953 // 2.1) Array = null
954 // TODO: Throwing NPE needs actual DEX code
955
956 // Invoke3(reinterpret_cast<size_t>(nullptr), 0U, reinterpret_cast<size_t>(str_obj.Get()),
957 // reinterpret_cast<uintptr_t>(&art_quick_aput_obj_with_null_and_bound_check), self);
958 //
959 // EXPECT_TRUE(self->IsExceptionPending());
960 // self->ClearException();
961
962 // 2.2) Index < 0
963
964 Invoke3(reinterpret_cast<size_t>(array.Get()), static_cast<size_t>(-1),
965 reinterpret_cast<size_t>(str_obj.Get()),
966 art_quick_aput_obj_with_null_and_bound_check, self);
967
968 EXPECT_TRUE(self->IsExceptionPending());
969 self->ClearException();
970
971 // 2.3) Index > 0
972
973 Invoke3(reinterpret_cast<size_t>(array.Get()), 10U, reinterpret_cast<size_t>(str_obj.Get()),
974 art_quick_aput_obj_with_null_and_bound_check, self);
975
976 EXPECT_TRUE(self->IsExceptionPending());
977 self->ClearException();
978
979 // 3) Failure cases (obj into str[])
980
981 Invoke3(reinterpret_cast<size_t>(array.Get()), 0U, reinterpret_cast<size_t>(obj_obj.Get()),
982 art_quick_aput_obj_with_null_and_bound_check, self);
983
984 EXPECT_TRUE(self->IsExceptionPending());
985 self->ClearException();
986
987 // Tests done.
988 #else
989 LOG(INFO) << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA;
990 // Force-print to std::cout so it's also outside the logcat.
991 std::cout << "Skipping aput_obj as I don't know how to do that on " << kRuntimeISA << std::endl;
992 #endif
993 }
994
995 TEST_F(StubTest, AllocObject) {
996 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
997 (defined(__x86_64__) && !defined(__APPLE__))
998 // This will lead to OOM error messages in the log.
999 ScopedLogSeverity sls(LogSeverity::FATAL);
1000
1001 // TODO: Check the "Unresolved" allocation stubs
1002
1003 Thread* self = Thread::Current();
1004 // Create an object
1005 ScopedObjectAccess soa(self);
1006 // garbage is created during ClassLinker::Init
1007
1008 StackHandleScope<2> hs(soa.Self());
1009 Handle<mirror::Class> c(
1010 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1011
1012 // Play with it...
1013
1014 EXPECT_FALSE(self->IsExceptionPending());
1015 {
1016 // Use an arbitrary method from c to use as referrer
1017 size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
1018 // arbitrary
1019 reinterpret_cast<size_t>(c->GetVirtualMethod(0, sizeof(void*))),
1020 0U,
1021 StubTest::GetEntrypoint(self, kQuickAllocObject),
1022 self);
1023
1024 EXPECT_FALSE(self->IsExceptionPending());
1025 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1026 mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1027 EXPECT_EQ(c.Get(), obj->GetClass());
1028 VerifyObject(obj);
1029 }
1030
1031 {
1032 // We can use null in the second argument as we do not need a method here (not used in
1033 // resolved/initialized cases)
1034 size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
1035 StubTest::GetEntrypoint(self, kQuickAllocObjectResolved),
1036 self);
1037
1038 EXPECT_FALSE(self->IsExceptionPending());
1039 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1040 mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1041 EXPECT_EQ(c.Get(), obj->GetClass());
1042 VerifyObject(obj);
1043 }
1044
1045 {
1046 // We can use null in the second argument as we do not need a method here (not used in
1047 // resolved/initialized cases)
1048 size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
1049 StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
1050 self);
1051
1052 EXPECT_FALSE(self->IsExceptionPending());
1053 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1054 mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1055 EXPECT_EQ(c.Get(), obj->GetClass());
1056 VerifyObject(obj);
1057 }
1058
1059 // Failure tests.
1060
1061 // Out-of-memory.
1062 {
1063 Runtime::Current()->GetHeap()->SetIdealFootprint(1 * GB);
1064
1065 // Array helps to fill memory faster.
1066 Handle<mirror::Class> ca(
1067 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1068
1069 // Use an arbitrarily large amount for now.
1070 static const size_t kMaxHandles = 1000000;
1071 std::unique_ptr<StackHandleScope<kMaxHandles>> hsp(new StackHandleScope<kMaxHandles>(self));
1072
1073 std::vector<Handle<mirror::Object>> handles;
1074 // Start allocating with 128K
1075 size_t length = 128 * KB / 4;
1076 while (length > 10) {
1077 Handle<mirror::Object> h(hsp->NewHandle<mirror::Object>(
1078 mirror::ObjectArray<mirror::Object>::Alloc(soa.Self(), ca.Get(), length / 4)));
1079 if (self->IsExceptionPending() || h.Get() == nullptr) {
1080 self->ClearException();
1081
1082 // Try a smaller length
1083 length = length / 8;
1084 // Use at most half the reported free space.
1085 size_t mem = Runtime::Current()->GetHeap()->GetFreeMemory();
1086 if (length * 8 > mem) {
1087 length = mem / 8;
1088 }
1089 } else {
1090 handles.push_back(h);
1091 }
1092 }
1093 LOG(INFO) << "Used " << handles.size() << " arrays to fill space.";
1094
1095 // Allocate simple objects till it fails.
1096 while (!self->IsExceptionPending()) {
1097 Handle<mirror::Object> h = hsp->NewHandle(c->AllocObject(soa.Self()));
1098 if (!self->IsExceptionPending() && h.Get() != nullptr) {
1099 handles.push_back(h);
1100 }
1101 }
1102 self->ClearException();
1103
1104 size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 0u, 0U,
1105 StubTest::GetEntrypoint(self, kQuickAllocObjectInitialized),
1106 self);
1107 EXPECT_TRUE(self->IsExceptionPending());
1108 self->ClearException();
1109 EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
1110 }
1111
1112 // Tests done.
1113 #else
1114 LOG(INFO) << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA;
1115 // Force-print to std::cout so it's also outside the logcat.
1116 std::cout << "Skipping alloc_object as I don't know how to do that on " << kRuntimeISA << std::endl;
1117 #endif
1118 }
1119
1120 TEST_F(StubTest, AllocObjectArray) {
1121 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1122 (defined(__x86_64__) && !defined(__APPLE__))
1123 // TODO: Check the "Unresolved" allocation stubs
1124
1125 // This will lead to OOM error messages in the log.
1126 ScopedLogSeverity sls(LogSeverity::FATAL);
1127
1128 Thread* self = Thread::Current();
1129 // Create an object
1130 ScopedObjectAccess soa(self);
1131 // garbage is created during ClassLinker::Init
1132
1133 StackHandleScope<2> hs(self);
1134 Handle<mirror::Class> c(
1135 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "[Ljava/lang/Object;")));
1136
1137 // Needed to have a linked method.
1138 Handle<mirror::Class> c_obj(
1139 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
1140
1141 // Play with it...
1142
1143 EXPECT_FALSE(self->IsExceptionPending());
1144
1145 // For some reason this does not work, as the type_idx is artificial and outside what the
1146 // resolved types of c_obj allow...
1147
1148 if ((false)) {
1149 // Use an arbitrary method from c to use as referrer
1150 size_t result = Invoke3(static_cast<size_t>(c->GetDexTypeIndex()), // type_idx
1151 10U,
1152 // arbitrary
1153 reinterpret_cast<size_t>(c_obj->GetVirtualMethod(0, sizeof(void*))),
1154 StubTest::GetEntrypoint(self, kQuickAllocArray),
1155 self);
1156
1157 EXPECT_FALSE(self->IsExceptionPending());
1158 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1159 mirror::Array* obj = reinterpret_cast<mirror::Array*>(result);
1160 EXPECT_EQ(c.Get(), obj->GetClass());
1161 VerifyObject(obj);
1162 EXPECT_EQ(obj->GetLength(), 10);
1163 }
1164
1165 {
1166 // We can use null in the second argument as we do not need a method here (not used in
1167 // resolved/initialized cases)
1168 size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()), 10U,
1169 reinterpret_cast<size_t>(nullptr),
1170 StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
1171 self);
1172 EXPECT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
1173 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
1174 mirror::Object* obj = reinterpret_cast<mirror::Object*>(result);
1175 EXPECT_TRUE(obj->IsArrayInstance());
1176 EXPECT_TRUE(obj->IsObjectArray());
1177 EXPECT_EQ(c.Get(), obj->GetClass());
1178 VerifyObject(obj);
1179 mirror::Array* array = reinterpret_cast<mirror::Array*>(result);
1180 EXPECT_EQ(array->GetLength(), 10);
1181 }
1182
1183 // Failure tests.
1184
1185 // Out-of-memory.
1186 {
1187 size_t result = Invoke3(reinterpret_cast<size_t>(c.Get()),
1188 GB, // that should fail...
1189 reinterpret_cast<size_t>(nullptr),
1190 StubTest::GetEntrypoint(self, kQuickAllocArrayResolved),
1191 self);
1192
1193 EXPECT_TRUE(self->IsExceptionPending());
1194 self->ClearException();
1195 EXPECT_EQ(reinterpret_cast<size_t>(nullptr), result);
1196 }
1197
1198 // Tests done.
1199 #else
1200 LOG(INFO) << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA;
1201 // Force-print to std::cout so it's also outside the logcat.
1202 std::cout << "Skipping alloc_array as I don't know how to do that on " << kRuntimeISA << std::endl;
1203 #endif
1204 }
1205
1206
1207 TEST_F(StubTest, StringCompareTo) {
1208 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || \
1209 defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__))
1210 // TODO: Check the "Unresolved" allocation stubs
1211
1212 Thread* self = Thread::Current();
1213
1214 const uintptr_t art_quick_string_compareto = StubTest::GetEntrypoint(self, kQuickStringCompareTo);
1215
1216 ScopedObjectAccess soa(self);
1217 // garbage is created during ClassLinker::Init
1218
1219 // Create some strings
1220 // Use array so we can index into it and use a matrix for expected results
1221 // Setup: The first half is standard. The second half uses a non-zero offset.
1222 // TODO: Shared backing arrays.
1223 const char* c[] = { "", "", "a", "aa", "ab",
1224 "aacaacaacaacaacaac", // This one's under the default limit to go to __memcmp16.
1225 "aacaacaacaacaacaacaacaacaacaacaacaac", // This one's over.
1226 "aacaacaacaacaacaacaacaacaacaacaacaaca" }; // As is this one. We need a separate one to
1227 // defeat object-equal optimizations.
1228 static constexpr size_t kStringCount = arraysize(c);
1229
1230 StackHandleScope<kStringCount> hs(self);
1231 Handle<mirror::String> s[kStringCount];
1232
1233 for (size_t i = 0; i < kStringCount; ++i) {
1234 s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c[i]));
1235 }
1236
1237 // TODO: wide characters
1238
1239 // Matrix of expectations. First component is first parameter. Note we only check against the
1240 // sign, not the value. As we are testing random offsets, we need to compute this and need to
1241 // rely on String::CompareTo being correct.
1242 int32_t expected[kStringCount][kStringCount];
1243 for (size_t x = 0; x < kStringCount; ++x) {
1244 for (size_t y = 0; y < kStringCount; ++y) {
1245 expected[x][y] = s[x]->CompareTo(s[y].Get());
1246 }
1247 }
1248
1249 // Play with it...
1250
1251 for (size_t x = 0; x < kStringCount; ++x) {
1252 for (size_t y = 0; y < kStringCount; ++y) {
1253 // Test string_compareto x y
1254 size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()),
1255 reinterpret_cast<size_t>(s[y].Get()), 0U,
1256 art_quick_string_compareto, self);
1257
1258 EXPECT_FALSE(self->IsExceptionPending());
1259
1260 // The result is a 32b signed integer
1261 union {
1262 size_t r;
1263 int32_t i;
1264 } conv;
1265 conv.r = result;
1266 int32_t e = expected[x][y];
1267 EXPECT_TRUE(e == 0 ? conv.i == 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
1268 conv.r;
1269 EXPECT_TRUE(e < 0 ? conv.i < 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
1270 conv.r;
1271 EXPECT_TRUE(e > 0 ? conv.i > 0 : true) << "x=" << c[x] << " y=" << c[y] << " res=" <<
1272 conv.r;
1273 }
1274 }
1275
1276 // TODO: Deallocate things.
1277
1278 // Tests done.
1279 #else
1280 LOG(INFO) << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA;
1281 // Force-print to std::cout so it's also outside the logcat.
1282 std::cout << "Skipping string_compareto as I don't know how to do that on " << kRuntimeISA <<
1283 std::endl;
1284 #endif
1285 }
1286
1287
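// The GetSet* helpers below write a field through the quick set-field stubs, then read the value
// back (through the quick get-field stubs and/or directly through ArtField) and check that both
// views agree.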
1288 static void GetSetBooleanStatic(ArtField* f, Thread* self,
1289 ArtMethod* referrer, StubTest* test)
1290 SHARED_REQUIRES(Locks::mutator_lock_) {
1291 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1292 (defined(__x86_64__) && !defined(__APPLE__))
1293 constexpr size_t num_values = 5;
1294 uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
1295
1296 for (size_t i = 0; i < num_values; ++i) {
1297 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1298 static_cast<size_t>(values[i]),
1299 0U,
1300 StubTest::GetEntrypoint(self, kQuickSet8Static),
1301 self,
1302 referrer);
1303
1304 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1305 0U, 0U,
1306 StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
1307 self,
1308 referrer);
1309 // Booleans are currently stored as uint8_t; be more zealous about asserting correct writes/gets.
1310 EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
1311 }
1312 #else
1313 UNUSED(f, self, referrer, test);
1314 LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
1315 // Force-print to std::cout so it's also outside the logcat.
1316 std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1317 #endif
1318 }
1319 static void GetSetByteStatic(ArtField* f, Thread* self, ArtMethod* referrer,
1320 StubTest* test)
1321 SHARED_REQUIRES(Locks::mutator_lock_) {
1322 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1323 (defined(__x86_64__) && !defined(__APPLE__))
1324 int8_t values[] = { -128, -64, 0, 64, 127 };
1325
1326 for (size_t i = 0; i < arraysize(values); ++i) {
1327 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1328 static_cast<size_t>(values[i]),
1329 0U,
1330 StubTest::GetEntrypoint(self, kQuickSet8Static),
1331 self,
1332 referrer);
1333
1334 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1335 0U, 0U,
1336 StubTest::GetEntrypoint(self, kQuickGetByteStatic),
1337 self,
1338 referrer);
1339 EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
1340 }
1341 #else
1342 UNUSED(f, self, referrer, test);
1343 LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
1344 // Force-print to std::cout so it's also outside the logcat.
1345 std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1346 #endif
1347 }
1348
1349
1350 static void GetSetBooleanInstance(Handle<mirror::Object>* obj, ArtField* f, Thread* self,
1351 ArtMethod* referrer, StubTest* test)
1352 SHARED_REQUIRES(Locks::mutator_lock_) {
1353 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1354 (defined(__x86_64__) && !defined(__APPLE__))
1355 uint8_t values[] = { 0, true, 2, 128, 0xFF };
1356
1357 for (size_t i = 0; i < arraysize(values); ++i) {
1358 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1359 reinterpret_cast<size_t>(obj->Get()),
1360 static_cast<size_t>(values[i]),
1361 StubTest::GetEntrypoint(self, kQuickSet8Instance),
1362 self,
1363 referrer);
1364
1365 uint8_t res = f->GetBoolean(obj->Get());
1366 EXPECT_EQ(values[i], res) << "Iteration " << i;
1367
1368 f->SetBoolean<false>(obj->Get(), res);
1369
1370 size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1371 reinterpret_cast<size_t>(obj->Get()),
1372 0U,
1373 StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
1374 self,
1375 referrer);
1376 EXPECT_EQ(res, static_cast<uint8_t>(res2));
1377 }
1378 #else
1379 UNUSED(obj, f, self, referrer, test);
1380 LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
1381 // Force-print to std::cout so it's also outside the logcat.
1382 std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1383 #endif
1384 }
1385 static void GetSetByteInstance(Handle<mirror::Object>* obj, ArtField* f,
1386 Thread* self, ArtMethod* referrer, StubTest* test)
1387 SHARED_REQUIRES(Locks::mutator_lock_) {
1388 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1389 (defined(__x86_64__) && !defined(__APPLE__))
1390 int8_t values[] = { -128, -64, 0, 64, 127 };
1391
1392 for (size_t i = 0; i < arraysize(values); ++i) {
1393 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1394 reinterpret_cast<size_t>(obj->Get()),
1395 static_cast<size_t>(values[i]),
1396 StubTest::GetEntrypoint(self, kQuickSet8Instance),
1397 self,
1398 referrer);
1399
1400 int8_t res = f->GetByte(obj->Get());
1401 EXPECT_EQ(res, values[i]) << "Iteration " << i;
1402 f->SetByte<false>(obj->Get(), ++res);
1403
1404 size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1405 reinterpret_cast<size_t>(obj->Get()),
1406 0U,
1407 StubTest::GetEntrypoint(self, kQuickGetByteInstance),
1408 self,
1409 referrer);
1410 EXPECT_EQ(res, static_cast<int8_t>(res2));
1411 }
1412 #else
1413 UNUSED(obj, f, self, referrer, test);
1414 LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
1415 // Force-print to std::cout so it's also outside the logcat.
1416 std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1417 #endif
1418 }
1419
1420 static void GetSetCharStatic(ArtField* f, Thread* self, ArtMethod* referrer,
1421 StubTest* test)
1422 SHARED_REQUIRES(Locks::mutator_lock_) {
1423 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1424 (defined(__x86_64__) && !defined(__APPLE__))
1425 uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
1426
1427 for (size_t i = 0; i < arraysize(values); ++i) {
1428 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1429 static_cast<size_t>(values[i]),
1430 0U,
1431 StubTest::GetEntrypoint(self, kQuickSet16Static),
1432 self,
1433 referrer);
1434
1435 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1436 0U, 0U,
1437 StubTest::GetEntrypoint(self, kQuickGetCharStatic),
1438 self,
1439 referrer);
1440
1441 EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
1442 }
1443 #else
1444 UNUSED(f, self, referrer, test);
1445 LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
1446 // Force-print to std::cout so it's also outside the logcat.
1447 std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1448 #endif
1449 }
1450 static void GetSetShortStatic(ArtField* f, Thread* self,
1451 ArtMethod* referrer, StubTest* test)
1452 SHARED_REQUIRES(Locks::mutator_lock_) {
1453 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1454 (defined(__x86_64__) && !defined(__APPLE__))
1455 int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
1456
1457 for (size_t i = 0; i < arraysize(values); ++i) {
1458 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1459 static_cast<size_t>(values[i]),
1460 0U,
1461 StubTest::GetEntrypoint(self, kQuickSet16Static),
1462 self,
1463 referrer);
1464
1465 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1466 0U, 0U,
1467 StubTest::GetEntrypoint(self, kQuickGetShortStatic),
1468 self,
1469 referrer);
1470
1471 EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
1472 }
1473 #else
1474 UNUSED(f, self, referrer, test);
1475 LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
1476 // Force-print to std::cout so it's also outside the logcat.
1477 std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA << std::endl;
1478 #endif
1479 }
1480
1481 static void GetSetCharInstance(Handle<mirror::Object>* obj, ArtField* f,
1482 Thread* self, ArtMethod* referrer, StubTest* test)
1483 SHARED_REQUIRES(Locks::mutator_lock_) {
1484 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1485 (defined(__x86_64__) && !defined(__APPLE__))
1486 uint16_t values[] = { 0, 1, 2, 255, 32768, 0xFFFF };
1487
1488 for (size_t i = 0; i < arraysize(values); ++i) {
1489 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1490 reinterpret_cast<size_t>(obj->Get()),
1491 static_cast<size_t>(values[i]),
1492 StubTest::GetEntrypoint(self, kQuickSet16Instance),
1493 self,
1494 referrer);
1495
1496 uint16_t res = f->GetChar(obj->Get());
1497 EXPECT_EQ(res, values[i]) << "Iteration " << i;
1498 f->SetChar<false>(obj->Get(), ++res);
1499
1500 size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1501 reinterpret_cast<size_t>(obj->Get()),
1502 0U,
1503 StubTest::GetEntrypoint(self, kQuickGetCharInstance),
1504 self,
1505 referrer);
1506 EXPECT_EQ(res, static_cast<uint16_t>(res2));
1507 }
1508 #else
1509 UNUSED(obj, f, self, referrer, test);
1510 LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
1511 // Force-print to std::cout so it's also outside the logcat.
1512 std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1513 #endif
1514 }
1515 static void GetSetShortInstance(Handle<mirror::Object>* obj, ArtField* f,
1516 Thread* self, ArtMethod* referrer, StubTest* test)
1517 SHARED_REQUIRES(Locks::mutator_lock_) {
1518 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1519 (defined(__x86_64__) && !defined(__APPLE__))
1520 int16_t values[] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
1521
1522 for (size_t i = 0; i < arraysize(values); ++i) {
1523 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1524 reinterpret_cast<size_t>(obj->Get()),
1525 static_cast<size_t>(values[i]),
1526 StubTest::GetEntrypoint(self, kQuickSet16Instance),
1527 self,
1528 referrer);
1529
1530 int16_t res = f->GetShort(obj->Get());
1531 EXPECT_EQ(res, values[i]) << "Iteration " << i;
1532 f->SetShort<false>(obj->Get(), ++res);
1533
1534 size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1535 reinterpret_cast<size_t>(obj->Get()),
1536 0U,
1537 StubTest::GetEntrypoint(self, kQuickGetShortInstance),
1538 self,
1539 referrer);
1540 EXPECT_EQ(res, static_cast<int16_t>(res2));
1541 }
1542 #else
1543 UNUSED(obj, f, self, referrer, test);
1544 LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
1545 // Force-print to std::cout so it's also outside the logcat.
1546 std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1547 #endif
1548 }
1549
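// Stores each value through the Set32Static stub and reads it back through the Get32Static stub.
// On MIPS64 only the low 32 bits of the returned register are compared.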
1550 static void GetSet32Static(ArtField* f, Thread* self, ArtMethod* referrer,
1551 StubTest* test)
1552 SHARED_REQUIRES(Locks::mutator_lock_) {
1553 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1554 (defined(__x86_64__) && !defined(__APPLE__))
1555 uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
1556
1557 for (size_t i = 0; i < arraysize(values); ++i) {
1558 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1559 static_cast<size_t>(values[i]),
1560 0U,
1561 StubTest::GetEntrypoint(self, kQuickSet32Static),
1562 self,
1563 referrer);
1564
1565 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1566 0U, 0U,
1567 StubTest::GetEntrypoint(self, kQuickGet32Static),
1568 self,
1569 referrer);
1570
1571 #if defined(__mips__) && defined(__LP64__)
1572 EXPECT_EQ(static_cast<uint32_t>(res), values[i]) << "Iteration " << i;
1573 #else
1574 EXPECT_EQ(res, values[i]) << "Iteration " << i;
1575 #endif
1576 }
1577 #else
1578 UNUSED(f, self, referrer, test);
1579 LOG(INFO) << "Skipping set32static as I don't know how to do that on " << kRuntimeISA;
1580 // Force-print to std::cout so it's also outside the logcat.
1581 std::cout << "Skipping set32static as I don't know how to do that on " << kRuntimeISA << std::endl;
1582 #endif
1583 }
1584
1585
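// Writes through the Set32Instance stub, verifies via ArtField::GetInt, increments the field with
// ArtField::SetInt and reads the new value back through the Get32Instance stub.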
1586 static void GetSet32Instance(Handle<mirror::Object>* obj, ArtField* f,
1587 Thread* self, ArtMethod* referrer, StubTest* test)
1588 SHARED_REQUIRES(Locks::mutator_lock_) {
1589 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1590 (defined(__x86_64__) && !defined(__APPLE__))
1591 uint32_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF };
1592
1593 for (size_t i = 0; i < arraysize(values); ++i) {
1594 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1595 reinterpret_cast<size_t>(obj->Get()),
1596 static_cast<size_t>(values[i]),
1597 StubTest::GetEntrypoint(self, kQuickSet32Instance),
1598 self,
1599 referrer);
1600
1601 int32_t res = f->GetInt(obj->Get());
1602 EXPECT_EQ(res, static_cast<int32_t>(values[i])) << "Iteration " << i;
1603
1604 res++;
1605 f->SetInt<false>(obj->Get(), res);
1606
1607 size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1608 reinterpret_cast<size_t>(obj->Get()),
1609 0U,
1610 StubTest::GetEntrypoint(self, kQuickGet32Instance),
1611 self,
1612 referrer);
1613 EXPECT_EQ(res, static_cast<int32_t>(res2));
1614 }
1615 #else
1616 UNUSED(obj, f, self, referrer, test);
1617 LOG(INFO) << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA;
1618 // Force-print to std::cout so it's also outside the logcat.
1619 std::cout << "Skipping set32instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1620 #endif
1621 }
1622
1623
1624 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1625 (defined(__x86_64__) && !defined(__APPLE__))
1626
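// Helper for GetSetObjStatic: stores a reference through the SetObjStatic stub and checks that
// the GetObjStatic stub returns the same pointer.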
1627 static void set_and_check_static(uint32_t f_idx, mirror::Object* val, Thread* self,
1628 ArtMethod* referrer, StubTest* test)
1629 SHARED_REQUIRES(Locks::mutator_lock_) {
1630 test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
1631 reinterpret_cast<size_t>(val),
1632 0U,
1633 StubTest::GetEntrypoint(self, kQuickSetObjStatic),
1634 self,
1635 referrer);
1636
1637 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f_idx),
1638 0U, 0U,
1639 StubTest::GetEntrypoint(self, kQuickGetObjStatic),
1640 self,
1641 referrer);
1642
1643 EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
1644 }
1645 #endif
1646
1647 static void GetSetObjStatic(ArtField* f, Thread* self, ArtMethod* referrer,
1648 StubTest* test)
1649 SHARED_REQUIRES(Locks::mutator_lock_) {
1650 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1651 (defined(__x86_64__) && !defined(__APPLE__))
1652 set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
1653
1654 // Allocate a string object for simplicity.
1655 mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
1656 set_and_check_static(f->GetDexFieldIndex(), str, self, referrer, test);
1657
1658 set_and_check_static(f->GetDexFieldIndex(), nullptr, self, referrer, test);
1659 #else
1660 UNUSED(f, self, referrer, test);
1661 LOG(INFO) << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA;
1662 // Force-print to std::cout so it's also outside the logcat.
1663 std::cout << "Skipping setObjstatic as I don't know how to do that on " << kRuntimeISA << std::endl;
1664 #endif
1665 }
1666
1667
1668 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1669 (defined(__x86_64__) && !defined(__APPLE__))
1670 static void set_and_check_instance(ArtField* f, mirror::Object* trg,
1671 mirror::Object* val, Thread* self, ArtMethod* referrer,
1672 StubTest* test)
1673 SHARED_REQUIRES(Locks::mutator_lock_) {
1674 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1675 reinterpret_cast<size_t>(trg),
1676 reinterpret_cast<size_t>(val),
1677 StubTest::GetEntrypoint(self, kQuickSetObjInstance),
1678 self,
1679 referrer);
1680
1681 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1682 reinterpret_cast<size_t>(trg),
1683 0U,
1684 StubTest::GetEntrypoint(self, kQuickGetObjInstance),
1685 self,
1686 referrer);
1687
1688 EXPECT_EQ(res, reinterpret_cast<size_t>(val)) << "Value " << val;
1689
1690 EXPECT_EQ(val, f->GetObj(trg));
1691 }
1692 #endif
1693
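// Cycles an instance reference field through null, a newly allocated string, and null again,
// using the set_and_check_instance helper above.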
1694 static void GetSetObjInstance(Handle<mirror::Object>* obj, ArtField* f,
1695 Thread* self, ArtMethod* referrer, StubTest* test)
1696 SHARED_REQUIRES(Locks::mutator_lock_) {
1697 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1698 (defined(__x86_64__) && !defined(__APPLE__))
1699 set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
1700
1701 // Allocate a string object for simplicity.
1702 mirror::String* str = mirror::String::AllocFromModifiedUtf8(self, "Test");
1703 set_and_check_instance(f, obj->Get(), str, self, referrer, test);
1704
1705 set_and_check_instance(f, obj->Get(), nullptr, self, referrer, test);
1706 #else
1707 UNUSED(obj, f, self, referrer, test);
1708 LOG(INFO) << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA;
1709 // Force-print to std::cout so it's also outside the logcat.
1710 std::cout << "Skipping setObjinstance as I don't know how to do that on " << kRuntimeISA << std::endl;
1711 #endif
1712 }
1713
1714
1715 // TODO: Complete these tests for 32b architectures
1716
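// The 64-bit variants are only exercised on 64-bit targets (x86-64, ARM64 and MIPS64); other
// architectures fall through to the skip message below.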
1717 static void GetSet64Static(ArtField* f, Thread* self, ArtMethod* referrer,
1718 StubTest* test)
1719 SHARED_REQUIRES(Locks::mutator_lock_) {
1720 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) \
1721 || defined(__aarch64__)
1722 uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
1723
1724 for (size_t i = 0; i < arraysize(values); ++i) {
1725 // 64 bit FieldSet stores the set value in the second register.
1726 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1727 0U,
1728 values[i],
1729 StubTest::GetEntrypoint(self, kQuickSet64Static),
1730 self,
1731 referrer);
1732
1733 size_t res = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1734 0U, 0U,
1735 StubTest::GetEntrypoint(self, kQuickGet64Static),
1736 self,
1737 referrer);
1738
1739 EXPECT_EQ(res, values[i]) << "Iteration " << i;
1740 }
1741 #else
1742 UNUSED(f, self, referrer, test);
1743 LOG(INFO) << "Skipping set64static as I don't know how to do that on " << kRuntimeISA;
1744 // Force-print to std::cout so it's also outside the logcat.
1745 std::cout << "Skipping set64static as I don't know how to do that on " << kRuntimeISA << std::endl;
1746 #endif
1747 }
1748
1749
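// Instance counterpart of GetSet64Static; like GetSet32Instance it also cross-checks the stubs
// against ArtField::GetLong/SetLong.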
1750 static void GetSet64Instance(Handle<mirror::Object>* obj, ArtField* f,
1751 Thread* self, ArtMethod* referrer, StubTest* test)
1752 SHARED_REQUIRES(Locks::mutator_lock_) {
1753 #if (defined(__x86_64__) && !defined(__APPLE__)) || (defined(__mips__) && defined(__LP64__)) || \
1754 defined(__aarch64__)
1755 uint64_t values[] = { 0, 1, 2, 255, 32768, 1000000, 0xFFFFFFFF, 0xFFFFFFFFFFFF };
1756
1757 for (size_t i = 0; i < arraysize(values); ++i) {
1758 test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1759 reinterpret_cast<size_t>(obj->Get()),
1760 static_cast<size_t>(values[i]),
1761 StubTest::GetEntrypoint(self, kQuickSet64Instance),
1762 self,
1763 referrer);
1764
1765 int64_t res = f->GetLong(obj->Get());
1766 EXPECT_EQ(res, static_cast<int64_t>(values[i])) << "Iteration " << i;
1767
1768 res++;
1769 f->SetLong<false>(obj->Get(), res);
1770
1771 size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>(f->GetDexFieldIndex()),
1772 reinterpret_cast<size_t>(obj->Get()),
1773 0U,
1774 StubTest::GetEntrypoint(self, kQuickGet64Instance),
1775 self,
1776 referrer);
1777 EXPECT_EQ(res, static_cast<int64_t>(res2));
1778 }
1779 #else
1780 UNUSED(obj, f, self, referrer, test);
1781 LOG(INFO) << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA;
1782 // Force-print to std::cout so it's also outside the logcat.
1783 std::cout << "Skipping set64instance as I don't know how to do that on " << kRuntimeISA << std::endl;
1784 #endif
1785 }
1786
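// Loads the AllFields test class through JNI, allocates an instance and uses its first direct
// method as the referrer. Every static and instance field whose primitive type matches test_type
// is then exercised with the corresponding Get/Set helper above; array-typed reference fields are
// skipped.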
1787 static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type) {
1788 // garbage is created during ClassLinker::Init
1789
1790 JNIEnv* env = Thread::Current()->GetJniEnv();
1791 jclass jc = env->FindClass("AllFields");
1792 CHECK(jc != nullptr);
1793 jobject o = env->AllocObject(jc);
1794 CHECK(o != nullptr);
1795
1796 ScopedObjectAccess soa(self);
1797 StackHandleScope<3> hs(self);
1798 Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(o)));
1799 Handle<mirror::Class> c(hs.NewHandle(obj->GetClass()));
1800 // Need a method as a referrer
1801 ArtMethod* m = c->GetDirectMethod(0, sizeof(void*));
1802
1803 // Play with it...
1804
1805 // Static fields.
1806 for (ArtField& f : c->GetSFields()) {
1807 Primitive::Type type = f.GetTypeAsPrimitiveType();
1808 if (test_type != type) {
1809 continue;
1810 }
1811 switch (type) {
1812 case Primitive::Type::kPrimBoolean:
1813 GetSetBooleanStatic(&f, self, m, test);
1814 break;
1815 case Primitive::Type::kPrimByte:
1816 GetSetByteStatic(&f, self, m, test);
1817 break;
1818 case Primitive::Type::kPrimChar:
1819 GetSetCharStatic(&f, self, m, test);
1820 break;
1821 case Primitive::Type::kPrimShort:
1822 GetSetShortStatic(&f, self, m, test);
1823 break;
1824 case Primitive::Type::kPrimInt:
1825 GetSet32Static(&f, self, m, test);
1826 break;
1827 case Primitive::Type::kPrimLong:
1828 GetSet64Static(&f, self, m, test);
1829 break;
1830 case Primitive::Type::kPrimNot:
1831 // Don't try array.
1832 if (f.GetTypeDescriptor()[0] != '[') {
1833 GetSetObjStatic(&f, self, m, test);
1834 }
1835 break;
1836 default:
1837 break; // Skip.
1838 }
1839 }
1840
1841 // Instance fields.
1842 for (ArtField& f : c->GetIFields()) {
1843 Primitive::Type type = f.GetTypeAsPrimitiveType();
1844 if (test_type != type) {
1845 continue;
1846 }
1847 switch (type) {
1848 case Primitive::Type::kPrimBoolean:
1849 GetSetBooleanInstance(&obj, &f, self, m, test);
1850 break;
1851 case Primitive::Type::kPrimByte:
1852 GetSetByteInstance(&obj, &f, self, m, test);
1853 break;
1854 case Primitive::Type::kPrimChar:
1855 GetSetCharInstance(&obj, &f, self, m, test);
1856 break;
1857 case Primitive::Type::kPrimShort:
1858 GetSetShortInstance(&obj, &f, self, m, test);
1859 break;
1860 case Primitive::Type::kPrimInt:
1861 GetSet32Instance(&obj, &f, self, m, test);
1862 break;
1863 case Primitive::Type::kPrimLong:
1864 GetSet64Instance(&obj, &f, self, m, test);
1865 break;
1866 case Primitive::Type::kPrimNot:
1867 // Don't try array.
1868 if (f.GetTypeDescriptor()[0] != '[') {
1869 GetSetObjInstance(&obj, &f, self, m, test);
1870 }
1871 break;
1872 default:
1873 break; // Skip.
1874 }
1875 }
1876
1877 // TODO: Deallocate things.
1878 }
1879
1880 TEST_F(StubTest, Fields8) {
1881 Thread* self = Thread::Current();
1882
1883 self->TransitionFromSuspendedToRunnable();
1884 LoadDex("AllFields");
1885 bool started = runtime_->Start();
1886 CHECK(started);
1887
1888 TestFields(self, this, Primitive::Type::kPrimBoolean);
1889 TestFields(self, this, Primitive::Type::kPrimByte);
1890 }
1891
1892 TEST_F(StubTest, Fields16) {
1893 Thread* self = Thread::Current();
1894
1895 self->TransitionFromSuspendedToRunnable();
1896 LoadDex("AllFields");
1897 bool started = runtime_->Start();
1898 CHECK(started);
1899
1900 TestFields(self, this, Primitive::Type::kPrimChar);
1901 TestFields(self, this, Primitive::Type::kPrimShort);
1902 }
1903
1904 TEST_F(StubTest, Fields32) {
1905 Thread* self = Thread::Current();
1906
1907 self->TransitionFromSuspendedToRunnable();
1908 LoadDex("AllFields");
1909 bool started = runtime_->Start();
1910 CHECK(started);
1911
1912 TestFields(self, this, Primitive::Type::kPrimInt);
1913 }
1914
1915 TEST_F(StubTest, FieldsObj) {
1916 Thread* self = Thread::Current();
1917
1918 self->TransitionFromSuspendedToRunnable();
1919 LoadDex("AllFields");
1920 bool started = runtime_->Start();
1921 CHECK(started);
1922
1923 TestFields(self, this, Primitive::Type::kPrimNot);
1924 }
1925
1926 TEST_F(StubTest, Fields64) {
1927 Thread* self = Thread::Current();
1928
1929 self->TransitionFromSuspendedToRunnable();
1930 LoadDex("AllFields");
1931 bool started = runtime_->Start();
1932 CHECK(started);
1933
1934 TestFields(self, this, Primitive::Type::kPrimLong);
1935 }
1936
1937 // Disabled, b/27991555.
1938 // FIXME: Hacking the entry point to point to art_quick_to_interpreter_bridge is broken.
1939 // The bridge calls through to GetCalleeSaveMethodCaller() which looks up the pre-header
1940 // and gets a bogus OatQuickMethodHeader* pointing into our assembly code just before
1941 // the bridge and uses that to check for inlined frames, crashing in the process.
1942 TEST_F(StubTest, DISABLED_IMT) {
1943 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || defined(__mips__) || \
1944 (defined(__x86_64__) && !defined(__APPLE__))
1945 Thread* self = Thread::Current();
1946
1947 ScopedObjectAccess soa(self);
1948 StackHandleScope<7> hs(self);
1949
1950 JNIEnv* env = Thread::Current()->GetJniEnv();
1951
1952 // ArrayList
1953
1954 // Load ArrayList and used methods (JNI).
1955 jclass arraylist_jclass = env->FindClass("java/util/ArrayList");
1956 ASSERT_NE(nullptr, arraylist_jclass);
1957 jmethodID arraylist_constructor = env->GetMethodID(arraylist_jclass, "<init>", "()V");
1958 ASSERT_NE(nullptr, arraylist_constructor);
1959 jmethodID contains_jmethod = env->GetMethodID(
1960 arraylist_jclass, "contains", "(Ljava/lang/Object;)Z");
1961 ASSERT_NE(nullptr, contains_jmethod);
1962 jmethodID add_jmethod = env->GetMethodID(arraylist_jclass, "add", "(Ljava/lang/Object;)Z");
1963 ASSERT_NE(nullptr, add_jmethod);
1964
1965 // Get representation.
1966 ArtMethod* contains_amethod = soa.DecodeMethod(contains_jmethod);
1967
1968 // Patch up ArrayList.contains.
1969 if (contains_amethod->GetEntryPointFromQuickCompiledCode() == nullptr) {
1970 contains_amethod->SetEntryPointFromQuickCompiledCode(reinterpret_cast<void*>(
1971 StubTest::GetEntrypoint(self, kQuickQuickToInterpreterBridge)));
1972 }
1973
1974 // List
1975
1976 // Load List and used methods (JNI).
1977 jclass list_jclass = env->FindClass("java/util/List");
1978 ASSERT_NE(nullptr, list_jclass);
1979 jmethodID inf_contains_jmethod = env->GetMethodID(
1980 list_jclass, "contains", "(Ljava/lang/Object;)Z");
1981 ASSERT_NE(nullptr, inf_contains_jmethod);
1982
1983 // Get mirror representation.
1984 ArtMethod* inf_contains = soa.DecodeMethod(inf_contains_jmethod);
1985
1986 // Object
1987
1988 jclass obj_jclass = env->FindClass("java/lang/Object");
1989 ASSERT_NE(nullptr, obj_jclass);
1990 jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
1991 ASSERT_NE(nullptr, obj_constructor);
1992
1993 // Create instances.
1994
1995 jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
1996 ASSERT_NE(nullptr, jarray_list);
1997 Handle<mirror::Object> array_list(hs.NewHandle(soa.Decode<mirror::Object*>(jarray_list)));
1998
1999 jobject jobj = env->NewObject(obj_jclass, obj_constructor);
2000 ASSERT_NE(nullptr, jobj);
2001 Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(jobj)));
2002
2003 // Invocation tests.
2004
2005 // 1. imt_conflict
2006
2007 // Contains.
2008
2009 // We construct the ImtConflictTable ourselves, as we cannot go into the runtime stub
2010 // that will create it: the runtime stub expects to be called by compiled code.
2011 LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc();
2012 ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc);
2013 ImtConflictTable* empty_conflict_table =
2014 Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc);
2015 void* data = linear_alloc->Alloc(
2016 self,
2017 ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*)));
2018 ImtConflictTable* new_table = new (data) ImtConflictTable(
2019 empty_conflict_table, inf_contains, contains_amethod, sizeof(void*));
2020 conflict_method->SetImtConflictTable(new_table, sizeof(void*));
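  // The table now maps inf_contains (List.contains) to contains_amethod (ArrayList.contains).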
2021
2022 size_t result =
2023 Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
2024 reinterpret_cast<size_t>(array_list.Get()),
2025 reinterpret_cast<size_t>(obj.Get()),
2026 StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
2027 self,
2028 contains_amethod,
2029 static_cast<size_t>(inf_contains->GetDexMethodIndex()));
2030
2031 ASSERT_FALSE(self->IsExceptionPending());
2032 EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
2033
2034 // Add object.
2035
2036 env->CallBooleanMethod(jarray_list, add_jmethod, jobj);
2037
2038 ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException());
2039
2040 // Contains.
2041
2042 result =
2043 Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method),
2044 reinterpret_cast<size_t>(array_list.Get()),
2045 reinterpret_cast<size_t>(obj.Get()),
2046 StubTest::GetEntrypoint(self, kQuickQuickImtConflictTrampoline),
2047 self,
2048 contains_amethod,
2049 static_cast<size_t>(inf_contains->GetDexMethodIndex()));
2050
2051 ASSERT_FALSE(self->IsExceptionPending());
2052 EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
2053
2054 // 2. regular interface trampoline
2055
2056 result = Invoke3WithReferrer(static_cast<size_t>(inf_contains->GetDexMethodIndex()),
2057 reinterpret_cast<size_t>(array_list.Get()),
2058 reinterpret_cast<size_t>(obj.Get()),
2059 StubTest::GetEntrypoint(self,
2060 kQuickInvokeInterfaceTrampolineWithAccessCheck),
2061 self, contains_amethod);
2062
2063 ASSERT_FALSE(self->IsExceptionPending());
2064 EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
2065
2066 result = Invoke3WithReferrer(
2067 static_cast<size_t>(inf_contains->GetDexMethodIndex()),
2068 reinterpret_cast<size_t>(array_list.Get()), reinterpret_cast<size_t>(array_list.Get()),
2069 StubTest::GetEntrypoint(self, kQuickInvokeInterfaceTrampolineWithAccessCheck), self,
2070 contains_amethod);
2071
2072 ASSERT_FALSE(self->IsExceptionPending());
2073 EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
2074 #else
2075 LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
2076 // Force-print to std::cout so it's also outside the logcat.
2077 std::cout << "Skipping imt as I don't know how to do that on " << kRuntimeISA << std::endl;
2078 #endif
2079 }
2080
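// Exercises the quick IndexOf entrypoint over a small set of strings, characters and start
// positions, comparing each result against String::FastIndexOf.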
2081 TEST_F(StubTest, StringIndexOf) {
2082 #if defined(__arm__) || defined(__aarch64__) || defined(__mips__)
2083 Thread* self = Thread::Current();
2084 ScopedObjectAccess soa(self);
2085 // garbage is created during ClassLinker::Init
2086
2087 // Create some strings
2088 // Use array so we can index into it and use a matrix for expected results
2089 // The start positions tested below range from -1 to one past the length of the longest string.
2090 // TODO: Shared backing arrays.
2091 const char* c_str[] = { "", "a", "ba", "cba", "dcba", "edcba", "asdfghjkl" };
2092 static constexpr size_t kStringCount = arraysize(c_str);
2093 const char c_char[] = { 'a', 'b', 'c', 'd', 'e' };
2094 static constexpr size_t kCharCount = arraysize(c_char);
2095
2096 StackHandleScope<kStringCount> hs(self);
2097 Handle<mirror::String> s[kStringCount];
2098
2099 for (size_t i = 0; i < kStringCount; ++i) {
2100 s[i] = hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), c_str[i]));
2101 }
2102
2103 // Matrix of expectations. The dimensions are string, character and start position (offset by
2104 // one so that start == -1 is included). Expected values are computed up front with
2105 // String::FastIndexOf, so the test relies on that method being correct.
2106 static constexpr size_t kMaxLen = 9;
2107 DCHECK_LE(strlen(c_str[kStringCount-1]), kMaxLen) << "Please fix the indexof test.";
2108
2109 // Last dimension: start, offset by 1.
2110 int32_t expected[kStringCount][kCharCount][kMaxLen + 3];
2111 for (size_t x = 0; x < kStringCount; ++x) {
2112 for (size_t y = 0; y < kCharCount; ++y) {
2113 for (size_t z = 0; z <= kMaxLen + 2; ++z) {
2114 expected[x][y][z] = s[x]->FastIndexOf(c_char[y], static_cast<int32_t>(z) - 1);
2115 }
2116 }
2117 }
2118
2119 // Play with it...
2120
2121 for (size_t x = 0; x < kStringCount; ++x) {
2122 for (size_t y = 0; y < kCharCount; ++y) {
2123 for (size_t z = 0; z <= kMaxLen + 2; ++z) {
2124 int32_t start = static_cast<int32_t>(z) - 1;
2125
2126 // Test the IndexOf stub on string x, char y, starting at 'start'.
2127 size_t result = Invoke3(reinterpret_cast<size_t>(s[x].Get()), c_char[y], start,
2128 StubTest::GetEntrypoint(self, kQuickIndexOf), self);
2129
2130 EXPECT_FALSE(self->IsExceptionPending());
2131
2132 // The result is a 32b signed integer
2133 union {
2134 size_t r;
2135 int32_t i;
2136 } conv;
2137 conv.r = result;
2138
2139 EXPECT_EQ(expected[x][y][z], conv.i) << "Wrong result for " << c_str[x] << " / " <<
2140 c_char[y] << " @ " << start;
2141 }
2142 }
2143 }
2144
2145 // TODO: Deallocate things.
2146
2147 // Tests done.
2148 #else
2149 LOG(INFO) << "Skipping indexof as I don't know how to do that on " << kRuntimeISA;
2150 // Force-print to std::cout so it's also outside the logcat.
2151 std::cout << "Skipping indexof as I don't know how to do that on " << kRuntimeISA << std::endl;
2152 #endif
2153 }
2154
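// Invokes the read barrier slow path on the object's class field and checks that it returns the
// same class as a direct obj->GetClass() read.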
2155 TEST_F(StubTest, ReadBarrier) {
2156 #if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
2157 defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
2158 Thread* self = Thread::Current();
2159
2160 const uintptr_t readBarrierSlow = StubTest::GetEntrypoint(self, kQuickReadBarrierSlow);
2161
2162 // Create an object
2163 ScopedObjectAccess soa(self);
2164 // garbage is created during ClassLinker::Init
2165
2166 StackHandleScope<2> hs(soa.Self());
2167 Handle<mirror::Class> c(
2168 hs.NewHandle(class_linker_->FindSystemClass(soa.Self(), "Ljava/lang/Object;")));
2169
2170 // Build an object instance
2171 Handle<mirror::Object> obj(hs.NewHandle(c->AllocObject(soa.Self())));
2172
2173 EXPECT_FALSE(self->IsExceptionPending());
2174
2175 size_t result = Invoke3(0U, reinterpret_cast<size_t>(obj.Get()),
2176 mirror::Object::ClassOffset().SizeValue(), readBarrierSlow, self);
2177
2178 EXPECT_FALSE(self->IsExceptionPending());
2179 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
2180 mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
2181 EXPECT_EQ(klass, obj->GetClass());
2182
2183 // Tests done.
2184 #else
2185 LOG(INFO) << "Skipping read_barrier_slow";
2186 // Force-print to std::cout so it's also outside the logcat.
2187 std::cout << "Skipping read_barrier_slow" << std::endl;
2188 #endif
2189 }
2190
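// Invokes the read-barrier-for-root slow path on the java.lang.String class root and checks that
// it returns the string's class.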
2191 TEST_F(StubTest, ReadBarrierForRoot) {
2192 #if defined(ART_USE_READ_BARRIER) && (defined(__i386__) || defined(__arm__) || \
2193 defined(__aarch64__) || defined(__mips__) || (defined(__x86_64__) && !defined(__APPLE__)))
2194 Thread* self = Thread::Current();
2195
2196 const uintptr_t readBarrierForRootSlow =
2197 StubTest::GetEntrypoint(self, kQuickReadBarrierForRootSlow);
2198
2199 // Create an object
2200 ScopedObjectAccess soa(self);
2201 // garbage is created during ClassLinker::Init
2202
2203 StackHandleScope<1> hs(soa.Self());
2204
2205 Handle<mirror::String> obj(
2206 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), "hello, world!")));
2207
2208 EXPECT_FALSE(self->IsExceptionPending());
2209
2210 GcRoot<mirror::Class>& root = mirror::String::java_lang_String_;
2211 size_t result = Invoke3(reinterpret_cast<size_t>(&root), 0U, 0U, readBarrierForRootSlow, self);
2212
2213 EXPECT_FALSE(self->IsExceptionPending());
2214 EXPECT_NE(reinterpret_cast<size_t>(nullptr), result);
2215 mirror::Class* klass = reinterpret_cast<mirror::Class*>(result);
2216 EXPECT_EQ(klass, obj->GetClass());
2217
2218 // Tests done.
2219 #else
2220 LOG(INFO) << "Skipping read_barrier_for_root_slow";
2221 // Force-print to std::cout so it's also outside the logcat.
2222 std::cout << "Skipping read_barrier_for_root_slow" << std::endl;
2223 #endif
2224 }
2225
2226 } // namespace art
2227