/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm64.S"
#include "interpreter/cfi_asm_support.h"

#include "arch/quick_alloc_entrypoints.S"

.macro SAVE_REG_INCREASE_FRAME reg, frame_adjustment
    str \reg, [sp, #-(\frame_adjustment)]!
    .cfi_adjust_cfa_offset (\frame_adjustment)
    .cfi_rel_offset \reg, 0
.endm

.macro RESTORE_REG_DECREASE_FRAME reg, frame_adjustment
    ldr \reg, [sp], #(\frame_adjustment)
    .cfi_restore \reg
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
    .cfi_adjust_cfa_offset (\frame_adjustment)
    .cfi_rel_offset \reg1, 0
    .cfi_rel_offset \reg2, 8
.endm

.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
    .cfi_restore \reg1
    .cfi_restore \reg2
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

.macro POP_SAVE_REFS_ONLY_FRAME
    DECREASE_FRAME 96
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
     *
     * TODO This is probably too conservative - saving FP & LR.
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
    // art::Runtime* xIP0 = art::Runtime::instance_;
    // Our registers aren't intermixed - just spill in order.
    LOAD_RUNTIME_INSTANCE xIP0

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveRefsAndArgs];
    ldr xIP0, [xIP0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET]

    INCREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp

    str xIP0, [sp]    // Store ArtMethod* Runtime::callee_save_methods_[kSaveRefsAndArgs].
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
    INCREASE_FRAME FRAME_SIZE_SAVE_REFS_AND_ARGS
    SETUP_SAVE_REFS_AND_ARGS_FRAME_INTERNAL sp
    str x0, [sp, #0]    // Store ArtMethod* to bottom of stack.
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything)
     * when the SP has already been decremented by FRAME_SIZE_SAVE_EVERYTHING
     * and saving registers x29 and LR is handled elsewhere.
     */
.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 512)
#error "FRAME_SIZE_SAVE_EVERYTHING(ARM64) size not as expected."
#endif

    // Save FP registers.
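    // For reference, the kSaveEverything frame layout implied by the offsets used
    // below and in the matching restore code (a reading aid only; the authoritative
    // constants live in asm_support_arm64.h):
    //   [sp, #0]    ArtMethod* (the kSaveEverything runtime method)
    //   [sp, #16]   d0..d31   (16 pairs, last pair at offset 256)
    //   [sp, #272]  x0..x17   (x18, the platform register, is skipped)
    //   [sp, #416]  x19..x28
    //   [sp, #496]  x29, lr   (saved by the caller of this macro)
    //   Total: FRAME_SIZE_SAVE_EVERYTHING = 512 bytes.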
    stp d0, d1, [sp, #16]
    stp d2, d3, [sp, #32]
    stp d4, d5, [sp, #48]
    stp d6, d7, [sp, #64]
    stp d8, d9, [sp, #80]
    stp d10, d11, [sp, #96]
    stp d12, d13, [sp, #112]
    stp d14, d15, [sp, #128]
    stp d16, d17, [sp, #144]
    stp d18, d19, [sp, #160]
    stp d20, d21, [sp, #176]
    stp d22, d23, [sp, #192]
    stp d24, d25, [sp, #208]
    stp d26, d27, [sp, #224]
    stp d28, d29, [sp, #240]
    stp d30, d31, [sp, #256]

    // Save core registers.
    SAVE_TWO_REGS x0, x1, 272
    SAVE_TWO_REGS x2, x3, 288
    SAVE_TWO_REGS x4, x5, 304
    SAVE_TWO_REGS x6, x7, 320
    SAVE_TWO_REGS x8, x9, 336
    SAVE_TWO_REGS x10, x11, 352
    SAVE_TWO_REGS x12, x13, 368
    SAVE_TWO_REGS x14, x15, 384
    SAVE_TWO_REGS x16, x17, 400    // Do not save the platform register (x18).
    SAVE_TWO_REGS x19, x20, 416
    SAVE_TWO_REGS x21, x22, 432
    SAVE_TWO_REGS x23, x24, 448
    SAVE_TWO_REGS x25, x26, 464
    SAVE_TWO_REGS x27, x28, 480

    // art::Runtime* xIP0 = art::Runtime::instance_;
    LOAD_RUNTIME_INSTANCE xIP0

    // ArtMethod* xIP0 = Runtime::instance_->callee_save_methods_[kSaveEverything];
    ldr xIP0, [xIP0, \runtime_method_offset]

    // Store ArtMethod* Runtime::callee_save_methods_[kSaveEverything].
    str xIP0, [sp]
    // Place sp in Thread::Current()->top_quick_frame.
    mov xIP0, sp
    str xIP0, [xSELF, #THREAD_TOP_QUICK_FRAME_OFFSET]
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything).
     */
.macro SETUP_SAVE_EVERYTHING_FRAME runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
    INCREASE_FRAME 512
    SAVE_TWO_REGS x29, xLR, 496
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR \runtime_method_offset
.endm

.macro RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
    // Restore FP registers.
    ldp d0, d1, [sp, #16]
    ldp d2, d3, [sp, #32]
    ldp d4, d5, [sp, #48]
    ldp d6, d7, [sp, #64]
    ldp d8, d9, [sp, #80]
    ldp d10, d11, [sp, #96]
    ldp d12, d13, [sp, #112]
    ldp d14, d15, [sp, #128]
    ldp d16, d17, [sp, #144]
    ldp d18, d19, [sp, #160]
    ldp d20, d21, [sp, #176]
    ldp d22, d23, [sp, #192]
    ldp d24, d25, [sp, #208]
    ldp d26, d27, [sp, #224]
    ldp d28, d29, [sp, #240]
    ldp d30, d31, [sp, #256]

    // Restore core registers, except x0.
    RESTORE_REG x1, 280
    RESTORE_TWO_REGS x2, x3, 288
    RESTORE_TWO_REGS x4, x5, 304
    RESTORE_TWO_REGS x6, x7, 320
    RESTORE_TWO_REGS x8, x9, 336
    RESTORE_TWO_REGS x10, x11, 352
    RESTORE_TWO_REGS x12, x13, 368
    RESTORE_TWO_REGS x14, x15, 384
    RESTORE_TWO_REGS x16, x17, 400    // Do not restore the platform register (x18).
    RESTORE_TWO_REGS x19, x20, 416
    RESTORE_TWO_REGS x21, x22, 432
    RESTORE_TWO_REGS x23, x24, 448
    RESTORE_TWO_REGS x25, x26, 464
    RESTORE_TWO_REGS x27, x28, 480
    RESTORE_TWO_REGS x29, xLR, 496

    DECREASE_FRAME 512
.endm

.macro RESTORE_SAVE_EVERYTHING_FRAME
    RESTORE_REG x0, 272
    RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
.endm

.macro RETURN_IF_RESULT_IS_ZERO
    cbnz x0, 1f    // result non-zero branch over
    ret            // return
1:
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO
    cbz x0, 1f     // result zero branch over
    ret            // return
1:
.endm

// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
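// (RETURN_OR_DELIVER_PENDING_EXCEPTION_REG, defined in asm_support_arm64.S, loads
// Thread::Current()->exception_ into the given scratch register and returns if it is
// null, otherwise falls through to deliver the pending exception.)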
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm

.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
    cbnz w0, 1f    // result non-zero branch over
    ret            // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME    // save all registers as basis for long jump context
    mov x0, xSELF                        // pass Thread::Current
    bl \cxx_name                         // \cxx_name(Thread*)
    brk 0
END \c_name
.endm

.macro NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_EVERYTHING_FRAME    // save all registers as basis for long jump context
    mov x0, xSELF                  // pass Thread::Current
    bl \cxx_name                   // \cxx_name(Thread*)
    brk 0
END \c_name
.endm

.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME    // save all registers as basis for long jump context.
    mov x1, xSELF                        // pass Thread::Current.
    bl \cxx_name                         // \cxx_name(arg, Thread*).
    brk 0
END \c_name
.endm

.macro TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_EVERYTHING_FRAME    // save all registers as basis for long jump context
    mov x2, xSELF                  // pass Thread::Current
    bl \cxx_name                   // \cxx_name(arg1, arg2, Thread*)
    brk 0
END \c_name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Call installed by a signal handler to create and deliver a NullPointerException.
     */
    .extern art_quick_throw_null_pointer_exception_from_signal
ENTRY art_quick_throw_null_pointer_exception_from_signal
    // The fault handler pushes the gc map address, i.e. "return address", to stack
    // and passes the fault address in LR. So we need to set up the CFI info accordingly.
    .cfi_def_cfa_offset __SIZEOF_POINTER__
    .cfi_rel_offset lr, 0
    // Save all registers as basis for long jump context.
    INCREASE_FRAME (FRAME_SIZE_SAVE_EVERYTHING - __SIZEOF_POINTER__)
    SAVE_REG x29, (FRAME_SIZE_SAVE_EVERYTHING - 2 * __SIZEOF_POINTER__)    // LR already saved.
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP_SKIP_X29_LR
    mov x0, lr       // pass the fault address stored in LR by the fault handler.
    mov x1, xSELF    // pass Thread::Current.
    bl artThrowNullPointerExceptionFromSignal    // (arg, Thread*).
    brk 0
END art_quick_throw_null_pointer_exception_from_signal

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
     * as if thrown from a call to String.charAt(). Arg1 holds index, arg2 holds limit.
     */
TWO_ARG_RUNTIME_EXCEPTION_SAVE_EVERYTHING art_quick_throw_string_bounds, artThrowStringBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
     * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/x1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
     * of the target Method* in x0 and method->code_ in x1.
     *
     * If unsuccessful, the helper will return null/????. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Adapted from ARM32 code.
     *
     * Clobbers xIP0.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_SAVE_REFS_AND_ARGS_FRAME    // save callee saves in case allocation triggers GC
    // Helper signature is always
    // (method_idx, *this_object, *caller_method, *self, sp)

    mov x2, xSELF    // pass Thread::Current
    mov x3, sp
    bl \cxx_name     // (method_idx, this, Thread*, SP)
    mov xIP0, x1     // save Method*->code_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    REFRESH_MARKING_REGISTER
    cbz x0, 1f       // did we find the target? if not go to exception delivery
    br xIP0          // tail call to target
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck


.macro INVOKE_STUB_CREATE_FRAME
SAVE_SIZE=6*8    // x4, x5, x19, x20, FP, LR saved.
    SAVE_TWO_REGS_INCREASE_FRAME x4, x5, SAVE_SIZE
    SAVE_TWO_REGS x19, x20, 16
    SAVE_TWO_REGS xFP, xLR, 32

    mov xFP, sp    // Use xFP for frame pointer, as it's callee-saved.
    .cfi_def_cfa_register xFP

    add x10, x2, #(__SIZEOF_POINTER__ + 0xf)    // Reserve space for ArtMethod*, arguments and
    and x10, x10, # ~0xf                        // round up for 16-byte stack alignment.
    sub sp, sp, x10                             // Adjust SP for ArtMethod*, args and alignment padding.

    mov xSELF, x3    // Move thread pointer into SELF register.

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
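    // Roughly equivalent C for the copy loop below (a sketch only, names invented):
    //   uint32_t i = args_length;    // w2
    //   while (i != 0) {
    //     i -= 4;
    //     *(uint32_t*)(dst + i) = *(uint32_t*)(src + i);    // x9 + i <- x1 + i
    //   }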
    // 4 bytes per slot.
    // X1 - source address
    // W2 - args length
    // X9 - destination address.
    // W10 - temporary
    add x9, sp, #8    // Destination address is bottom of stack + null.

    // Copy parameters into the stack. Use numeric label as this is a macro and Clang's assembler
    // does not have unique-id variables.
1:
    cbz w2, 2f
    sub w2, w2, #4    // Need 65536 bytes of range.
    ldr w10, [x1, x2]
    str w10, [x9, x2]
    b 1b

2:
    // Store null into ArtMethod* at bottom of frame.
    str xzr, [sp]
.endm

.macro INVOKE_STUB_CALL_AND_RETURN

    REFRESH_MARKING_REGISTER

    // Load method->entry_point_from_quick_compiled_code_.
    ldr x9, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    // Branch to method.
    blr x9

    // Pop the ArtMethod* (null), arguments and alignment padding from the stack.
    mov sp, xFP
    .cfi_def_cfa_register sp

    // Restore saved registers including value address and shorty address.
    RESTORE_TWO_REGS x19, x20, 16
    RESTORE_TWO_REGS xFP, xLR, 32
    RESTORE_TWO_REGS_DECREASE_FRAME x4, x5, SAVE_SIZE

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x5]

    // Check the return type and store the correct register into the jvalue in memory.
    // Use numeric label as this is a macro and Clang's assembler does not have unique-id variables.

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq 1f

    // Is it a double?
    cmp w10, #'D'
    beq 2f

    // Is it a float?
    cmp w10, #'F'
    beq 3f

    // Just store x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x4]

1:    // Finish up.
    ret

2:    // Store double.
    str d0, [x4]
    ret

3:    // Store float.
    str s0, [x4]
    ret

.endm


/*
 * extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
 *                                      uint32_t  *args,     x1
 *                                      uint32_t argsize,    w2
 *                                      Thread *self,        x3
 *                                      JValue *result,      x4
 *                                      char   *shorty);     x5
 *  +----------------------+
 *  |                      |
 *  |  C/C++ frame         |
 *  |       LR''           |
 *  |       FP''           | <- SP'
 *  +----------------------+
 *  +----------------------+
 *  |        x28           | <- TODO: Remove callee-saves.
 *  |         :            |
 *  |        x19           |
 *  |        SP'           |
 *  |        X5            |
 *  |        X4            |        Saved registers
 *  |        LR'           |
 *  |        FP'           | <- FP
 *  +----------------------+
 *  | uint32_t out[n-1]    |
 *  |    :      :          |        Outs
 *  | uint32_t out[0]      |
 *  | ArtMethod*           | <- SP  value=null
 *  +----------------------+
 *
 * Outgoing registers:
 *  x0    - Method*
 *  x1-x7 - integer parameters.
 *  d0-d7 - Floating point parameters.
 *  xSELF = self
 *  SP = & of ArtMethod*
 *  x1 = "this" pointer.
 *
 */
ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW2
    adr x12, .LstoreX2
    adr x13, .LstoreS0
    adr x14, .LstoreD0

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1     // Load shorty address, plus one to skip return value.
    ldr w1, [x9], #4    // Load "this" parameter, and increment arg pointer.

    // Loop to fill registers.
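    // Worked example (illustrative only): for shorty "DIJ" (returns double; takes an
    // int and a long), the loop below loads the int into w2 via .LstoreW2 and the
    // long into x3 via .LstoreX2. "this" was already placed in w1 above, and the
    // return type character is never visited because x10 starts one past it.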
.LfillRegisters:
    ldrb w17, [x10], #1       // Load next character in signature, and increment.
    cbz w17, .LcallFunction   // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'             // is this a float?
    bne .LisDouble

    cmp x15, # 8*12           // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x13, x15         // Calculate subroutine to jump to.
    br x17

.LisDouble:
    cmp w17, #'D'             // is this a double?
    bne .LisLong

    cmp x15, # 8*12           // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x14, x15         // Calculate subroutine to jump to.
    br x17

.LisLong:
    cmp w17, #'J'             // is this a long?
    bne .LisOther

    cmp x8, # 6*12            // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x12, x8          // Calculate subroutine to jump to.
    br x17

.LisOther:                    // Everything else takes one vReg.
    cmp x8, # 6*12            // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x11, x8          // Calculate subroutine to jump to.
    br x17

.Ladvance4:
    add x9, x9, #4
    b .LfillRegisters

.Ladvance8:
    add x9, x9, #8
    b .LfillRegisters

// Macro for loading a parameter into a register.
//  counter - the register with offset into these tables
//  size - the size of the register - 4 or 8 bytes.
//  register - the name of the register to be loaded.
//  return - the label to branch back to after the load.
.macro LOADREG counter size register return
    ldr \register, [x9], #\size
    add \counter, \counter, 12
    b \return
.endm

// Store ints.
.LstoreW2:
    LOADREG x8 4 w2 .LfillRegisters
    LOADREG x8 4 w3 .LfillRegisters
    LOADREG x8 4 w4 .LfillRegisters
    LOADREG x8 4 w5 .LfillRegisters
    LOADREG x8 4 w6 .LfillRegisters
    LOADREG x8 4 w7 .LfillRegisters

// Store longs.
.LstoreX2:
    LOADREG x8 8 x2 .LfillRegisters
    LOADREG x8 8 x3 .LfillRegisters
    LOADREG x8 8 x4 .LfillRegisters
    LOADREG x8 8 x5 .LfillRegisters
    LOADREG x8 8 x6 .LfillRegisters
    LOADREG x8 8 x7 .LfillRegisters

// Store singles.
.LstoreS0:
    LOADREG x15 4 s0 .LfillRegisters
    LOADREG x15 4 s1 .LfillRegisters
    LOADREG x15 4 s2 .LfillRegisters
    LOADREG x15 4 s3 .LfillRegisters
    LOADREG x15 4 s4 .LfillRegisters
    LOADREG x15 4 s5 .LfillRegisters
    LOADREG x15 4 s6 .LfillRegisters
    LOADREG x15 4 s7 .LfillRegisters

// Store doubles.
.LstoreD0:
    LOADREG x15 8 d0 .LfillRegisters
    LOADREG x15 8 d1 .LfillRegisters
    LOADREG x15 8 d2 .LfillRegisters
    LOADREG x15 8 d3 .LfillRegisters
    LOADREG x15 8 d4 .LfillRegisters
    LOADREG x15 8 d5 .LfillRegisters
    LOADREG x15 8 d6 .LfillRegisters
    LOADREG x15 8 d7 .LfillRegisters


.LcallFunction:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_stub

/* extern"C"
 * void art_quick_invoke_static_stub(ArtMethod *method,   x0
 *                                   uint32_t  *args,     x1
 *                                   uint32_t argsize,    w2
 *                                   Thread *self,        x3
 *                                   JValue *result,      x4
 *                                   char   *shorty);     x5
 */
ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW1_2
    adr x12, .LstoreX1_2
    adr x13, .LstoreS0_2
    adr x14, .LstoreD0_2

    // Initialize routine offsets to 0 for integers and floats.
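    // (The offsets advance in steps of 12 because each LOADREG expansion below is
    // three 4-byte instructions: ldr, add, b.)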
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1    // Load shorty address, plus one to skip return value.

    // Loop to fill registers.
.LfillRegisters2:
    ldrb w17, [x10], #1        // Load next character in signature, and increment.
    cbz w17, .LcallFunction2   // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'              // is this a float?
    bne .LisDouble2

    cmp x15, # 8*12            // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x13, x15          // Calculate subroutine to jump to.
    br x17

.LisDouble2:
    cmp w17, #'D'              // is this a double?
    bne .LisLong2

    cmp x15, # 8*12            // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x14, x15          // Calculate subroutine to jump to.
    br x17

.LisLong2:
    cmp w17, #'J'              // is this a long?
    bne .LisOther2

    cmp x8, # 7*12             // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x12, x8           // Calculate subroutine to jump to.
    br x17

.LisOther2:                    // Everything else takes one vReg.
    cmp x8, # 7*12             // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x11, x8           // Calculate subroutine to jump to.
    br x17

.Ladvance4_2:
    add x9, x9, #4
    b .LfillRegisters2

.Ladvance8_2:
    add x9, x9, #8
    b .LfillRegisters2

// Store ints.
.LstoreW1_2:
    LOADREG x8 4 w1 .LfillRegisters2
    LOADREG x8 4 w2 .LfillRegisters2
    LOADREG x8 4 w3 .LfillRegisters2
    LOADREG x8 4 w4 .LfillRegisters2
    LOADREG x8 4 w5 .LfillRegisters2
    LOADREG x8 4 w6 .LfillRegisters2
    LOADREG x8 4 w7 .LfillRegisters2

// Store longs.
.LstoreX1_2:
    LOADREG x8 8 x1 .LfillRegisters2
    LOADREG x8 8 x2 .LfillRegisters2
    LOADREG x8 8 x3 .LfillRegisters2
    LOADREG x8 8 x4 .LfillRegisters2
    LOADREG x8 8 x5 .LfillRegisters2
    LOADREG x8 8 x6 .LfillRegisters2
    LOADREG x8 8 x7 .LfillRegisters2

// Store singles.
.LstoreS0_2:
    LOADREG x15 4 s0 .LfillRegisters2
    LOADREG x15 4 s1 .LfillRegisters2
    LOADREG x15 4 s2 .LfillRegisters2
    LOADREG x15 4 s3 .LfillRegisters2
    LOADREG x15 4 s4 .LfillRegisters2
    LOADREG x15 4 s5 .LfillRegisters2
    LOADREG x15 4 s6 .LfillRegisters2
    LOADREG x15 4 s7 .LfillRegisters2

// Store doubles.
.LstoreD0_2:
    LOADREG x15 8 d0 .LfillRegisters2
    LOADREG x15 8 d1 .LfillRegisters2
    LOADREG x15 8 d2 .LfillRegisters2
    LOADREG x15 8 d3 .LfillRegisters2
    LOADREG x15 8 d4 .LfillRegisters2
    LOADREG x15 8 d5 .LfillRegisters2
    LOADREG x15 8 d6 .LfillRegisters2
    LOADREG x15 8 d7 .LfillRegisters2


.LcallFunction2:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_static_stub



/* extern"C" void art_quick_osr_stub(void** stack,                x0
 *                                   size_t stack_size_in_bytes,  x1
 *                                   const uint8_t* native_pc,    x2
 *                                   JValue *result,              x3
 *                                   char *shorty,                x4
 *                                   Thread *self)                x5
 */
ENTRY art_quick_osr_stub
SAVE_SIZE=22*8
    SAVE_TWO_REGS_INCREASE_FRAME x3, x4, SAVE_SIZE
    SAVE_TWO_REGS x19, x20, 16
    SAVE_TWO_REGS x21, x22, 32
    SAVE_TWO_REGS x23, x24, 48
    SAVE_TWO_REGS x25, x26, 64
    SAVE_TWO_REGS x27, x28, 80
    SAVE_TWO_REGS xFP, xLR, 96
    stp d8, d9, [sp, #112]
    stp d10, d11, [sp, #128]
    stp d12, d13, [sp, #144]
    stp d14, d15, [sp, #160]

    mov xSELF, x5    // Move thread pointer into SELF register.
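    // REFRESH_MARKING_REGISTER reloads wMR (w20) from Thread::Current()->is_gc_marking
    // when the Baker read barrier is in use; otherwise it expands to nothing.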
    REFRESH_MARKING_REGISTER

    INCREASE_FRAME 16
    str xzr, [sp]    // Store null for ArtMethod* slot
    // Branch to stub.
    bl .Losr_entry
    .cfi_remember_state
    DECREASE_FRAME 16

    // Restore saved registers including value address and shorty address.
    ldp d8, d9, [sp, #112]
    ldp d10, d11, [sp, #128]
    ldp d12, d13, [sp, #144]
    ldp d14, d15, [sp, #160]
    RESTORE_TWO_REGS x19, x20, 16
    RESTORE_TWO_REGS x21, x22, 32
    RESTORE_TWO_REGS x23, x24, 48
    RESTORE_TWO_REGS x25, x26, 64
    RESTORE_TWO_REGS x27, x28, 80
    RESTORE_TWO_REGS xFP, xLR, 96
    RESTORE_TWO_REGS_DECREASE_FRAME x3, x4, SAVE_SIZE

    // The compiler put the result in x0. Doesn't matter if it is 64 or 32 bits.
    str x0, [x3]
    ret

.Losr_entry:
    .cfi_restore_state                   // Reset unwind info so following code unwinds.
    .cfi_def_cfa_offset (SAVE_SIZE+16)   // workaround for clang bug: 31975598

    mov x9, sp    // Save stack pointer.
    .cfi_def_cfa_register x9

    // Update stack pointer for the callee
    sub sp, sp, x1

    // Update link register slot expected by the callee.
    sub w1, w1, #8
    str lr, [sp, x1]

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X0 - source address
    // W1 - args length
    // SP - destination address.
    // W10 - temporary
.Losr_loop_entry:
    cbz w1, .Losr_loop_exit
    sub w1, w1, #4
    ldr w10, [x0, x1]
    str w10, [sp, x1]
    b .Losr_loop_entry

.Losr_loop_exit:
    // Branch to the OSR entry point.
    br x2

END art_quick_osr_stub

    /*
     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_.
     * Both must reside on the stack, between current SP and target SP.
     * IP0 and IP1 shall be clobbered rather than retrieved from gprs_.
     */

ENTRY art_quick_do_long_jump
    // Load FPRs
    ldp d0, d1, [x1, #0]
    ldp d2, d3, [x1, #16]
    ldp d4, d5, [x1, #32]
    ldp d6, d7, [x1, #48]
    ldp d8, d9, [x1, #64]
    ldp d10, d11, [x1, #80]
    ldp d12, d13, [x1, #96]
    ldp d14, d15, [x1, #112]
    ldp d16, d17, [x1, #128]
    ldp d18, d19, [x1, #144]
    ldp d20, d21, [x1, #160]
    ldp d22, d23, [x1, #176]
    ldp d24, d25, [x1, #192]
    ldp d26, d27, [x1, #208]
    ldp d28, d29, [x1, #224]
    ldp d30, d31, [x1, #240]

    // Load GPRs. Delay loading x0, x1 because x0 is used as gprs_.
    ldp x2, x3, [x0, #16]
    ldp x4, x5, [x0, #32]
    ldp x6, x7, [x0, #48]
    ldp x8, x9, [x0, #64]
    ldp x10, x11, [x0, #80]
    ldp x12, x13, [x0, #96]
    ldp x14, x15, [x0, #112]
    // Do not load IP0 (x16) and IP1 (x17), these shall be clobbered below.
    // Don't load the platform register (x18) either.
    ldr x19, [x0, #152]        // xSELF.
    ldp x20, x21, [x0, #160]   // For Baker RB, wMR (w20) is reloaded below.
    ldp x22, x23, [x0, #176]
    ldp x24, x25, [x0, #192]
    ldp x26, x27, [x0, #208]
    ldp x28, x29, [x0, #224]
    ldp x30, xIP0, [x0, #240]  // LR and SP, load SP to IP0.

    // Load PC to IP1, it's at the end (after the space for the unused XZR).
    ldr xIP1, [x0, #33*8]

    // Load x0, x1.
    ldp x0, x1, [x0, #0]

    // Set SP. Do not access fprs_ and gprs_ from now, they are below SP.
    mov sp, xIP0

    REFRESH_MARKING_REGISTER

    br xIP1
END art_quick_do_long_jump

    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
     * possibly null object to lock.
     *
     * Derived from arm32 code.
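     *
     * A thin lock stores the owner's thread id and a recursion count directly in the
     * object's lock word; the code below inlines only the uncontended cases and falls
     * back to artLockObjectFromCode for everything else (contention, inflation, etc.).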
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    ldr w1, [xSELF, #THREAD_ID_OFFSET]
    cbz w0, art_quick_lock_object_no_inline
    // Exclusive load/store has no immediate anymore.
    add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
.Lretry_lock:
    ldaxr w2, [x4]    // Acquire needed only in most common case.
    eor w3, w2, w1    // Prepare the value to store if unlocked
                      //   (thread id, count of 0 and preserved read barrier bits),
                      // or prepare to compare thread id for recursive lock check
                      //   (lock_word.ThreadId() ^ self->ThreadId()).
    tst w2, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED    // Test the non-gc bits.
    b.ne .Lnot_unlocked    // Check if unlocked.
    // Unlocked case - store w3: original lock word plus thread id, preserved read barrier bits.
    stxr w2, w3, [x4]
    cbnz w2, .Lretry_lock    // If the store failed, retry.
    ret
.Lnot_unlocked:    // w2: original lock word, w1: thread id, w3: w2 ^ w1
    // Check lock word state and thread id together.
    tst w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
    b.ne art_quick_lock_object_no_inline
    add w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE         // Increment the recursive lock count.
    tst w3, #LOCK_WORD_THIN_LOCK_COUNT_MASK_SHIFTED    // Test the new thin lock count.
    b.eq art_quick_lock_object_no_inline    // Zero as the new count indicates overflow, go slow path.
    stxr w2, w3, [x4]
    cbnz w2, .Lretry_lock    // If the store failed, retry.
    ret
END art_quick_lock_object

ENTRY art_quick_lock_object_no_inline
    // This is also the slow path for art_quick_lock_object.
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case we block
    mov x1, xSELF                 // pass Thread::Current
    bl artLockObjectFromCode      // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object_no_inline

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on
     * failure. x0 holds the possibly null object to unlock.
     *
     * Derived from arm32 code.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    ldr w1, [xSELF, #THREAD_ID_OFFSET]
    cbz x0, art_quick_unlock_object_no_inline
    // Exclusive load/store has no immediate anymore.
    add x4, x0, #MIRROR_OBJECT_LOCK_WORD_OFFSET
.Lretry_unlock:
#ifndef USE_READ_BARRIER
    ldr w2, [x4]
#else
    ldxr w2, [x4]    // Need to use atomic instructions for read barrier.
#endif
    eor w3, w2, w1    // Prepare the value to store if simply locked
                      //   (mostly 0s, and preserved read barrier bits),
                      // or prepare to compare thread id for recursive lock check
                      //   (lock_word.ThreadId() ^ self->ThreadId()).
    tst w3, #LOCK_WORD_GC_STATE_MASK_SHIFTED_TOGGLED    // Test the non-gc bits.
    b.ne .Lnot_simply_locked    // Locked recursively or by other thread?
    // Transition to unlocked.
#ifndef USE_READ_BARRIER
    stlr w3, [x4]
#else
    stlxr w2, w3, [x4]    // Need to use atomic instructions for read barrier.
    cbnz w2, .Lretry_unlock    // If the store failed, retry.
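    // (stlxr, unlike plain stxr, also carries the release semantics that stlr
    // provides on the non-read-barrier path above.)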
#endif
    ret
.Lnot_simply_locked:
    // Check lock word state and thread id together.
    tst w3, #(LOCK_WORD_STATE_MASK_SHIFTED | LOCK_WORD_THIN_LOCK_OWNER_MASK_SHIFTED)
    b.ne art_quick_unlock_object_no_inline
    sub w3, w2, #LOCK_WORD_THIN_LOCK_COUNT_ONE    // decrement count
#ifndef USE_READ_BARRIER
    str w3, [x4]
#else
    stxr w2, w3, [x4]    // Need to use atomic instructions for read barrier.
    cbnz w2, .Lretry_unlock    // If the store failed, retry.
#endif
    ret
END art_quick_unlock_object

ENTRY art_quick_unlock_object_no_inline
    // This is also the slow path for art_quick_unlock_object.
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case exception allocation triggers GC
    mov x1, xSELF                 // pass Thread::Current
    bl artUnlockObjectFromCode    // (Object* obj, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object_no_inline

    /*
     * Entry from managed code that calls artInstanceOfFromCode and on failure calls
     * artThrowClassCastExceptionForObject.
     */
    .extern artInstanceOfFromCode
    .extern artThrowClassCastExceptionForObject
ENTRY art_quick_check_instance_of
    // Type check using the bit string passes null as the target class. In that case just throw.
    cbz x1, .Lthrow_class_cast_exception_for_bitstring_check

    // Store arguments and link register
    // Stack needs to be 16B aligned on calls.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
    SAVE_REG xLR, 24

    // Call runtime code
    bl artInstanceOfFromCode

    // Restore LR.
    RESTORE_REG xLR, 24

    // Check for exception
    cbz x0, .Lthrow_class_cast_exception

    // Restore and return
    .cfi_remember_state
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32
    ret
    .cfi_restore_state        // Reset unwind info so following code unwinds.
    .cfi_def_cfa_offset 32    // workaround for clang bug: 31975598

.Lthrow_class_cast_exception:
    // Restore
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

.Lthrow_class_cast_exception_for_bitstring_check:
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME    // save all registers as basis for long jump context
    mov x2, xSELF                        // pass Thread::Current
    bl artThrowClassCastExceptionForObject    // (Object*, Class*, Thread*)
    brk 0    // We should not return here...
END art_quick_check_instance_of

// Restore xReg's value from [sp, #offset] if xReg is not the same as xExclude.
.macro POP_REG_NE xReg, offset, xExclude
    .ifnc \xReg, \xExclude
        ldr \xReg, [sp, #\offset]    // restore xReg
        .cfi_restore \xReg
    .endif
.endm

// Restore xReg1's value from [sp, #offset] if xReg1 is not the same as xExclude.
// Restore xReg2's value from [sp, #(offset + 8)] if xReg2 is not the same as xExclude.
.macro POP_REGS_NE xReg1, xReg2, offset, xExclude
    .ifc \xReg1, \xExclude
        ldr \xReg2, [sp, #(\offset + 8)]    // restore xReg2
    .else
        .ifc \xReg2, \xExclude
            ldr \xReg1, [sp, #\offset]      // restore xReg1
        .else
            ldp \xReg1, \xReg2, [sp, #\offset]    // restore xReg1 and xReg2
        .endif
    .endif
    .cfi_restore \xReg1
    .cfi_restore \xReg2
.endm

    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * xDest, wDest and xObj are registers, offset is a defined literal such as
     * MIRROR_OBJECT_CLASS_OFFSET.
     * Dest needs both x and w versions of the same register to handle the name
     * mismatch between instructions. This macro uses the lower 32b of a register when possible.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     */
.macro READ_BARRIER xDest, wDest, xObj, xTemp, wTemp, offset, number
#ifdef USE_READ_BARRIER
# ifdef USE_BAKER_READ_BARRIER
    ldr \wTemp, [\xObj, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbnz \wTemp, #LOCK_WORD_READ_BARRIER_STATE_SHIFT, .Lrb_slowpath\number
    // False dependency to avoid needing load/load fence.
    add \xObj, \xObj, \xTemp, lsr #32
    ldr \wDest, [\xObj, #\offset]    // Heap reference = 32b. This also zero-extends to \xDest.
    UNPOISON_HEAP_REF \wDest
    b .Lrb_exit\number
# endif  // USE_BAKER_READ_BARRIER
.Lrb_slowpath\number:
    // Store registers used in art_quick_aput_obj (x0-x4, LR), stack is 16B aligned.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 48
    SAVE_TWO_REGS x2, x3, 16
    SAVE_TWO_REGS x4, xLR, 32

    // mov x0, \xRef    // pass ref in x0 (no-op for now since parameter ref is unused)
    .ifnc \xObj, x1
        mov x1, \xObj    // pass xObj
    .endif
    mov w2, #\offset         // pass offset
    bl artReadBarrierSlow    // artReadBarrierSlow(ref, xObj, offset)
    // No need to unpoison return value in w0, artReadBarrierSlow() would do the unpoisoning.
    .ifnc \wDest, w0
        mov \wDest, w0    // save return value in wDest
    .endif

    // Conditionally restore saved registers
    POP_REG_NE x0, 0, \xDest
    POP_REG_NE x1, 8, \xDest
    POP_REG_NE x2, 16, \xDest
    POP_REG_NE x3, 24, \xDest
    POP_REG_NE x4, 32, \xDest
    RESTORE_REG xLR, 40
    DECREASE_FRAME 48
.Lrb_exit\number:
#else
    ldr \wDest, [\xObj, #\offset]    // Heap reference = 32b. This also zero-extends to \xDest.
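    // (With heap poisoning enabled, references are stored negated; UNPOISON_HEAP_REF
    // undoes that, and is a no-op otherwise.)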
    UNPOISON_HEAP_REF \wDest
#endif  // USE_READ_BARRIER
.endm

#ifdef USE_READ_BARRIER
    .extern artReadBarrierSlow
#endif
ENTRY art_quick_aput_obj
    cbz x2, .Ldo_aput_null
    READ_BARRIER x3, w3, x0, x3, w3, MIRROR_OBJECT_CLASS_OFFSET, 0          // Heap reference = 32b
                                                                            // This also zero-extends to x3
    READ_BARRIER x3, w3, x3, x4, w4, MIRROR_CLASS_COMPONENT_TYPE_OFFSET, 1  // Heap reference = 32b
                                                                            // This also zero-extends to x3
    READ_BARRIER x4, w4, x2, x4, w4, MIRROR_OBJECT_CLASS_OFFSET, 2          // Heap reference = 32b
                                                                            // This also zero-extends to x4
    cmp w3, w4    // value's type == array's component type - trivial assignability
    bne .Lcheck_assignability
.Ldo_aput:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]    // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #CARD_TABLE_CARD_SHIFT
    strb w3, [x3, x0]
    ret
.Ldo_aput_null:
    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing
    str w2, [x3, x1, lsl #2]    // Heap reference = 32b
    ret
.Lcheck_assignability:
    // Store arguments and link register
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 32
    SAVE_TWO_REGS x2, xLR, 16

    // Call runtime code
    mov x0, x3    // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    mov x1, x4    // Heap reference, 32b, "uncompress" = do nothing, already zero-extended
    bl artIsAssignableFromCode

    // Check for exception
    cbz x0, .Lthrow_array_store_exception

    // Restore
    .cfi_remember_state
    RESTORE_TWO_REGS x2, xLR, 16
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    add x3, x0, #MIRROR_OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing
    POISON_HEAP_REF w2
    str w2, [x3, x1, lsl #2]    // Heap reference = 32b
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #CARD_TABLE_CARD_SHIFT
    strb w3, [x3, x0]
    ret
    .cfi_restore_state        // Reset unwind info so following code unwinds.
    .cfi_def_cfa_offset 32    // workaround for clang bug: 31975598
.Lthrow_array_store_exception:
    RESTORE_TWO_REGS x2, xLR, 16
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 32

    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    mov x1, x2       // Pass value.
    mov x2, xSELF    // Pass Thread::Current.
    bl artThrowArrayStoreException    // (Object*, Object*, Thread*).
    brk 0    // Unreached.
END art_quick_aput_obj

// Macro to facilitate adding new allocation entrypoints.
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x1, xSELF                 // pass Thread::Current
    bl \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x2, xSELF                 // pass Thread::Current
    bl \entrypoint                // (uint32_t type_idx, Method* method, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
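// For illustration, a hypothetical use would be:
//   THREE_ARG_DOWNCALL art_quick_foo, artFooFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
// which saves the refs-only frame, appends Thread::Current as the last argument,
// calls the entrypoint, restores the frame, and runs the \return macro on the result.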
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x3, xSELF                 // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

// Macro to facilitate adding new allocation entrypoints.
.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x4, xSELF                 // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

// Macros taking opportunity of code similarities for downcalls.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x1, xSELF                 // pass Thread::Current
    bl \entrypoint                // (uint32_t type_idx, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x2, xSELF                 // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x3, xSELF                 // pass Thread::Current
    bl \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    \return
END \name
.endm

    /*
     * Macro for resolution and initialization of indexed DEX file
     * constants such as classes and strings.
     */
.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL name, entrypoint, runtime_method_offset = RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_EVERYTHING_FRAME \runtime_method_offset    // save everything for stack crawl
    mov x1, xSELF     // pass Thread::Current
    bl \entrypoint    // (int32_t index, Thread* self)
    cbz w0, 1f        // If result is null, deliver the OOME.
    .cfi_remember_state
    RESTORE_SAVE_EVERYTHING_FRAME_KEEP_X0
    REFRESH_MARKING_REGISTER
    ret               // return
    .cfi_restore_state
    .cfi_def_cfa_offset FRAME_SIZE_SAVE_EVERYTHING    // workaround for clang bug: 31975598
1:
    DELIVER_PENDING_EXCEPTION_FRAME_READY
END \name
.endm

.macro ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT name, entrypoint
    ONE_ARG_SAVE_EVERYTHING_DOWNCALL \name, \entrypoint, RUNTIME_SAVE_EVERYTHING_FOR_CLINIT_METHOD_OFFSET
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    cbz w0, 1f    // result zero branch over
    ret           // return
1:
    DELIVER_PENDING_EXCEPTION
.endm

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
TWO_ARG_REF_DOWNCALL art_quick_handle_fill_data, artHandleFillArrayDataFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

    /*
     * Entry from managed code when static storage is uninitialized; this stub will run the class
     * initializer and deliver the exception on error. On success the static storage base is
     * returned.
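     * (The storage base is the resolved class itself, which compiled code then uses
     * for direct static field accesses.)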
     */
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_initialize_static_storage, artInitializeStaticStorageFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL_FOR_CLINIT art_quick_resolve_type, artResolveTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_type_and_verify_access, artResolveTypeAndVerifyAccessFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_handle, artResolveMethodHandleFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_method_type, artResolveMethodTypeFromCode
ONE_ARG_SAVE_EVERYTHING_DOWNCALL art_quick_resolve_string, artResolveStringFromCode

// Note: Functions `art{Get,Set}<Kind>{Static,Instance}FromCompiledCode` are
// defined with a macro in runtime/entrypoints/quick/quick_field_entrypoints.cc.

ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_NON_TLAB_ALLOCATORS
// Comment out allocators that have arm64 specific asm.
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_region_tlab, RegionTLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_region_tlab, RegionTLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_region_tlab, RegionTLAB)

// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_OBJECT(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED8(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED16(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED32(_tlab, TLAB)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED64(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_BYTES(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_CHARS(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_STRING_FROM_STRING(_tlab, TLAB)

// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
// If isInitialized=0 the compiler can only assume it's been at least resolved.
.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name, isInitialized
ENTRY \c_name
    // Fast path rosalloc allocation.
    // x0: type, xSELF(x19): Thread::Current
    // x1-x7: free.
    ldr x3, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]    // Check if the thread local
                                                             // allocation stack has room.
                                                             // ldp won't work due to large offset.
    ldr x4, [xSELF, #THREAD_LOCAL_ALLOC_STACK_END_OFFSET]
    cmp x3, x4
    bhs .Lslow_path\c_name
    ldr w3, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]    // Load the object size (x3).
    cmp x3, #ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE    // Check if the size is for a thread
                                                       // local allocation.
    // If the class is not yet visibly initialized, or it is finalizable,
    // the object size will be very large to force the branch below to be taken.
    //
    // See Class::SetStatus() in class.cc for more details.
    bhs .Lslow_path\c_name
    // Compute the rosalloc bracket index
    // from the size. Since the size is
    // already aligned we can combine the
    // two shifts together.
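    // Worked example (assuming a 16-byte bracket quantum, i.e. quantum shift 4, and
    // 8-byte pointers, i.e. pointer shift 3): lsr #(4 - 3) turns an aligned size into
    // (size / 16) * 8, the bracket index already scaled to a pointer-array offset.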
    add x4, xSELF, x3, lsr #(ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)
    // Subtract pointer size since there
    // are no runs for 0 byte allocations
    // and the size is already aligned.
    ldr x4, [x4, #(THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)]
    // Load the free list head (x3). This
    // will be the return val.
    ldr x3, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
    cbz x3, .Lslow_path\c_name
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
    ldr x1, [x3, #ROSALLOC_SLOT_NEXT_OFFSET]    // Load the next pointer of the head
                                                // and update the list head with the
                                                // next pointer.
    str x1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)]
    // Store the class pointer in the
    // header. This also overwrites the
    // next pointer. The offsets are
    // asserted to match.

#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif
    POISON_HEAP_REF w0
    str w0, [x3, #MIRROR_OBJECT_CLASS_OFFSET]
    // Push the new object onto the thread
    // local allocation stack and
    // increment the thread local
    // allocation stack top.
    ldr x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
    str w3, [x1], #COMPRESSED_REFERENCE_SIZE    // (Increment x1 as a side effect.)
    str x1, [xSELF, #THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET]
    // Decrement the size of the free list.

    // After this "STR" the object is published to the thread local allocation stack,
    // and it will be observable from a runtime internal (e.g. Heap::VisitObjects) point of view.
    // It is not yet visible to the running (user) compiled code until after the return.
    //
    // To avoid the memory barrier prior to the "STR", a trick is employed, by differentiating
    // the state of the allocation stack slot. It can be a pointer to one of:
    // 0) Null entry, because the stack was bumped but the new pointer wasn't written yet.
    //    (The stack initial state is "null" pointers).
    // 1) A partially valid object, with an invalid class pointer to the next free rosalloc slot.
    // 2) A fully valid object, with a valid class pointer pointing to a real class.
    // Other states are not allowed.
    //
    // An object that is invalid only temporarily eventually becomes valid.
    // The internal runtime code simply checks if the object is null or is partial and then
    // ignores it.
    //
    // (Note: The actual check is done by seeing if a non-null object has a class pointer pointing
    // to ClassClass, and that the ClassClass's class pointer is self-cyclic. A rosalloc free slot
    // "next" pointer is not-cyclic.)
    //
    // See also b/28790624 for a listing of CLs dealing with this race.
    ldr w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]
    sub x1, x1, #1
    // TODO: consider combining this store
    // and the list head store above using
    // strd.
    str w1, [x4, #(ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)]

    mov x0, x3    // Set the return value and return.
    // No barrier. The class is already observably initialized (otherwise the fast
    // path size check above would fail) and new-instance allocations are protected
    // from publishing by the compiler which inserts its own StoreStore barrier.
    ret
.Lslow_path\c_name:
    SETUP_SAVE_REFS_ONLY_FRAME    // save callee saves in case of GC
    mov x1, xSELF                 // pass Thread::Current
    bl \cxx_name
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \c_name
.endm

ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc, /* isInitialized */ 0
ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc, /* isInitialized */ 1

// If isInitialized=1 then the compiler assumes the object's class has already been initialized.
// If isInitialized=0 the compiler can only assume it's been at least resolved.
.macro ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED slowPathLabel isInitialized
    ldr x4, [xSELF, #THREAD_LOCAL_POS_OFFSET]
    ldr x5, [xSELF, #THREAD_LOCAL_END_OFFSET]
    ldr w7, [x0, #MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET]    // Load the object size (x7).
    add x6, x4, x7    // Add object size to tlab pos.
    cmp x6, x5        // Check if it fits, overflow works
                      // since the tlab pos and end are 32
                      // bit values.

    // If the class is not yet visibly initialized, or it is finalizable,
    // the object size will be very large to force the branch below to be taken.
    //
    // See Class::SetStatus() in class.cc for more details.
    bhi \slowPathLabel
    str x6, [xSELF, #THREAD_LOCAL_POS_OFFSET]        // Store new thread_local_pos.
    ldr x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]    // Increment thread_local_objects.
    add x5, x5, #1
    str x5, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
    POISON_HEAP_REF w0
    str w0, [x4, #MIRROR_OBJECT_CLASS_OFFSET]        // Store the class pointer.
    mov x0, x4
    // No barrier. The class is already observably initialized (otherwise the fast
    // path size check above would fail) and new-instance allocations are protected
    // from publishing by the compiler which inserts its own StoreStore barrier.
    ret
.endm

// The common code for art_quick_alloc_object_*_tlab and *_region_tlab.
.macro GENERATE_ALLOC_OBJECT_RESOLVED_TLAB name, entrypoint, isInitialized
ENTRY \name
    // Fast path region tlab allocation.
    // x0: type, xSELF(x19): Thread::Current
    // x1-x7: free.
    ALLOC_OBJECT_TLAB_FAST_PATH_RESOLVED .Lslow_path\name, \isInitialized
.Lslow_path\name:
    SETUP_SAVE_REFS_ONLY_FRAME    // Save callee saves in case of GC.
    mov x1, xSELF                 // Pass Thread::Current.
    bl \entrypoint                // (mirror::Class*, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm

GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_region_tlab, artAllocObjectFromCodeResolvedRegionTLAB, /* isInitialized */ 0
GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_region_tlab, artAllocObjectFromCodeInitializedRegionTLAB, /* isInitialized */ 1
GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_resolved_tlab, artAllocObjectFromCodeResolvedTLAB, /* isInitialized */ 0
GENERATE_ALLOC_OBJECT_RESOLVED_TLAB art_quick_alloc_object_initialized_tlab, artAllocObjectFromCodeInitializedTLAB, /* isInitialized */ 1

.macro ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE slowPathLabel, xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    and \xTemp1, \xTemp1, #OBJECT_ALIGNMENT_MASK_TOGGLED64    // Apply alignment mask
                                                              // (addr + 7) & ~7. The mask must
                                                              // be 64 bits to keep high bits in
                                                              // case of overflow.
    // Negative sized arrays are handled here since xCount holds a zero extended 32 bit value.
    // Negative ints become large 64 bit unsigned ints which will always be larger than max signed
    // 32 bit int. Since the max shift for arrays is 3, it can not become a negative 64 bit int.
    cmp \xTemp1, #MIN_LARGE_OBJECT_THRESHOLD    // Possibly a large object, go slow
    bhs \slowPathLabel                          // path.

    ldr \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET]    // Check tlab for space, note that
                                                      // we use (end - begin) to handle
                                                      // negative size arrays. It is
                                                      // assumed that a negative size will
                                                      // always be greater unsigned than
                                                      // region size.
    ldr \xTemp2, [xSELF, #THREAD_LOCAL_END_OFFSET]
    sub \xTemp2, \xTemp2, \xTemp0
    cmp \xTemp1, \xTemp2

    // The array class is always initialized here. Unlike new-instance,
    // this does not act as a double test.
    bhi \slowPathLabel
    // "Point of no slow path". Won't go to the slow path from here on. OK to clobber x0 and x1.
    // Move old thread_local_pos to x0
    // for the return value.
    mov x0, \xTemp0
    add \xTemp0, \xTemp0, \xTemp1
    str \xTemp0, [xSELF, #THREAD_LOCAL_POS_OFFSET]        // Store new thread_local_pos.
    ldr \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]    // Increment thread_local_objects.
    add \xTemp0, \xTemp0, #1
    str \xTemp0, [xSELF, #THREAD_LOCAL_OBJECTS_OFFSET]
    POISON_HEAP_REF \wClass
    str \wClass, [x0, #MIRROR_OBJECT_CLASS_OFFSET]    // Store the class pointer.
    str \wCount, [x0, #MIRROR_ARRAY_LENGTH_OFFSET]    // Store the array length.
// new-array is special. The class is loaded and immediately goes to the Initialized state
// before it is published. Therefore the only fence needed is for the publication of the object.
// See ClassLinker::CreateArrayClass() for more details.

// For publication of the new array, we don't need a 'dmb ishst' here.
// The compiler generates 'dmb ishst' for all new-array insts.
    ret
.endm

.macro GENERATE_ALLOC_ARRAY_TLAB name, entrypoint, size_setup
ENTRY \name
    // Fast path array allocation for region tlab allocation.
    // x0: mirror::Class* type
    // x1: int32_t component_count
    // x2-x7: free.
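    // The class is copied to x3 so that x0 still holds it if we take the slow path,
    // which calls the entrypoint as (mirror::Class* klass, int32_t count, Thread*).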
    mov    x3, x0
    \size_setup x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
    ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE .Lslow_path\name, x3, w3, x1, w1, x4, w4, x5, w5, x6, w6
.Lslow_path\name:
    // x0: mirror::Class* klass
    // x1: int32_t component_count
    // x2: Thread* self
    SETUP_SAVE_REFS_ONLY_FRAME                // save callee saves in case of GC
    mov    x2, xSELF                          // pass Thread::Current
    bl     \entrypoint
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \name
.endm

.macro COMPUTE_ARRAY_SIZE_UNKNOWN xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    // Array classes are never finalizable or uninitialized, no need to check.
    ldr    \wTemp0, [\xClass, #MIRROR_CLASS_COMPONENT_TYPE_OFFSET]  // Load component type.
    UNPOISON_HEAP_REF \wTemp0
    ldr    \wTemp0, [\xTemp0, #MIRROR_CLASS_OBJECT_PRIMITIVE_TYPE_OFFSET]
    lsr    \xTemp0, \xTemp0, #PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT  // Component size shift is in high 16
                                              // bits.
    lsl    \xTemp1, \xCount, \xTemp0          // Calculate data size. xCount holds a
                                              // 32 bit value, so it can not overflow.
    // Add array data offset and alignment.
    add    \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
#if MIRROR_LONG_ARRAY_DATA_OFFSET != MIRROR_INT_ARRAY_DATA_OFFSET + 4
#error Long array data offset must be 4 greater than int array data offset.
#endif

    add    \xTemp0, \xTemp0, #1               // Add 4 to the size only if the
                                              // component size shift is 3
                                              // (for 64 bit alignment).
    and    \xTemp0, \xTemp0, #4
    add    \xTemp1, \xTemp1, \xTemp0
.endm

.macro COMPUTE_ARRAY_SIZE_8 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    // Add array data offset and alignment.
    add    \xTemp1, \xCount, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm

.macro COMPUTE_ARRAY_SIZE_16 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    lsl    \xTemp1, \xCount, #1
    // Add array data offset and alignment.
    add    \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm

.macro COMPUTE_ARRAY_SIZE_32 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    lsl    \xTemp1, \xCount, #2
    // Add array data offset and alignment.
    add    \xTemp1, \xTemp1, #(MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm

.macro COMPUTE_ARRAY_SIZE_64 xClass, wClass, xCount, wCount, xTemp0, wTemp0, xTemp1, wTemp1, xTemp2, wTemp2
    lsl    \xTemp1, \xCount, #3
    // Add array data offset and alignment.
    add    \xTemp1, \xTemp1, #(MIRROR_WIDE_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK)
.endm

// TODO(ngeoffray): art_quick_alloc_array_resolved_region_tlab is not used for arm64; remove
// the entrypoint once all backends have been updated to use the size variants.
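
// The size computation in COMPUTE_ARRAY_SIZE_UNKNOWN above is roughly, as a C++-ish
// sketch (the member name is illustrative):
//   size_t shift = component_type->primitive_type_ >> PRIMITIVE_TYPE_SIZE_SHIFT_SHIFT;
//   size_t size  = (count << shift) + MIRROR_INT_ARRAY_DATA_OFFSET + OBJECT_ALIGNMENT_MASK;
//   size += (shift + 1) & 4;  // 4 more bytes only when shift == 3 (64-bit components).
// ALLOC_ARRAY_TLAB_FAST_PATH_RESOLVED_WITH_SIZE then finishes the (size + 7) & ~7
// round-up by masking with OBJECT_ALIGNMENT_MASK_TOGGLED64.
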
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_8
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_16
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_32
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_region_tlab, artAllocArrayFromCodeResolvedRegionTLAB, COMPUTE_ARRAY_SIZE_64
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_UNKNOWN
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved8_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_8
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved16_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_16
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved32_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_32
GENERATE_ALLOC_ARRAY_TLAB art_quick_alloc_array_resolved64_tlab, artAllocArrayFromCodeResolvedTLAB, COMPUTE_ARRAY_SIZE_64

    /*
     * Called by managed code when the thread has been asked to suspend.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    // Save callee saves for stack crawl.
    SETUP_SAVE_EVERYTHING_FRAME RUNTIME_SAVE_EVERYTHING_FOR_SUSPEND_CHECK_METHOD_OFFSET
    mov    x0, xSELF
    bl     artTestSuspendFromCode             // (Thread*)
    RESTORE_SAVE_EVERYTHING_FRAME
    REFRESH_MARKING_REGISTER
    ret
END art_quick_test_suspend

ENTRY art_quick_implicit_suspend
    mov    x0, xSELF
    SETUP_SAVE_REFS_ONLY_FRAME                // save callee saves for stack crawl
    bl     artTestSuspendFromCode             // (Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    ret
END art_quick_implicit_suspend

    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * x0 holds the proxy method and x1 holds the receiver. The frame size of the invoked proxy
     * method agrees with a ref and args callee save frame.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0
    mov    x2, xSELF                          // pass Thread::Current
    mov    x3, sp                             // pass SP
    bl     artQuickProxyInvokeHandler         // (Method* proxy method, receiver, Thread*, SP)
    ldr    x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz   x2, .Lexception_in_proxy           // branch if an exception is pending
    RESTORE_SAVE_REFS_AND_ARGS_FRAME          // Restore frame
    REFRESH_MARKING_REGISTER
    fmov   d0, x0                             // Store result in d0 in case it was float or double
    ret                                       // return on success
.Lexception_in_proxy:
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler

    /*
     * Called to resolve an IMT conflict.
     * x0 is the conflict ArtMethod.
     * xIP1 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to xIP0, xIP1, x13-x15, and x0.
     */
    .extern artLookupResolvedMethod
ENTRY art_quick_imt_conflict_trampoline
    ldr    xIP0, [sp, #0]                     // Load referrer.
    // Load the declaring class (without read barrier) and access flags (for obsolete method check).
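    // Roughly: declaring_class = referrer->declaring_class_; access_flags = referrer->access_flags_.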
    // The obsolete flag is set with suspended threads, so we do not need an acquire operation here.
#if ART_METHOD_ACCESS_FLAGS_OFFSET != ART_METHOD_DECLARING_CLASS_OFFSET + 4
#error "Expecting declaring class and access flags to be consecutive for LDP."
#endif
    ldp    wIP0, w15, [xIP0, #ART_METHOD_DECLARING_CLASS_OFFSET]
    // If the method is obsolete, just go through the dex cache miss slow path.
    tbnz   x15, #ACC_OBSOLETE_METHOD_SHIFT, .Limt_conflict_trampoline_dex_cache_miss
    ldr    wIP0, [xIP0, #MIRROR_CLASS_DEX_CACHE_OFFSET]  // Load the DexCache (without read barrier).
    UNPOISON_HEAP_REF wIP0
    ubfx   x15, xIP1, #0, #METHOD_DEX_CACHE_HASH_BITS  // Calculate DexCache method slot index.
    ldr    xIP0, [xIP0, #MIRROR_DEX_CACHE_RESOLVED_METHODS_OFFSET]  // Load the resolved methods.
    add    xIP0, xIP0, x15, lsl #(POINTER_SIZE_SHIFT + 1)  // Compute DexCache method slot address.

    // Relaxed atomic load x14:x15 from the dex cache slot.
.Limt_conflict_trampoline_retry_load:
    ldxp   x14, x15, [xIP0]
    stxp   w13, x14, x15, [xIP0]
    cbnz   w13, .Limt_conflict_trampoline_retry_load

    cmp    x15, xIP1                          // Compare method index to see if we had a DexCache method hit.
    bne    .Limt_conflict_trampoline_dex_cache_miss
.Limt_conflict_trampoline_have_interface_method:
    ldr    xIP1, [x0, #ART_METHOD_JNI_OFFSET_64]  // Load ImtConflictTable.
    ldr    x0, [xIP1]                         // Load first entry in ImtConflictTable.
.Limt_table_iterate:
    cmp    x0, x14
    // Branch if found. Benchmarks have shown doing a branch here is better.
    beq    .Limt_table_found
    // If the entry is null, the interface method is not in the ImtConflictTable.
    cbz    x0, .Lconflict_trampoline
    // Iterate over the entries of the ImtConflictTable.
    ldr    x0, [xIP1, #(2 * __SIZEOF_POINTER__)]!
    b      .Limt_table_iterate
.Limt_table_found:
    // We successfully hit an entry in the table. Load the target method
    // and jump to it.
    ldr    x0, [xIP1, #__SIZEOF_POINTER__]
    ldr    xIP0, [x0, #ART_METHOD_QUICK_CODE_OFFSET_64]
    br     xIP0
.Lconflict_trampoline:
    // Call the runtime stub to populate the ImtConflictTable and jump to the
    // resolved method.
    mov    x0, x14                            // Load interface method.
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
.Limt_conflict_trampoline_dex_cache_miss:
    // We're not creating a proper runtime method frame here;
    // artLookupResolvedMethod() is not allowed to walk the stack.

    // Save GPR args and return address, allocate space for FPR args, align stack.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, (8 * 8 + 8 * 8 + 8 + 8)
    SAVE_TWO_REGS x2, x3, 16
    SAVE_TWO_REGS x4, x5, 32
    SAVE_TWO_REGS x6, x7, 48
    SAVE_REG xLR, (8 * 8 + 8 * 8 + 8)

    // Save FPR args.
    stp    d0, d1, [sp, #64]
    stp    d2, d3, [sp, #80]
    stp    d4, d5, [sp, #96]
    stp    d6, d7, [sp, #112]

    mov    x0, xIP1                           // Pass method index.
    ldr    x1, [sp, #(8 * 8 + 8 * 8 + 8 + 8)] // Pass referrer.
    bl     artLookupResolvedMethod            // (uint32_t method_index, ArtMethod* referrer)
    mov    x14, x0                            // Move the interface method to x14 where the loop above expects it.

    // Restore FPR args.
    ldp    d0, d1, [sp, #64]
    ldp    d2, d3, [sp, #80]
    ldp    d4, d5, [sp, #96]
    ldp    d6, d7, [sp, #112]

    // Restore GPR args and return address.
    RESTORE_REG xLR, (8 * 8 + 8 * 8 + 8)
    RESTORE_TWO_REGS x2, x3, 16
    RESTORE_TWO_REGS x4, x5, 32
    RESTORE_TWO_REGS x6, x7, 48
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, (8 * 8 + 8 * 8 + 8 + 8)

    // If the method wasn't resolved, skip the lookup and go to artInvokeInterfaceTrampoline().
    cbz    x14, .Lconflict_trampoline
    b      .Limt_conflict_trampoline_have_interface_method
END art_quick_imt_conflict_trampoline

ENTRY art_quick_resolution_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    mov    x2, xSELF
    mov    x3, sp
    bl     artQuickResolutionTrampoline       // (called, receiver, Thread*, SP)
    cbz    x0, 1f
    mov    xIP0, x0                           // Remember returned code pointer in xIP0.
    ldr    x0, [sp, #0]                       // artQuickResolutionTrampoline puts called method in *SP.
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    REFRESH_MARKING_REGISTER
    br     xIP0
1:
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline

/*
 * Generic JNI frame layout:
 *
 * #-------------------#
 * |                   |
 * | caller method...  |
 * #-------------------#    <--- SP on entry
 * | Return X30/LR     |
 * | X29/FP            |    callee save
 * | X28               |    callee save
 * | X27               |    callee save
 * | X26               |    callee save
 * | X25               |    callee save
 * | X24               |    callee save
 * | X23               |    callee save
 * | X22               |    callee save
 * | X21               |    callee save
 * | X20               |    callee save
 * | X7                |    arg7
 * | X6                |    arg6
 * | X5                |    arg5
 * | X4                |    arg4
 * | X3                |    arg3
 * | X2                |    arg2
 * | X1                |    arg1
 * | D7                |    float arg 8
 * | D6                |    float arg 7
 * | D5                |    float arg 6
 * | D4                |    float arg 5
 * | D3                |    float arg 4
 * | D2                |    float arg 3
 * | D1                |    float arg 2
 * | D0                |    float arg 1
 * | padding           |    // 8B
 * | Method*           | <- X0 (Managed frame similar to SaveRefsAndArgs.)
 * #-------------------#
 * | local ref cookie  |    // 4B
 * | padding           |    // 0B or 4B to align handle scope on 8B address
 * | handle scope      |    // Size depends on number of references; multiple of 4B.
 * #-------------------#
 * | JNI Stack Args    |    // Empty if all args fit into registers x0-x7, d0-d7.
 * #-------------------#    <--- SP on native call (1)
 * | Free scratch      |
 * #-------------------#
 * | SP for JNI call   |    // Pointer to (1).
 * #-------------------#
 * | Hidden arg        |    // For @CriticalNative
 * #-------------------#
 * |                   |
 * | Stack for Regs    |    The trampoline assembly will pop these values
 * |                   |    into registers for native call
 * #-------------------#
 */
    /*
     * Called to do a generic JNI down-call
     */
    .extern artQuickGenericJniTrampoline
ENTRY art_quick_generic_jni_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_X0

    // Save SP, so we can have static CFI info.
    mov    x28, sp
    .cfi_def_cfa_register x28

    // This looks the same, but is different: this will be updated to point to the bottom
    // of the frame when the handle scope is inserted.
    mov    xFP, sp

    mov    xIP0, #5120
    sub    sp, sp, xIP0

    // Prepare for artQuickGenericJniTrampoline call:
    //   (Thread*, managed_sp, reserved_area)
    //      x0         x1           x2        <= C calling convention
    //    xSELF       xFP           sp        <= where they are

    mov    x0, xSELF                          // Thread*
    mov    x1, xFP                            // SP for the managed frame.
    mov    x2, sp                             // reserved area for arguments and other saved data (up to managed frame)
    bl     artQuickGenericJniTrampoline       // (Thread*, managed_sp, reserved_area)

    // The C call will have registered the complete save-frame on success.
    // The result of the call is:
    //     x0: pointer to native code, 0 on error.
    // The bottom of the reserved area contains values for arg registers,
    // hidden arg register and SP for out args for the call.

    // Check for error (class init check or locking for synchronized native method can throw).
    cbz    x0, .Lexception_in_native

    // Save the code pointer.
    mov    xIP0, x0

    // Load parameters from frame into registers.
    ldp    x0, x1, [sp]
    ldp    x2, x3, [sp, #16]
    ldp    x4, x5, [sp, #32]
    ldp    x6, x7, [sp, #48]

    ldp    d0, d1, [sp, #64]
    ldp    d2, d3, [sp, #80]
    ldp    d4, d5, [sp, #96]
    ldp    d6, d7, [sp, #112]

    // Load hidden arg (x15) for @CriticalNative and SP for out args.
    ldp    x15, xIP1, [sp, #128]

    // Apply the new SP for out args, releasing unneeded reserved area.
    mov    sp, xIP1

    blr    xIP0                               // native call.

    // Result sign extension is handled in C code.
    // Prepare for artQuickGenericJniEndTrampoline call:
    //   (Thread*, result, result_f)
    //      x0       x1       x2      <= C calling convention
    mov    x1, x0                             // Result (from saved).
    mov    x0, xSELF                          // Thread register.
    fmov   x2, d0                             // d0 will contain the floating point result, but it needs to go into x2.

    bl     artQuickGenericJniEndTrampoline

    // Pending exceptions possible.
    ldr    x2, [xSELF, THREAD_EXCEPTION_OFFSET]
    cbnz   x2, .Lexception_in_native

    // Tear down the alloca.
    mov    sp, x28
    .cfi_def_cfa_register sp

    // Tear down the callee-save frame.
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    REFRESH_MARKING_REGISTER

    // Store the result also in d0 in case the return value is a floating-point value.
    fmov   d0, x0
    ret

.Lexception_in_native:
    // Move to x1 then sp to please assembler.
    ldr    x1, [xSELF, # THREAD_TOP_QUICK_FRAME_OFFSET]
    add    sp, x1, #-1                        // Remove the GenericJNI tag.
    .cfi_def_cfa_register sp
    // This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline

/*
 * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
 * of a quick call:
 * x0 = method being called/to bridge to.
 * x1..x7, d0..d7 = arguments to that method.
 */
ENTRY art_quick_to_interpreter_bridge
    SETUP_SAVE_REFS_AND_ARGS_FRAME            // Set up frame and save arguments.

    // x0 will contain mirror::ArtMethod* method.
    mov    x1, xSELF                          // Pass Thread::Current().
    mov    x2, sp

    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    //                                      mirror::ArtMethod** sp)
    bl     artQuickToInterpreterBridge

    RESTORE_SAVE_REFS_AND_ARGS_FRAME          // TODO: no need to restore arguments in this case.
    REFRESH_MARKING_REGISTER

    fmov   d0, x0

    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge

/*
 * Called to attempt to execute an obsolete method.
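 * On entry x0 holds the obsolete ArtMethod*; the stub does not return, it raises
 * an exception via artInvokeObsoleteMethod(method, Thread::Current()).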
 */
ONE_ARG_RUNTIME_EXCEPTION art_invoke_obsolete_method_stub, artInvokeObsoleteMethod


//
// Instrumentation-related stubs
//
    .extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_SAVE_REFS_AND_ARGS_FRAME

    mov    x20, x0                            // Preserve method reference in a callee-save.

    mov    x2, xSELF
    mov    x3, sp                             // Pass SP
    bl     artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, SP)

    mov    xIP0, x0                           // x0 = result of call.
    mov    x0, x20                            // Reload method reference.

    RESTORE_SAVE_REFS_AND_ARGS_FRAME          // Note: will restore xSELF
    REFRESH_MARKING_REGISTER
    cbz    xIP0, 1f                           // Deliver the pending exception if method is null.
    adr    xLR, art_quick_instrumentation_exit
    br     xIP0                               // Tail-call method with lr set to art_quick_instrumentation_exit.

1:
    DELIVER_PENDING_EXCEPTION
END art_quick_instrumentation_entry

    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_exit
    mov    xLR, #0                            // Clobber LR for later checks.
    SETUP_SAVE_EVERYTHING_FRAME

    add    x3, sp, #16                        // Pass floating-point result pointer, in kSaveEverything frame.
    add    x2, sp, #272                       // Pass integer result pointer, in kSaveEverything frame.
    mov    x1, sp                             // Pass SP.
    mov    x0, xSELF                          // Pass Thread.
    bl     artInstrumentationMethodExitFromCode  // (Thread*, SP, gpr_res*, fpr_res*)

    cbz    x0, .Ldo_deliver_instrumentation_exception
                                              // Handle error
    cbnz   x1, .Ldeoptimize
    // Normal return.
    str    x0, [sp, #FRAME_SIZE_SAVE_EVERYTHING - 8]
                                              // Set return pc.
    RESTORE_SAVE_EVERYTHING_FRAME
    REFRESH_MARKING_REGISTER
    br     lr
.Ldo_deliver_instrumentation_exception:
    DELIVER_PENDING_EXCEPTION_FRAME_READY
.Ldeoptimize:
    str    x1, [sp, #FRAME_SIZE_SAVE_EVERYTHING - 8]
                                              // Set return pc.
    RESTORE_SAVE_EVERYTHING_FRAME
    // Jump to art_quick_deoptimize.
    b      art_quick_deoptimize
END art_quick_instrumentation_exit

    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_EVERYTHING_FRAME
    mov    x0, xSELF                          // Pass thread.
    bl     artDeoptimize                      // (Thread*)
    brk 0
END art_quick_deoptimize

    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_EVERYTHING_FRAME
    mov    x1, xSELF                          // Pass thread.
    bl     artDeoptimizeFromCompiledCode      // (DeoptimizationKind, Thread*)
    brk 0
END art_quick_deoptimize_from_compiled_code


    /*
     * String's indexOf.
     *
     * TODO: Not very optimized.
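     *
     * Roughly: for (i = clamp(w2, 0, count); i < count; ++i)
     *            if (value[i] == w1) return i;
     *          return -1;
     *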
     * On entry:
     *    x0: string object (known non-null)
     *    w1: char to match (known <= 0xFFFF)
     *    w2: Starting offset in string data
     */
ENTRY art_quick_indexof
#if (STRING_COMPRESSION_FEATURE)
    ldr    w4, [x0, #MIRROR_STRING_COUNT_OFFSET]
#else
    ldr    w3, [x0, #MIRROR_STRING_COUNT_OFFSET]
#endif
    add    x0, x0, #MIRROR_STRING_VALUE_OFFSET
#if (STRING_COMPRESSION_FEATURE)
    /* w4 holds count (with flag) and w3 holds actual length */
    lsr    w3, w4, #1
#endif
    /* Clamp start to [0..count] */
    cmp    w2, #0
    csel   w2, wzr, w2, lt
    cmp    w2, w3
    csel   w2, w3, w2, gt

    /* Save a copy to compute result */
    mov    x5, x0

#if (STRING_COMPRESSION_FEATURE)
    tbz    w4, #0, .Lstring_indexof_compressed
#endif
    /* Build pointer to start of data to compare and pre-bias */
    add    x0, x0, x2, lsl #1
    sub    x0, x0, #2
    /* Compute iteration count */
    sub    w2, w3, w2

    /*
     * At this point we have:
     *    x0: start of the data to test
     *    w1: char to compare
     *    w2: iteration count
     *    x5: original start of string data
     */

    subs   w2, w2, #4
    b.lt   .Lindexof_remainder

.Lindexof_loop4:
    ldrh   w6, [x0, #2]!
    ldrh   w7, [x0, #2]!
    ldrh   wIP0, [x0, #2]!
    ldrh   wIP1, [x0, #2]!
    cmp    w6, w1
    b.eq   .Lmatch_0
    cmp    w7, w1
    b.eq   .Lmatch_1
    cmp    wIP0, w1
    b.eq   .Lmatch_2
    cmp    wIP1, w1
    b.eq   .Lmatch_3
    subs   w2, w2, #4
    b.ge   .Lindexof_loop4

.Lindexof_remainder:
    adds   w2, w2, #4
    b.eq   .Lindexof_nomatch

.Lindexof_loop1:
    ldrh   w6, [x0, #2]!
    cmp    w6, w1
    b.eq   .Lmatch_3
    subs   w2, w2, #1
    b.ne   .Lindexof_loop1

.Lindexof_nomatch:
    mov    x0, #-1
    ret

.Lmatch_0:
    sub    x0, x0, #6
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
.Lmatch_1:
    sub    x0, x0, #4
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
.Lmatch_2:
    sub    x0, x0, #2
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
.Lmatch_3:
    sub    x0, x0, x5
    asr    x0, x0, #1
    ret
#if (STRING_COMPRESSION_FEATURE)
    /*
     * Comparing compressed string character-per-character with
     * input character
     */
.Lstring_indexof_compressed:
    add    x0, x0, x2
    sub    x0, x0, #1
    sub    w2, w3, w2
.Lstring_indexof_compressed_loop:
    subs   w2, w2, #1
    b.lt   .Lindexof_nomatch
    ldrb   w6, [x0, #1]!
    cmp    w6, w1
    b.eq   .Lstring_indexof_compressed_matched
    b      .Lstring_indexof_compressed_loop
.Lstring_indexof_compressed_matched:
    sub    x0, x0, x5
    ret
#endif
END art_quick_indexof

    .extern artStringBuilderAppend
ENTRY art_quick_string_builder_append
    SETUP_SAVE_REFS_ONLY_FRAME                // save callee saves in case of GC
    add    x1, sp, #(FRAME_SIZE_SAVE_REFS_ONLY + __SIZEOF_POINTER__)  // pass args
    mov    x2, xSELF                          // pass Thread::Current
    bl     artStringBuilderAppend             // (uint32_t, const uint32_t*, Thread*)
    RESTORE_SAVE_REFS_ONLY_FRAME
    REFRESH_MARKING_REGISTER
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END art_quick_string_builder_append

    /*
     * Create a function `name` calling the ReadBarrier::Mark routine,
     * getting its argument and returning its result through W register
     * `wreg` (corresponding to X register `xreg`), saving and restoring
     * all caller-save registers.
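     *
     * Roughly (helper names illustrative):
     *   wreg = (ref == null || IsMarked(ref)) ? ref
     *        : IsForwardingAddress(lock_word) ? ForwardingAddress(lock_word)
     *        : artReadBarrierMark(ref);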
     *
     * If `wreg` is different from `w0`, the generated function follows a
     * non-standard runtime calling convention:
     * - register `wreg` is used to pass the (sole) argument of this
     *   function (instead of W0);
     * - register `wreg` is used to return the result of this function
     *   (instead of W0);
     * - W0 is treated like a normal (non-argument) caller-save register;
     * - everything else is the same as in the standard runtime calling
     *   convention (e.g. standard callee-save registers are preserved).
     */
.macro READ_BARRIER_MARK_REG name, wreg, xreg
ENTRY \name
    // Reference is null, no work to do at all.
    cbz    \wreg, .Lret_rb_\name
    // Use wIP0 as temp and check the mark bit of the reference. wIP0 is not used by the compiler.
    ldr    wIP0, [\xreg, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbz    wIP0, #LOCK_WORD_MARK_BIT_SHIFT, .Lnot_marked_rb_\name
.Lret_rb_\name:
    ret
.Lnot_marked_rb_\name:
    // Check if the top two bits are one; if this is the case it is a forwarding address.
    tst    wIP0, wIP0, lsl #1
    bmi    .Lret_forwarding_address\name
.Lslow_rb_\name:
    /*
     * Allocate 44 stack slots * 8 = 352 bytes:
     * - 19 slots for core registers X0-X15, X17, X19, LR
     * - 1 slot padding
     * - 24 slots for floating-point registers D0-D7 and D16-D31
     */
    // We must not clobber IP1 since code emitted for HLoadClass and HLoadString
    // relies on IP1 being preserved.
    // Save all potentially live caller-save core registers.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 352
    SAVE_TWO_REGS x2, x3, 16
    SAVE_TWO_REGS x4, x5, 32
    SAVE_TWO_REGS x6, x7, 48
    SAVE_TWO_REGS x8, x9, 64
    SAVE_TWO_REGS x10, x11, 80
    SAVE_TWO_REGS x12, x13, 96
    SAVE_TWO_REGS x14, x15, 112
    SAVE_TWO_REGS x17, x19, 128               // Skip x16, i.e. IP0, and x18, the platform register.
    SAVE_REG xLR, 144                         // Save also return address.
    // Save all potentially live caller-save floating-point registers.
    stp    d0, d1, [sp, #160]
    stp    d2, d3, [sp, #176]
    stp    d4, d5, [sp, #192]
    stp    d6, d7, [sp, #208]
    stp    d16, d17, [sp, #224]
    stp    d18, d19, [sp, #240]
    stp    d20, d21, [sp, #256]
    stp    d22, d23, [sp, #272]
    stp    d24, d25, [sp, #288]
    stp    d26, d27, [sp, #304]
    stp    d28, d29, [sp, #320]
    stp    d30, d31, [sp, #336]

    .ifnc \wreg, w0
    mov    w0, \wreg                          // Pass arg1 - obj from `wreg`.
    .endif
    bl     artReadBarrierMark                 // artReadBarrierMark(obj)
    .ifnc \wreg, w0
    mov    \wreg, w0                          // Return result into `wreg`.
    .endif

    // Restore core regs, except `xreg`, as `wreg` is used to return the
    // result of this function (simply remove it from the stack instead).
    POP_REGS_NE x0, x1, 0, \xreg
    POP_REGS_NE x2, x3, 16, \xreg
    POP_REGS_NE x4, x5, 32, \xreg
    POP_REGS_NE x6, x7, 48, \xreg
    POP_REGS_NE x8, x9, 64, \xreg
    POP_REGS_NE x10, x11, 80, \xreg
    POP_REGS_NE x12, x13, 96, \xreg
    POP_REGS_NE x14, x15, 112, \xreg
    POP_REGS_NE x17, x19, 128, \xreg
    POP_REG_NE xLR, 144, \xreg                // Restore also return address.
    // Restore floating-point registers.
    ldp    d0, d1, [sp, #160]
    ldp    d2, d3, [sp, #176]
    ldp    d4, d5, [sp, #192]
    ldp    d6, d7, [sp, #208]
    ldp    d16, d17, [sp, #224]
    ldp    d18, d19, [sp, #240]
    ldp    d20, d21, [sp, #256]
    ldp    d22, d23, [sp, #272]
    ldp    d24, d25, [sp, #288]
    ldp    d26, d27, [sp, #304]
    ldp    d28, d29, [sp, #320]
    ldp    d30, d31, [sp, #336]
    // Remove frame and return.
    DECREASE_FRAME 352
    ret
.Lret_forwarding_address\name:
    // Shift left by the forwarding address shift. This clears out the state bits since they are
    // in the top 2 bits of the lock word.
    lsl    \wreg, wIP0, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
    ret
END \name
.endm

READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg00, w0, x0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, w1, x1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, w2, x2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, w3, x3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, w4, x4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, w5, x5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, w6, x6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, w7, x7
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, w8, x8
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, w9, x9
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, w10, x10
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, w11, x11
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, w12, x12
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, w13, x13
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, w14, x14
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg15, w15, x15
// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg16, w16, x16 -- IP0 is blocked
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, w17, x17
// READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, w18, x18 -- x18 is blocked
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, w19, x19
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, w20, x20
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, w21, x21
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, w22, x22
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg23, w23, x23
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg24, w24, x24
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg25, w25, x25
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg26, w26, x26
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg27, w27, x27
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg28, w28, x28
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, w29, x29


.macro SELECT_X_OR_W_FOR_MACRO macro_to_use, x, w, xreg
    .if \xreg
    \macro_to_use \x
    .else
    \macro_to_use \w
    .endif
.endm

.macro FOR_REGISTERS macro_for_register, macro_for_reserved_register, xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x0, w0, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x1, w1, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x2, w2, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x3, w3, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x4, w4, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x5, w5, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x6, w6, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x7, w7, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x8, w8, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x9, w9, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x10, w10, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x11, w11, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x12, w12, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x13, w13, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x14, w14, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x15, w15, \xreg
    \macro_for_reserved_register              // IP0 is reserved
    \macro_for_reserved_register              // IP1 is reserved
    \macro_for_reserved_register              // x18 is reserved
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x19, w19, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x20, w20, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x21, w21, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x22, w22, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x23, w23, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x24, w24, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x25, w25, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x26, w26, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x27, w27, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x28, w28, \xreg
    SELECT_X_OR_W_FOR_MACRO \macro_for_register, x29, w29, \xreg
    \macro_for_reserved_register              // lr is reserved
    \macro_for_reserved_register              // sp is reserved
.endm

.macro FOR_XREGISTERS macro_for_register, macro_for_reserved_register
    FOR_REGISTERS \macro_for_register, \macro_for_reserved_register, /* xreg */ 1
.endm

.macro FOR_WREGISTERS macro_for_register, macro_for_reserved_register
    FOR_REGISTERS \macro_for_register, \macro_for_reserved_register, /* xreg */ 0
.endm

.macro BRK0_BRK0
    brk 0
    brk 0
.endm

#if BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET != BAKER_MARK_INTROSPECTION_ARRAY_LDR_OFFSET
#error "Array and field introspection code sharing requires same LDR offset."
#endif
.macro INTROSPECTION_ARRAY_LOAD index_reg
    ldr    wIP0, [xIP0, \index_reg, lsl #2]
    b      art_quick_read_barrier_mark_introspection
.endm

.macro MOV_WIP0_TO_WREG_AND_BL_LR reg
    mov    \reg, wIP0
    br     lr                                 // Do not use RET as we do not enter the entrypoint with "BL".
.endm

.macro READ_BARRIER_MARK_INTROSPECTION_SLOW_PATH ldr_offset
    /*
     * Allocate 42 stack slots * 8 = 336 bytes:
     * - 18 slots for core registers X0-X15, X19, LR
     * - 24 slots for floating-point registers D0-D7 and D16-D31
     */
    // Save all potentially live caller-save core registers.
    SAVE_TWO_REGS_INCREASE_FRAME x0, x1, 336
    SAVE_TWO_REGS x2, x3, 16
    SAVE_TWO_REGS x4, x5, 32
    SAVE_TWO_REGS x6, x7, 48
    SAVE_TWO_REGS x8, x9, 64
    SAVE_TWO_REGS x10, x11, 80
    SAVE_TWO_REGS x12, x13, 96
    SAVE_TWO_REGS x14, x15, 112
    // Skip x16, x17, i.e. IP0, IP1, and x18, the platform register.
    SAVE_TWO_REGS x19, xLR, 128               // Save return address.
    // Save all potentially live caller-save floating-point registers.
    stp    d0, d1, [sp, #144]
    stp    d2, d3, [sp, #160]
    stp    d4, d5, [sp, #176]
    stp    d6, d7, [sp, #192]
    stp    d16, d17, [sp, #208]
    stp    d18, d19, [sp, #224]
    stp    d20, d21, [sp, #240]
    stp    d22, d23, [sp, #256]
    stp    d24, d25, [sp, #272]
    stp    d26, d27, [sp, #288]
    stp    d28, d29, [sp, #304]
    stp    d30, d31, [sp, #320]

    mov    x0, xIP0
    bl     artReadBarrierMark                 // artReadBarrierMark(obj)
    mov    xIP0, x0

    // Restore core regs, except x0 and x1, as the return register switch case
    // address calculation is smoother with an extra register.
    RESTORE_TWO_REGS x2, x3, 16
    RESTORE_TWO_REGS x4, x5, 32
    RESTORE_TWO_REGS x6, x7, 48
    RESTORE_TWO_REGS x8, x9, 64
    RESTORE_TWO_REGS x10, x11, 80
    RESTORE_TWO_REGS x12, x13, 96
    RESTORE_TWO_REGS x14, x15, 112
    // Skip x16, x17, i.e. IP0, IP1, and x18, the platform register.
    RESTORE_TWO_REGS x19, xLR, 128            // Restore return address.
    // Restore caller-save floating-point registers.
    ldp    d0, d1, [sp, #144]
    ldp    d2, d3, [sp, #160]
    ldp    d4, d5, [sp, #176]
    ldp    d6, d7, [sp, #192]
    ldp    d16, d17, [sp, #208]
    ldp    d18, d19, [sp, #224]
    ldp    d20, d21, [sp, #240]
    ldp    d22, d23, [sp, #256]
    ldp    d24, d25, [sp, #272]
    ldp    d26, d27, [sp, #288]
    ldp    d28, d29, [sp, #304]
    ldp    d30, d31, [sp, #320]

    ldr    x0, [lr, #\ldr_offset]             // Load the instruction.
    adr    xIP1, .Lmark_introspection_return_switch
    bfi    xIP1, x0, #3, #5                   // Calculate switch case address.
    RESTORE_TWO_REGS_DECREASE_FRAME x0, x1, 336
    br     xIP1
.endm

    /*
     * Use introspection to load a reference from the same address as the LDR
     * instruction in generated code would load (unless loaded by the thunk,
     * see below), call ReadBarrier::Mark() with that reference if needed
     * and return it in the same register as the LDR instruction would load.
     *
     * The entrypoint is called through a thunk that differs across load kinds.
     * For field and array loads the LDR instruction in generated code follows
     * the branch to the thunk, i.e. the LDR is at [LR, #-4], and the thunk
     * knows the holder and performs the gray bit check, returning to the LDR
     * instruction if the object is not gray, so this entrypoint no longer
     * needs to know anything about the holder. For GC root loads, the LDR
     * instruction in generated code precedes the branch to the thunk (i.e.
     * the LDR is at [LR, #-8]) and the thunk does not do the gray bit check.
     *
     * For field accesses and array loads with a constant index the thunk loads
     * the reference into IP0 using introspection and calls the main entrypoint,
     * art_quick_read_barrier_mark_introspection. With heap poisoning enabled,
     * the passed reference is poisoned.
     *
     * For array accesses with non-constant index, the thunk inserts the bits
     * 16-21 of the LDR instruction to the entrypoint address, effectively
     * calculating a switch case label based on the index register (bits 16-20)
     * and adding an extra offset (bit 21 is set) to differentiate from the
     * main entrypoint, then moves the base register to IP0 and jumps to the
     * switch case. Therefore we need to align the main entrypoint to 512 bytes,
     * accounting for a 256-byte offset followed by 32 array entrypoints
     * starting at art_quick_read_barrier_mark_introspection_arrays, each
     * containing an LDR (register) and a branch to the main entrypoint.
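     * (Since bit 21 is always set in an LDR (register) encoding, the computed
     * target is in effect entrypoint_base + 256 + index_reg * 8.)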
     *
     * For GC root accesses we cannot use the main entrypoint because of the
     * different offset where the LDR instruction in generated code is located.
     * (And even with heap poisoning enabled, GC roots are not poisoned.)
     * To re-use the same entrypoint pointer in generated code, we make sure
     * that the gc root entrypoint (a copy of the entrypoint with a different
     * offset for introspection loads) is located at a known offset (768 bytes,
     * or BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET) from the main
     * entrypoint and the GC root thunk adjusts the entrypoint pointer, moves
     * the root register to IP0 and jumps to the customized entrypoint,
     * art_quick_read_barrier_mark_introspection_gc_roots. The thunk also
     * performs all the fast-path checks, so we need just the slow path.
     * The UnsafeCASObject intrinsic also uses the GC root entrypoint, with
     * MOV instead of LDR; the destination register is in the same bits.
     *
     * The code structure is
     *   art_quick_read_barrier_mark_introspection:
     *     Up to 256 bytes for the main entrypoint code.
     *     Padding to 256 bytes if needed.
     *   art_quick_read_barrier_mark_introspection_arrays:
     *     Exactly 256 bytes for array load switch cases (32x2 instructions).
     *   .Lmark_introspection_return_switch:
     *     Exactly 256 bytes for return switch cases (32x2 instructions).
     *   art_quick_read_barrier_mark_introspection_gc_roots:
     *     GC root entrypoint code.
     */
ENTRY_ALIGNED art_quick_read_barrier_mark_introspection, 512
    // At this point, IP0 contains the reference, IP1 can be freely used.
    // For heap poisoning, the reference is poisoned, so unpoison it first.
    UNPOISON_HEAP_REF wIP0
    // If the reference is null, just return it in the right register.
    cbz    wIP0, .Lmark_introspection_return
    // Use wIP1 as temp and check the mark bit of the reference.
    ldr    wIP1, [xIP0, #MIRROR_OBJECT_LOCK_WORD_OFFSET]
    tbz    wIP1, #LOCK_WORD_MARK_BIT_SHIFT, .Lmark_introspection_unmarked
.Lmark_introspection_return:
    // Without an extra register for the return switch case address calculation,
    // we exploit the high word of xIP0 to temporarily store ref_reg*8,
    // so the return switch below must move wIP0 instead of xIP0 to the register.
    ldr    wIP1, [lr, #BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET]  // Load the instruction.
    bfi    xIP0, xIP1, #(32 + 3), #5          // Extract ref_reg*8 to the high word of xIP0.
    adr    xIP1, .Lmark_introspection_return_switch
    bfxil  xIP1, xIP0, #32, #8                // Calculate return switch case address.
    br     xIP1
.Lmark_introspection_unmarked:
    // Check if the top two bits are one; if this is the case it is a forwarding address.
    tst    wIP1, wIP1, lsl #1
    bmi    .Lmark_introspection_forwarding_address
    READ_BARRIER_MARK_INTROSPECTION_SLOW_PATH BAKER_MARK_INTROSPECTION_FIELD_LDR_OFFSET

.Lmark_introspection_forwarding_address:
    // Shift left by the forwarding address shift. This clears out the state bits since they are
    // in the top 2 bits of the lock word.
    lsl    wIP0, wIP1, #LOCK_WORD_STATE_FORWARDING_ADDRESS_SHIFT
    b      .Lmark_introspection_return

    // We're very close to the allotted 256B for the entrypoint code before the
    // array switch cases. Should we go a little bit over the limit, we can
    // move some code after the array switch cases and return switch cases.
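
    // Resulting layout, relative to the 512-byte aligned main entrypoint:
    //   +0:   main entrypoint code (field and constant-index array loads).
    //   +256: art_quick_read_barrier_mark_introspection_arrays (32 cases x 8 bytes).
    //   +512: .Lmark_introspection_return_switch (32 cases x 8 bytes).
    //   +768: art_quick_read_barrier_mark_introspection_gc_roots
    //         (BAKER_MARK_INTROSPECTION_GC_ROOT_ENTRYPOINT_OFFSET).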
    .balign 256
    .hidden art_quick_read_barrier_mark_introspection_arrays
    .global art_quick_read_barrier_mark_introspection_arrays
art_quick_read_barrier_mark_introspection_arrays:
    FOR_XREGISTERS INTROSPECTION_ARRAY_LOAD, BRK0_BRK0
.Lmark_introspection_return_switch:
    FOR_WREGISTERS MOV_WIP0_TO_WREG_AND_BL_LR, BRK0_BRK0
    .hidden art_quick_read_barrier_mark_introspection_gc_roots
    .global art_quick_read_barrier_mark_introspection_gc_roots
art_quick_read_barrier_mark_introspection_gc_roots:
    READ_BARRIER_MARK_INTROSPECTION_SLOW_PATH BAKER_MARK_INTROSPECTION_GC_ROOT_LDR_OFFSET
END art_quick_read_barrier_mark_introspection

.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
    SETUP_SAVE_REFS_AND_ARGS_FRAME            // Save callee saves in case allocation triggers GC.
    mov    x0, x1                             // x0 := receiver
    mov    x1, xSELF                          // x1 := Thread::Current()
    mov    x2, sp                             // x2 := SP
    bl     artInvokePolymorphic               // artInvokePolymorphic(receiver, thread, save_area)
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    REFRESH_MARKING_REGISTER
    fmov   d0, x0                             // Result is in x0. Copy to floating return register.
    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_invoke_polymorphic

.extern artInvokeCustom
ENTRY art_quick_invoke_custom
    SETUP_SAVE_REFS_AND_ARGS_FRAME            // Save callee saves in case allocation triggers GC.
                                              // x0 := call_site_idx
    mov    x1, xSELF                          // x1 := Thread::Current()
    mov    x2, sp                             // x2 := SP
    bl     artInvokeCustom                    // artInvokeCustom(call_site_idx, thread, save_area)
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    REFRESH_MARKING_REGISTER
    fmov   d0, x0                             // Copy result to double result register.
    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_invoke_custom

// Wrap ExecuteSwitchImpl in an assembly method which specifies the DEX PC for unwinding.
// Argument 0: x0: The context pointer for ExecuteSwitchImpl.
// Argument 1: x1: Pointer to the templated ExecuteSwitchImpl to call.
// Argument 2: x2: The value of DEX PC (memory address of the method's bytecode).
ENTRY ExecuteSwitchImplAsm
    SAVE_TWO_REGS_INCREASE_FRAME x19, xLR, 16
    mov    x19, x2                            // x19 = DEX PC
    CFI_DEFINE_DEX_PC_WITH_OFFSET(0 /* x0 */, 19 /* x19 */, 0)
    blr    x1                                 // Call the wrapped method.
    RESTORE_TWO_REGS_DECREASE_FRAME x19, xLR, 16
    ret
END ExecuteSwitchImplAsm

// x0 contains the class, x8 contains the inline cache. x9-x15 can be used.
ENTRY art_quick_update_inline_cache
#if (INLINE_CACHE_SIZE != 5)
#error "INLINE_CACHE_SIZE not as expected."
#endif
#if defined(USE_READ_BARRIER) && defined(USE_BAKER_READ_BARRIER)
    // Don't update the cache if we are marking.
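    // (While marking is active, the class in x0 and a cached entry may be from-space
    // and to-space copies of the same class, so the comparisons below would be
    // unreliable; skipping the update is safe since the cache is only a heuristic.)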
    cbnz   wMR, .Ldone
#endif
.Lentry1:
    ldr    w9, [x8, #INLINE_CACHE_CLASSES_OFFSET]  // Load the first cached class.
    cmp    w9, w0
    beq    .Ldone                             // Class is already in the cache.
    cbnz   w9, .Lentry2                       // Slot taken by another class; try the next slot.
    // Empty slot: try to claim it with a ldxr/stxr loop.
    add    x10, x8, #INLINE_CACHE_CLASSES_OFFSET
    ldxr   w9, [x10]
    cbnz   w9, .Lentry1                       // The slot was filled concurrently; re-check it.
    stxr   w9, w0, [x10]
    cbz    w9, .Ldone                         // Exclusive store succeeded.
    b      .Lentry1                           // Exclusive store failed; retry.
.Lentry2:
    // Same check-and-claim sequence for the second slot.
    ldr    w9, [x8, #INLINE_CACHE_CLASSES_OFFSET+4]
    cmp    w9, w0
    beq    .Ldone
    cbnz   w9, .Lentry3
    add    x10, x8, #INLINE_CACHE_CLASSES_OFFSET+4
    ldxr   w9, [x10]
    cbnz   w9, .Lentry2
    stxr   w9, w0, [x10]
    cbz    w9, .Ldone
    b      .Lentry2
.Lentry3:
    // Same check-and-claim sequence for the third slot.
    ldr    w9, [x8, #INLINE_CACHE_CLASSES_OFFSET+8]
    cmp    w9, w0
    beq    .Ldone
    cbnz   w9, .Lentry4
    add    x10, x8, #INLINE_CACHE_CLASSES_OFFSET+8
    ldxr   w9, [x10]
    cbnz   w9, .Lentry3
    stxr   w9, w0, [x10]
    cbz    w9, .Ldone
    b      .Lentry3
.Lentry4:
    // Same check-and-claim sequence for the fourth slot.
    ldr    w9, [x8, #INLINE_CACHE_CLASSES_OFFSET+12]
    cmp    w9, w0
    beq    .Ldone
    cbnz   w9, .Lentry5
    add    x10, x8, #INLINE_CACHE_CLASSES_OFFSET+12
    ldxr   w9, [x10]
    cbnz   w9, .Lentry4
    stxr   w9, w0, [x10]
    cbz    w9, .Ldone
    b      .Lentry4
.Lentry5:
    // Unconditionally store; the inline cache is megamorphic.
    str    w0, [x8, #INLINE_CACHE_CLASSES_OFFSET+16]
.Ldone:
    ret
END art_quick_update_inline_cache

// On entry, the method is at the bottom of the stack.
ENTRY art_quick_compile_optimized
    SETUP_SAVE_EVERYTHING_FRAME
    ldr    x0, [sp, #FRAME_SIZE_SAVE_EVERYTHING]  // pass ArtMethod
    mov    x1, xSELF                          // pass Thread::Current
    bl     artCompileOptimized                // (ArtMethod*, Thread*)
    RESTORE_SAVE_EVERYTHING_FRAME
    // We don't need to restore the marking register here, as
    // artCompileOptimized doesn't allow thread suspension.
    ret
END art_quick_compile_optimized