%def header():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
  Art assembly interpreter notes:

  First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
  handle invoke; allows higher-level code to create the frame & shadow frame).

  Once that's working, support direct entry code & eliminate the shadow frame (and
  excess locals allocation).

  Some (hopefully) temporary ugliness.  We'll treat xFP as pointing to the
  base of the vreg array within the shadow frame.  Access the other fields,
  dex_pc_, method_ and number_of_vregs_, via negative offsets.  For now, we'll
  continue the shadow frame mechanism of double-storing object references - via
  xFP & number_of_vregs_.

 */

/*
ARM64 Runtime register usage conventions.

  r0     : w0 is 32-bit return register and x0 is 64-bit.
  r0-r7  : Argument registers.
  r8-r15 : Caller save registers (used as temporary registers).
  r16-r17: Also known as ip0-ip1, respectively.  Used as scratch registers by
           the linker, by the trampolines and other stubs (the backend uses
           these as temporary registers).
  r18    : Caller save register (used as temporary register).
  r19    : Pointer to thread-local storage.
  r20-r29: Callee save registers.
  r30    : (lr) is reserved (the link register).
  rsp    : (sp) is reserved (the stack pointer).
  rzr    : (zr) is reserved (the zero register).

  Floating-point registers
  v0-v31

  v0     : s0 is return register for singles (32-bit) and d0 for doubles (64-bit).
           This is analogous to the C/C++ (hard-float) calling convention.
  v0-v7  : Floating-point argument registers in both Dalvik and C/C++ conventions.
           Also used as temporary and codegen scratch registers.

  v0-v7 and v16-v31 : trashed across C calls.
  v8-v15 : bottom 64-bits preserved across C calls (d8-d15 are preserved).

  v16-v31: Used as codegen temp/scratch.
  v8-v15 : Can be used for promotion.

  Must maintain 16-byte stack alignment.

Mterp notes:

The following registers have fixed assignments:

  reg  nick      purpose
  x20  xPC       interpreted program counter, used for fetching instructions
  x21  xFP       interpreted frame pointer, used for accessing locals and args
  x22  xSELF     self (Thread) pointer
  x23  xINST     first 16-bit code unit of current instruction
  x24  xIBASE    interpreted instruction base pointer, used for computed goto
  x25  xREFS     base of object references in shadow frame (ideally, we'll get rid of this later).
  x26  wPROFILE  jit profile hotness countdown
  x16  ip        scratch reg
  x17  ip2       scratch reg (used by macros)

Macros are provided for common operations.  They MUST NOT alter unspecified registers or condition
codes.
*/

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"

#define MTERP_PROFILE_BRANCHES 1
#define MTERP_LOGGING 0

/* During bringup, we'll use the shadow frame model instead of xFP */
/* single-purpose registers, given names for clarity */
#define xPC      x20
#define CFI_DEX  20   // DWARF register number of the register holding dex-pc (xPC).
#define CFI_TMP  0    // DWARF register number of the first argument register (r0).
#define xFP      x21
#define xSELF    x22
#define xINST    x23
#define wINST    w23
#define xIBASE   x24
#define xREFS    x25
#define wPROFILE w26
#define xPROFILE x26
#define ip       x16
#define ip2      x17

/*
 * Instead of holding a pointer to the shadow frame, we keep xFP at the base of the vregs.  So,
 * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
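
/*
 * Worked example of the scheme above: since the vreg array sits
 * SHADOWFRAME_VREGS_OFFSET bytes into the ShadowFrame, each OFF_FP_* constant
 * is negative, so e.g.
 *     ldr x0, [xFP, #OFF_FP_METHOD]
 * (used later in this file) reads the method_ field that lives below the
 * vreg base that xFP points at.
 */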

/*
 * "export" the PC to the dex_pc field in the shadow frame, for the benefit of
 * future exception objects.  Must be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
 * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array.  For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
.macro EXPORT_PC
    str  xPC, [xFP, #OFF_FP_DEX_PC_PTR]
.endm

/*
 * Fetch the next instruction from xPC into wINST.  Does not advance xPC.
 */
.macro FETCH_INST
    ldrh wINST, [xPC]
.endm

/*
 * Fetch the next instruction from the specified offset.  Advances xPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC.)
 */
.macro FETCH_ADVANCE_INST count
    ldrh wINST, [xPC, #((\count)*2)]!
.endm

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to xPC and xINST).
 */
.macro PREFETCH_ADVANCE_INST dreg, sreg, count
    ldrh \dreg, [\sreg, #((\count)*2)]!
.endm

/*
 * Similar to FETCH_ADVANCE_INST, but does not update xPC.  Used to load
 * xINST ahead of possible exception point.  Be sure to manually advance xPC
 * later.
 */
.macro PREFETCH_INST count
    ldrh wINST, [xPC, #((\count)*2)]
.endm

/* Advance xPC by some number of code units. */
.macro ADVANCE count
    add  xPC, xPC, #((\count)*2)
.endm

/*
 * Fetch the next instruction from an offset specified by _reg and advance xPC
 * to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.  Must not set flags.
 */
.macro FETCH_ADVANCE_INST_RB reg
    add  xPC, xPC, \reg, sxtw
    ldrh wINST, [xPC]
.endm

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance xPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
.macro FETCH reg, count
    ldrh \reg, [xPC, #((\count)*2)]
.endm

.macro FETCH_S reg, count
    ldrsh \reg, [xPC, #((\count)*2)]
.endm

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
.macro FETCH_B reg, count, byte
    ldrb \reg, [xPC, #((\count)*2+(\byte))]
.endm

/*
 * Put the instruction's opcode field into the specified register.
 */
.macro GET_INST_OPCODE reg
    and  \reg, xINST, #255
.endm

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
.macro GET_PREFETCHED_OPCODE oreg, ireg
    and  \oreg, \ireg, #255
.endm

/*
 * Begin executing the opcode in _reg.  Clobbers \reg.
 */
.macro GOTO_OPCODE reg
    add  \reg, xIBASE, \reg, lsl #${handler_size_bits}
    br   \reg
.endm
.macro GOTO_OPCODE_BASE base, reg
    add  \reg, \base, \reg, lsl #${handler_size_bits}
    br   \reg
.endm
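
/*
 * Illustration (the slot size here is an assumption for the example): each
 * opcode handler occupies a fixed power-of-two slot in the handler table, and
 * ${handler_size_bits} is log2 of that slot size.  If handlers were 128 bytes
 * (handler_size_bits == 7), dispatching opcode 0x1a would expand to roughly
 *     add ip, xIBASE, ip, lsl #7   // ip = xIBASE + 0x1a * 128
 *     br  ip
 */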

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
.macro GET_VREG reg, vreg
    ldr  \reg, [xFP, \vreg, uxtw #2]
.endm
.macro SET_VREG reg, vreg
    str  \reg, [xFP, \vreg, uxtw #2]
    str  wzr, [xREFS, \vreg, uxtw #2]
.endm
.macro SET_VREG_OBJECT reg, vreg, tmpreg
    str  \reg, [xFP, \vreg, uxtw #2]
    str  \reg, [xREFS, \vreg, uxtw #2]
.endm
.macro SET_VREG_FLOAT reg, vreg
    str  \reg, [xFP, \vreg, uxtw #2]
    str  wzr, [xREFS, \vreg, uxtw #2]
.endm

/*
 * Get/set the 64-bit value from a Dalvik register.
 */
.macro GET_VREG_WIDE reg, vreg
    add  ip2, xFP, \vreg, uxtw #2
    ldr  \reg, [ip2]
.endm
.macro SET_VREG_WIDE reg, vreg
    add  ip2, xFP, \vreg, uxtw #2
    str  \reg, [ip2]
    add  ip2, xREFS, \vreg, uxtw #2
    str  xzr, [ip2]
.endm
.macro GET_VREG_DOUBLE reg, vreg
    add  ip2, xFP, \vreg, uxtw #2
    ldr  \reg, [ip2]
.endm
.macro SET_VREG_DOUBLE reg, vreg
    add  ip2, xFP, \vreg, uxtw #2
    str  \reg, [ip2]
    add  ip2, xREFS, \vreg, uxtw #2
    str  xzr, [ip2]
.endm
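
/*
 * Note on the wide accessors above: vreg slots are 4 bytes, but a 64-bit
 * ldr/str with an extended register offset can only scale the index by 0 or
 * 3 bits, not the 2 bits we need.  Hence the address is materialized in ip2
 * with a separate add before the unscaled load/store.
 */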

/*
 * Get the 32-bit value from a Dalvik register and sign-extend to 64-bit.
 * Used to avoid an extra instruction in int-to-long.
 */
.macro GET_VREG_S reg, vreg
    ldrsw \reg, [xFP, \vreg, uxtw #2]
.endm

/*
 * Convert a virtual register index into an address.
 */
.macro VREG_INDEX_TO_ADDR reg, vreg
    add  \reg, xFP, \vreg, uxtw #2   /* WARNING: handle shadow frame vreg zero if store */
.endm

/*
 * Refresh handler table.
 */
.macro REFRESH_IBASE
    ldr  xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
.endm

/*
 * Save two registers to the stack.
 */
.macro SAVE_TWO_REGS reg1, reg2, offset
    stp \reg1, \reg2, [sp, #(\offset)]
    .cfi_rel_offset \reg1, (\offset)
    .cfi_rel_offset \reg2, (\offset) + 8
.endm

/*
 * Restore two registers from the stack.
 */
.macro RESTORE_TWO_REGS reg1, reg2, offset
    ldp \reg1, \reg2, [sp, #(\offset)]
    .cfi_restore \reg1
    .cfi_restore \reg2
.endm

/*
 * Increase frame size and save two registers to the bottom of the stack.
 */
.macro SAVE_TWO_REGS_INCREASE_FRAME reg1, reg2, frame_adjustment
    stp \reg1, \reg2, [sp, #-(\frame_adjustment)]!
    .cfi_adjust_cfa_offset (\frame_adjustment)
    .cfi_rel_offset \reg1, 0
    .cfi_rel_offset \reg2, 8
.endm

/*
 * Restore two registers from the bottom of the stack and decrease frame size.
 */
.macro RESTORE_TWO_REGS_DECREASE_FRAME reg1, reg2, frame_adjustment
    ldp \reg1, \reg2, [sp], #(\frame_adjustment)
    .cfi_restore \reg1
    .cfi_restore \reg2
    .cfi_adjust_cfa_offset -(\frame_adjustment)
.endm

/*
 * Function support macros.
 */
.macro ENTRY name
    .type \name, #function
    .hidden \name  // Hide this as a global symbol, so we do not incur plt calls.
    .global \name
    /* Cache alignment for function entry */
    .balign 16
\name:
.endm

.macro END name
    .size \name, .-\name
.endm

// Macro to unpoison (negate) the reference for heap poisoning.
.macro UNPOISON_HEAP_REF rRef
#ifdef USE_HEAP_POISONING
    neg \rRef, \rRef
#endif  // USE_HEAP_POISONING
.endm

%def entry():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

    .text

/*
 * Interpreter entry point.
 * On entry:
 *  x0  Thread* self
 *  x1  insns_
 *  x2  ShadowFrame
 *  x3  JValue* result_register
 *
 */
ENTRY ExecuteMterpImpl
    .cfi_startproc
    SAVE_TWO_REGS_INCREASE_FRAME xPROFILE, x27, 80
    SAVE_TWO_REGS                xIBASE, xREFS, 16
    SAVE_TWO_REGS                xSELF, xINST, 32
    SAVE_TWO_REGS                xPC, xFP, 48
    SAVE_TWO_REGS                fp, lr, 64
    add     fp, sp, #64
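
    /*
     * Resulting 80-byte frame (derived from the saves above):
     *   sp+0  xPROFILE/x27,  sp+16 xIBASE/xREFS,  sp+32 xSELF/xINST,
     *   sp+48 xPC/xFP,       sp+64 fp/lr  (fp now points at this last pair).
     */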

    /* Remember the return register */
    str     x3, [x2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]

    /* Remember the dex instruction pointer */
    str     x1, [x2, #SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET]

    /* set up "named" registers */
    mov     xSELF, x0
    ldr     w0, [x2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
    add     xFP, x2, #SHADOWFRAME_VREGS_OFFSET    // point to vregs.
    add     xREFS, xFP, w0, uxtw #2               // point to reference array in shadow frame
    ldr     w0, [x2, #SHADOWFRAME_DEX_PC_OFFSET]  // Get starting dex_pc.
    add     xPC, x1, w0, uxtw #1                  // Create direct pointer to 1st dex opcode
    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
    EXPORT_PC

    /* Starting ibase */
    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]

    /* Set up for backwards branches & osr profiling */
    ldr     x0, [xFP, #OFF_FP_METHOD]
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    mov     x2, xSELF
    bl      MterpSetUpHotnessCountdown
    mov     wPROFILE, w0                // Starting hotness countdown to wPROFILE

    /* start executing the instruction at rPC */
    FETCH_INST                          // load wINST from rPC
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction
    /* NOTE: no fallthrough */
    // cfi info continues, and covers the whole mterp implementation.
    END ExecuteMterpImpl

%def dchecks_before_helper():
    // Call C++ to do debug checks and return to the handler using tail call.
    .extern MterpCheckBefore
    mov    x0, xSELF
    add    x1, xFP, #OFF_FP_SHADOWFRAME
    mov    x2, xPC
    b      MterpCheckBefore    // (self, shadow_frame, dex_pc_ptr)  Note: tail call.

%def opcode_pre():
%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
    #if !defined(NDEBUG)
    bl     mterp_dchecks_before_helper
    #endif

%def footer():
    .cfi_endproc
    END MterpHelpers

%def fallback():
/* Transfer stub to alternate interpreter */
    b    MterpFallback

%def helpers():
    ENTRY MterpHelpers
/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */

/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogDivideByZeroException
#endif
    b       MterpCommonFallback

common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogArrayIndexException
#endif
    b       MterpCommonFallback

common_errNegativeArraySize:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogNegativeArraySizeException
#endif
    b       MterpCommonFallback

common_errNoSuchMethod:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogNoSuchMethodException
#endif
    b       MterpCommonFallback

common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogNullObjectException
#endif
    b       MterpCommonFallback

common_exceptionThrown:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogExceptionThrownException
#endif
    b       MterpCommonFallback

MterpSuspendFallback:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    ldr     x2, [xSELF, #THREAD_FLAGS_OFFSET]
    bl      MterpLogSuspendFallback
#endif
    b       MterpCommonFallback
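
/*
 * Note on the stubs above: each one exports the PC first so that the pending
 * failure is attributed to the correct dex instruction once the reference
 * interpreter takes over; the MterpLog* calls are compiled in only when
 * MTERP_LOGGING is enabled.
 */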

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    ldr     x0, [xSELF, #THREAD_EXCEPTION_OFFSET]
    cbz     x0, MterpFallback           // If not, fall back to reference interpreter.
    /* intentional fallthrough - handle pending exception. */
/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
 *
 */
MterpException:
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpHandleException        // (self, shadow_frame)
    cbz     w0, MterpExceptionReturn    // no local catch, back to caller.
    ldr     x0, [xFP, #OFF_FP_DEX_INSTRUCTIONS]
    ldr     w1, [xFP, #OFF_FP_DEX_PC]
    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]
    add     xPC, x0, x1, lsl #1         // generate new dex_pc_ptr
    /* Do we need to switch interpreters? */
    ldr     w0, [xSELF, #THREAD_USE_MTERP_OFFSET]
    cbz     w0, MterpFallback
    /* resume execution at catch block */
    EXPORT_PC
    FETCH_INST
    GET_INST_OPCODE ip
    GOTO_OPCODE ip
    /* NOTE: no fallthrough */
/*
 * Common handling for branches with support for Jit profiling.
 * On entry:
 *    wINST          <= signed offset
 *    wPROFILE       <= signed hotness countdown (expanded to 32 bits)
 *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling active, do hotness countdown and report if we hit zero.
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active jit profiling,
 * but no full OSR check and no pending suspend request.
 * Next most common case is not-taken branch with no full OSR check.
 *
 */
MterpCommonTakenBranchNoFlags:
    cmp     wINST, #0
    b.gt    .L_forward_branch           // don't add forward branches to hotness
    tbnz    wPROFILE, #31, .L_no_count_backwards  // go if negative
    subs    wPROFILE, wPROFILE, #1      // countdown
    b.eq    .L_add_batch                // counted down to zero - report
.L_resume_backward_branch:
    ldr     lr, [xSELF, #THREAD_FLAGS_OFFSET]
    add     w2, wINST, wINST            // w2<- byte offset
    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
    REFRESH_IBASE
    ands    lr, lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
    b.ne    .L_suspend_request_pending
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction

.L_suspend_request_pending:
    EXPORT_PC
    mov     x0, xSELF
    bl      MterpSuspendCheck           // (self)
    cbnz    x0, MterpFallback
    REFRESH_IBASE                       // might have changed during suspend
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction

.L_no_count_backwards:
    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
    b.ne    .L_resume_backward_branch
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    mov     x2, xINST
    EXPORT_PC
    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
    cbnz    x0, MterpOnStackReplacement
    b       .L_resume_backward_branch
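
/*
 * Note: JIT_CHECK_OSR is a reserved (negative) countdown value meaning the
 * JIT has asked us to probe each branch target for a compiled OSR entry
 * point; both the backward path above and the forward path below compare
 * wPROFILE against it.
 */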

.L_forward_branch:
    cmp     wPROFILE, #JIT_CHECK_OSR    // possible OSR re-entry?
    b.eq    .L_check_osr_forward
.L_resume_forward_branch:
    add     w2, wINST, wINST            // w2<- byte offset
    FETCH_ADVANCE_INST_RB w2            // update rPC, load wINST
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction

.L_check_osr_forward:
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    mov     x2, xINST
    EXPORT_PC
    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
    cbnz    x0, MterpOnStackReplacement
    b       .L_resume_forward_branch

.L_add_batch:
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
    ldr     x0, [xFP, #OFF_FP_METHOD]
    mov     x2, xSELF
    bl      MterpAddHotnessBatch        // (method, shadow_frame, self)
    mov     wPROFILE, w0                // restore new hotness countdown to wPROFILE
    b       .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when OSR check request active on
 * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
 */
.L_check_not_taken_osr:
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    mov     x2, #2
    EXPORT_PC
    bl      MterpMaybeDoOnStackReplacement  // (self, shadow_frame, offset)
    cbnz    x0, MterpOnStackReplacement
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction

/*
 * Check for suspend check request.  Assumes wINST already loaded, xPC advanced and
 * still needs to get the opcode and branch to it, and flags are in w7.
 */
MterpCheckSuspendAndContinue:
    ldr     xIBASE, [xSELF, #THREAD_CURRENT_IBASE_OFFSET]  // refresh xIBASE
    ands    w7, w7, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
    b.ne    check1
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction
check1:
    EXPORT_PC
    mov     x0, xSELF
    bl      MterpSuspendCheck           // (self)
    cbnz    x0, MterpFallback           // Something in the environment changed, switch interpreters
    GET_INST_OPCODE ip                  // extract opcode from wINST
    GOTO_OPCODE ip                      // jump to next instruction

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    sxtw    x2, wINST
    bl      MterpLogOSR
#endif
    mov     x0, #1                      // Signal normal return
    b       MterpDone

/*
 * Bail out to reference interpreter.
 */
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    mov     x0, xSELF
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    bl      MterpLogFallback
#endif
MterpCommonFallback:
    mov     x0, #0                      // signal retry with reference interpreter.
    b       MterpDone

/*
 * We pushed some registers on the stack in ExecuteMterpImpl, then saved
 * SP and LR.  Here we restore the registers and return to the caller via
 * the restored lr.
 *
 * On entry:
 *  uint32_t* xFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    mov     x0, #1                      // signal return to caller.
    b       MterpDone
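
/*
 * MterpReturn: the method's result was left in x0 by the returning handler;
 * store it through the JValue* that the caller passed in x3 on entry
 * (stashed in the shadow frame's result_register field).
 */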
MterpReturn:
    ldr     x2, [xFP, #OFF_FP_RESULT_REGISTER]
    str     x0, [x2]
    mov     x0, #1                      // signal return to caller.
MterpDone:
/*
 * At this point, we expect wPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending wPROFILE and the cached hotness counter).  wPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    cmp     wPROFILE, #0
    b.gt    MterpProfileActive          // if > 0, we may have some counts to report.
    .cfi_remember_state
    RESTORE_TWO_REGS                fp, lr, 64
    RESTORE_TWO_REGS                xPC, xFP, 48
    RESTORE_TWO_REGS                xSELF, xINST, 32
    RESTORE_TWO_REGS                xIBASE, xREFS, 16
    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
    ret
    .cfi_restore_state                  // Reset unwind info so following code unwinds.
    .cfi_def_cfa_offset 80              // workaround for clang bug: 31975598

MterpProfileActive:
    mov     xINST, x0                   // stash return value
    /* Report cached hotness counts */
    ldr     x0, [xFP, #OFF_FP_METHOD]
    add     x1, xFP, #OFF_FP_SHADOWFRAME
    mov     x2, xSELF
    strh    wPROFILE, [x1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
    bl      MterpAddHotnessBatch        // (method, shadow_frame, self)
    mov     x0, xINST                   // restore return value
    RESTORE_TWO_REGS                fp, lr, 64
    RESTORE_TWO_REGS                xPC, xFP, 48
    RESTORE_TWO_REGS                xSELF, xINST, 32
    RESTORE_TWO_REGS                xIBASE, xREFS, 16
    RESTORE_TWO_REGS_DECREASE_FRAME xPROFILE, x27, 80
    ret


%def instruction_end():

    .type artMterpAsmInstructionEnd, #object
    .hidden artMterpAsmInstructionEnd
    .global artMterpAsmInstructionEnd
artMterpAsmInstructionEnd:

%def instruction_start():

    .type artMterpAsmInstructionStart, #object
    .hidden artMterpAsmInstructionStart
    .global artMterpAsmInstructionStart
artMterpAsmInstructionStart = .L_op_nop
    .text

%def default_helper_prefix():
%  return "mterp_"

%def opcode_start():
    ENTRY mterp_${opcode}
%def opcode_end():
    END mterp_${opcode}
%def helper_start(name):
    ENTRY ${name}
%def helper_end(name):
    END ${name}
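
/*
 * Illustration of the defs above (hypothetical expansion): for the nop opcode
 * the generator would bracket the handler as
 *     ENTRY mterp_op_nop
 *     ...handler body...
 *     END mterp_op_nop
 * while artMterpAsmInstructionStart is aliased to the first handler's
 * .L_op_nop label, per instruction_start() above.
 */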