%def header():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
  Art assembly interpreter notes:

  First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
  handle invoke; allows higher-level code to create the frame & shadow frame).

  Once that's working, support direct entry code & eliminate the shadow frame (and
  excess locals allocation).

  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
  base of the vreg array within the shadow frame.  Access the other fields,
  dex_pc_, method_ and number_of_vregs_, via negative offsets.  For now, we'll
  continue the shadow frame mechanism of double-storing object references - via
  rFP & number_of_vregs_.
 */

/*
x86 ABI general notes:

Caller save set:
   eax, edx, ecx, st(0)-st(7)
Callee save set:
   ebx, esi, edi, ebp
Return regs:
   32-bit in eax
   64-bit in edx:eax (low-order 32 in eax)
   fp on top of fp stack st(0)

Parameters passed on stack, pushed right-to-left.  On entry to target, first
parm is at 4(%esp).  Traditional entry code is:

functEntry:
    push    %ebp             # save old frame pointer
    mov     %esp,%ebp        # establish new frame pointer
    sub     FrameSize,%esp   # allocate storage for spills, locals & outs

Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp);
e.g. the first argument (argno 0) is at 8(%ebp).

Stack must be 16-byte aligned to support SSE in native code.

If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be esp relative.
*/

/*
Mterp and x86 notes:

Some key interpreter variables will be assigned to registers.

  nick     reg   purpose
  rPC      esi   interpreted program counter, used for fetching instructions
  rFP      edi   interpreted frame pointer, used for accessing locals and args
  rINSTw   bx    first 16-bit code of current instruction
  rINSTbl  bl    opcode portion of instruction word
  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
  rIBASE   edx   base of instruction handler table
  rREFS    ebp   base of object references in shadow frame

Notes:
   o High order 16 bits of ebx must be zero on entry to handler
   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
   o eax and ecx are scratch, rINSTw/ebx sometimes scratch

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
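
/*
 * Note: this file is a template consumed by the mterp generator; "${...}"
 * tokens (e.g. ${handler_size_bits} below) are substituted at generation
 * time, and "$$" escapes to a literal "$".  As an illustration (assuming
 * the usual expansion), MACRO_LITERAL(5) below yields the immediate
 * operand $5 ($(5) on Mac).
 */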
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"

#define LITERAL(value) $$(value)

/*
 * Handle mac compiler specific
 */
#if defined(__APPLE__)
    #define MACRO_LITERAL(value) $$(value)
    #define FUNCTION_TYPE(name)
    #define OBJECT_TYPE(name)
    #define SIZE(start,end)
    // Mac OS' symbols have an _ prefix.
    #define SYMBOL(name) _ ## name
    #define ASM_HIDDEN .private_extern
#else
    #define MACRO_LITERAL(value) $$value
    #define FUNCTION_TYPE(name) .type name, @function
    #define OBJECT_TYPE(name) .type name, @object
    #define SIZE(start,end) .size start, .-end
    #define SYMBOL(name) name
    #define ASM_HIDDEN .hidden
#endif

.macro PUSH _reg
    pushl   \_reg
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset \_reg, 0
.endm

.macro POP _reg
    popl    \_reg
    .cfi_adjust_cfa_offset -4
    .cfi_restore \_reg
.endm

/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.
 * So, to access other shadow frame fields, we need to use a backwards offset.  Define those
 * here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)

/* Frame size must be 16-byte aligned.
 * Remember: 4 bytes for the return address + 4 * 4 for spills.
 */
#define FRAME_SIZE 28

/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
#define IN_ARG3 (FRAME_SIZE + 16 + 16)
#define IN_ARG2 (FRAME_SIZE + 16 + 12)
#define IN_ARG1 (FRAME_SIZE + 16 +  8)
#define IN_ARG0 (FRAME_SIZE + 16 +  4)
/* Spill offsets relative to %esp */
#define LOCAL0 (FRAME_SIZE -  4)
#define LOCAL1 (FRAME_SIZE -  8)
#define LOCAL2 (FRAME_SIZE - 12)
/* Out Arg offsets, relative to %esp */
#define OUT_ARG3 ( 12)
#define OUT_ARG2 (  8)
#define OUT_ARG1 (  4)
#define OUT_ARG0 (  0)  /* <- ExecuteMterpImpl esp + 0 */
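
/*
 * Alignment check (illustrative, assuming the caller's stack was 16-byte
 * aligned at the call): 4 bytes of return address + 16 bytes of callee-save
 * pushes + FRAME_SIZE (28) = 48, a multiple of 16, so %esp is again 16-byte
 * aligned at OUT_ARG0.  The IN_ARG offsets encode the same arithmetic:
 * IN_ARG0 = FRAME_SIZE + 16 (spills) + 4 (return address).
 */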

/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rSELF    IN_ARG0(%esp)
#define rPC      %esi
#define CFI_DEX  6   // DWARF register number of the register holding dex-pc (esi).
#define CFI_TMP  0   // DWARF register number of the first argument register (eax).
#define rFP      %edi
#define rINST    %ebx
#define rINSTw   %bx
#define rINSTbh  %bh
#define rINSTbl  %bl
#define rIBASE   %edx
#define rREFS    %ebp
#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)

#define MTERP_LOGGING 0

/*
 * "Export" the PC to the dex_pc field in the shadow frame, for the benefit of
 * future exception objects.  Must be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
 * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array.  For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
.macro EXPORT_PC
    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm

/*
 * Refresh handler table.
 */
.macro REFRESH_IBASE
    movl    rSELF, rIBASE
    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm

/*
 * Refresh handler table.
 * rIBASE lives in a caller-save register, so we must restore it after each call.
 * It is also used as a result register for some 64-bit operations (such as imul),
 * and must be restored in those cases as well.
 *
 * TODO: Consider spilling the IBase instead of restoring it from Thread structure.
 */
.macro RESTORE_IBASE
    movl    rSELF, rIBASE
    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm

/*
 * If rSELF is already loaded then we can use it from known reg.
 */
.macro RESTORE_IBASE_FROM_SELF _reg
    movl    THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
.endm

/*
 * Refresh rINST.
 * On entry to a handler, rINST does not contain the opcode number.
 * However, some utilities require the full value, so this macro
 * restores the opcode number.
 */
.macro REFRESH_INST _opnum
    movb    rINSTbl, rINSTbh
    movb    MACRO_LITERAL(\_opnum), rINSTbl
.endm

/*
 * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
 */
.macro FETCH_INST
    movzwl  (rPC), rINST
.endm

/*
 * Remove opcode from rINST, compute the address of handler and jump to it.
 */
.macro GOTO_NEXT
    movzx   rINSTbl, %eax
    movzbl  rINSTbh, rINST
    shll    MACRO_LITERAL(${handler_size_bits}), %eax
    addl    rIBASE, %eax
    jmp     *%eax
.endm

/*
 * Advance rPC by instruction count.
 */
.macro ADVANCE_PC _count
    leal    2*\_count(rPC), rPC
.endm

/*
 * Advance rPC by instruction count, fetch instruction and jump to handler.
 */
.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
    ADVANCE_PC \_count
    FETCH_INST
    GOTO_NEXT
.endm

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)

.macro GET_VREG _reg _vreg
    movl    VREG_ADDRESS(\_vreg), \_reg
.endm

/* Read wide value to xmm. */
.macro GET_WIDE_FP_VREG _reg _vreg
    movq    VREG_ADDRESS(\_vreg), \_reg
.endm

.macro SET_VREG _reg _vreg
    movl    \_reg, VREG_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm
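
/*
 * Illustration (not generated code): with a vreg index in %ecx,
 *     GET_VREG %eax, %ecx    # %eax = vreg[%ecx], i.e. movl (rFP,%ecx,4), %eax
 *     SET_VREG %eax, %ecx    # vreg[%ecx] = %eax, and zero the matching
 *                            # reference slot at (rREFS,%ecx,4)
 */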

/* Write wide value from xmm.  xmm is clobbered. */
.macro SET_WIDE_FP_VREG _reg _vreg
    movq    \_reg, VREG_ADDRESS(\_vreg)
    pxor    \_reg, \_reg
    movq    \_reg, VREG_REF_ADDRESS(\_vreg)
.endm

.macro SET_VREG_OBJECT _reg _vreg
    movl    \_reg, VREG_ADDRESS(\_vreg)
    movl    \_reg, VREG_REF_ADDRESS(\_vreg)
.endm

.macro GET_VREG_HIGH _reg _vreg
    movl    VREG_HIGH_ADDRESS(\_vreg), \_reg
.endm

.macro SET_VREG_HIGH _reg _vreg
    movl    \_reg, VREG_HIGH_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm

.macro CLEAR_REF _vreg
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm

.macro CLEAR_WIDE_REF _vreg
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm

.macro GET_VREG_XMMs _xmmreg _vreg
    movss   VREG_ADDRESS(\_vreg), \_xmmreg
.endm
.macro GET_VREG_XMMd _xmmreg _vreg
    movsd   VREG_ADDRESS(\_vreg), \_xmmreg
.endm
.macro SET_VREG_XMMs _xmmreg _vreg
    movss   \_xmmreg, VREG_ADDRESS(\_vreg)
.endm
.macro SET_VREG_XMMd _xmmreg _vreg
    movsd   \_xmmreg, VREG_ADDRESS(\_vreg)
.endm

/*
 * function support macros.
 */
.macro ENTRY name
    .text
    ASM_HIDDEN SYMBOL(\name)
    .global SYMBOL(\name)
    FUNCTION_TYPE(\name)
SYMBOL(\name):
.endm

.macro END name
    SIZE(\name,\name)
.endm
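
/*
 * Illustration (ELF case): "ENTRY foo" / "END foo" expand to roughly
 *     .text
 *     .hidden foo
 *     .global foo
 *     .type foo, @function
 * foo:
 *     ...
 *     .size foo, .-foo
 */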

%def entry():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 *
 * On entry:
 *    0  Thread* self
 *    1  insns_
 *    2  ShadowFrame
 *    3  JValue* result_register
 *
 */
ENTRY ExecuteMterpImpl
    .cfi_startproc
    .cfi_def_cfa esp, 4

    /* Spill callee save regs */
    PUSH    %ebp
    PUSH    %edi
    PUSH    %esi
    PUSH    %ebx

    /* Allocate frame */
    subl    $$FRAME_SIZE, %esp
    .cfi_adjust_cfa_offset FRAME_SIZE

    /* Load ShadowFrame pointer */
    movl    IN_ARG2(%esp), %edx

    /* Remember the return register */
    movl    IN_ARG3(%esp), %eax
    movl    %eax, SHADOWFRAME_RESULT_REGISTER_OFFSET(%edx)

    /* Remember the code_item */
    movl    IN_ARG1(%esp), %ecx
    movl    %ecx, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(%edx)

    /* set up "named" registers */
    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(%edx), %eax
    leal    SHADOWFRAME_VREGS_OFFSET(%edx), rFP
    leal    (rFP, %eax, 4), rREFS
    movl    SHADOWFRAME_DEX_PC_OFFSET(%edx), %eax
    lea     (%ecx, %eax, 2), rPC
    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
    EXPORT_PC

    /* Set up for backwards branches & osr profiling */
    movl    OFF_FP_METHOD(rFP), %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rSELF, %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpSetUpHotnessCountdown)

    /* Starting ibase */
    REFRESH_IBASE

    /* start executing the instruction at rPC */
    FETCH_INST
    GOTO_NEXT
    /* NOTE: no fallthrough */
    // cfi info continues, and covers the whole mterp implementation.
    END ExecuteMterpImpl

%def dchecks_before_helper():
    // Call C++ to do debug checks and return to the handler using tail call.
    .extern MterpCheckBefore
    popl    %eax                     # Return address (the instruction handler).
    movl    rSELF, %ecx
    movl    %ecx, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rPC, OUT_ARG2(%esp)
    pushl   %eax                     # Return address for the tail call.
    jmp     SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)

%def opcode_pre():
%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
    #if !defined(NDEBUG)
    call    SYMBOL(mterp_dchecks_before_helper)
    REFRESH_IBASE
    #endif

%def fallback():
/* Transfer stub to alternate interpreter */
    jmp     MterpFallback


%def helpers():
    ENTRY MterpHelpers

%def footer():
/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */

    .text
    .align  2

/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
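/*
 * Each handler below follows the same pattern: export the PC, log the event
 * when MTERP_LOGGING is enabled, then transfer to the reference interpreter
 * through MterpCommonFallback.
 */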
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogDivideByZeroException)
#endif
    jmp     MterpCommonFallback

common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogArrayIndexException)
#endif
    jmp     MterpCommonFallback

common_errNegativeArraySize:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogNegativeArraySizeException)
#endif
    jmp     MterpCommonFallback

common_errNoSuchMethod:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogNoSuchMethodException)
#endif
    jmp     MterpCommonFallback

common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogNullObjectException)
#endif
    jmp     MterpCommonFallback

common_exceptionThrown:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogExceptionThrownException)
#endif
    jmp     MterpCommonFallback

MterpSuspendFallback:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    THREAD_FLAGS_OFFSET(%eax), %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpLogSuspendFallback)
#endif
    jmp     MterpCommonFallback

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    movl    rSELF, %eax
    testl   $$-1, THREAD_EXCEPTION_OFFSET(%eax)
    jz      MterpFallback
    /* intentional fallthrough - handle pending exception. */

/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here - or need to bail out to caller?
 *
 */
MterpException:
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpHandleException)
    testb   %al, %al
    jz      MterpExceptionReturn
    movl    OFF_FP_DEX_INSTRUCTIONS(rFP), %eax
    movl    OFF_FP_DEX_PC(rFP), %ecx
    lea     (%eax, %ecx, 2), rPC
    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
    /* Do we need to switch interpreters? */
    movl    rSELF, %eax
    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%eax)
    jz      MterpFallback
    /* resume execution at catch block */
    REFRESH_IBASE
    FETCH_INST
    GOTO_NEXT
    /* NOTE: no fallthrough */

/*
 * Common handling for branches with support for Jit profiling.
 * On entry:
 *    rINST          <= signed offset
 *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling active, do hotness countdown and report if we hit zero.
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active jit profiling,
 * but no full OSR check and no pending suspend request.
 * Next most common case is a not-taken branch with no full OSR check.
 *
 */
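/*
 * Worked example (illustrative): for a taken "if-eq vA, vB, +5", rINST holds
 * the signed offset 5 in 16-bit code units, so the taken path advances the PC
 * with
 *     leal    (rPC, rINST, 2), rPC    # rPC += 5 * 2 bytes
 */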
MterpCommonTakenBranch:
    jg      .L_forward_branch               # don't add forward branches to hotness
/*
 * We need to subtract 1 from positive values and we should not see 0 here,
 * so we may use the result of the comparison with -1.
 */
#if JIT_CHECK_OSR != -1
# error "JIT_CHECK_OSR must be -1."
#endif
    cmpw    $$JIT_CHECK_OSR, rPROFILE
    je      .L_osr_check
    decw    rPROFILE
    je      .L_add_batch                    # counted down to zero - report
.L_resume_backward_branch:
    movl    rSELF, %eax
    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%eax)
    leal    (rPC, rINST, 2), rPC
    FETCH_INST
    jnz     .L_suspend_request_pending
    REFRESH_IBASE
    GOTO_NEXT

.L_suspend_request_pending:
    EXPORT_PC
    movl    %eax, OUT_ARG0(%esp)            # rSELF in eax
    call    SYMBOL(MterpSuspendCheck)       # (self)
    testb   %al, %al
    jnz     MterpFallback
    REFRESH_IBASE                           # might have changed during suspend
    GOTO_NEXT

.L_no_count_backwards:
    cmpw    $$JIT_CHECK_OSR, rPROFILE       # possible OSR re-entry?
    jne     .L_resume_backward_branch
.L_osr_check:
    EXPORT_PC
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rINST, OUT_ARG2(%esp)
    call    SYMBOL(MterpMaybeDoOnStackReplacement)  # (self, shadow_frame, offset)
    testb   %al, %al
    jz      .L_resume_backward_branch
    jmp     MterpOnStackReplacement

.L_forward_branch:
    cmpw    $$JIT_CHECK_OSR, rPROFILE       # possible OSR re-entry?
    je      .L_check_osr_forward
.L_resume_forward_branch:
    leal    (rPC, rINST, 2), rPC
    FETCH_INST
    GOTO_NEXT

.L_check_osr_forward:
    EXPORT_PC
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rINST, OUT_ARG2(%esp)
    call    SYMBOL(MterpMaybeDoOnStackReplacement)  # (self, shadow_frame, offset)
    testb   %al, %al
    REFRESH_IBASE
    jz      .L_resume_forward_branch
    jmp     MterpOnStackReplacement

.L_add_batch:
    movl    OFF_FP_METHOD(rFP), %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rSELF, %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
    jmp     .L_no_count_backwards
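
/*
 * Note: rPROFILE is the hotness countdown cached in the shadow frame
 * (OFF_FP_COUNTDOWN_OFFSET).  The sentinel JIT_CHECK_OSR (-1) means the
 * countdown is inactive and branches should instead be checked as possible
 * OSR entry points, as in .L_osr_check and .L_check_osr_forward above.
 */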

/*
 * Entered from the conditional branch handlers when OSR check request active on
 * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
 */
.L_check_not_taken_osr:
    EXPORT_PC
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    $$2, OUT_ARG2(%esp)
    call    SYMBOL(MterpMaybeDoOnStackReplacement)  # (self, shadow_frame, offset)
    testb   %al, %al
    REFRESH_IBASE
    jnz     MterpOnStackReplacement
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rINST, OUT_ARG2(%esp)
    call    SYMBOL(MterpLogOSR)
#endif
    movl    $$1, %eax
    jmp     MterpDone

/*
 * Bail out to reference interpreter.
 */
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    movl    rSELF, %eax
    movl    %eax, OUT_ARG0(%esp)
    lea     OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    call    SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
    xor     %eax, %eax
    jmp     MterpDone

/*
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    movl    $$1, %eax
    jmp     MterpDone
MterpReturn:
    movl    OFF_FP_RESULT_REGISTER(rFP), %edx
    movl    %eax, (%edx)
    movl    %ecx, 4(%edx)
    mov     $$1, %eax
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    cmpw    $$0, rPROFILE
    jle     MRestoreFrame                   # if > 0, we may have some counts to report.

    movl    %eax, rINST                     # stash return value
    /* Report cached hotness counts */
    movl    OFF_FP_METHOD(rFP), %eax
    movl    %eax, OUT_ARG0(%esp)
    leal    OFF_FP_SHADOWFRAME(rFP), %ecx
    movl    %ecx, OUT_ARG1(%esp)
    movl    rSELF, %eax
    movl    %eax, OUT_ARG2(%esp)
    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
    movl    rINST, %eax                     # restore return value

    /* pop up frame */
MRestoreFrame:
    addl    $$FRAME_SIZE, %esp
    .cfi_adjust_cfa_offset -FRAME_SIZE

    /* Restore callee save register */
    POP     %ebx
    POP     %esi
    POP     %edi
    POP     %ebp
    ret
    .cfi_endproc
    END MterpHelpers

%def instruction_end():

    OBJECT_TYPE(artMterpAsmInstructionEnd)
    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
    .global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):

%def instruction_start():

    OBJECT_TYPE(artMterpAsmInstructionStart)
    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
    .global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
    .text

%def opcode_start():
    ENTRY mterp_${opcode}
%def opcode_end():
    END mterp_${opcode}
%def helper_start(name):
    ENTRY ${name}
%def helper_end(name):
    END ${name}