%def header():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
  Art assembly interpreter notes:

  First validate assembly code by implementing an ExecuteXXXImpl() style body (doesn't
  handle invoke; allows higher-level code to create the frame & shadow frame).

  Once that's working, support direct entry code & eliminate the shadow frame (and
  excess locals allocation).

  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
  base of the vreg array within the shadow frame.  Access the other fields,
  dex_pc_, method_ and number_of_vregs_, via negative offsets.  For now, we'll continue
  the shadow frame mechanism of double-storing object references - via rFP &
  number_of_vregs_.

 */

/*
x86_64 ABI general notes:

Caller save set:
   rax, rdx, rcx, rsi, rdi, r8-r11, st(0)-st(7)
Callee save set:
   rbx, rbp, r12-r15
Return regs:
   32-bit in eax
   64-bit in rax
   fp on xmm0

First 8 fp parameters come in xmm0-xmm7.
First 6 non-fp parameters come in rdi, rsi, rdx, rcx, r8, r9.
Other parameters are passed on the stack, pushed right-to-left.  On entry to
the target, the first stack param is at 8(%rsp).

Stack must be 16-byte aligned to support SSE in native code.

If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be rsp relative.
*/
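
/*
 * Illustrative mapping (comment only; the function f, its arguments, and the
 * .Lone constant label are hypothetical): for  int f(int a, double x, int b) ,
 * the arguments and return value land in registers as follows under this ABI:
 *
 *    movl    $$1, %edi            # a: first non-fp arg -> rdi (32-bit in edi)
 *    movsd   .Lone(%rip), %xmm0   # x: first fp arg -> xmm0
 *    movl    $$2, %esi            # b: second non-fp arg -> rsi
 *    call    f                    # 32-bit int result returns in eax
 */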

/*
Mterp and x86_64 notes:

Some key interpreter variables will be assigned to registers.

  nick     reg   purpose
  rPROFILE rbp   countdown register for jit profiling
  rPC      r12   interpreted program counter, used for fetching instructions
  rFP      r13   interpreted frame pointer, used for accessing locals and args
  rINSTw   bx    first 16-bit code unit of current instruction
  rINSTbl  bl    opcode portion of instruction word
  rINSTbh  bh    high byte of inst word, usually contains src/tgt reg names
  rIBASE   r14   base of instruction handler table
  rREFS    r15   base of object references in shadow frame.

Notes:
   o High order 16 bits of ebx must be zero on entry to handler
   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
   o eax and ecx are scratch, rINSTw/ebx sometimes scratch

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"

#define LITERAL(value) $$(value)

/*
 * Handle Mac compiler specifics.
 */
#if defined(__APPLE__)
    #define MACRO_LITERAL(value) $$(value)
    #define FUNCTION_TYPE(name)
    #define OBJECT_TYPE(name)
    #define SIZE(start,end)
    // Mac OS' symbols have an _ prefix.
    #define SYMBOL(name) _ ## name
    #define ASM_HIDDEN .private_extern
#else
    #define MACRO_LITERAL(value) $$value
    #define FUNCTION_TYPE(name) .type name, @function
    #define OBJECT_TYPE(name) .type name, @object
    #define SIZE(start,end) .size start, .-end
    #define SYMBOL(name) name
    #define ASM_HIDDEN .hidden
#endif
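
/*
 * Illustrative expansion (comment only, hypothetical symbol Foo):
 *
 *   ELF:                             Mach-O (__APPLE__):
 *     .type Foo, @function             (FUNCTION_TYPE expands to nothing)
 *     .hidden Foo                      .private_extern _Foo
 *     Foo:                             _Foo:
 */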

/* Push a callee-save register and record the stack adjustment in CFI. */
.macro PUSH _reg
    pushq \_reg
    .cfi_adjust_cfa_offset 8
    .cfi_rel_offset \_reg, 0
.endm

/* Pop a register and undo the matching CFI bookkeeping. */
.macro POP _reg
    popq \_reg
    .cfi_adjust_cfa_offset -8
    .cfi_restore \_reg
.endm
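
/*
 * Illustrative expansion (comment only): PUSH %rbx emits
 *
 *    pushq   %rbx
 *    .cfi_adjust_cfa_offset 8     # CFA is now 8 bytes further from %rsp
 *    .cfi_rel_offset %rbx, 0      # caller's %rbx is saved at 0(%rsp)
 */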

/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
 * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME (-SHADOWFRAME_VREGS_OFFSET)
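
/*
 * Worked example (comment only, hypothetical offsets): if
 * SHADOWFRAME_VREGS_OFFSET were 48 and SHADOWFRAME_DEX_PC_OFFSET were 24,
 * OFF_FP_DEX_PC would resolve to (24 - 48) = -24, and the field would be read
 * as  movl -24(rFP), %eax .  The real values come from asm_support.h.
 */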

/* Frame size must be 16-byte aligned.
 * Remember to account for 8 bytes of return address + 6 * 8 bytes of spills.
 */
#define FRAME_SIZE     8
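
/*
 * Alignment check (comment only): the call pushes an 8-byte return address,
 * the prologue pushes 6 callee-save registers (6 * 8 = 48 bytes), and the
 * frame adds FRAME_SIZE (8) more.  8 + 48 + 8 = 64, a multiple of 16, so the
 * stack stays 16-byte aligned for any native calls made from the interpreter.
 */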

/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
#define IN_ARG3        %rcx
#define IN_ARG2        %rdx
#define IN_ARG1        %rsi
#define IN_ARG0        %rdi
/* Spill offsets relative to %rsp */
#define SELF_SPILL     (FRAME_SIZE -  8)
/* Out Args  */
#define OUT_ARG3       %rcx
#define OUT_ARG2       %rdx
#define OUT_ARG1       %rsi
#define OUT_ARG0       %rdi
#define OUT_32_ARG3    %ecx
#define OUT_32_ARG2    %edx
#define OUT_32_ARG1    %esi
#define OUT_32_ARG0    %edi
#define OUT_FP_ARG1    %xmm1
#define OUT_FP_ARG0    %xmm0

/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rSELF    SELF_SPILL(%rsp)
#define rPC      %r12
#define CFI_DEX  12 // DWARF register number of the register holding dex-pc (rPC).
#define CFI_TMP  5  // DWARF register number of the first argument register (rdi).
#define rFP      %r13
#define rINST    %ebx
#define rINSTq   %rbx
#define rINSTw   %bx
#define rINSTbh  %bh
#define rINSTbl  %bl
#define rIBASE   %r14
#define rREFS    %r15
#define rPROFILE %ebp

#define MTERP_LOGGING 0

/*
 * "export" the PC to the dex_pc field in the shadow frame, for the benefit of
 * future exception objects.  Must be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
 * dex byte codes.  However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array.  For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
.macro EXPORT_PC
    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm

/*
 * Refresh the handler table base (rIBASE).
 * rIBASE may be clobbered across calls, so we must restore it after each call.
 * It is also used as a result register by some 64-bit operations (like imul),
 * so we should restore it in those cases as well.
 */
.macro REFRESH_IBASE_REG self_reg
    movq    THREAD_CURRENT_IBASE_OFFSET(\self_reg), rIBASE
.endm
.macro REFRESH_IBASE
    movq    rSELF, rIBASE
    REFRESH_IBASE_REG rIBASE
.endm

/*
 * Refresh rINST.
 * On entry to a handler, rINST does not contain the opcode number.
 * However, some utilities require the full value, so this macro
 * restores the opcode number.
 */
.macro REFRESH_INST _opnum
    movb    rINSTbl, rINSTbh
    movb    $$\_opnum, rINSTbl
.endm
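
/*
 * Worked example (comment only): suppose the current code unit was 0x1234
 * (opcode 0x34, operand byte 0x12).  After GOTO_NEXT (below) dispatched it,
 * ebx holds 0x00000012.  REFRESH_INST 0x34 copies bl into bh and reloads bl
 * with the opcode, so bx once again holds the full code unit 0x1234.
 */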

/*
 * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
 */
.macro FETCH_INST
    movzwq  (rPC), rINSTq
.endm

/*
 * Remove opcode from rINST, compute the address of handler and jump to it.
 */
.macro GOTO_NEXT
    movzx   rINSTbl, %eax                   # eax <- opcode number
    movzbl  rINSTbh, rINST                  # rINST <- operand byte, upper bits clear
    shll    MACRO_LITERAL(${handler_size_bits}), %eax   # eax <- opcode * handler size
    addq    rIBASE, %rax                    # rax <- handler entry point
    jmp     *%rax
.endm
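
/*
 * Worked dispatch example (comment only, assuming 128-byte handlers, i.e.
 * handler_size_bits == 7): for code unit 0x0190 (add-int), eax <- 0x90,
 * the shift gives 0x90 << 7 = 0x4800, and the jump target is rIBASE + 0x4800.
 */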

/*
 * Advance rPC by instruction count (each count is one 16-bit code unit).
 */
.macro ADVANCE_PC _count
    leaq    2*\_count(rPC), rPC
.endm

/*
 * Advance rPC by instruction count, fetch instruction and jump to handler.
 */
.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
    ADVANCE_PC \_count
    FETCH_INST
    GOTO_NEXT
.endm
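
/*
 * Illustrative expansion (comment only): ADVANCE_PC_FETCH_AND_GOTO_NEXT 2,
 * as used by two-code-unit opcodes, becomes
 *
 *    leaq    4(rPC), rPC          # skip two 16-bit code units
 *    movzwq  (rPC), rINSTq        # fetch the next code unit
 *    ...                          # GOTO_NEXT dispatch sequence
 */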

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)
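
/*
 * Worked example (comment only): with the vreg number 5 in %rcx,
 * VREG_ADDRESS(%rcx) is the operand (rFP,%rcx,4), i.e. rFP + 5*4 = rFP + 20,
 * and VREG_REF_ADDRESS(%rcx) is the matching slot in the reference array,
 * rREFS + 20.
 */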

.macro GET_VREG _reg _vreg
    movl    VREG_ADDRESS(\_vreg), \_reg
.endm

/* Read wide value. */
.macro GET_WIDE_VREG _reg _vreg
    movq    VREG_ADDRESS(\_vreg), \_reg
.endm

/* Write a 32-bit value and clear the corresponding reference slot. */
.macro SET_VREG _reg _vreg
    movl    \_reg, VREG_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm

/* Write wide value. reg is clobbered. */
.macro SET_WIDE_VREG _reg _vreg
    movq    \_reg, VREG_ADDRESS(\_vreg)
    xorq    \_reg, \_reg
    movq    \_reg, VREG_REF_ADDRESS(\_vreg)
.endm

/* Write an object reference to both the vreg and the reference array. */
.macro SET_VREG_OBJECT _reg _vreg
    movl    \_reg, VREG_ADDRESS(\_vreg)
    movl    \_reg, VREG_REF_ADDRESS(\_vreg)
.endm

.macro GET_VREG_HIGH _reg _vreg
    movl    VREG_HIGH_ADDRESS(\_vreg), \_reg
.endm

.macro SET_VREG_HIGH _reg _vreg
    movl    \_reg, VREG_HIGH_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm

.macro CLEAR_REF _vreg
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
.endm

.macro CLEAR_WIDE_REF _vreg
    movl    MACRO_LITERAL(0), VREG_REF_ADDRESS(\_vreg)
    movl    MACRO_LITERAL(0), VREG_REF_HIGH_ADDRESS(\_vreg)
.endm
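
/*
 * Usage sketch (comment only; the handler shown is hypothetical): a simple
 * 32-bit move between vregs, with the source vreg number in %rcx and the
 * destination in %rdx, would be
 *
 *    GET_VREG %eax, %rcx          # eax <- source vreg
 *    SET_VREG %eax, %rdx          # dest vreg <- eax; its reference slot cleared
 *
 * SET_VREG zeroes the rREFS slot because the value is a primitive; object
 * references instead go through SET_VREG_OBJECT, which double-stores the
 * value so the GC sees the reference.
 */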

.macro GET_VREG_XMMs _xmmreg _vreg
    movss VREG_ADDRESS(\_vreg), \_xmmreg
.endm
.macro GET_VREG_XMMd _xmmreg _vreg
    movsd VREG_ADDRESS(\_vreg), \_xmmreg
.endm
.macro SET_VREG_XMMs _xmmreg _vreg
    movss \_xmmreg, VREG_ADDRESS(\_vreg)
.endm
.macro SET_VREG_XMMd _xmmreg _vreg
    movsd \_xmmreg, VREG_ADDRESS(\_vreg)
.endm

/*
 * function support macros.
 */
.macro ENTRY name
    .text
    ASM_HIDDEN SYMBOL(\name)
    .global SYMBOL(\name)
    FUNCTION_TYPE(\name)
SYMBOL(\name):
.endm

.macro END name
    SIZE(\name,\name)
.endm
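
/*
 * Illustrative expansion (comment only, ELF target, hypothetical name Foo):
 *
 *    ENTRY Foo    ->    .text
 *                       .hidden Foo
 *                       .global Foo
 *                       .type Foo, @function
 *                     Foo:
 *    END Foo      ->    .size Foo, .-Foo
 */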

%def entry():
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 *
 * On entry (standard calling convention, see the ABI notes above):
 *  arg0 (rdi)  Thread* self
 *  arg1 (rsi)  insns_ (pointer to the dex instruction array)
 *  arg2 (rdx)  ShadowFrame*
 *  arg3 (rcx)  JValue* result_register
 *
 */

ENTRY ExecuteMterpImpl
    .cfi_startproc
    .cfi_def_cfa rsp, 8

    /* Spill callee save regs */
    PUSH %rbx
    PUSH %rbp
    PUSH %r12
    PUSH %r13
    PUSH %r14
    PUSH %r15

    /* Allocate frame */
    subq    $$FRAME_SIZE, %rsp
    .cfi_adjust_cfa_offset FRAME_SIZE

    /* Remember the return register */
    movq    IN_ARG3, SHADOWFRAME_RESULT_REGISTER_OFFSET(IN_ARG2)

    /* Remember the code_item */
    movq    IN_ARG1, SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET(IN_ARG2)

    /* set up "named" registers */
    movl    SHADOWFRAME_NUMBER_OF_VREGS_OFFSET(IN_ARG2), %eax
    leaq    SHADOWFRAME_VREGS_OFFSET(IN_ARG2), rFP
    leaq    (rFP, %rax, 4), rREFS
    movl    SHADOWFRAME_DEX_PC_OFFSET(IN_ARG2), %eax
    leaq    (IN_ARG1, %rax, 2), rPC
    CFI_DEFINE_DEX_PC_WITH_OFFSET(CFI_TMP, CFI_DEX, 0)
    EXPORT_PC

    /* Starting ibase */
    movq    IN_ARG0, rSELF
    REFRESH_IBASE_REG IN_ARG0

    /* Set up for backwards branches & osr profiling */
    movq    IN_ARG0, OUT_ARG2  /* Set up OUT_ARG2 before clobbering IN_ARG0 */
    movq    OFF_FP_METHOD(rFP), OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpSetUpHotnessCountdown)
    movswl  %ax, rPROFILE

    /* start executing the instruction at rPC */
    FETCH_INST
    GOTO_NEXT
    /* NOTE: no fallthrough */
    // cfi info continues, and covers the whole mterp implementation.
    END ExecuteMterpImpl

%def dchecks_before_helper():
    // Call C++ to do debug checks and return to the handler using tail call.
    .extern MterpCheckBefore
    popq    %rax                     # Return address (the instruction handler).
    REFRESH_IBASE
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movq    rPC, OUT_ARG2
    pushq   %rax                     # Return address for the tail call.
    jmp     SYMBOL(MterpCheckBefore) # (self, shadow_frame, dex_pc_ptr)

%def opcode_pre():
%  add_helper(dchecks_before_helper, "mterp_dchecks_before_helper")
    #if !defined(NDEBUG)
    call    SYMBOL(mterp_dchecks_before_helper)
    #endif

%def fallback():
/* Transfer stub to alternate interpreter */
    jmp     MterpFallback

%def helpers():
    ENTRY MterpHelpers

%def footer():
/*
 * ===========================================================================
 *  Common subroutines and data
 * ===========================================================================
 */

    .text
    .align  2

/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogDivideByZeroException)
#endif
    jmp     MterpCommonFallback

common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogArrayIndexException)
#endif
    jmp     MterpCommonFallback

common_errNegativeArraySize:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogNegativeArraySizeException)
#endif
    jmp     MterpCommonFallback

common_errNoSuchMethod:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogNoSuchMethodException)
#endif
    jmp     MterpCommonFallback

common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogNullObjectException)
#endif
    jmp     MterpCommonFallback

common_exceptionThrown:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogExceptionThrownException)
#endif
    jmp     MterpCommonFallback

MterpSuspendFallback:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movl    THREAD_FLAGS_OFFSET(OUT_ARG0), OUT_32_ARG2
    call    SYMBOL(MterpLogSuspendFallback)
#endif
    jmp     MterpCommonFallback

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    movq    rSELF, %rcx
    cmpq    $$0, THREAD_EXCEPTION_OFFSET(%rcx)
    jz      MterpFallback
    /* intentional fallthrough - handle pending exception. */

/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
 */
MterpException:
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpHandleException)
    testb   %al, %al
    jz      MterpExceptionReturn
    movq    OFF_FP_DEX_INSTRUCTIONS(rFP), %rax
    mov     OFF_FP_DEX_PC(rFP), %ecx
    leaq    (%rax, %rcx, 2), rPC
    movq    rPC, OFF_FP_DEX_PC_PTR(rFP)
    /* Do we need to switch interpreters? */
    movq    rSELF, %rax
    cmpb    LITERAL(0), THREAD_USE_MTERP_OFFSET(%rax)
    jz      MterpFallback
    /* resume execution at catch block */
    REFRESH_IBASE
    FETCH_INST
    GOTO_NEXT
    /* NOTE: no fallthrough */

/*
 * Common handling for branches with support for Jit profiling.
 * On entry:
 *    rINST          <= signed offset
 *    rPROFILE       <= signed hotness countdown (expanded to 32 bits)
 *    condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling active, do hotness countdown and report if we hit zero.
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
 *
 * Our most common case is expected to be a taken backward branch with active jit profiling,
 * but no full OSR check and no pending suspend request.
 * Next most common case is a not-taken branch with no full OSR check.
 *
 */
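
/*
 * Countdown sketch (comment only, hypothetical values): with rPROFILE == 2, a
 * taken backward branch decrements it to 1 and resumes; the next taken
 * backward branch hits zero and branches to .L_add_batch, which reports the
 * batch via MterpAddHotnessBatch and reloads rPROFILE from its return value.
 */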
MterpCommonTakenBranch:
    jg      .L_forward_branch               # don't add forward branches to hotness
/*
 * We need to subtract 1 from positive values and we should not see 0 here,
 * so we may use the result of the comparison with -1.
 */
#if JIT_CHECK_OSR != -1
#  error "JIT_CHECK_OSR must be -1."
#endif
    cmpl    $$JIT_CHECK_OSR, rPROFILE
    je      .L_osr_check
    decl    rPROFILE
    je      .L_add_batch                    # counted down to zero - report
.L_resume_backward_branch:
    movq    rSELF, %rax
    testl   $$(THREAD_SUSPEND_OR_CHECKPOINT_REQUEST), THREAD_FLAGS_OFFSET(%rax)
    REFRESH_IBASE_REG %rax
    leaq    (rPC, rINSTq, 2), rPC
    FETCH_INST
    jnz     .L_suspend_request_pending
    GOTO_NEXT

.L_suspend_request_pending:
    EXPORT_PC
    movq    rSELF, OUT_ARG0
    call    SYMBOL(MterpSuspendCheck)       # (self)
    testb   %al, %al
    jnz     MterpFallback
    REFRESH_IBASE                           # might have changed during suspend
    GOTO_NEXT
.L_no_count_backwards:
    cmpl    $$JIT_CHECK_OSR, rPROFILE       # possible OSR re-entry?
    jne     .L_resume_backward_branch
.L_osr_check:
    EXPORT_PC
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movq    rINSTq, OUT_ARG2
    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
    testb   %al, %al
    jz      .L_resume_backward_branch
    jmp     MterpOnStackReplacement

.L_forward_branch:
    cmpl    $$JIT_CHECK_OSR, rPROFILE       # possible OSR re-entry?
    je      .L_check_osr_forward
.L_resume_forward_branch:
    leaq    (rPC, rINSTq, 2), rPC
    FETCH_INST
    GOTO_NEXT

.L_check_osr_forward:
    EXPORT_PC
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movq    rINSTq, OUT_ARG2
    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
    testb   %al, %al
    jz      .L_resume_forward_branch
    jmp     MterpOnStackReplacement

.L_add_batch:
    movl    rPROFILE, %eax
    movq    OFF_FP_METHOD(rFP), OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
    movq    rSELF, OUT_ARG2
    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
    movswl  %ax, rPROFILE
    jmp     .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when OSR check request active on
 * not-taken path.  All Dalvik not-taken conditional branch offsets are 2.
 */
.L_check_not_taken_osr:
    EXPORT_PC
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movl    $$2, OUT_32_ARG2
    call    SYMBOL(MterpMaybeDoOnStackReplacement) # (self, shadow_frame, offset)
    testb   %al, %al
    jnz     MterpOnStackReplacement
    ADVANCE_PC_FETCH_AND_GOTO_NEXT 2

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movl    rINST, OUT_32_ARG2
    call    SYMBOL(MterpLogOSR)
#endif
    movl    $$1, %eax
    jmp     MterpDone

/*
 * Bail out to reference interpreter.
 */
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    movq    rSELF, OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    call    SYMBOL(MterpLogFallback)
#endif
MterpCommonFallback:
    xorl    %eax, %eax
    jmp     MterpDone

/*
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    movl    $$1, %eax
    jmp     MterpDone
MterpReturn:
    movq    OFF_FP_RESULT_REGISTER(rFP), %rdx
    movq    %rax, (%rdx)
    movl    $$1, %eax
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
    testl   rPROFILE, rPROFILE
    jle     MRestoreFrame                   # if > 0, we may have some counts to report.

    movl    %eax, rINST                     # stash return value
    /* Report cached hotness counts */
    movl    rPROFILE, %eax
    movq    OFF_FP_METHOD(rFP), OUT_ARG0
    leaq    OFF_FP_SHADOWFRAME(rFP), OUT_ARG1
    movw    %ax, OFF_FP_COUNTDOWN_OFFSET(rFP)
    movq    rSELF, OUT_ARG2
    call    SYMBOL(MterpAddHotnessBatch)    # (method, shadow_frame, self)
    movl    rINST, %eax                     # restore return value

    /* pop up frame */
MRestoreFrame:
    addq    $$FRAME_SIZE, %rsp
    .cfi_adjust_cfa_offset -FRAME_SIZE

    /* Restore callee save registers */
    POP %r15
    POP %r14
    POP %r13
    POP %r12
    POP %rbp
    POP %rbx
    ret
    .cfi_endproc
    END MterpHelpers

%def instruction_end():

    OBJECT_TYPE(artMterpAsmInstructionEnd)
    ASM_HIDDEN SYMBOL(artMterpAsmInstructionEnd)
    .global SYMBOL(artMterpAsmInstructionEnd)
SYMBOL(artMterpAsmInstructionEnd):

%def instruction_start():

    OBJECT_TYPE(artMterpAsmInstructionStart)
    ASM_HIDDEN SYMBOL(artMterpAsmInstructionStart)
    .global SYMBOL(artMterpAsmInstructionStart)
SYMBOL(artMterpAsmInstructionStart) = .L_op_nop
    .text

%def default_helper_prefix():
%  return "mterp_"

%def opcode_start():
    ENTRY mterp_${opcode}
%def opcode_end():
    END mterp_${opcode}
%def helper_start(name):
    ENTRY ${name}
%def helper_end(name):
    END ${name}