/*
 * We've detected a condition that will result in an exception, but the exception
 * has not yet been thrown.  Just bail out to the reference interpreter to deal with it.
 * TUNING: for consistency, we may want to just go ahead and handle these here.
 */
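/*
 * Each stub below logs the condition (when MTERP_LOGGING is enabled) and
 * branches to MterpCommonFallback, which returns 0 so that the current
 * instruction is retried in the reference interpreter, where the exception
 * is actually raised.
 */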

    .extern MterpLogDivideByZeroException
common_errDivideByZero:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogDivideByZeroException
#endif
    b       MterpCommonFallback

    .extern MterpLogArrayIndexException
common_errArrayIndex:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogArrayIndexException
#endif
    b       MterpCommonFallback

    .extern MterpLogNullObjectException
common_errNullObject:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogNullObjectException
#endif
    b       MterpCommonFallback

/*
 * If we're here, something is out of the ordinary.  If there is a pending
 * exception, handle it.  Otherwise, roll back and retry with the reference
 * interpreter.
 */
MterpPossibleException:
    ld      a0, THREAD_EXCEPTION_OFFSET(rSELF)
    beqzc   a0, MterpFallback                       # no pending exception; fall back to reference interpreter.
    /* intentional fallthrough; handle pending exception. */
/*
 * On return from a runtime helper routine, we've found a pending exception.
 * Can we handle it here, or do we need to bail out to the caller?
 */
    .extern MterpHandleException
    .extern MterpShouldSwitchInterpreters
MterpException:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpHandleException                    # (self, shadow_frame)
    beqzc   v0, MterpExceptionReturn                # no local catch; return to caller.
    ld      a0, OFF_FP_DEX_INSTRUCTIONS(rFP)
    lwu     a1, OFF_FP_DEX_PC(rFP)
    REFRESH_IBASE
    dlsa    rPC, a1, a0, 1                          # generate new dex_pc_ptr
    /* Do we need to switch interpreters? */
    jal     MterpShouldSwitchInterpreters
    bnezc   v0, MterpFallback
    /* resume execution at catch block */
    EXPORT_PC
    FETCH_INST
    GET_INST_OPCODE v0
    GOTO_OPCODE v0
    /* NOTE: no fallthrough */

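/*
 * In C terms, the flow above is roughly the following (a sketch only; the
 * shadow-frame field names are illustrative, the helpers are the real ones):
 *
 *   if (self->exception == nullptr)               // MterpPossibleException
 *       goto MterpFallback;                       // nothing pending after all
 *   if (!MterpHandleException(self, shadow_frame))
 *       return 1;                                 // no local catch; unwind to caller
 *   dex_pc_ptr = insns + shadow_frame->dex_pc;    // catch found; recompute rPC
 *   if (MterpShouldSwitchInterpreters())
 *       goto MterpFallback;
 *   // resume dispatch at the catch block
 */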
/*
 * Common handling for branches with support for JIT profiling.
 * On entry:
 *    rINST          <= signed offset
 *    rPROFILE       <= signed hotness countdown (expanded to 64 bits)
 *
 * We have quite a few different cases for branch profiling, OSR detection and
 * suspend check support here.
 *
 * Taken backward branches:
 *    If profiling is active, do the hotness countdown and report if we hit zero.
 *    If in OSR check mode, see if our target is a compiled loop header entry; if so, do OSR.
 *    Is there a pending suspend request?  If so, suspend.
 *
 * Taken forward branches and not-taken backward branches:
 *    If in OSR check mode, see if our target is a compiled loop header entry; if so, do OSR.
 *
 * Our most common case is expected to be a taken backward branch with active JIT profiling,
 * but no full OSR check and no pending suspend request.
 * The next most common case is a not-taken branch with no full OSR check.
 */
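/*
 * A rough C sketch of the taken-branch path below (illustrative only; the
 * sentinel values follow from the comparisons against JIT_CHECK_OSR):
 *
 *   if (offset > 0) goto forward_branch;          // only backward branches count
 *   if (rPROFILE == JIT_CHECK_OSR) goto osr_check;
 *   if (rPROFILE < JIT_CHECK_OSR) goto resume;    // profiling disabled
 *   if (--rPROFILE == 0) {                        // countdown expired; report batch
 *       shadow_frame->hotness_countdown = rPROFILE;
 *       rPROFILE = MterpAddHotnessBatch(method, shadow_frame, self);
 *       if (rPROFILE == JIT_CHECK_OSR) goto osr_check;
 *   }
 *   // resume: advance rPC, then honor any pending suspend request
 */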
MterpCommonTakenBranchNoFlags:
    bgtzc   rINST, .L_forward_branch    # don't add forward branches to hotness
/*
 * rPROFILE is either a positive countdown or a negative sentinel: JIT_CHECK_OSR
 * (-1) requests an OSR check, and anything below that means profiling is
 * disabled.  We need to subtract 1 from positive values and we should not see
 * 0 here, so we may use the result of the comparison with -1.
 */
    li      v0, JIT_CHECK_OSR
    beqc    rPROFILE, v0, .L_osr_check
    bltc    rPROFILE, v0, .L_resume_backward_branch
    dsubu   rPROFILE, 1
    beqzc   rPROFILE, .L_add_batch      # counted down to zero; report
.L_resume_backward_branch:
    lw      ra, THREAD_FLAGS_OFFSET(rSELF)
    REFRESH_IBASE
    daddu   a2, rINST, rINST            # a2<- byte offset
    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
    and     ra, THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
    bnezc   ra, .L_suspend_request_pending
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_suspend_request_pending:
    EXPORT_PC
    move    a0, rSELF
    jal     MterpSuspendCheck           # (self)
    bnezc   v0, MterpFallback           # nonzero: switch to reference interpreter
    REFRESH_IBASE                       # might have changed during suspend
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

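/*
 * MterpSuspendCheck(self) services the pending suspend or checkpoint request
 * and returns nonzero if execution must continue in the reference
 * interpreter, hence the branch to MterpFallback above.
 */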
.L_no_count_backwards:
    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
    bnec    rPROFILE, v0, .L_resume_backward_branch
.L_osr_check:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rINST
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    b       .L_resume_backward_branch

.L_forward_branch:
    li      v0, JIT_CHECK_OSR           # check for possible OSR re-entry
    beqc    rPROFILE, v0, .L_check_osr_forward
.L_resume_forward_branch:
    daddu   a2, rINST, rINST            # a2<- byte offset
    FETCH_ADVANCE_INST_RB a2            # update rPC, load rINST
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

.L_check_osr_forward:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rINST
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    b       .L_resume_forward_branch

.L_add_batch:
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
    ld      a0, OFF_FP_METHOD(rFP)
    move    a2, rSELF
    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
    move    rPROFILE, v0                # restore new hotness countdown to rPROFILE
    b       .L_no_count_backwards

/*
 * Entered from the conditional branch handlers when an OSR check request is
 * active on the not-taken path.  All Dalvik not-taken conditional branch
 * offsets are 2.
 */
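/*
 * (For example, if-eqz vAA, +BBBB is a single 2-code-unit instruction, so
 * falling through it always advances the dex pc by 2, which is the offset
 * passed to the OSR helper below.)
 */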
.L_check_not_taken_osr:
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    li      a2, 2
    EXPORT_PC
    jal     MterpMaybeDoOnStackReplacement # (self, shadow_frame, offset)
    bnezc   v0, MterpOnStackReplacement
    FETCH_ADVANCE_INST 2
    GET_INST_OPCODE v0                  # extract opcode from rINST
    GOTO_OPCODE v0                      # jump to next instruction

/*
 * On-stack replacement has happened, and now we've returned from the compiled method.
 */
MterpOnStackReplacement:
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rINST                               # rINST contains offset
    jal     MterpLogOSR
#endif
    li      v0, 1                                   # signal normal return
    b       MterpDone

/*
 * Bail out to reference interpreter.
 */
    .extern MterpLogFallback
MterpFallback:
    EXPORT_PC
#if MTERP_LOGGING
    move    a0, rSELF
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    jal     MterpLogFallback
#endif
MterpCommonFallback:
    li      v0, 0                                   # signal retry with reference interpreter.
    b       MterpDone

/*
 * We pushed some registers on the stack in ExecuteMterpImpl, then saved
 * SP and RA.  Here we restore SP, restore the registers, and then restore
 * RA to PC.
 *
 * On entry:
 *  uint32_t* rFP  (should still be live, pointer to base of vregs)
 */
MterpExceptionReturn:
    li      v0, 1                                   # signal return to caller.
    b       MterpDone
/*
 * The return value is expected in a0.  If it is narrower than 64 bits, the
 * 32 most significant bits of a0 must be zero- or sign-extended, depending
 * on the return type.
 */
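/*
 * (For example, a Java 'int' result occupies the low 32 bits of a0 with the
 * upper bits sign-extended, while a 'char' result is zero-extended; 'long'
 * results use all 64 bits.)
 */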
MterpReturn:
    ld      a2, OFF_FP_RESULT_REGISTER(rFP)
    sd      a0, 0(a2)
    li      v0, 1                                   # signal return to caller.
MterpDone:
/*
 * At this point, we expect rPROFILE to be non-zero.  If negative, hotness is disabled or we're
 * checking for OSR.  If greater than zero, we might have unreported hotness to register
 * (the difference between the ending rPROFILE and the cached hotness counter).  rPROFILE
 * should only reach zero immediately after a hotness decrement, and is then reset to either
 * a negative special state or the new non-zero countdown value.
 */
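/*
 * Roughly, in C (a sketch; names mirror the helpers used below):
 *
 *   if (rPROFILE > 0) {                           // unreported counts pending
 *       shadow_frame->hotness_countdown = rPROFILE;
 *       MterpAddHotnessBatch(method, shadow_frame, self);
 *   }
 *   return v0;    // 0 = retry in reference interpreter, 1 = normal completion
 */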
    blez    rPROFILE, .L_pop_and_return # <= 0: nothing to report

MterpProfileActive:
    move    rINST, v0                   # stash return value
    /* Report cached hotness counts */
    ld      a0, OFF_FP_METHOD(rFP)
    daddu   a1, rFP, OFF_FP_SHADOWFRAME
    move    a2, rSELF
    sh      rPROFILE, SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET(a1)
    jal     MterpAddHotnessBatch        # (method, shadow_frame, self)
    move    v0, rINST                   # restore return value

.L_pop_and_return:
    ld      s6, STACK_OFFSET_S6(sp)
    .cfi_restore 22
    ld      s5, STACK_OFFSET_S5(sp)
    .cfi_restore 21
    ld      s4, STACK_OFFSET_S4(sp)
    .cfi_restore 20
    ld      s3, STACK_OFFSET_S3(sp)
    .cfi_restore 19
    ld      s2, STACK_OFFSET_S2(sp)
    .cfi_restore 18
    ld      s1, STACK_OFFSET_S1(sp)
    .cfi_restore 17
    ld      s0, STACK_OFFSET_S0(sp)
    .cfi_restore 16

    ld      ra, STACK_OFFSET_RA(sp)
    .cfi_restore 31

    ld      t8, STACK_OFFSET_GP(sp)
    .cpreturn
    .cfi_restore 28

    .set    noreorder
    jr      ra
    daddu   sp, sp, STACK_SIZE          # restore SP in the branch delay slot
    .cfi_adjust_cfa_offset -STACK_SIZE

    .cfi_endproc
    .set    reorder
    .size ExecuteMterpImpl, .-ExecuteMterpImpl
