/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
  Art assembly interpreter notes:

  First validate the assembly code by implementing an ExecuteXXXImpl() style body
  (doesn't handle invoke; allows higher-level code to create the frame & shadow frame).

  Once that's working, support direct entry code & eliminate the shadow frame (and
  excess locals allocation).

  Some (hopefully) temporary ugliness.  We'll treat rFP as pointing to the
  base of the vreg array within the shadow frame.  Access the other fields -
  dex_pc_, method_ and number_of_vregs_ - via negative offsets.  For now, we'll
  continue the shadow frame mechanism of double-storing object references - via
  rFP & number_of_vregs_.
 */

/*
x86 ABI general notes:

Caller save set:
   eax, edx, ecx, st(0)-st(7)
Callee save set:
   ebx, esi, edi, ebp
Return regs:
   32-bit in eax
   64-bit in edx:eax (low-order 32 in eax)
   fp on top of fp stack st(0)

Parameters are passed on the stack, pushed right-to-left.  On entry to the target,
the first parameter is at 4(%esp).  Traditional entry code is:

functEntry:
    push    %ebp             # save old frame pointer
    mov     %esp,%ebp        # establish new frame pointer
    sub     FrameSize,%esp   # allocate storage for spills, locals & outs

Once past the prologue, arguments are referenced at ((argno + 2)*4)(%ebp).

The stack must be 16-byte aligned to support SSE in native code.

If we're not doing variable stack allocation (alloca), the frame pointer can be
eliminated and all arg references adjusted to be esp-relative.
*/
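
/*
 * For example (an illustrative sketch, not code that is emitted here): with the
 * traditional %ebp frame above, the first two incoming arguments would be read as
 *
 *     movl    8(%ebp), %eax            # argno 0: (0 + 2) * 4 = 8
 *     movl    12(%ebp), %ecx           # argno 1: (1 + 2) * 4 = 12
 */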

/*
Mterp and x86 notes:

Some key interpreter variables will be assigned to registers.

  nick     reg   purpose
  rPC      esi   interpreted program counter, used for fetching instructions
  rFP      edi   interpreted frame pointer, used for accessing locals and args
  rINSTw   bx    first 16-bit code unit of the current instruction
  rINSTbl  bl    opcode portion of the instruction word
  rINSTbh  bh    high byte of the instruction word, usually contains src/tgt reg names
  rIBASE   edx   base of the instruction handler table
  rREFS    ebp   base of the object reference array in the shadow frame

Notes:
   o High order 16 bits of ebx must be zero on entry to handler
   o rPC, rFP, rINSTw/rINSTbl valid on handler entry and exit
   o eax and ecx are scratch, rINSTw/ebx sometimes scratch

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "asm_support.h"
#include "interpreter/cfi_asm_support.h"

/*
 * Handle Mac compiler specifics.
 */
#if defined(__APPLE__)
    #define MACRO_LITERAL(value) $$(value)
    #define FUNCTION_TYPE(name)
    #define SIZE(start,end)
    // Mac OS symbols have an _ prefix.
    #define SYMBOL(name) _ ## name
#else
    #define MACRO_LITERAL(value) $$value
    #define FUNCTION_TYPE(name) .type name, @function
    #define SIZE(start,end) .size start, .-end
    #define SYMBOL(name) name
#endif

.macro PUSH _reg
    pushl \_reg
    .cfi_adjust_cfa_offset 4
    .cfi_rel_offset \_reg, 0
.endm

.macro POP _reg
    popl \_reg
    .cfi_adjust_cfa_offset -4
    .cfi_restore \_reg
.endm
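
/*
 * Illustrative usage (a sketch only, not emitted here): callee-saved registers
 * are pushed on entry and popped in reverse order on exit, so the CFI records
 * stay consistent with the stack:
 *
 *     PUSH    %ebx
 *     PUSH    %esi
 *     ...
 *     POP     %esi
 *     POP     %ebx
 */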

/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs.  So,
 * to access other shadow frame fields, we need to use a backwards offset.  Define those here.
 */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_DEX_INSTRUCTIONS OFF_FP(SHADOWFRAME_DEX_INSTRUCTIONS_OFFSET)
#define OFF_FP_COUNTDOWN_OFFSET OFF_FP(SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
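
/*
 * For example (a brief sketch): since the OFF_FP_* offsets are negative
 * relative to the vreg array base, a shadow frame field such as the
 * ArtMethod* is loaded with:
 *
 *     movl    OFF_FP_METHOD(rFP), %eax    # eax <- shadow frame's method_
 */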

/* The frame size must keep the stack 16-byte aligned.
 * Remember to account for the 4 bytes of the return address + 4 * 4 bytes of spills.
 */
#define FRAME_SIZE     28

/* Frame diagram while executing ExecuteMterpImpl, high to low addresses */
#define IN_ARG3        (FRAME_SIZE + 16 + 16)
#define IN_ARG2        (FRAME_SIZE + 16 + 12)
#define IN_ARG1        (FRAME_SIZE + 16 +  8)
#define IN_ARG0        (FRAME_SIZE + 16 +  4)
/* Spill offsets relative to %esp */
#define LOCAL0         (FRAME_SIZE -  4)
#define LOCAL1         (FRAME_SIZE -  8)
#define LOCAL2         (FRAME_SIZE - 12)
/* Out Arg offsets, relative to %esp */
#define OUT_ARG3       ( 12)
#define OUT_ARG2       (  8)
#define OUT_ARG1       (  4)
#define OUT_ARG0       (  0)  /* <- ExecuteMterpImpl esp + 0 */
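
/*
 * Illustrative sketch (the helper name is hypothetical, not defined in this
 * file): outgoing call arguments are stored at the bottom of the frame before
 * the call, using the OUT_ARG* offsets:
 *
 *     movl    %eax, OUT_ARG0(%esp)        # first outgoing argument
 *     movl    %ecx, OUT_ARG1(%esp)        # second outgoing argument
 *     call    SYMBOL(MterpSomeHelper)     # hypothetical helper
 */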

/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rSELF    IN_ARG0(%esp)
#define rPC      %esi
#define CFI_DEX  6  // DWARF register number of the register holding dex-pc (esi).
#define CFI_TMP  0  // DWARF register number of the first argument register (eax).
#define rFP      %edi
#define rINST    %ebx
#define rINSTw   %bx
#define rINSTbh  %bh
#define rINSTbl  %bl
#define rIBASE   %edx
#define rREFS    %ebp
#define rPROFILE OFF_FP_COUNTDOWN_OFFSET(rFP)

#define MTERP_LOGGING 0

/*
 * "Export" the PC to the dex_pc field in the shadow frame, for the benefit of future
 * exception objects.  Must be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of the dex pc as a direct pointer to the mapped
 * dex byte codes.  However, the rest of the runtime expects the dex pc to be an instruction
 * offset into the code_items_[] array.  For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert it to a dex pc when needed.
 */
.macro EXPORT_PC
    movl    rPC, OFF_FP_DEX_PC_PTR(rFP)
.endm
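
/*
 * Typical use (a sketch only; the helper name is hypothetical): export the PC
 * before calling anything that can throw, so the exception machinery sees an
 * up-to-date dex pc:
 *
 *     EXPORT_PC
 *     movl    %eax, OUT_ARG0(%esp)
 *     call    SYMBOL(MterpSomeThrowingHelper)   # hypothetical helper
 */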

/*
 * Refresh the handler table.
 */
.macro REFRESH_IBASE
    movl    rSELF, rIBASE
    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm

/*
 * Refresh the handler table.
 * rIBASE lives in a caller-save register (edx), so we must restore it after each call.
 * It is also clobbered by some 64-bit operations (such as imul, which writes its high
 * result to edx), so we must restore it in those cases as well.
 *
 * TODO: Consider spilling rIBASE instead of restoring it from the Thread structure.
 */
.macro RESTORE_IBASE
    movl    rSELF, rIBASE
    movl    THREAD_CURRENT_IBASE_OFFSET(rIBASE), rIBASE
.endm
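
/*
 * Illustrative sketch (hypothetical helper name): any call may clobber edx, so
 * the handler table base is reloaded before the next dispatch:
 *
 *     call    SYMBOL(MterpSomeHelper)     # hypothetical; clobbers caller-saves
 *     RESTORE_IBASE
 */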

/*
 * If rSELF is already held in a register, rIBASE can be restored directly from it.
 */
.macro RESTORE_IBASE_FROM_SELF _reg
    movl    THREAD_CURRENT_IBASE_OFFSET(\_reg), rIBASE
.endm

/*
 * Refresh rINST.
 * On entry to a handler, rINST no longer contains the opcode number.
 * However, some utilities require the full instruction word, so this macro
 * restores the opcode number, rebuilding the original 16-bit code unit in rINSTw.
 */
.macro REFRESH_INST _opnum
    movb    rINSTbl, rINSTbh
    movb    MACRO_LITERAL(\_opnum), rINSTbl
.endm
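
/*
 * Illustrative sketch (the opcode number shown is arbitrary; real handlers pass
 * their own opcode): rebuild the full instruction word before handing it to a
 * helper that needs it:
 *
 *     EXPORT_PC
 *     REFRESH_INST 42                     # 42 stands in for this handler's opcode
 *     movl    rINST, OUT_ARG1(%esp)
 */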

/*
 * Fetch the next instruction from rPC into rINSTw.  Does not advance rPC.
 */
.macro FETCH_INST
    movzwl  (rPC), rINST
.endm

/*
 * Remove opcode from rINST, compute the address of handler and jump to it.
 */
.macro GOTO_NEXT
    movzx   rINSTbl,%eax
    movzbl  rINSTbh,rINST
    shll    MACRO_LITERAL(${handler_size_bits}), %eax
    addl    rIBASE, %eax
    jmp     *%eax
.endm
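
/*
 * Dispatch math, spelled out (a sketch; the handler slot size comes from the
 * build configuration): the target address is
 *
 *     rIBASE + (opcode << handler_size_bits)
 *
 * since every handler occupies a fixed-size slot in the handler table.  For
 * instance, if each slot were 128 bytes (handler_size_bits = 7), opcode 0x01
 * would dispatch to rIBASE + 0x80.  The movzbl above also leaves only the high
 * byte of the instruction word in rINST, which is the state handlers expect.
 */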

/*
 * Advance rPC by the given number of 16-bit code units.
 */
.macro ADVANCE_PC _count
    leal    2*\_count(rPC), rPC
.endm

/*
 * Advance rPC by the given number of code units, fetch the next instruction and
 * jump to its handler.
 */
.macro ADVANCE_PC_FETCH_AND_GOTO_NEXT _count
    ADVANCE_PC \_count
    FETCH_INST
    GOTO_NEXT
.endm
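
/*
 * Typical handler ending (illustrative only): after the handler body, step
 * over this instruction's code units and dispatch the next one, e.g. for an
 * instruction that is two code units long:
 *
 *     ...                                     # handler body
 *     ADVANCE_PC_FETCH_AND_GOTO_NEXT 2
 */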

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define VREG_ADDRESS(_vreg) (rFP,_vreg,4)
#define VREG_HIGH_ADDRESS(_vreg) 4(rFP,_vreg,4)
#define VREG_REF_ADDRESS(_vreg) (rREFS,_vreg,4)
#define VREG_REF_HIGH_ADDRESS(_vreg) 4(rREFS,_vreg,4)

.macro GET_VREG _reg _vreg
    movl    (rFP,\_vreg,4), \_reg
.endm

/* Read wide value to xmm. */
.macro GET_WIDE_FP_VREG _reg _vreg
    movq    (rFP,\_vreg,4), \_reg
.endm

/* Write a 32-bit non-reference value and clear the corresponding reference slot. */
.macro SET_VREG _reg _vreg
    movl    \_reg, (rFP,\_vreg,4)
    movl    MACRO_LITERAL(0), (rREFS,\_vreg,4)
.endm

/* Write wide value from xmm. xmm is clobbered. */
.macro SET_WIDE_FP_VREG _reg _vreg
    movq    \_reg, (rFP,\_vreg,4)
    pxor    \_reg, \_reg
    movq    \_reg, (rREFS,\_vreg,4)
.endm

/* Write an object reference; the value is mirrored into the reference array. */
.macro SET_VREG_OBJECT _reg _vreg
    movl    \_reg, (rFP,\_vreg,4)
    movl    \_reg, (rREFS,\_vreg,4)
.endm
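
/*
 * Illustrative sketch (not an actual handler in this file) of how a
 * move-style "op vA, vB" body would use these helpers on handler entry,
 * where rINSTbl holds the byte containing B (high nibble) and A (low nibble):
 *
 *     movzbl  rINSTbl, %eax               # eax <- BA
 *     andb    MACRO_LITERAL(0xf), %al     # eax <- A
 *     shrl    MACRO_LITERAL(4), rINST     # rINST <- B
 *     GET_VREG rINST, rINST               # rINST <- vB
 *     SET_VREG rINST, %eax                # vA <- vB, ref slot cleared
 *     ADVANCE_PC_FETCH_AND_GOTO_NEXT 1
 */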

/* Read the high half of a wide (64-bit) register pair. */
.macro GET_VREG_HIGH _reg _vreg
    movl    4(rFP,\_vreg,4), \_reg
.endm

/* Write the high half of a wide pair and clear its reference slot. */
.macro SET_VREG_HIGH _reg _vreg
    movl    \_reg, 4(rFP,\_vreg,4)
    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm

/* Clear the reference slot for a single vreg. */
.macro CLEAR_REF _vreg
    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
.endm

/* Clear the reference slots for a wide (two-vreg) pair. */
.macro CLEAR_WIDE_REF _vreg
    movl    MACRO_LITERAL(0),  (rREFS,\_vreg,4)
    movl    MACRO_LITERAL(0), 4(rREFS,\_vreg,4)
.endm
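
/*
 * Illustrative sketch (register choices are arbitrary) of handling a wide
 * value with the xmm helpers: both 32-bit halves move at once, and both
 * reference slots are cleared by the store:
 *
 *     GET_WIDE_FP_VREG %xmm0, %ecx        # xmm0 <- vB/vB+1
 *     SET_WIDE_FP_VREG %xmm0, %eax        # vA/vA+1 <- xmm0 (xmm0 is clobbered)
 */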