/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_
#define ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_

#include <ostream>

#include "dex/reg_location.h"
#include "dex/reg_storage.h"

namespace art {

/*
 * Runtime register conventions. We consider x86, x86-64 and x32 (32-bit mode x86-64). Each ABI
 * has different conventions, which we capture here. Changing something that is callee save and
 * making it caller save places a burden on up-calls to save/restore the callee save register;
 * however, there are few registers that are callee save in the ABI. Changing something that is
 * caller save and making it callee save places a burden on down-calls to save/restore the callee
 * save register. For these reasons we aim to match native conventions for caller and callee save.
 * On x86 only the first 4 registers can be used for byte operations, so they are preferred for
 * temporary scratch registers.
 *
 * General Purpose Register:
 *  Native: x86    | x86-64 / x32 | ART x86                                         | ART x86-64
 *  r0/eax: caller | caller       | caller, Method*, scratch, return value          | caller, scratch, return value
 *  r1/ecx: caller | caller, arg4 | caller, arg1, scratch                           | caller, arg3, scratch
 *  r2/edx: caller | caller, arg3 | caller, arg2, scratch, high half of long return | caller, arg2, scratch
 *  r3/ebx: callEE | callEE       | callER, arg3, scratch                           | callee, promotable
 *  r4/esp: stack pointer
 *  r5/ebp: callee | callee       | callee, promotable                              | callee, promotable
 *  r6/esi: callEE | callER, arg2 | callee, promotable                              | caller, arg1, scratch
 *  r7/edi: callEE | callER, arg1 | callee, promotable                              | caller, Method*, scratch
 *  ---  x86-64/x32 registers
 *  Native: x86-64 / x32      | ART
 *  r8:     caller save, arg5 | caller, arg4, scratch
 *  r9:     caller save, arg6 | caller, arg5, scratch
 *  r10:    caller save       | caller, scratch
 *  r11:    caller save       | caller, scratch
 *  r12:    callee save       | callee, available for register promotion (promotable)
 *  r13:    callee save       | callee, available for register promotion (promotable)
 *  r14:    callee save       | callee, available for register promotion (promotable)
 *  r15:    callee save       | callee, available for register promotion (promotable)
 *
 * There is no rSELF; instead, on x86 the fs: segment has Thread::Current() as its base address,
 * whereas on x86-64/x32 gs: holds it.
 *
 * For floating point we don't support CPUs without SSE2 support (i.e. we require CPUs newer than the PIII):
 *  Native: x86  | x86-64 / x32 | ART x86                          | ART x86-64
 *  XMM0: caller | caller, arg1 | caller, arg1, float return value | caller, arg1, float return value
 *  XMM1: caller | caller, arg2 | caller, arg2, scratch            | caller, arg2, scratch
 *  XMM2: caller | caller, arg3 | caller, arg3, scratch            | caller, arg3, scratch
 *  XMM3: caller | caller, arg4 | caller, arg4, scratch            | caller, arg4, scratch
 *  XMM4: caller | caller, arg5 | caller, scratch                  | caller, arg5, scratch
 *  XMM5: caller | caller, arg6 | caller, scratch                  | caller, arg6, scratch
 *  XMM6: caller | caller, arg7 | caller, scratch                  | caller, arg7, scratch
 *  XMM7: caller | caller, arg8 | caller, scratch                  | caller, arg8, scratch
 *  ---  x86-64/x32 registers
 *  XMM8 .. 11: caller save, available as scratch registers for ART.
 *  XMM12 .. 15: callee save, available as promoted registers for ART.
 *  This treatment of XMM12 .. 15 applies to QCG only; for others they are caller save.
 *
 * X87 is a necessary evil outside of ART code for x86:
 *  ST0:  x86 float/double native return value, caller save
 *  ST1 .. ST7: caller save
 *
 *  Stack frame diagram (stack grows down, higher addresses at top):
 *  For a more detailed view of each region see stack.h.
 *
 * +---------------------------+
 * | IN[ins-1]                 |  {Note: resides in caller's frame}
 * |       .                   |
 * | IN[0]                     |
 * | caller's ArtMethod*       |
 * +===========================+  {Note: start of callee's frame}
 * | return address            |  {pushed by call}
 * | spill region              |  {variable sized}
 * +---------------------------+
 * | ...filler 4-bytes...      |  {Note: used as 2nd word of V[locals-1] if long}
 * +---------------------------+
 * | V[locals-1]               |
 * | V[locals-2]               |
 * |      .                    |
 * |      .                    |
 * | V[1]                      |
 * | V[0]                      |
 * +---------------------------+
 * | 0 to 12-bytes padding     |
 * +---------------------------+
 * | compiler temp region      |
 * +---------------------------+
 * | OUT[outs-1]               |
 * | OUT[outs-2]               |
 * |       .                   |
 * | OUT[0]                    |
 * | ArtMethod*                | <<== sp w/ 16-byte alignment
 * +===========================+
 */
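
// For example, with the fs:/gs: convention above, a Thread::Current() field at some byte offset
// 'disp' (hypothetical) is reached with a single segment-override load such as
// "mov eax, fs:[disp]" on x86 or "mov rax, gs:[disp]" on x86-64/x32, so no general purpose
// register needs to be reserved as rSELF.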

enum X86ResourceEncodingPos {
  kX86GPReg0   = 0,
  kX86RegSP    = 4,
  kX86FPReg0   = 16,  // xmm0 .. xmm7/xmm15.
  kX86FPRegEnd = 32,
  kX86FPStack  = 33,
  kX86RegEnd   = kX86FPStack,
};
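
// These bit positions are used when building LIR use/def resource masks: the GPRs occupy the low
// bits (kX86RegSP matching ESP/RSP's hardware number 4), the xmm registers start at kX86FPReg0,
// and the x87 stack is modeled as the single resource kX86FPStack.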

// FIXME: for 64-bit, perhaps add an X86_64NativeRegisterPool enum?
enum X86NativeRegisterPool {
  r0             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 0,
  r0q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 0,
  rAX            = r0,
  r1             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 1,
  r1q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 1,
  rCX            = r1,
  r2             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 2,
  r2q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 2,
  rDX            = r2,
  r3             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 3,
  r3q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 3,
  rBX            = r3,
  r4sp_32        = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 4,
  rX86_SP_32     = r4sp_32,
  r4sp_64        = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 4,
  rX86_SP_64     = r4sp_64,
  r5             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 5,
  r5q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 5,
  rBP            = r5,
  r5sib_no_base  = r5,
  r6             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 6,
  r6q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 6,
  rSI            = r6,
  r7             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 7,
  r7q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 7,
  rDI            = r7,
  r8             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 8,
  r8q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 8,
  r9             = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 9,
  r9q            = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 9,
  r10            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 10,
  r10q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 10,
  r11            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 11,
  r11q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 11,
  r12            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 12,
  r12q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 12,
  r13            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 13,
  r13q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 13,
  r14            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 14,
  r14q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 14,
  r15            = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 15,
  r15q           = RegStorage::k64BitSolo | RegStorage::kCoreRegister | 15,
  // fake return address register for core spill mask.
  rRET           = RegStorage::k32BitSolo | RegStorage::kCoreRegister | 16,

  // xmm registers, single precision view.
  fr0  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 0,
  fr1  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 1,
  fr2  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 2,
  fr3  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 3,
  fr4  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 4,
  fr5  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 5,
  fr6  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 6,
  fr7  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 7,
  fr8  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 8,
  fr9  = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 9,
  fr10 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 10,
  fr11 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 11,
  fr12 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 12,
  fr13 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 13,
  fr14 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 14,
  fr15 = RegStorage::k32BitSolo | RegStorage::kFloatingPoint | 15,

  // xmm registers, double precision aliases.
  dr0  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 0,
  dr1  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 1,
  dr2  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 2,
  dr3  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 3,
  dr4  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 4,
  dr5  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 5,
  dr6  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 6,
  dr7  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 7,
  dr8  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 8,
  dr9  = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 9,
  dr10 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 10,
  dr11 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 11,
  dr12 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 12,
  dr13 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 13,
  dr14 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 14,
  dr15 = RegStorage::k64BitSolo | RegStorage::kFloatingPoint | 15,

  // xmm registers, quad precision aliases.
  xr0  = RegStorage::k128BitSolo | 0,
  xr1  = RegStorage::k128BitSolo | 1,
  xr2  = RegStorage::k128BitSolo | 2,
  xr3  = RegStorage::k128BitSolo | 3,
  xr4  = RegStorage::k128BitSolo | 4,
  xr5  = RegStorage::k128BitSolo | 5,
  xr6  = RegStorage::k128BitSolo | 6,
  xr7  = RegStorage::k128BitSolo | 7,
  xr8  = RegStorage::k128BitSolo | 8,
  xr9  = RegStorage::k128BitSolo | 9,
  xr10 = RegStorage::k128BitSolo | 10,
  xr11 = RegStorage::k128BitSolo | 11,
  xr12 = RegStorage::k128BitSolo | 12,
  xr13 = RegStorage::k128BitSolo | 13,
  xr14 = RegStorage::k128BitSolo | 14,
  xr15 = RegStorage::k128BitSolo | 15,

  // Special value for RIP 64 bit addressing.
  kRIPReg = 255,

  // TODO: as needed, add 256, 512 and 1024-bit xmm views.
};
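
// Note: the 32-bit and 64-bit views of a core register (e.g. r0 and r0q) share the same low
// register number and differ only in the RegStorage shape bits, which is why the static_assert
// below can compare rs_rX86_SP_64 and rs_rX86_SP_32 via GetRegNum().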

constexpr RegStorage rs_r0(RegStorage::kValid | r0);
constexpr RegStorage rs_r0q(RegStorage::kValid | r0q);
constexpr RegStorage rs_rAX = rs_r0;
constexpr RegStorage rs_r1(RegStorage::kValid | r1);
constexpr RegStorage rs_r1q(RegStorage::kValid | r1q);
constexpr RegStorage rs_rCX = rs_r1;
constexpr RegStorage rs_r2(RegStorage::kValid | r2);
constexpr RegStorage rs_r2q(RegStorage::kValid | r2q);
constexpr RegStorage rs_rDX = rs_r2;
constexpr RegStorage rs_r3(RegStorage::kValid | r3);
constexpr RegStorage rs_r3q(RegStorage::kValid | r3q);
constexpr RegStorage rs_rBX = rs_r3;
constexpr RegStorage rs_rX86_SP_64(RegStorage::kValid | r4sp_64);
constexpr RegStorage rs_rX86_SP_32(RegStorage::kValid | r4sp_32);
static_assert(rs_rX86_SP_64.GetRegNum() == rs_rX86_SP_32.GetRegNum(), "Unexpected mismatch");
constexpr RegStorage rs_r5(RegStorage::kValid | r5);
constexpr RegStorage rs_r5q(RegStorage::kValid | r5q);
constexpr RegStorage rs_rBP = rs_r5;
constexpr RegStorage rs_r6(RegStorage::kValid | r6);
constexpr RegStorage rs_r6q(RegStorage::kValid | r6q);
constexpr RegStorage rs_rSI = rs_r6;
constexpr RegStorage rs_r7(RegStorage::kValid | r7);
constexpr RegStorage rs_r7q(RegStorage::kValid | r7q);
constexpr RegStorage rs_rDI = rs_r7;
constexpr RegStorage rs_rRET(RegStorage::kValid | rRET);
constexpr RegStorage rs_r8(RegStorage::kValid | r8);
constexpr RegStorage rs_r8q(RegStorage::kValid | r8q);
constexpr RegStorage rs_r9(RegStorage::kValid | r9);
constexpr RegStorage rs_r9q(RegStorage::kValid | r9q);
constexpr RegStorage rs_r10(RegStorage::kValid | r10);
constexpr RegStorage rs_r10q(RegStorage::kValid | r10q);
constexpr RegStorage rs_r11(RegStorage::kValid | r11);
constexpr RegStorage rs_r11q(RegStorage::kValid | r11q);
constexpr RegStorage rs_r12(RegStorage::kValid | r12);
constexpr RegStorage rs_r12q(RegStorage::kValid | r12q);
constexpr RegStorage rs_r13(RegStorage::kValid | r13);
constexpr RegStorage rs_r13q(RegStorage::kValid | r13q);
constexpr RegStorage rs_r14(RegStorage::kValid | r14);
constexpr RegStorage rs_r14q(RegStorage::kValid | r14q);
constexpr RegStorage rs_r15(RegStorage::kValid | r15);
constexpr RegStorage rs_r15q(RegStorage::kValid | r15q);

constexpr RegStorage rs_fr0(RegStorage::kValid | fr0);
constexpr RegStorage rs_fr1(RegStorage::kValid | fr1);
constexpr RegStorage rs_fr2(RegStorage::kValid | fr2);
constexpr RegStorage rs_fr3(RegStorage::kValid | fr3);
constexpr RegStorage rs_fr4(RegStorage::kValid | fr4);
constexpr RegStorage rs_fr5(RegStorage::kValid | fr5);
constexpr RegStorage rs_fr6(RegStorage::kValid | fr6);
constexpr RegStorage rs_fr7(RegStorage::kValid | fr7);
constexpr RegStorage rs_fr8(RegStorage::kValid | fr8);
constexpr RegStorage rs_fr9(RegStorage::kValid | fr9);
constexpr RegStorage rs_fr10(RegStorage::kValid | fr10);
constexpr RegStorage rs_fr11(RegStorage::kValid | fr11);
constexpr RegStorage rs_fr12(RegStorage::kValid | fr12);
constexpr RegStorage rs_fr13(RegStorage::kValid | fr13);
constexpr RegStorage rs_fr14(RegStorage::kValid | fr14);
constexpr RegStorage rs_fr15(RegStorage::kValid | fr15);

constexpr RegStorage rs_dr0(RegStorage::kValid | dr0);
constexpr RegStorage rs_dr1(RegStorage::kValid | dr1);
constexpr RegStorage rs_dr2(RegStorage::kValid | dr2);
constexpr RegStorage rs_dr3(RegStorage::kValid | dr3);
constexpr RegStorage rs_dr4(RegStorage::kValid | dr4);
constexpr RegStorage rs_dr5(RegStorage::kValid | dr5);
constexpr RegStorage rs_dr6(RegStorage::kValid | dr6);
constexpr RegStorage rs_dr7(RegStorage::kValid | dr7);
constexpr RegStorage rs_dr8(RegStorage::kValid | dr8);
constexpr RegStorage rs_dr9(RegStorage::kValid | dr9);
constexpr RegStorage rs_dr10(RegStorage::kValid | dr10);
constexpr RegStorage rs_dr11(RegStorage::kValid | dr11);
constexpr RegStorage rs_dr12(RegStorage::kValid | dr12);
constexpr RegStorage rs_dr13(RegStorage::kValid | dr13);
constexpr RegStorage rs_dr14(RegStorage::kValid | dr14);
constexpr RegStorage rs_dr15(RegStorage::kValid | dr15);

constexpr RegStorage rs_xr0(RegStorage::kValid | xr0);
constexpr RegStorage rs_xr1(RegStorage::kValid | xr1);
constexpr RegStorage rs_xr2(RegStorage::kValid | xr2);
constexpr RegStorage rs_xr3(RegStorage::kValid | xr3);
constexpr RegStorage rs_xr4(RegStorage::kValid | xr4);
constexpr RegStorage rs_xr5(RegStorage::kValid | xr5);
constexpr RegStorage rs_xr6(RegStorage::kValid | xr6);
constexpr RegStorage rs_xr7(RegStorage::kValid | xr7);
constexpr RegStorage rs_xr8(RegStorage::kValid | xr8);
constexpr RegStorage rs_xr9(RegStorage::kValid | xr9);
constexpr RegStorage rs_xr10(RegStorage::kValid | xr10);
constexpr RegStorage rs_xr11(RegStorage::kValid | xr11);
constexpr RegStorage rs_xr12(RegStorage::kValid | xr12);
constexpr RegStorage rs_xr13(RegStorage::kValid | xr13);
constexpr RegStorage rs_xr14(RegStorage::kValid | xr14);
constexpr RegStorage rs_xr15(RegStorage::kValid | xr15);

constexpr RegStorage rs_rX86_RET0 = rs_rAX;
constexpr RegStorage rs_rX86_RET1 = rs_rDX;

// RegisterLocation templates for return values (r_V0, or the r_V0/r_V1 pair).
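// The positional initializers below follow the field order of RegLocation (see
// dex/reg_location.h): location, wide, defined, is_const, fp, core, ref, high_word, home,
// reg, s_reg_low, orig_sreg. For example, x86_loc_c_return_wide sets wide = 1, and the
// float/double variants set fp = 1.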
const RegLocation x86_loc_c_return
    {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitPair, rAX, rDX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_ref
    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
     RegStorage(RegStorage::k32BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_ref
    {kLocPhysReg, 0, 0, 0, 0, 0, 1, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_64_loc_c_return_wide
    {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, rAX), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_float
    {kLocPhysReg, 0, 0, 0, 1, 0, 0, 0, 1,
     RegStorage(RegStorage::k32BitSolo, fr0), INVALID_SREG, INVALID_SREG};
const RegLocation x86_loc_c_return_double
    {kLocPhysReg, 1, 0, 0, 1, 0, 0, 0, 1,
     RegStorage(RegStorage::k64BitSolo, dr0), INVALID_SREG, INVALID_SREG};

/*
 * The following enum defines the list of X86 instructions supported by the assembler. Their
 * corresponding EncodingMap positions are defined in Assemble.cc.
 */
enum X86OpCode {
  kX86First = 0,
  kX8632BitData = kX86First,  // data [31..0].
  kX86Bkpt,
  kX86Nop,
  // Define groups of binary operations
  // MR - Memory Register  - opcode [base + disp], reg
  //             - lir operands - 0: base, 1: disp, 2: reg
  // AR - Array Register   - opcode [base + index * scale + disp], reg
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
  // TR - Thread Register  - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: reg
  // RR - Register Register  - opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2
  // RM - Register Memory  - opcode reg, [base + disp]
  //             - lir operands - 0: reg, 1: base, 2: disp
  // RA - Register Array   - opcode reg, [base + index * scale + disp]
  //             - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
  // RT - Register Thread  - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
  //             - lir operands - 0: reg, 1: disp
  // RI - Register Immediate - opcode reg, #immediate
  //             - lir operands - 0: reg, 1: immediate
  // MI - Memory Immediate   - opcode [base + disp], #immediate
  //             - lir operands - 0: base, 1: disp, 2: immediate
  // AI - Array Immediate  - opcode [base + index * scale + disp], #immediate
  //             - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: immediate
  // TI - Thread Immediate  - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
  //             - lir operands - 0: disp, 1: imm
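  // For example, with the MR kind above, kX86Add32MR (generated by BinaryOpCode below) is
  // "add [base + disp], reg" with lir operands 0: base, 1: disp, 2: reg.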
#define BinaryOpCode(opcode) \
  opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
  opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, opcode ## 8TI, \
  opcode ## 16MR, opcode ## 16AR, opcode ## 16TR, \
  opcode ## 16RR, opcode ## 16RM, opcode ## 16RA, opcode ## 16RT, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, opcode ## 16TI, \
  opcode ## 16RI8, opcode ## 16MI8, opcode ## 16AI8, opcode ## 16TI8, \
  opcode ## 32MR, opcode ## 32AR, opcode ## 32TR, \
  opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
  opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8, \
  opcode ## 64MR, opcode ## 64AR, opcode ## 64TR, \
  opcode ## 64RR, opcode ## 64RM, opcode ## 64RA, opcode ## 64RT, \
  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, opcode ## 64TI, \
  opcode ## 64RI8, opcode ## 64MI8, opcode ## 64AI8, opcode ## 64TI8
  BinaryOpCode(kX86Add),
  BinaryOpCode(kX86Or),
  BinaryOpCode(kX86Adc),
  BinaryOpCode(kX86Sbb),
  BinaryOpCode(kX86And),
  BinaryOpCode(kX86Sub),
  BinaryOpCode(kX86Xor),
  BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
  kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
  kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
  kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
  kX86Imul64RRI, kX86Imul64RMI, kX86Imul64RAI,
  kX86Imul64RRI8, kX86Imul64RMI8, kX86Imul64RAI8,
  kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
  kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
  kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
  kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
  kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
  kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
  kX86Mov32MR, kX86Mov32AR, kX86Movnti32MR, kX86Movnti32AR, kX86Mov32TR,
  kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
  kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
  kX86Lea32RM,
  kX86Lea32RA,
  kX86Mov64MR, kX86Mov64AR, kX86Movnti64MR, kX86Movnti64AR, kX86Mov64TR,
  kX86Mov64RR, kX86Mov64RM, kX86Mov64RA, kX86Mov64RT,
  kX86Mov64RI32, kX86Mov64RI64, kX86Mov64MI, kX86Mov64AI, kX86Mov64TI,
  kX86Lea64RM,
  kX86Lea64RA,
  // RRC - Register Register ConditionCode - cond_opcode reg1, reg2
  //             - lir operands - 0: reg1, 1: reg2, 2: CC
  kX86Cmov32RRC,
  kX86Cmov64RRC,
  // RMC - Register Memory ConditionCode - cond_opcode reg1, [base + disp]
  //             - lir operands - 0: reg1, 1: base, 2: disp, 3: CC
  kX86Cmov32RMC,
  kX86Cmov64RMC,

  // RC - Register CL - opcode reg, CL
  //          - lir operands - 0: reg, 1: CL
  // MC - Memory CL   - opcode [base + disp], CL
  //          - lir operands - 0: base, 1: disp, 2: CL
  // AC - Array CL  - opcode [base + index * scale + disp], CL
  //          - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
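  // For example, kX86Sal32RC (generated by BinaryShiftOpCode below) is "sal reg, cl" with
  // lir operands 0: reg, 1: CL.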
#define BinaryShiftOpCode(opcode) \
  opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
  opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
  opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
  opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
  opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
  opcode ## 32RC, opcode ## 32MC, opcode ## 32AC, \
  opcode ## 64RI, opcode ## 64MI, opcode ## 64AI, \
  opcode ## 64RC, opcode ## 64MC, opcode ## 64AC
  BinaryShiftOpCode(kX86Rol),
  BinaryShiftOpCode(kX86Ror),
  BinaryShiftOpCode(kX86Rcl),
  BinaryShiftOpCode(kX86Rcr),
  BinaryShiftOpCode(kX86Sal),
  BinaryShiftOpCode(kX86Shr),
  BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpCode
  kX86Cmc,
  kX86Shld32RRI,
  kX86Shld32RRC,
  kX86Shld32MRI,
  kX86Shrd32RRI,
  kX86Shrd32RRC,
  kX86Shrd32MRI,
  kX86Shld64RRI,
  kX86Shld64MRI,
  kX86Shrd64RRI,
  kX86Shrd64MRI,
#define UnaryOpcode(opcode, reg, mem, array) \
  opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
  opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
  opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array, \
  opcode ## 64 ## reg, opcode ## 64 ## mem, opcode ## 64 ## array
  UnaryOpcode(kX86Test, RI, MI, AI),
  kX86Test32RR,
  kX86Test64RR,
  kX86Test32RM,
  UnaryOpcode(kX86Not, R, M, A),
  UnaryOpcode(kX86Neg, R, M, A),
  UnaryOpcode(kX86Mul,  DaR, DaM, DaA),
  UnaryOpcode(kX86Imul, DaR, DaM, DaA),
  UnaryOpcode(kX86Divmod,  DaR, DaM, DaA),
  UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
  kx86Cdq32Da,
  kx86Cqo64Da,
  kX86Bswap32R,
  kX86Bswap64R,
  kX86Push32R, kX86Pop32R,
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
  opcode ## RR, opcode ## RM, opcode ## RA
  Binary0fOpCode(kX86Movsd),
  kX86MovsdMR,
  kX86MovsdAR,
  Binary0fOpCode(kX86Movss),
  kX86MovssMR,
  kX86MovssAR,
  Binary0fOpCode(kX86Cvtsi2sd),  // int to double
  Binary0fOpCode(kX86Cvtsi2ss),  // int to float
  Binary0fOpCode(kX86Cvtsqi2sd),  // long to double
  Binary0fOpCode(kX86Cvtsqi2ss),  // long to float
  Binary0fOpCode(kX86Cvttsd2si),  // truncating double to int
  Binary0fOpCode(kX86Cvttss2si),  // truncating float to int
  Binary0fOpCode(kX86Cvttsd2sqi),  // truncating double to long
  Binary0fOpCode(kX86Cvttss2sqi),  // truncating float to long
  Binary0fOpCode(kX86Cvtsd2si),  // rounding double to int
  Binary0fOpCode(kX86Cvtss2si),  // rounding float to int
  Binary0fOpCode(kX86Ucomisd),  // unordered double compare
  Binary0fOpCode(kX86Ucomiss),  // unordered float compare
  Binary0fOpCode(kX86Comisd),   // double compare
  Binary0fOpCode(kX86Comiss),   // float compare
  Binary0fOpCode(kX86Orpd),     // double logical OR
  Binary0fOpCode(kX86Orps),     // float logical OR
  Binary0fOpCode(kX86Andpd),    // double logical AND
  Binary0fOpCode(kX86Andps),    // float logical AND
  Binary0fOpCode(kX86Xorpd),    // double logical XOR
  Binary0fOpCode(kX86Xorps),    // float logical XOR
  Binary0fOpCode(kX86Addsd),    // double ADD
  Binary0fOpCode(kX86Addss),    // float ADD
  Binary0fOpCode(kX86Mulsd),    // double multiply
  Binary0fOpCode(kX86Mulss),    // float multiply
  Binary0fOpCode(kX86Cvtsd2ss),  // double to float
  Binary0fOpCode(kX86Cvtss2sd),  // float to double
  Binary0fOpCode(kX86Subsd),    // double subtract
  Binary0fOpCode(kX86Subss),    // float subtract
  Binary0fOpCode(kX86Divsd),    // double divide
  Binary0fOpCode(kX86Divss),    // float divide
  Binary0fOpCode(kX86Punpcklbw),  // Interleave low-order bytes
  Binary0fOpCode(kX86Punpcklwd),  // Interleave low-order single words (16-bits)
  Binary0fOpCode(kX86Punpckldq),  // Interleave low-order double words (32-bit)
  Binary0fOpCode(kX86Punpcklqdq),  // Interleave low-order quad word
  Binary0fOpCode(kX86Sqrtsd),   // square root
  Binary0fOpCode(kX86Pmulld),   // parallel integer multiply 32 bits x 4
  Binary0fOpCode(kX86Pmullw),   // parallel integer multiply 16 bits x 8
  Binary0fOpCode(kX86Pmuludq),  // parallel unsigned multiply of 32-bit values, 64-bit results
  Binary0fOpCode(kX86Mulps),    // parallel FP multiply 32 bits x 4
  Binary0fOpCode(kX86Mulpd),    // parallel FP multiply 64 bits x 2
  Binary0fOpCode(kX86Paddb),    // parallel integer addition 8 bits x 16
  Binary0fOpCode(kX86Paddw),    // parallel integer addition 16 bits x 8
  Binary0fOpCode(kX86Paddd),    // parallel integer addition 32 bits x 4
  Binary0fOpCode(kX86Paddq),    // parallel integer addition 64 bits x 2
  Binary0fOpCode(kX86Psadbw),   // computes sum of absolute differences for unsigned byte integers
  Binary0fOpCode(kX86Addps),    // parallel FP addition 32 bits x 4
  Binary0fOpCode(kX86Addpd),    // parallel FP addition 64 bits x 2
  Binary0fOpCode(kX86Psubb),    // parallel integer subtraction 8 bits x 16
  Binary0fOpCode(kX86Psubw),    // parallel integer subtraction 16 bits x 8
  Binary0fOpCode(kX86Psubd),    // parallel integer subtraction 32 bits x 4
  Binary0fOpCode(kX86Psubq),    // parallel integer subtraction 64 bits x 2
  Binary0fOpCode(kX86Subps),    // parallel FP subtraction 32 bits x 4
  Binary0fOpCode(kX86Subpd),    // parallel FP subtraction 64 bits x 2
  Binary0fOpCode(kX86Pand),     // parallel AND 128 bits x 1
  Binary0fOpCode(kX86Por),      // parallel OR 128 bits x 1
  Binary0fOpCode(kX86Pxor),     // parallel XOR 128 bits x 1
  Binary0fOpCode(kX86Phaddw),   // parallel horizontal addition 16 bits x 8
  Binary0fOpCode(kX86Phaddd),   // parallel horizontal addition 32 bits x 4
  Binary0fOpCode(kX86Haddpd),   // parallel FP horizontal addition 64 bits x 2
  Binary0fOpCode(kX86Haddps),   // parallel FP horizontal addition 32 bits x 4
  kX86PextrbRRI,                // Extract 8 bits from XMM into GPR
  kX86PextrwRRI,                // Extract 16 bits from XMM into GPR
  kX86PextrdRRI,                // Extract 32 bits from XMM into GPR
  kX86PextrbMRI,                // Extract 8 bits from XMM into memory
  kX86PextrwMRI,                // Extract 16 bits from XMM into memory
  kX86PextrdMRI,                // Extract 32 bits from XMM into memory
  kX86PshuflwRRI,               // Shuffle 16 bits in lower 64 bits of XMM.
  kX86PshufdRRI,                // Shuffle 32 bits in XMM.
  kX86ShufpsRRI,                // FP Shuffle 32 bits in XMM.
  kX86ShufpdRRI,                // FP Shuffle 64 bits in XMM.
  kX86PsrawRI,                  // signed right shift of floating point registers 16 bits x 8
  kX86PsradRI,                  // signed right shift of floating point registers 32 bits x 4
  kX86PsrlwRI,                  // logical right shift of floating point registers 16 bits x 8
  kX86PsrldRI,                  // logical right shift of floating point registers 32 bits x 4
  kX86PsrlqRI,                  // logical right shift of floating point registers 64 bits x 2
  kX86PsrldqRI,                 // logical shift of 128-bit vector register, immediate in bytes
  kX86PsllwRI,                  // left shift of floating point registers 16 bits x 8
  kX86PslldRI,                  // left shift of floating point registers 32 bits x 4
  kX86PsllqRI,                  // left shift of floating point registers 64 bits x 2
  kX86Fild32M,                  // push 32-bit integer on x87 stack
  kX86Fild64M,                  // push 64-bit integer on x87 stack
  kX86Fld32M,                   // push float on x87 stack
  kX86Fld64M,                   // push double on x87 stack
  kX86Fstp32M,                  // pop top x87 fp stack and do 32-bit store
  kX86Fstp64M,                  // pop top x87 fp stack and do 64-bit store
  kX86Fst32M,                   // do 32-bit store
  kX86Fst64M,                   // do 64-bit store
  kX86Fprem,                    // remainder from dividing two floating point values
  kX86Fucompp,                  // compare floating point values and pop x87 fp stack twice
  kX86Fstsw16R,                 // store FPU status word
  Binary0fOpCode(kX86Movdqa),   // move 128 bits aligned
  kX86MovdqaMR, kX86MovdqaAR,   // store 128 bit aligned from xmm1 to m128
  Binary0fOpCode(kX86Movups),   // load unaligned packed single FP values from xmm2/m128 to xmm1
  kX86MovupsMR, kX86MovupsAR,   // store unaligned packed single FP values from xmm1 to m128
  Binary0fOpCode(kX86Movaps),   // load aligned packed single FP values from xmm2/m128 to xmm1
  kX86MovapsMR, kX86MovapsAR,   // store aligned packed single FP values from xmm1 to m128
  kX86MovlpsRM, kX86MovlpsRA,   // load packed single FP values from m64 to low quadword of xmm
  kX86MovlpsMR, kX86MovlpsAR,   // store packed single FP values from low quadword of xmm to m64
  kX86MovhpsRM, kX86MovhpsRA,   // load packed single FP values from m64 to high quadword of xmm
  kX86MovhpsMR, kX86MovhpsAR,   // store packed single FP values from high quadword of xmm to m64
  Binary0fOpCode(kX86Movdxr),   // move into xmm from gpr
  Binary0fOpCode(kX86Movqxr),   // move into xmm from 64 bit gpr
  kX86MovqrxRR, kX86MovqrxMR, kX86MovqrxAR,  // move into 64 bit reg from xmm
  kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
  kX86MovsxdRR, kX86MovsxdRM, kX86MovsxdRA,  // move 32 bit to 64 bit with sign extension
  kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
  kX86Lfence,                   // memory barrier to serialize all previous
                                // load-from-memory instructions
  kX86Mfence,                   // memory barrier to serialize all previous
                                // load-from-memory and store-to-memory instructions
  kX86Sfence,                   // memory barrier to serialize all previous
                                // store-to-memory instructions
  Binary0fOpCode(kX86Imul16),   // 16bit multiply
  Binary0fOpCode(kX86Imul32),   // 32bit multiply
  Binary0fOpCode(kX86Imul64),   // 64bit multiply
  kX86CmpxchgRR, kX86CmpxchgMR, kX86CmpxchgAR,  // compare and exchange
  kX86LockCmpxchgMR, kX86LockCmpxchgAR, kX86LockCmpxchg64AR,  // locked compare and exchange
  kX86LockCmpxchg64M, kX86LockCmpxchg64A,  // locked compare and exchange
  kX86XchgMR,  // exchange memory with register (automatically locked)
  Binary0fOpCode(kX86Movzx8),   // zero-extend 8-bit value
  Binary0fOpCode(kX86Movzx16),  // zero-extend 16-bit value
  Binary0fOpCode(kX86Movsx8),   // sign-extend 8-bit value
  Binary0fOpCode(kX86Movsx16),  // sign-extend 16-bit value
  Binary0fOpCode(kX86Movzx8q),   // zero-extend 8-bit value to quad word
  Binary0fOpCode(kX86Movzx16q),  // zero-extend 16-bit value to quad word
  Binary0fOpCode(kX86Movsx8q),   // sign-extend 8-bit value to quad word
  Binary0fOpCode(kX86Movsx16q),  // sign-extend 16-bit value to quad word
#undef Binary0fOpCode
  kX86Jcc8, kX86Jcc32,  // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
  kX86Jmp8, kX86Jmp32,  // jmp rel8/32; lir operands - 0: rel, target assigned
  kX86JmpR,             // jmp reg; lir operands - 0: reg
  kX86Jecxz8,           // jecxz rel8; jump relative if ECX is zero.
  kX86JmpT,             // jmp fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp

  kX86CallR,            // call reg; lir operands - 0: reg
  kX86CallM,            // call [base + disp]; lir operands - 0: base, 1: disp
  kX86CallA,            // call [base + index * scale + disp]
                        // lir operands - 0: base, 1: index, 2: scale, 3: disp
  kX86CallT,            // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
  kX86CallI,            // call <relative> - 0: disp; used for core.oat linking only
  kX86Ret,              // ret; no lir operands
  kX86PcRelLoadRA,      // mov reg, [base + index * scale + PC relative displacement]
                        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
  kX86PcRelAdr,         // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
  kX86RepneScasw,       // repne scasw
  kX86Last
};
std::ostream& operator<<(std::ostream& os, const X86OpCode& rhs);

/* Instruction assembly field_loc kind */
enum X86EncodingKind {
  kData,                                    // Special case for raw data.
  kNop,                                     // Special case for variable length nop.
  kNullary,                                 // Opcode that takes no arguments.
  kRegOpcode,                               // Shorter form of R instruction kind (opcode+rd).
  kReg, kMem, kArray,                       // R, M and A instruction kinds.
  kMemReg, kArrayReg, kThreadReg,           // MR, AR and TR instruction kinds.
  kRegReg, kRegMem, kRegArray, kRegThread,  // RR, RM, RA and RT instruction kinds.
  kRegRegStore,                             // RR following the store modrm reg-reg encoding rather than the load.
  kRegImm, kMemImm, kArrayImm, kThreadImm,  // RI, MI, AI and TI instruction kinds.
  kRegRegImm, kRegMemImm, kRegArrayImm,     // RRI, RMI and RAI instruction kinds.
  kMovRegImm,                               // Shorter form move RI.
  kMovRegQuadImm,                           // 64 bit move RI.
  kRegRegImmStore,                          // RRI following the store modrm reg-reg encoding rather than the load.
  kMemRegImm,                               // MRI instruction kinds.
  kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
  kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
  kShiftRegRegCl,
  // kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
  kRegCond, kMemCond, kArrayCond,          // R, M, A instruction kinds followed by a condition.
  kRegRegCond,                             // RR instruction kind followed by a condition.
  kRegMemCond,                             // RM instruction kind followed by a condition.
  kJmp, kJcc, kCall,                       // Branch instruction kinds.
  kPcRel,                                  // Operation with displacement that is PC relative.
  kUnimplemented                           // Encoding used when an instruction isn't yet implemented.
};

/* Struct used to define the EncodingMap positions for each X86 opcode */
struct X86EncodingMap {
  X86OpCode opcode;      // e.g. kOpAddRI
  // The broad category the instruction conforms to, such as kRegReg. Identifies which LIR operands
  // hold meaning for the opcode.
  X86EncodingKind kind;
  uint64_t flags;
  struct {
    uint8_t prefix1;        // Non-zero => a prefix byte.
    uint8_t prefix2;        // Non-zero => a second prefix byte.
    uint8_t opcode;         // 1 byte opcode.
    uint8_t extra_opcode1;  // Possible extra opcode byte.
    uint8_t extra_opcode2;  // Possible second extra opcode byte.
    // 3-bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
    // encoding kind.
    uint8_t modrm_opcode;
    uint8_t ax_opcode;        // Non-zero => shorter encoding for AX as a destination.
    uint8_t immediate_bytes;  // Number of bytes of immediate.
    // Does the instruction address a byte register? In 32-bit mode the registers ah, bh, ch and dh
    // are not used. In 64-bit mode the REX prefix is used to normalize and allow any byte register
    // to be addressed.
    bool r8_form;
  } skeleton;
  const char* name;
  const char* fmt;
};
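
// Example of what the skeleton fields capture: a 32-bit ADD with an immediate uses opcode 0x81
// with modrm_opcode 0 (the /0 extension), has the shorter ax_opcode 0x05 when the destination is
// EAX, and takes 4 immediate_bytes.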


// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0

// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4

// Segment override instruction prefixes used for quick TLS access to Thread::Current():
// fs: on x86, gs: on x86-64/x32.
#define THREAD_PREFIX 0x64
#define THREAD_PREFIX_GS 0x65

// 64 Bit Operand Size
#define REX_W 0x48
// Extension of the ModR/M reg field
#define REX_R 0x44
// Extension of the SIB index field
#define REX_X 0x42
// Extension of the ModR/M r/m field, SIB base field, or Opcode reg field
#define REX_B 0x41
// An empty REX prefix used to normalize the byte operations so that they apply to R4 through R15
#define REX 0x40
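// The REX bits combine by ORing, e.g. REX_W | REX_R = 0x4C; "add rax, r8" is encoded as 4C 01 C0,
// where REX.W selects the 64-bit operand size and REX.R extends the ModR/M reg field to reach r8.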
// Mask extracting the least 3 bits of r0..r15
#define kRegNumMask32 0x07
// Value indicating that base or reg is not used
#define NO_REG 0

#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
#define IS_SIMM32(v) ((INT64_C(-2147483648) <= (v)) && ((v) <= INT64_C(2147483647)))
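// These predicates let the assembler pick the shortest legal encoding, e.g. the *RI8 opcode
// variants above are only usable when IS_SIMM8(immediate) holds.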

extern X86EncodingMap EncodingMap[kX86Last];
extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);

}  // namespace art

#endif  // ART_COMPILER_DEX_QUICK_X86_X86_LIR_H_