1 /*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "codegen_arm64.h"
18
19 #include "arm64_lir.h"
20 #include "base/logging.h"
21 #include "dex/mir_graph.h"
22 #include "dex/quick/mir_to_lir-inl.h"
23
24 namespace art {
25
// Generates code for a 32-bit float arithmetic dex instruction.
// add/sub/div/mul map to a single A64 FP instruction; rem is lowered to a
// runtime call (fmodf) and neg is delegated to GenNegFloat.
void Arm64Mir2Lir::GenArithOpFloat(Instruction::Code opcode, RegLocation rl_dest,
                                   RegLocation rl_src1, RegLocation rl_src2) {
  int op = kA64Brk1d;  // Sentinel opcode; every reachable path below overwrites it.
  RegLocation rl_result;

  switch (opcode) {
    case Instruction::ADD_FLOAT_2ADDR:
    case Instruction::ADD_FLOAT:
      op = kA64Fadd3fff;
      break;
    case Instruction::SUB_FLOAT_2ADDR:
    case Instruction::SUB_FLOAT:
      op = kA64Fsub3fff;
      break;
    case Instruction::DIV_FLOAT_2ADDR:
    case Instruction::DIV_FLOAT:
      op = kA64Fdiv3fff;
      break;
    case Instruction::MUL_FLOAT_2ADDR:
    case Instruction::MUL_FLOAT:
      op = kA64Fmul3fff;
      break;
    case Instruction::REM_FLOAT_2ADDR:
    case Instruction::REM_FLOAT:
      // No single-instruction FP remainder on A64: call the fmodf runtime helper.
      FlushAllRegs();   // Send everything to home location
      CallRuntimeHelperRegLocationRegLocation(kQuickFmodf, rl_src1, rl_src2, false);
      rl_result = GetReturn(kFPReg);
      StoreValue(rl_dest, rl_result);
      return;
    case Instruction::NEG_FLOAT:
      GenNegFloat(rl_dest, rl_src1);
      return;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  // Common path: both operands in FP registers, one three-operand FP instruction.
  rl_src1 = LoadValue(rl_src1, kFPReg);
  rl_src2 = LoadValue(rl_src2, kFPReg);
  rl_result = EvalLoc(rl_dest, kFPReg, true);
  NewLIR3(op, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  StoreValue(rl_dest, rl_result);
}
67
// Generates code for a 64-bit double arithmetic dex instruction.
// add/sub/div/mul map to the WIDE form of a single A64 FP instruction; rem is
// lowered to a runtime call (fmod) and neg is delegated to GenNegDouble.
void Arm64Mir2Lir::GenArithOpDouble(Instruction::Code opcode,
                                    RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
  int op = kA64Brk1d;  // Sentinel opcode; every reachable path below overwrites it.
  RegLocation rl_result;

  switch (opcode) {
    case Instruction::ADD_DOUBLE_2ADDR:
    case Instruction::ADD_DOUBLE:
      op = kA64Fadd3fff;
      break;
    case Instruction::SUB_DOUBLE_2ADDR:
    case Instruction::SUB_DOUBLE:
      op = kA64Fsub3fff;
      break;
    case Instruction::DIV_DOUBLE_2ADDR:
    case Instruction::DIV_DOUBLE:
      op = kA64Fdiv3fff;
      break;
    case Instruction::MUL_DOUBLE_2ADDR:
    case Instruction::MUL_DOUBLE:
      op = kA64Fmul3fff;
      break;
    case Instruction::REM_DOUBLE_2ADDR:
    case Instruction::REM_DOUBLE:
      FlushAllRegs();   // Send everything to home location
      {
        // Hand-rolled helper invocation: the wide arguments are placed
        // directly in d0/d1 before the call, rather than going through the
        // generic CallRuntimeHelper argument-marshalling path.
        RegStorage r_tgt = CallHelperSetup(kQuickFmod);
        LoadValueDirectWideFixed(rl_src1, rs_d0);
        LoadValueDirectWideFixed(rl_src2, rs_d1);
        ClobberCallerSave();
        CallHelper(r_tgt, kQuickFmod, false);
      }
      rl_result = GetReturnWide(kFPReg);
      StoreValueWide(rl_dest, rl_result);
      return;
    case Instruction::NEG_DOUBLE:
      GenNegDouble(rl_dest, rl_src1);
      return;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }

  // Common path: both wide operands in FP registers, one WIDE FP instruction.
  rl_src1 = LoadValueWide(rl_src1, kFPReg);
  DCHECK(rl_src1.wide);
  rl_src2 = LoadValueWide(rl_src2, kFPReg);
  DCHECK(rl_src2.wide);
  rl_result = EvalLoc(rl_dest, kFPReg, true);
  DCHECK(rl_dest.wide);
  DCHECK(rl_result.wide);
  NewLIR3(WIDE(op), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  StoreValueWide(rl_dest, rl_result);
}
120
GenMultiplyByConstantFloat(RegLocation rl_dest,RegLocation rl_src1,int32_t constant)121 void Arm64Mir2Lir::GenMultiplyByConstantFloat(RegLocation rl_dest, RegLocation rl_src1,
122 int32_t constant) {
123 RegLocation rl_result;
124 RegStorage r_tmp = AllocTempSingle();
125 LoadConstantNoClobber(r_tmp, constant);
126 rl_src1 = LoadValue(rl_src1, kFPReg);
127 rl_result = EvalLoc(rl_dest, kFPReg, true);
128 NewLIR3(kA64Fmul3fff, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
129 StoreValue(rl_dest, rl_result);
130 }
131
GenMultiplyByConstantDouble(RegLocation rl_dest,RegLocation rl_src1,int64_t constant)132 void Arm64Mir2Lir::GenMultiplyByConstantDouble(RegLocation rl_dest, RegLocation rl_src1,
133 int64_t constant) {
134 RegLocation rl_result;
135 RegStorage r_tmp = AllocTempDouble();
136 DCHECK(r_tmp.IsDouble());
137 LoadConstantWide(r_tmp, constant);
138 rl_src1 = LoadValueWide(rl_src1, kFPReg);
139 DCHECK(rl_src1.wide);
140 rl_result = EvalLocWide(rl_dest, kFPReg, true);
141 DCHECK(rl_dest.wide);
142 DCHECK(rl_result.wide);
143 NewLIR3(WIDE(kA64Fmul3fff), rl_result.reg.GetReg(), rl_src1.reg.GetReg(), r_tmp.GetReg());
144 StoreValueWide(rl_dest, rl_result);
145 }
146
// Generates code for a primitive conversion dex instruction involving at
// least one FP type. Each opcode maps to exactly one A64 instruction
// (scvtf / fcvtzs / fcvt) plus the source/destination register classes.
void Arm64Mir2Lir::GenConversion(Instruction::Code opcode,
                                 RegLocation rl_dest, RegLocation rl_src) {
  int op = kA64Brk1d;  // Sentinel; DCHECK'd below to have been replaced.
  RegLocation rl_result;
  RegisterClass src_reg_class = kInvalidRegClass;
  RegisterClass dst_reg_class = kInvalidRegClass;

  switch (opcode) {
    case Instruction::INT_TO_FLOAT:
      op = kA64Scvtf2fw;
      src_reg_class = kCoreReg;
      dst_reg_class = kFPReg;
      break;
    case Instruction::FLOAT_TO_INT:
      op = kA64Fcvtzs2wf;  // Convert toward zero, per Java float->int semantics.
      src_reg_class = kFPReg;
      dst_reg_class = kCoreReg;
      break;
    case Instruction::DOUBLE_TO_FLOAT:
      op = kA64Fcvt2sS;
      src_reg_class = kFPReg;
      dst_reg_class = kFPReg;
      break;
    case Instruction::FLOAT_TO_DOUBLE:
      op = kA64Fcvt2Ss;
      src_reg_class = kFPReg;
      dst_reg_class = kFPReg;
      break;
    case Instruction::INT_TO_DOUBLE:
      op = WIDE(kA64Scvtf2fw);
      src_reg_class = kCoreReg;
      dst_reg_class = kFPReg;
      break;
    case Instruction::DOUBLE_TO_INT:
      op = WIDE(kA64Fcvtzs2wf);
      src_reg_class = kFPReg;
      dst_reg_class = kCoreReg;
      break;
    case Instruction::LONG_TO_DOUBLE:
      op = WIDE(kA64Scvtf2fx);
      src_reg_class = kCoreReg;
      dst_reg_class = kFPReg;
      break;
    case Instruction::FLOAT_TO_LONG:
      op = kA64Fcvtzs2xf;
      src_reg_class = kFPReg;
      dst_reg_class = kCoreReg;
      break;
    case Instruction::LONG_TO_FLOAT:
      op = kA64Scvtf2fx;
      src_reg_class = kCoreReg;
      dst_reg_class = kFPReg;
      break;
    case Instruction::DOUBLE_TO_LONG:
      op = WIDE(kA64Fcvtzs2xf);
      src_reg_class = kFPReg;
      dst_reg_class = kCoreReg;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }

  DCHECK_NE(src_reg_class, kInvalidRegClass);
  DCHECK_NE(dst_reg_class, kInvalidRegClass);
  DCHECK_NE(op, kA64Brk1d);

  // Source width follows the dalvik value, independent of the target width.
  if (rl_src.wide) {
    rl_src = LoadValueWide(rl_src, src_reg_class);
  } else {
    rl_src = LoadValue(rl_src, src_reg_class);
  }

  rl_result = EvalLoc(rl_dest, dst_reg_class, true);
  NewLIR2(op, rl_result.reg.GetReg(), rl_src.reg.GetReg());

  if (rl_dest.wide) {
    StoreValueWide(rl_dest, rl_result);
  } else {
    StoreValue(rl_dest, rl_result);
  }
}
228
// Generates a fused FP compare-and-branch: an fcmp followed by a conditional
// branch to the taken block. |gt_bias| distinguishes cmpg (NaN compares as
// greater) from cmpl (NaN compares as less) in the original dex pair.
void Arm64Mir2Lir::GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias,
                                       bool is_double) {
  LIR* target = &block_label_list_[bb->taken];
  RegLocation rl_src1;
  RegLocation rl_src2;
  if (is_double) {
    rl_src1 = mir_graph_->GetSrcWide(mir, 0);
    rl_src2 = mir_graph_->GetSrcWide(mir, 2);
    rl_src1 = LoadValueWide(rl_src1, kFPReg);
    rl_src2 = LoadValueWide(rl_src2, kFPReg);
    NewLIR2(WIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  } else {
    rl_src1 = mir_graph_->GetSrc(mir, 0);
    rl_src2 = mir_graph_->GetSrc(mir, 1);
    rl_src1 = LoadValue(rl_src1, kFPReg);
    rl_src2 = LoadValue(rl_src2, kFPReg);
    NewLIR2(kA64Fcmp2ff, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  }
  ConditionCode ccode = mir->meta.ccode;
  // With gt_bias, each signed condition is swapped for an alternate condition
  // (mi/ls/hi/uge) so that the unordered (NaN) fcmp outcome takes the branch
  // direction the cmpg variant requires. NOTE(review): relies on the A64
  // NZCV encoding of unordered results — confirm against the ARM ARM.
  switch (ccode) {
    case kCondEq:
    case kCondNe:
      break;  // eq/ne need no adjustment.
    case kCondLt:
      if (gt_bias) {
        ccode = kCondMi;
      }
      break;
    case kCondLe:
      if (gt_bias) {
        ccode = kCondLs;
      }
      break;
    case kCondGt:
      if (gt_bias) {
        ccode = kCondHi;
      }
      break;
    case kCondGe:
      if (gt_bias) {
        ccode = kCondUge;
      }
      break;
    default:
      LOG(FATAL) << "Unexpected ccode: " << ccode;
  }
  OpCondBranch(ccode, target);
}
277
278
// Generates code for the cmpl/cmpg float/double dex instructions, producing
// -1, 0, or 1 in a core register. |default_result| is the value produced for
// an unordered (NaN) comparison: -1 for cmpl, +1 for cmpg.
void Arm64Mir2Lir::GenCmpFP(Instruction::Code opcode, RegLocation rl_dest,
                            RegLocation rl_src1, RegLocation rl_src2) {
  bool is_double = false;
  int default_result = -1;
  RegLocation rl_result;

  switch (opcode) {
    case Instruction::CMPL_FLOAT:
      is_double = false;
      default_result = -1;
      break;
    case Instruction::CMPG_FLOAT:
      is_double = false;
      default_result = 1;
      break;
    case Instruction::CMPL_DOUBLE:
      is_double = true;
      default_result = -1;
      break;
    case Instruction::CMPG_DOUBLE:
      is_double = true;
      default_result = 1;
      break;
    default:
      LOG(FATAL) << "Unexpected opcode: " << opcode;
  }
  if (is_double) {
    rl_src1 = LoadValueWide(rl_src1, kFPReg);
    rl_src2 = LoadValueWide(rl_src2, kFPReg);
    // In case result vreg is also a src vreg, break association to avoid useless copy by EvalLoc()
    ClobberSReg(rl_dest.s_reg_low);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Seed the result with the NaN outcome before the flags are consumed.
    LoadConstant(rl_result.reg, default_result);
    NewLIR2(WIDE(kA64Fcmp2ff), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  } else {
    rl_src1 = LoadValue(rl_src1, kFPReg);
    rl_src2 = LoadValue(rl_src2, kFPReg);
    // In case result vreg is also a srcvreg, break association to avoid useless copy by EvalLoc()
    ClobberSReg(rl_dest.s_reg_low);
    rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Seed the result with the NaN outcome before the flags are consumed.
    LoadConstant(rl_result.reg, default_result);
    NewLIR2(kA64Fcmp2ff, rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
  }
  DCHECK(!rl_result.reg.IsFloat());

  // TODO(Arm64): should we rather do this?
  // csinc wD, wzr, wzr, eq
  // csneg wD, wD, wD, le
  // (which requires 2 instructions rather than 3)

  // Rd = if cond then Rd else -Rd.
  // csneg flips the seeded default to the opposite sign when the ordered
  // comparison went the other way; csel then forces 0 on equality.
  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_result.reg.GetReg(),
          rl_result.reg.GetReg(), (default_result == 1) ? kArmCondPl : kArmCondLe);
  NewLIR4(kA64Csel4rrrc, rl_result.reg.GetReg(), rwzr, rl_result.reg.GetReg(),
          kArmCondEq);
  StoreValue(rl_dest, rl_result);
}
336
GenNegFloat(RegLocation rl_dest,RegLocation rl_src)337 void Arm64Mir2Lir::GenNegFloat(RegLocation rl_dest, RegLocation rl_src) {
338 RegLocation rl_result;
339 rl_src = LoadValue(rl_src, kFPReg);
340 rl_result = EvalLoc(rl_dest, kFPReg, true);
341 NewLIR2(kA64Fneg2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
342 StoreValue(rl_dest, rl_result);
343 }
344
GenNegDouble(RegLocation rl_dest,RegLocation rl_src)345 void Arm64Mir2Lir::GenNegDouble(RegLocation rl_dest, RegLocation rl_src) {
346 RegLocation rl_result;
347 rl_src = LoadValueWide(rl_src, kFPReg);
348 rl_result = EvalLoc(rl_dest, kFPReg, true);
349 NewLIR2(WIDE(kA64Fneg2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
350 StoreValueWide(rl_dest, rl_result);
351 }
352
RegClassForAbsFP(RegLocation rl_src,RegLocation rl_dest)353 static RegisterClass RegClassForAbsFP(RegLocation rl_src, RegLocation rl_dest) {
354 // If src is in a core reg or, unlikely, dest has been promoted to a core reg, use core reg.
355 if ((rl_src.location == kLocPhysReg && !rl_src.reg.IsFloat()) ||
356 (rl_dest.location == kLocPhysReg && !rl_dest.reg.IsFloat())) {
357 return kCoreReg;
358 }
359 // If src is in an fp reg or dest has been promoted to an fp reg, use fp reg.
360 if (rl_src.location == kLocPhysReg || rl_dest.location == kLocPhysReg) {
361 return kFPReg;
362 }
363 // With both src and dest in the stack frame we have to perform load+abs+store. Whether this
364 // is faster using a core reg or fp reg depends on the particular CPU. For example, on A53
365 // it's faster using core reg while on A57 it's faster with fp reg, the difference being
366 // bigger on the A53. Without further investigation and testing we prefer core register.
367 // (If the result is subsequently used in another fp operation, the dalvik reg will probably
368 // get promoted and that should be handled by the cases above.)
369 return kCoreReg;
370 }
371
GenInlinedAbsFloat(CallInfo * info)372 bool Arm64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
373 if (info->result.location == kLocInvalid) {
374 return true; // Result is unused: inlining successful, no code generated.
375 }
376 RegLocation rl_dest = info->result;
377 RegLocation rl_src = UpdateLoc(info->args[0]);
378 RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
379 rl_src = LoadValue(rl_src, reg_class);
380 RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
381 if (reg_class == kFPReg) {
382 NewLIR2(kA64Fabs2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
383 } else {
384 // Clear the sign bit in an integer register.
385 OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
386 }
387 StoreValue(rl_dest, rl_result);
388 return true;
389 }
390
GenInlinedAbsDouble(CallInfo * info)391 bool Arm64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
392 if (info->result.location == kLocInvalid) {
393 return true; // Result is unused: inlining successful, no code generated.
394 }
395 RegLocation rl_dest = info->result;
396 RegLocation rl_src = UpdateLocWide(info->args[0]);
397 RegisterClass reg_class = RegClassForAbsFP(rl_src, rl_dest);
398 rl_src = LoadValueWide(rl_src, reg_class);
399 RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
400 if (reg_class == kFPReg) {
401 NewLIR2(WIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
402 } else {
403 // Clear the sign bit in an integer register.
404 OpRegRegImm64(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffffffffffff);
405 }
406 StoreValueWide(rl_dest, rl_result);
407 return true;
408 }
409
GenInlinedSqrt(CallInfo * info)410 bool Arm64Mir2Lir::GenInlinedSqrt(CallInfo* info) {
411 RegLocation rl_src = info->args[0];
412 RegLocation rl_dest = InlineTargetWide(info); // double place for result
413 rl_src = LoadValueWide(rl_src, kFPReg);
414 RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
415 NewLIR2(WIDE(kA64Fsqrt2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
416 StoreValueWide(rl_dest, rl_result);
417 return true;
418 }
419
GenInlinedCeil(CallInfo * info)420 bool Arm64Mir2Lir::GenInlinedCeil(CallInfo* info) {
421 RegLocation rl_src = info->args[0];
422 RegLocation rl_dest = InlineTargetWide(info);
423 rl_src = LoadValueWide(rl_src, kFPReg);
424 RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
425 NewLIR2(WIDE(kA64Frintp2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
426 StoreValueWide(rl_dest, rl_result);
427 return true;
428 }
429
GenInlinedFloor(CallInfo * info)430 bool Arm64Mir2Lir::GenInlinedFloor(CallInfo* info) {
431 RegLocation rl_src = info->args[0];
432 RegLocation rl_dest = InlineTargetWide(info);
433 rl_src = LoadValueWide(rl_src, kFPReg);
434 RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
435 NewLIR2(WIDE(kA64Frintm2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
436 StoreValueWide(rl_dest, rl_result);
437 return true;
438 }
439
GenInlinedRint(CallInfo * info)440 bool Arm64Mir2Lir::GenInlinedRint(CallInfo* info) {
441 RegLocation rl_src = info->args[0];
442 RegLocation rl_dest = InlineTargetWide(info);
443 rl_src = LoadValueWide(rl_src, kFPReg);
444 RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
445 NewLIR2(WIDE(kA64Frintn2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
446 StoreValueWide(rl_dest, rl_result);
447 return true;
448 }
449
GenInlinedRound(CallInfo * info,bool is_double)450 bool Arm64Mir2Lir::GenInlinedRound(CallInfo* info, bool is_double) {
451 int32_t encoded_imm = EncodeImmSingle(bit_cast<uint32_t, float>(0.5f));
452 A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
453 RegLocation rl_src = info->args[0];
454 RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
455 rl_src = (is_double) ? LoadValueWide(rl_src, kFPReg) : LoadValue(rl_src, kFPReg);
456 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
457 RegStorage r_imm_point5 = (is_double) ? AllocTempDouble() : AllocTempSingle();
458 RegStorage r_tmp = (is_double) ? AllocTempDouble() : AllocTempSingle();
459 // 0.5f and 0.5d are encoded in the same way.
460 NewLIR2(kA64Fmov2fI | wide, r_imm_point5.GetReg(), encoded_imm);
461 NewLIR3(kA64Fadd3fff | wide, r_tmp.GetReg(), rl_src.reg.GetReg(), r_imm_point5.GetReg());
462 NewLIR2((is_double) ? kA64Fcvtms2xS : kA64Fcvtms2ws, rl_result.reg.GetReg(), r_tmp.GetReg());
463 (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
464 return true;
465 }
466
GenInlinedMinMaxFP(CallInfo * info,bool is_min,bool is_double)467 bool Arm64Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) {
468 DCHECK_EQ(cu_->instruction_set, kArm64);
469 int op = (is_min) ? kA64Fmin3fff : kA64Fmax3fff;
470 A64Opcode wide = (is_double) ? WIDE(0) : UNWIDE(0);
471 RegLocation rl_src1 = info->args[0];
472 RegLocation rl_src2 = (is_double) ? info->args[2] : info->args[1];
473 rl_src1 = (is_double) ? LoadValueWide(rl_src1, kFPReg) : LoadValue(rl_src1, kFPReg);
474 rl_src2 = (is_double) ? LoadValueWide(rl_src2, kFPReg) : LoadValue(rl_src2, kFPReg);
475 RegLocation rl_dest = (is_double) ? InlineTargetWide(info) : InlineTarget(info);
476 RegLocation rl_result = EvalLoc(rl_dest, kFPReg, true);
477 NewLIR3(op | wide, rl_result.reg.GetReg(), rl_src1.reg.GetReg(), rl_src2.reg.GetReg());
478 (is_double) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
479 return true;
480 }
481
482 } // namespace art
483