/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "intrinsics_x86.h"

#include <limits>

#include "arch/x86/instruction_set_features_x86.h"
#include "art_method.h"
#include "code_generator_x86.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "intrinsics.h"
#include "mirror/array-inl.h"
#include "mirror/string.h"
#include "thread.h"
#include "utils/x86/assembler_x86.h"
#include "utils/x86/constants_x86.h"

namespace art {

namespace x86 {

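// Canonical quiet-NaN bit patterns: kDoubleNaNHigh:kDoubleNaNLow form the 64-bit
// Double.NaN (0x7FF8000000000000) and kFloatNaN is the 32-bit Float.NaN (0x7FC00000).
// They are used below when min/max must produce NaN.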
static constexpr int kDoubleNaNHigh = 0x7FF80000;
static constexpr int kDoubleNaNLow = 0x00000000;
static constexpr int kFloatNaN = 0x7FC00000;

IntrinsicLocationsBuilderX86::IntrinsicLocationsBuilderX86(CodeGeneratorX86* codegen)
  : arena_(codegen->GetGraph()->GetArena()), codegen_(codegen) {
}


X86Assembler* IntrinsicCodeGeneratorX86::GetAssembler() {
  return reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
}

ArenaAllocator* IntrinsicCodeGeneratorX86::GetAllocator() {
  return codegen_->GetGraph()->GetArena();
}

bool IntrinsicLocationsBuilderX86::TryDispatch(HInvoke* invoke) {
  Dispatch(invoke);
  LocationSummary* res = invoke->GetLocations();
  return res != nullptr && res->Intrinsified();
}

#define __ reinterpret_cast<X86Assembler*>(codegen->GetAssembler())->

// TODO: target as memory.
static void MoveFromReturnRegister(Location target,
                                   Primitive::Type type,
                                   CodeGeneratorX86* codegen) {
  if (!target.IsValid()) {
    DCHECK(type == Primitive::kPrimVoid);
    return;
  }

  switch (type) {
    case Primitive::kPrimBoolean:
    case Primitive::kPrimByte:
    case Primitive::kPrimChar:
    case Primitive::kPrimShort:
    case Primitive::kPrimInt:
    case Primitive::kPrimNot: {
      Register target_reg = target.AsRegister<Register>();
      if (target_reg != EAX) {
        __ movl(target_reg, EAX);
      }
      break;
    }
    case Primitive::kPrimLong: {
      Register target_reg_lo = target.AsRegisterPairLow<Register>();
      Register target_reg_hi = target.AsRegisterPairHigh<Register>();
      if (target_reg_lo != EAX) {
        __ movl(target_reg_lo, EAX);
      }
      if (target_reg_hi != EDX) {
        __ movl(target_reg_hi, EDX);
      }
      break;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Unexpected void type for valid location " << target;
      UNREACHABLE();

    case Primitive::kPrimDouble: {
      XmmRegister target_reg = target.AsFpuRegister<XmmRegister>();
      if (target_reg != XMM0) {
        __ movsd(target_reg, XMM0);
      }
      break;
    }
    case Primitive::kPrimFloat: {
      XmmRegister target_reg = target.AsFpuRegister<XmmRegister>();
      if (target_reg != XMM0) {
        __ movss(target_reg, XMM0);
      }
      break;
    }
  }
}

static void MoveArguments(HInvoke* invoke, CodeGeneratorX86* codegen) {
  InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}

// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
// call. This will copy the arguments into the positions for a regular call.
//
// Note: The actual parameters are required to be in the locations given by the invoke's location
//       summary. If an intrinsic modifies those locations before a slowpath call, they must be
//       restored!
class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
 public:
  explicit IntrinsicSlowPathX86(HInvoke* invoke)
    : invoke_(invoke) { }

  void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
    CodeGeneratorX86* codegen = down_cast<CodeGeneratorX86*>(codegen_in);
    __ Bind(GetEntryLabel());

    SaveLiveRegisters(codegen, invoke_->GetLocations());

    MoveArguments(invoke_, codegen);

    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
      RecordPcInfo(codegen, invoke_, invoke_->GetDexPc());
    } else {
      UNIMPLEMENTED(FATAL) << "Non-direct intrinsic slow-path not yet implemented";
      UNREACHABLE();
    }

    // Copy the result back to the expected output.
    Location out = invoke_->GetLocations()->Out();
    if (out.IsValid()) {
      DCHECK(out.IsRegister());  // TODO: Replace this when we support output in memory.
      DCHECK(!invoke_->GetLocations()->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
      MoveFromReturnRegister(out, invoke_->GetType(), codegen);
    }

    RestoreLiveRegisters(codegen, invoke_->GetLocations());
    __ jmp(GetExitLabel());
  }

 private:
  // The instruction where this slow path is happening.
  HInvoke* const invoke_;

  DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathX86);
};

#undef __
#define __ assembler->

static void CreateFPToIntLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresRegister());
  if (is64bit) {
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

static void CreateIntToFPLocations(ArenaAllocator* arena, HInvoke* invoke, bool is64bit) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresFpuRegister());
  if (is64bit) {
    locations->AddTemp(Location::RequiresFpuRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

static void MoveFPToInt(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  if (is64bit) {
    // Need to use the temporary.
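    // For example, with raw double bits 0xHHHHHHHHLLLLLLLL: the first movd extracts the
    // low word LLLLLLLL, and psrlq shifts the high word down so a second movd can
    // extract HHHHHHHH.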
    XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
    __ movsd(temp, input.AsFpuRegister<XmmRegister>());
    __ movd(output.AsRegisterPairLow<Register>(), temp);
    __ psrlq(temp, Immediate(32));
    __ movd(output.AsRegisterPairHigh<Register>(), temp);
  } else {
    __ movd(output.AsRegister<Register>(), input.AsFpuRegister<XmmRegister>());
  }
}

static void MoveIntToFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
  Location input = locations->InAt(0);
  Location output = locations->Out();
  if (is64bit) {
    // Need to use the temporary.
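    // punpckldq interleaves the low doublewords of its two operands, so after the two
    // movd transfers below, temp1 holds high:low, i.e., the reassembled 64-bit value.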
    XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
    XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
    __ movd(temp1, input.AsRegisterPairLow<Register>());
    __ movd(temp2, input.AsRegisterPairHigh<Register>());
    __ punpckldq(temp1, temp2);
    __ movsd(output.AsFpuRegister<XmmRegister>(), temp1);
  } else {
    __ movd(output.AsFpuRegister<XmmRegister>(), input.AsRegister<Register>());
  }
}

void IntrinsicLocationsBuilderX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke, true);
}
void IntrinsicLocationsBuilderX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke, true);
}

void IntrinsicCodeGeneratorX86::VisitDoubleDoubleToRawLongBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitDoubleLongBitsToDouble(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  CreateFPToIntLocations(arena_, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  CreateIntToFPLocations(arena_, invoke, false);
}

void IntrinsicCodeGeneratorX86::VisitFloatFloatToRawIntBits(HInvoke* invoke) {
  MoveFPToInt(invoke->GetLocations(), false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitFloatIntBitsToFloat(HInvoke* invoke) {
  MoveIntToFP(invoke->GetLocations(), false, GetAssembler());
}

static void CreateIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

static void CreateLongToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister());
}

static void CreateLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
}

static void GenReverseBytes(LocationSummary* locations,
                            Primitive::Type size,
                            X86Assembler* assembler) {
  Register out = locations->Out().AsRegister<Register>();

  switch (size) {
    case Primitive::kPrimShort:
      // TODO: Can be done with an xchg of 8b registers. This is straight from Quick.
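      // For example, a short 0x1234 (sitting in the low half of `out`) becomes
      // 0x34120000 after bswapl; the arithmetic shift then yields the sign-extended
      // result 0x3412.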
      __ bswapl(out);
      __ sarl(out, Immediate(16));
      break;
    case Primitive::kPrimInt:
      __ bswapl(out);
      break;
    default:
      LOG(FATAL) << "Unexpected size for reverse-bytes: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86::VisitIntegerReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitIntegerReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitLongReverseBytes(HInvoke* invoke) {
  CreateLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitLongReverseBytes(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  Location input = locations->InAt(0);
  Register input_lo = input.AsRegisterPairLow<Register>();
  Register input_hi = input.AsRegisterPairHigh<Register>();
  Location output = locations->Out();
  Register output_lo = output.AsRegisterPairLow<Register>();
  Register output_hi = output.AsRegisterPairHigh<Register>();

  X86Assembler* assembler = GetAssembler();
  // Assign the inputs to the outputs, mixing low/high.
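  // Swapping the 32-bit halves and byte-swapping each half reverses all eight bytes,
  // e.g., 0x0102030405060708 becomes 0x0807060504030201.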
  __ movl(output_lo, input_hi);
  __ movl(output_hi, input_lo);
  __ bswapl(output_lo);
  __ bswapl(output_hi);
}

void IntrinsicLocationsBuilderX86::VisitShortReverseBytes(HInvoke* invoke) {
  CreateIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitShortReverseBytes(HInvoke* invoke) {
  GenReverseBytes(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}


// TODO: Consider Quick's way of doing Double abs through integer operations, as the immediate we
//       need is 64b.

static void CreateFloatToFloat(ArenaAllocator* arena, HInvoke* invoke) {
  // TODO: Enable memory operations when the assembler supports them.
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  // TODO: Allow x86 to work with memory. This requires assembler support, see below.
  // locations->SetInAt(0, Location::Any());               // X86 can work on memory directly.
  locations->SetOut(Location::SameAsFirstInput());
}

static void MathAbsFP(LocationSummary* locations, bool is64bit, X86Assembler* assembler) {
  Location output = locations->Out();

  if (output.IsFpuRegister()) {
    // Create the right constant on an aligned stack.
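    // The constant is a mask with every bit except the sign bit set (0x7FFFFFFFFFFFFFFF
    // for doubles, 0x7FFFFFFF for floats), so the and below computes |x| bitwise. The
    // subl plus the pushes adjust ESP by 16 bytes in total, presumably to keep the
    // 128-bit memory operand of andpd/andps 16-byte aligned.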
    if (is64bit) {
      __ subl(ESP, Immediate(8));
      __ pushl(Immediate(0x7FFFFFFF));
      __ pushl(Immediate(0xFFFFFFFF));
      __ andpd(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
    } else {
      __ subl(ESP, Immediate(12));
      __ pushl(Immediate(0x7FFFFFFF));
      __ andps(output.AsFpuRegister<XmmRegister>(), Address(ESP, 0));
    }
    __ addl(ESP, Immediate(16));
  } else {
    // TODO: update when assembler support is available.
    UNIMPLEMENTED(FATAL) << "Needs assembler support.";
//  Once assembler support is available, in-memory operations look like this:
//    if (is64bit) {
//      DCHECK(output.IsDoubleStackSlot());
//      __ andl(Address(Register(RSP), output.GetHighStackIndex(kX86WordSize)),
//              Immediate(0x7FFFFFFF));
//    } else {
//      DCHECK(output.IsStackSlot());
//      // Can use and with a literal directly.
//      __ andl(Address(Register(RSP), output.GetStackIndex()), Immediate(0x7FFFFFFF));
//    }
  }
}

void IntrinsicLocationsBuilderX86::VisitMathAbsDouble(HInvoke* invoke) {
  CreateFloatToFloat(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsDouble(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathAbsFloat(HInvoke* invoke) {
  CreateFloatToFloat(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsFloat(HInvoke* invoke) {
  MathAbsFP(invoke->GetLocations(), false, GetAssembler());
}

static void CreateAbsIntLocation(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RegisterLocation(EAX));
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RegisterLocation(EDX));
}

static void GenAbsInteger(LocationSummary* locations, X86Assembler* assembler) {
  Location output = locations->Out();
  Register out = output.AsRegister<Register>();
  DCHECK_EQ(out, EAX);
  Register temp = locations->GetTemp(0).AsRegister<Register>();
  DCHECK_EQ(temp, EDX);
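
  // Branch-free abs: with sign = x >> 31, |x| = (x ^ sign) - sign. For example,
  // x = -5: sign = -1, (-5 ^ -1) = 4, and 4 - (-1) = 5.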

  // Sign extend EAX into EDX.
  __ cdq();

  // XOR EAX with sign.
  __ xorl(EAX, EDX);

  // Subtract out sign to correct.
  __ subl(EAX, EDX);

  // The result is in EAX.
}

static void CreateAbsLongLocation(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
  locations->AddTemp(Location::RequiresRegister());
}

static void GenAbsLong(LocationSummary* locations, X86Assembler* assembler) {
  Location input = locations->InAt(0);
  Register input_lo = input.AsRegisterPairLow<Register>();
  Register input_hi = input.AsRegisterPairHigh<Register>();
  Location output = locations->Out();
  Register output_lo = output.AsRegisterPairLow<Register>();
  Register output_hi = output.AsRegisterPairHigh<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();

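  // Same branch-free identity as the 32-bit case, widened to 64 bits: replicate the
  // sign word into both halves, XOR it in, then subtract it, with sbbl propagating the
  // borrow from the low half into the high half.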
  // Compute the sign into the temporary.
  __ movl(temp, input_hi);
  __ sarl(temp, Immediate(31));

  // Store the sign into the output.
  __ movl(output_lo, temp);
  __ movl(output_hi, temp);

  // XOR the input to the output.
  __ xorl(output_lo, input_lo);
  __ xorl(output_hi, input_hi);

  // Subtract the sign.
  __ subl(output_lo, temp);
  __ sbbl(output_hi, temp);
}

void IntrinsicLocationsBuilderX86::VisitMathAbsInt(HInvoke* invoke) {
  CreateAbsIntLocation(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsInt(HInvoke* invoke) {
  GenAbsInteger(invoke->GetLocations(), GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathAbsLong(HInvoke* invoke) {
  CreateAbsLongLocation(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathAbsLong(HInvoke* invoke) {
  GenAbsLong(invoke->GetLocations(), GetAssembler());
}

static void GenMinMaxFP(LocationSummary* locations, bool is_min, bool is_double,
                        X86Assembler* assembler) {
  Location op1_loc = locations->InAt(0);
  Location op2_loc = locations->InAt(1);
  Location out_loc = locations->Out();
  XmmRegister out = out_loc.AsFpuRegister<XmmRegister>();

  // Shortcut for same input locations.
  if (op1_loc.Equals(op2_loc)) {
    DCHECK(out_loc.Equals(op1_loc));
    return;
  }

  //  (out := op1)
  //  out <=? op2
  //  if Nan jmp Nan_label
  //  if out is min jmp done
  //  if op2 is min jmp op2_label
  //  handle -0/+0
  //  jmp done
  // Nan_label:
  //  out := NaN
  // op2_label:
  //  out := op2
  // done:
  //
  // This removes one jmp, but needs to copy one input (op1) to out.
  //
  // TODO: This is straight from Quick (except literal pool). Make NaN an out-of-line slowpath?

  XmmRegister op2 = op2_loc.AsFpuRegister<XmmRegister>();

  Label nan, done, op2_label;
  if (is_double) {
    __ ucomisd(out, op2);
  } else {
    __ ucomiss(out, op2);
  }

  __ j(Condition::kParityEven, &nan);

  __ j(is_min ? Condition::kAbove : Condition::kBelow, &op2_label);
  __ j(is_min ? Condition::kBelow : Condition::kAbove, &done);

  // Handle 0.0/-0.0.
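  // Here the operands compared equal. For equal non-zero values the or/and below is a
  // no-op; for +0.0/-0.0 it picks the right sign: OR keeps a set sign bit (-0.0 for
  // min), AND clears it (+0.0 for max).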
  if (is_min) {
    if (is_double) {
      __ orpd(out, op2);
    } else {
      __ orps(out, op2);
    }
  } else {
    if (is_double) {
      __ andpd(out, op2);
    } else {
      __ andps(out, op2);
    }
  }
  __ jmp(&done);

  // NaN handling.
  __ Bind(&nan);
  if (is_double) {
    __ pushl(Immediate(kDoubleNaNHigh));
    __ pushl(Immediate(kDoubleNaNLow));
    __ movsd(out, Address(ESP, 0));
    __ addl(ESP, Immediate(8));
  } else {
    __ pushl(Immediate(kFloatNaN));
    __ movss(out, Address(ESP, 0));
    __ addl(ESP, Immediate(4));
  }
  __ jmp(&done);

  // out := op2;
  __ Bind(&op2_label);
  if (is_double) {
    __ movsd(out, op2);
  } else {
    __ movss(out, op2);
  }

  // Done.
  __ Bind(&done);
}

static void CreateFPFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetInAt(1, Location::RequiresFpuRegister());
  // The following is sub-optimal, but all we can do for now. It would be fine to also accept
  // the second input to be the output (we can simply swap inputs).
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicLocationsBuilderX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), true, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMinFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), true, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxDoubleDouble(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), false, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
  CreateFPFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxFloatFloat(HInvoke* invoke) {
  GenMinMaxFP(invoke->GetLocations(), false, false, GetAssembler());
}

static void GenMinMax(LocationSummary* locations, bool is_min, bool is_long,
                      X86Assembler* assembler) {
  Location op1_loc = locations->InAt(0);
  Location op2_loc = locations->InAt(1);

  // Shortcut for same input locations.
  if (op1_loc.Equals(op2_loc)) {
    // Can return immediately, as op1_loc == out_loc.
    // Note: if we ever support separate registers, e.g., output into memory, we need to check for
    //       a copy here.
    DCHECK(locations->Out().Equals(op1_loc));
    return;
  }

  if (is_long) {
    // Need to perform a subtract to get the sign right.
    // op1 is already in the same location as the output.
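    // A single cmpl cannot compare a 64-bit register pair, so the subl/sbbl below
    // compute out - op2 purely for their effect on the flags; cmovl then picks op2
    // whenever the signed comparison says it is the smaller (min) or larger (max)
    // value.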
    Location output = locations->Out();
    Register output_lo = output.AsRegisterPairLow<Register>();
    Register output_hi = output.AsRegisterPairHigh<Register>();

    Register op2_lo = op2_loc.AsRegisterPairLow<Register>();
    Register op2_hi = op2_loc.AsRegisterPairHigh<Register>();

    // Spare register to compute the subtraction to set condition code.
    Register temp = locations->GetTemp(0).AsRegister<Register>();

    // Subtract off op2_low.
    __ movl(temp, output_lo);
    __ subl(temp, op2_lo);

    // Now use the same temp and the borrow to finish the subtraction of op2_hi.
    __ movl(temp, output_hi);
    __ sbbl(temp, op2_hi);

    // Now the condition code is correct.
    Condition cond = is_min ? Condition::kGreaterEqual : Condition::kLess;
    __ cmovl(cond, output_lo, op2_lo);
    __ cmovl(cond, output_hi, op2_hi);
  } else {
    Register out = locations->Out().AsRegister<Register>();
    Register op2 = op2_loc.AsRegister<Register>();

    //  (out := op1)
    //  out <=? op2
    //  if out is min jmp done
    //  out := op2
    // done:

    __ cmpl(out, op2);
    Condition cond = is_min ? Condition::kGreater : Condition::kLess;
    __ cmovl(cond, out, op2);
  }
}

static void CreateIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

static void CreateLongLongToLongLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  // Register to use to perform a long subtract to set cc.
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicLocationsBuilderX86::VisitMathMinIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), true, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMinLongLong(HInvoke* invoke) {
  CreateLongLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMinLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), true, true, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxIntInt(HInvoke* invoke) {
  CreateIntIntToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxIntInt(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), false, false, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMathMaxLongLong(HInvoke* invoke) {
  CreateLongLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathMaxLongLong(HInvoke* invoke) {
  GenMinMax(invoke->GetLocations(), false, true, GetAssembler());
}

static void CreateFPToFPLocations(ArenaAllocator* arena, HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresFpuRegister());
  locations->SetOut(Location::RequiresFpuRegister());
}

void IntrinsicLocationsBuilderX86::VisitMathSqrt(HInvoke* invoke) {
  CreateFPToFPLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
  XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();

  GetAssembler()->sqrtsd(out, in);
}

static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke) {
  MoveArguments(invoke, codegen);

  DCHECK(invoke->IsInvokeStaticOrDirect());
  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), EAX);
  codegen->RecordPcInfo(invoke, invoke->GetDexPc());

  // Copy the result back to the expected output.
  Location out = invoke->GetLocations()->Out();
  if (out.IsValid()) {
    DCHECK(out.IsRegister());
    MoveFromReturnRegister(out, invoke->GetType(), codegen);
  }
}

static void CreateSSE41FPToFPLocations(ArenaAllocator* arena,
                                       HInvoke* invoke,
                                       CodeGeneratorX86* codegen) {
  // Do we have instruction support?
  if (codegen->GetInstructionSetFeatures().HasSSE4_1()) {
    CreateFPToFPLocations(arena, invoke);
    return;
  }

  // We have to fall back to a call to the intrinsic.
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
  locations->SetOut(Location::FpuRegisterLocation(XMM0));
  // Needs to be EAX for the invoke.
  locations->AddTemp(Location::RegisterLocation(EAX));
}

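// round_mode below is the SSE4.1 rounding-control immediate: 0 rounds to nearest,
// 1 rounds toward negative infinity (floor) and 2 rounds toward positive infinity
// (ceil), matching the Rint/Floor/Ceil call sites below.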
static void GenSSE41FPToFPIntrinsic(CodeGeneratorX86* codegen,
                                    HInvoke* invoke,
                                    X86Assembler* assembler,
                                    int round_mode) {
  LocationSummary* locations = invoke->GetLocations();
  if (locations->WillCall()) {
    InvokeOutOfLineIntrinsic(codegen, invoke);
  } else {
    XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
    XmmRegister out = locations->Out().AsFpuRegister<XmmRegister>();
    __ roundsd(out, in, Immediate(round_mode));
  }
}

void IntrinsicLocationsBuilderX86::VisitMathCeil(HInvoke* invoke) {
  CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitMathCeil(HInvoke* invoke) {
  GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 2);
}

void IntrinsicLocationsBuilderX86::VisitMathFloor(HInvoke* invoke) {
  CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitMathFloor(HInvoke* invoke) {
  GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 1);
}

void IntrinsicLocationsBuilderX86::VisitMathRint(HInvoke* invoke) {
  CreateSSE41FPToFPLocations(arena_, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitMathRint(HInvoke* invoke) {
  GenSSE41FPToFPIntrinsic(codegen_, invoke, GetAssembler(), 0);
}

// Note that 32 bit x86 doesn't have the capability to inline MathRoundDouble,
// as it needs 64 bit instructions.
void IntrinsicLocationsBuilderX86::VisitMathRoundFloat(HInvoke* invoke) {
  // Do we have instruction support?
  if (codegen_->GetInstructionSetFeatures().HasSSE4_1()) {
    LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                              LocationSummary::kNoCall,
                                                              kIntrinsified);
    locations->SetInAt(0, Location::RequiresFpuRegister());
    locations->SetOut(Location::RequiresRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
    return;
  }

  // We have to fall back to a call to the intrinsic.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetFpuRegisterAt(0)));
  locations->SetOut(Location::RegisterLocation(EAX));
  // Needs to be EAX for the invoke.
  locations->AddTemp(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitMathRoundFloat(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();
  if (locations->WillCall()) {
    InvokeOutOfLineIntrinsic(codegen_, invoke);
    return;
  }

  // Implement RoundFloat as t1 = floor(input + 0.5f);  convert to int.
  XmmRegister in = locations->InAt(0).AsFpuRegister<XmmRegister>();
  Register out = locations->Out().AsRegister<Register>();
  XmmRegister maxInt = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
  XmmRegister inPlusPointFive = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
  Label done, nan;
  X86Assembler* assembler = GetAssembler();

  // Generate 0.5 into inPlusPointFive.
  __ movl(out, Immediate(bit_cast<int32_t, float>(0.5f)));
  __ movd(inPlusPointFive, out);

  // Add in the input.
  __ addss(inPlusPointFive, in);

  // And truncate to an integer.
  __ roundss(inPlusPointFive, inPlusPointFive, Immediate(1));

  __ movl(out, Immediate(kPrimIntMax));
  // maxInt = int-to-float(out)
  __ cvtsi2ss(maxInt, out);

  // if inPlusPointFive >= maxInt goto done
  __ comiss(inPlusPointFive, maxInt);
  __ j(kAboveEqual, &done);

  // if input == NaN goto nan
  __ j(kUnordered, &nan);

  // output = float-to-int-truncate(input)
  __ cvttss2si(out, inPlusPointFive);
  __ jmp(&done);
  __ Bind(&nan);

  //  output = 0
  __ xorl(out, out);
  __ Bind(&done);
}

void IntrinsicLocationsBuilderX86::VisitStringCharAt(HInvoke* invoke) {
  // The inputs plus one temp.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCallOnSlowPath,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
}

void IntrinsicCodeGeneratorX86::VisitStringCharAt(HInvoke* invoke) {
  LocationSummary* locations = invoke->GetLocations();

  // Location of reference to data array.
  const int32_t value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count.
  const int32_t count_offset = mirror::String::CountOffset().Int32Value();

  Register obj = locations->InAt(0).AsRegister<Register>();
  Register idx = locations->InAt(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();

  // TODO: Maybe we can support range check elimination. Overall, though, I think it's not worth
  //       the cost.
  // TODO: For simplicity, the index parameter is requested in a register, so different from Quick
  //       we will not optimize the code for constants (which would save a register).

  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);

  X86Assembler* assembler = GetAssembler();

  __ cmpl(idx, Address(obj, count_offset));
  codegen_->MaybeRecordImplicitNullCheck(invoke);
  __ j(kAboveEqual, slow_path->GetEntryLabel());

  // out = out[2*idx].
  __ movzxw(out, Address(out, idx, ScaleFactor::TIMES_2, value_offset));

  __ Bind(slow_path->GetExitLabel());
}

void IntrinsicLocationsBuilderX86::VisitStringCompareTo(HInvoke* invoke) {
  // The inputs plus one temp.
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringCompareTo(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  Register argument = locations->InAt(1).AsRegister<Register>();
  __ testl(argument, argument);
  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pStringCompareTo)));
  __ Bind(slow_path->GetExitLabel());
}

static void CreateStringIndexOfLocations(HInvoke* invoke,
                                         ArenaAllocator* allocator,
                                         bool start_at_zero) {
  LocationSummary* locations = new (allocator) LocationSummary(invoke,
                                                               LocationSummary::kCallOnSlowPath,
                                                               kIntrinsified);
  // The data needs to be in EDI for scasw. So request that the string is there, anyways.
  locations->SetInAt(0, Location::RegisterLocation(EDI));
  // If we look for a constant char, we'll still have to copy it into EAX. So just request the
  // allocator to do that, anyways. We can still do the constant check by checking the parameter
  // of the instruction explicitly.
  // Note: This works as we don't clobber EAX anywhere.
  locations->SetInAt(1, Location::RegisterLocation(EAX));
  if (!start_at_zero) {
    locations->SetInAt(2, Location::RequiresRegister());          // The starting index.
  }
  // As we clobber EDI during execution anyways, also use it as the output.
  locations->SetOut(Location::SameAsFirstInput());

  // repne scasw uses ECX as the counter.
  locations->AddTemp(Location::RegisterLocation(ECX));
  // Need another temporary to be able to compute the result.
  locations->AddTemp(Location::RequiresRegister());
}

static void GenerateStringIndexOf(HInvoke* invoke,
                                  X86Assembler* assembler,
                                  CodeGeneratorX86* codegen,
                                  ArenaAllocator* allocator,
                                  bool start_at_zero) {
  LocationSummary* locations = invoke->GetLocations();

  // Note that the null check must have been done earlier.
  DCHECK(!invoke->CanDoImplicitNullCheckOn(invoke->InputAt(0)));

  Register string_obj = locations->InAt(0).AsRegister<Register>();
  Register search_value = locations->InAt(1).AsRegister<Register>();
  Register counter = locations->GetTemp(0).AsRegister<Register>();
  Register string_length = locations->GetTemp(1).AsRegister<Register>();
  Register out = locations->Out().AsRegister<Register>();

  // Check our assumptions for registers.
  DCHECK_EQ(string_obj, EDI);
  DCHECK_EQ(search_value, EAX);
  DCHECK_EQ(counter, ECX);
  DCHECK_EQ(out, EDI);

  // Check for code points > 0xFFFF. Either a slow-path check when we don't know statically,
  // or directly dispatch if we have a constant.
  SlowPathCodeX86* slow_path = nullptr;
  if (invoke->InputAt(1)->IsIntConstant()) {
    if (static_cast<uint32_t>(invoke->InputAt(1)->AsIntConstant()->GetValue()) >
        std::numeric_limits<uint16_t>::max()) {
      // Always needs the slow-path. We could directly dispatch to it, but this case should be
      // rare, so for simplicity just put the full slow-path down and branch unconditionally.
      slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
      codegen->AddSlowPath(slow_path);
      __ jmp(slow_path->GetEntryLabel());
      __ Bind(slow_path->GetExitLabel());
      return;
    }
  } else {
    __ cmpl(search_value, Immediate(std::numeric_limits<uint16_t>::max()));
    slow_path = new (allocator) IntrinsicSlowPathX86(invoke);
    codegen->AddSlowPath(slow_path);
    __ j(kAbove, slow_path->GetEntryLabel());
  }

  // From here down, we know that we are looking for a char that fits in 16 bits.
  // Location of reference to data array within the String object.
  int32_t value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count within the String object.
  int32_t count_offset = mirror::String::CountOffset().Int32Value();

  // Load string length, i.e., the count field of the string.
  __ movl(string_length, Address(string_obj, count_offset));

  // Do a zero-length check.
  // TODO: Support jecxz.
  Label not_found_label;
  __ testl(string_length, string_length);
  __ j(kEqual, &not_found_label);

  if (start_at_zero) {
    // Number of chars to scan is the same as the string length.
    __ movl(counter, string_length);

    // Move to the start of the string.
    __ addl(string_obj, Immediate(value_offset));
  } else {
    Register start_index = locations->InAt(2).AsRegister<Register>();

    // Do a start_index check.
    __ cmpl(start_index, string_length);
    __ j(kGreaterEqual, &not_found_label);

    // Ensure we have a start index >= 0.
    __ xorl(counter, counter);
    __ cmpl(start_index, Immediate(0));
    __ cmovl(kGreater, counter, start_index);

    // Move to the start of the string: string_obj + value_offset + 2 * start_index.
    __ leal(string_obj, Address(string_obj, counter, ScaleFactor::TIMES_2, value_offset));

    // Now update ecx (the repne scasw work counter). We have string.length - start_index left to
    // compare.
    __ negl(counter);
    __ leal(counter, Address(string_length, counter, ScaleFactor::TIMES_1, 0));
  }

  // Everything is set up for repne scasw:
  //   * Comparison address in EDI.
  //   * Counter in ECX.
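  // Each repne scasw step compares AX against the word at [EDI], advances EDI by two
  // and decrements ECX, stopping early when a word matches (sets ZF) or when ECX
  // reaches zero.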
  __ repne_scasw();

  // Did we find a match?
  __ j(kNotEqual, &not_found_label);

  // Yes, we matched.  Compute the index of the result.
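  // ECX was decremented for the matching char as well, so counter now holds
  // string_length - match_index - 1; the subl/leal below recover match_index.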
  __ subl(string_length, counter);
  __ leal(out, Address(string_length, -1));

  Label done;
  __ jmp(&done);

  // Failed to match; return -1.
  __ Bind(&not_found_label);
  __ movl(out, Immediate(-1));

  // And join up at the end.
  __ Bind(&done);
  if (slow_path != nullptr) {
    __ Bind(slow_path->GetExitLabel());
  }
}

void IntrinsicLocationsBuilderX86::VisitStringIndexOf(HInvoke* invoke) {
  CreateStringIndexOfLocations(invoke, arena_, true);
}

void IntrinsicCodeGeneratorX86::VisitStringIndexOf(HInvoke* invoke) {
  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), true);
}

void IntrinsicLocationsBuilderX86::VisitStringIndexOfAfter(HInvoke* invoke) {
  CreateStringIndexOfLocations(invoke, arena_, false);
}

void IntrinsicCodeGeneratorX86::VisitStringIndexOfAfter(HInvoke* invoke) {
  GenerateStringIndexOf(invoke, GetAssembler(), codegen_, GetAllocator(), false);
}

void IntrinsicLocationsBuilderX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetInAt(3, Location::RegisterLocation(calling_convention.GetRegisterAt(3)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringNewStringFromBytes(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  Register byte_array = locations->InAt(0).AsRegister<Register>();
  __ testl(byte_array, byte_array);
  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromBytes)));
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  __ Bind(slow_path->GetExitLabel());
}

void IntrinsicLocationsBuilderX86::VisitStringNewStringFromChars(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetInAt(1, Location::RegisterLocation(calling_convention.GetRegisterAt(1)));
  locations->SetInAt(2, Location::RegisterLocation(calling_convention.GetRegisterAt(2)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringNewStringFromChars(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromChars)));
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
}

void IntrinsicLocationsBuilderX86::VisitStringNewStringFromString(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kCall,
                                                            kIntrinsified);
  InvokeRuntimeCallingConvention calling_convention;
  locations->SetInAt(0, Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
  locations->SetOut(Location::RegisterLocation(EAX));
}

void IntrinsicCodeGeneratorX86::VisitStringNewStringFromString(HInvoke* invoke) {
  X86Assembler* assembler = GetAssembler();
  LocationSummary* locations = invoke->GetLocations();

  Register string_to_copy = locations->InAt(0).AsRegister<Register>();
  __ testl(string_to_copy, string_to_copy);
  SlowPathCodeX86* slow_path = new (GetAllocator()) IntrinsicSlowPathX86(invoke);
  codegen_->AddSlowPath(slow_path);
  __ j(kEqual, slow_path->GetEntryLabel());

  __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pAllocStringFromString)));
  codegen_->RecordPcInfo(invoke, invoke->GetDexPc());
  __ Bind(slow_path->GetExitLabel());
}

static void GenPeek(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
  Register address = locations->InAt(0).AsRegisterPairLow<Register>();
  Location out_loc = locations->Out();
  // x86 allows unaligned access. We do not have to check the input or use specific instructions
  // to avoid a SIGBUS.
  switch (size) {
    case Primitive::kPrimByte:
      __ movsxb(out_loc.AsRegister<Register>(), Address(address, 0));
      break;
    case Primitive::kPrimShort:
      __ movsxw(out_loc.AsRegister<Register>(), Address(address, 0));
      break;
    case Primitive::kPrimInt:
      __ movl(out_loc.AsRegister<Register>(), Address(address, 0));
      break;
    case Primitive::kPrimLong:
      __ movl(out_loc.AsRegisterPairLow<Register>(), Address(address, 0));
      __ movl(out_loc.AsRegisterPairHigh<Register>(), Address(address, 4));
      break;
    default:
      LOG(FATAL) << "Type not recognized for peek: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekByte(HInvoke* invoke) {
  CreateLongToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekByte(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
  CreateLongToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekIntNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
  CreateLongToLongLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekLongNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
  CreateLongToIntLocations(arena_, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPeekShortNative(HInvoke* invoke) {
  GenPeek(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

static void CreateLongIntToVoidLocations(ArenaAllocator* arena, Primitive::Type size,
                                         HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  HInstruction* value = invoke->InputAt(1);
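  // On 32-bit x86 only EAX, EBX, ECX and EDX have byte-addressable forms, so a byte
  // store needs its value in a ByteRegister (or as a constant).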
  if (size == Primitive::kPrimByte) {
    locations->SetInAt(1, Location::ByteRegisterOrConstant(EDX, value));
  } else {
    locations->SetInAt(1, Location::RegisterOrConstant(value));
  }
}

static void GenPoke(LocationSummary* locations, Primitive::Type size, X86Assembler* assembler) {
  Register address = locations->InAt(0).AsRegisterPairLow<Register>();
  Location value_loc = locations->InAt(1);
  // x86 allows unaligned access. We do not have to check the input or use specific instructions
  // to avoid a SIGBUS.
  switch (size) {
    case Primitive::kPrimByte:
      if (value_loc.IsConstant()) {
        __ movb(Address(address, 0),
                Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ movb(Address(address, 0), value_loc.AsRegister<ByteRegister>());
      }
      break;
    case Primitive::kPrimShort:
      if (value_loc.IsConstant()) {
        __ movw(Address(address, 0),
                Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ movw(Address(address, 0), value_loc.AsRegister<Register>());
      }
      break;
    case Primitive::kPrimInt:
      if (value_loc.IsConstant()) {
        __ movl(Address(address, 0),
                Immediate(value_loc.GetConstant()->AsIntConstant()->GetValue()));
      } else {
        __ movl(Address(address, 0), value_loc.AsRegister<Register>());
      }
      break;
    case Primitive::kPrimLong:
      if (value_loc.IsConstant()) {
        int64_t value = value_loc.GetConstant()->AsLongConstant()->GetValue();
        __ movl(Address(address, 0), Immediate(Low32Bits(value)));
        __ movl(Address(address, 4), Immediate(High32Bits(value)));
      } else {
        __ movl(Address(address, 0), value_loc.AsRegisterPairLow<Register>());
        __ movl(Address(address, 4), value_loc.AsRegisterPairHigh<Register>());
      }
      break;
    default:
      LOG(FATAL) << "Type not recognized for poke: " << size;
      UNREACHABLE();
  }
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeByte(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimByte, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeByte(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimByte, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimInt, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeIntNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimInt, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimLong, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeLongNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimLong, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
  CreateLongIntToVoidLocations(arena_, Primitive::kPrimShort, invoke);
}

void IntrinsicCodeGeneratorX86::VisitMemoryPokeShortNative(HInvoke* invoke) {
  GenPoke(invoke->GetLocations(), Primitive::kPrimShort, GetAssembler());
}

void IntrinsicLocationsBuilderX86::VisitThreadCurrentThread(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetOut(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorX86::VisitThreadCurrentThread(HInvoke* invoke) {
  Register out = invoke->GetLocations()->Out().AsRegister<Register>();
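  // On x86, ART addresses the current Thread through the fs segment; load the managed
  // peer (the java.lang.Thread object) from its fixed offset in that block.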
  GetAssembler()->fs()->movl(out, Address::Absolute(Thread::PeerOffset<kX86WordSize>()));
}

static void GenUnsafeGet(LocationSummary* locations, Primitive::Type type,
                         bool is_volatile, X86Assembler* assembler) {
  Register base = locations->InAt(1).AsRegister<Register>();
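  // The Unsafe offset argument is a Java long; in 32-bit mode only the low word matters.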
  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
  Location output = locations->Out();

  switch (type) {
    case Primitive::kPrimInt:
    case Primitive::kPrimNot:
      __ movl(output.AsRegister<Register>(), Address(base, offset, ScaleFactor::TIMES_1, 0));
      break;

    case Primitive::kPrimLong: {
        Register output_lo = output.AsRegisterPairLow<Register>();
        Register output_hi = output.AsRegisterPairHigh<Register>();
        if (is_volatile) {
          // Load the 64 bits through an XMM register so the read is atomic; a pair of
          // 32-bit loads could observe a torn value.
          XmmRegister temp = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
          __ movsd(temp, Address(base, offset, ScaleFactor::TIMES_1, 0));
          __ movd(output_lo, temp);
          __ psrlq(temp, Immediate(32));
          __ movd(output_hi, temp);
        } else {
          __ movl(output_lo, Address(base, offset, ScaleFactor::TIMES_1, 0));
          __ movl(output_hi, Address(base, offset, ScaleFactor::TIMES_1, 4));
        }
      }
      break;

    default:
      LOG(FATAL) << "Unsupported op size " << type;
      UNREACHABLE();
  }
}

static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke,
                                          bool is_long, bool is_volatile) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  if (is_long) {
    if (is_volatile) {
      // Need an XMM temp so the volatile long can be read in a single atomic 64-bit load.
      locations->AddTemp(Location::RequiresFpuRegister());
      locations->SetOut(Location::RequiresRegister());
    } else {
      locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap);
    }
  } else {
    locations->SetOut(Location::RequiresRegister());
  }
}

void IntrinsicLocationsBuilderX86::VisitUnsafeGet(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLong(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, true, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, true, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObject(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntToIntLocations(arena_, invoke, false, true);
}


void IntrinsicCodeGeneratorX86::VisitUnsafeGet(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimInt, true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLong(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetLongVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimLong, true, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObject(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, false, GetAssembler());
}
void IntrinsicCodeGeneratorX86::VisitUnsafeGetObjectVolatile(HInvoke* invoke) {
  GenUnsafeGet(invoke->GetLocations(), Primitive::kPrimNot, true, GetAssembler());
}


static void CreateIntIntIntIntToVoidPlusTempsLocations(ArenaAllocator* arena,
                                                       Primitive::Type type,
                                                       HInvoke* invoke,
                                                       bool is_volatile) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  locations->SetInAt(2, Location::RequiresRegister());
  locations->SetInAt(3, Location::RequiresRegister());
  if (type == Primitive::kPrimNot) {
    // Need temp registers for card-marking.
    locations->AddTemp(Location::RequiresRegister());
    // Ensure the value is in a byte register.
    locations->AddTemp(Location::RegisterLocation(ECX));
  } else if (type == Primitive::kPrimLong && is_volatile) {
    locations->AddTemp(Location::RequiresFpuRegister());
    locations->AddTemp(Location::RequiresFpuRegister());
  }
}

void IntrinsicLocationsBuilderX86::VisitUnsafePut(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimInt, invoke, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObject(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimNot, invoke, true);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLong(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, false);
}
void IntrinsicLocationsBuilderX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  CreateIntIntIntIntToVoidPlusTempsLocations(arena_, Primitive::kPrimLong, invoke, true);
}

// We don't care for ordered: it requires an AnyStore barrier, which is already given by the x86
// memory model.
static void GenUnsafePut(LocationSummary* locations,
                         Primitive::Type type,
                         bool is_volatile,
                         CodeGeneratorX86* codegen) {
  X86Assembler* assembler = reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
  Register base = locations->InAt(1).AsRegister<Register>();
  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
  Location value_loc = locations->InAt(3);

  if (type == Primitive::kPrimLong) {
    Register value_lo = value_loc.AsRegisterPairLow<Register>();
    Register value_hi = value_loc.AsRegisterPairHigh<Register>();
    if (is_volatile) {
      XmmRegister temp1 = locations->GetTemp(0).AsFpuRegister<XmmRegister>();
      XmmRegister temp2 = locations->GetTemp(1).AsFpuRegister<XmmRegister>();
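      // punpckldq interleaves the low dwords, leaving value_hi:value_lo in temp1 so the
      // movsd below writes the whole long as a single atomic 64-bit store.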
      __ movd(temp1, value_lo);
      __ movd(temp2, value_hi);
      __ punpckldq(temp1, temp2);
      __ movsd(Address(base, offset, ScaleFactor::TIMES_1, 0), temp1);
    } else {
      __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_lo);
      __ movl(Address(base, offset, ScaleFactor::TIMES_1, 4), value_hi);
    }
  } else {
    __ movl(Address(base, offset, ScaleFactor::TIMES_1, 0), value_loc.AsRegister<Register>());
  }

  if (is_volatile) {
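    // x86 already orders everything except StoreLoad; the mfence supplies that last
    // barrier, which a volatile store requires.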
    __ mfence();
  }

  if (type == Primitive::kPrimNot) {
    codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
                        locations->GetTemp(1).AsRegister<Register>(),
                        base,
                        value_loc.AsRegister<Register>());
  }
}

void IntrinsicCodeGeneratorX86::VisitUnsafePut(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimInt, true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObject(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutObjectVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimNot, true, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLong(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongOrdered(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, false, codegen_);
}
void IntrinsicCodeGeneratorX86::VisitUnsafePutLongVolatile(HInvoke* invoke) {
  GenUnsafePut(invoke->GetLocations(), Primitive::kPrimLong, true, codegen_);
}

static void CreateIntIntIntIntIntToInt(ArenaAllocator* arena, Primitive::Type type,
                                       HInvoke* invoke) {
  LocationSummary* locations = new (arena) LocationSummary(invoke,
                                                           LocationSummary::kNoCall,
                                                           kIntrinsified);
  locations->SetInAt(0, Location::NoLocation());        // Unused receiver.
  locations->SetInAt(1, Location::RequiresRegister());
  // The offset is a long, but in 32-bit mode we only need the low word.
  // TODO: can we update the invoke here to remove a TypeConvert to Long?
  locations->SetInAt(2, Location::RequiresRegister());
  // Expected value must be in EAX or EDX:EAX.
  // For long, new value must be in ECX:EBX.
  if (type == Primitive::kPrimLong) {
    locations->SetInAt(3, Location::RegisterPairLocation(EAX, EDX));
    locations->SetInAt(4, Location::RegisterPairLocation(EBX, ECX));
  } else {
    locations->SetInAt(3, Location::RegisterLocation(EAX));
    locations->SetInAt(4, Location::RequiresRegister());
  }

  // Force a byte register for the output.
  locations->SetOut(Location::RegisterLocation(EAX));
  if (type == Primitive::kPrimNot) {
    // Need temp registers for card-marking.
    locations->AddTemp(Location::RequiresRegister());
    // Need a byte register for marking.
    locations->AddTemp(Location::RegisterLocation(ECX));
  }
}

void IntrinsicLocationsBuilderX86::VisitUnsafeCASInt(HInvoke* invoke) {
  CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimInt, invoke);
}

void IntrinsicLocationsBuilderX86::VisitUnsafeCASLong(HInvoke* invoke) {
  CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimLong, invoke);
}

void IntrinsicLocationsBuilderX86::VisitUnsafeCASObject(HInvoke* invoke) {
  CreateIntIntIntIntIntToInt(arena_, Primitive::kPrimNot, invoke);
}

static void GenCAS(Primitive::Type type, HInvoke* invoke, CodeGeneratorX86* codegen) {
  X86Assembler* assembler =
    reinterpret_cast<X86Assembler*>(codegen->GetAssembler());
  LocationSummary* locations = invoke->GetLocations();

  Register base = locations->InAt(1).AsRegister<Register>();
  Register offset = locations->InAt(2).AsRegisterPairLow<Register>();
  Location out = locations->Out();
  DCHECK_EQ(out.AsRegister<Register>(), EAX);

  if (type == Primitive::kPrimLong) {
    DCHECK_EQ(locations->InAt(3).AsRegisterPairLow<Register>(), EAX);
    DCHECK_EQ(locations->InAt(3).AsRegisterPairHigh<Register>(), EDX);
    DCHECK_EQ(locations->InAt(4).AsRegisterPairLow<Register>(), EBX);
    DCHECK_EQ(locations->InAt(4).AsRegisterPairHigh<Register>(), ECX);
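    // lock cmpxchg8b compares EDX:EAX with the 64-bit memory operand; on a match it
    // stores ECX:EBX, otherwise it loads the current value into EDX:EAX. Either way,
    // ZF reports the outcome.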
    __ LockCmpxchg8b(Address(base, offset, TIMES_1, 0));
  } else {
    // Integer or object.
    DCHECK_EQ(locations->InAt(3).AsRegister<Register>(), EAX);
    Register value = locations->InAt(4).AsRegister<Register>();
    if (type == Primitive::kPrimNot) {
      // Mark card for object assuming new value is stored.
      codegen->MarkGCCard(locations->GetTemp(0).AsRegister<Register>(),
                          locations->GetTemp(1).AsRegister<Register>(),
                          base,
                          value);
    }

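    // Same protocol with a 32-bit operand: EAX holds the expected value and receives
    // the current value if the compare fails.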
    __ LockCmpxchgl(Address(base, offset, TIMES_1, 0), value);
  }

  // locked cmpxchg has full barrier semantics, and we don't need scheduling
  // barriers at this time.

  // Convert ZF into the boolean result.
  __ setb(kZero, out.AsRegister<Register>());
  __ movzxb(out.AsRegister<Register>(), out.AsRegister<ByteRegister>());
}

void IntrinsicCodeGeneratorX86::VisitUnsafeCASInt(HInvoke* invoke) {
  GenCAS(Primitive::kPrimInt, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitUnsafeCASLong(HInvoke* invoke) {
  GenCAS(Primitive::kPrimLong, invoke, codegen_);
}

void IntrinsicCodeGeneratorX86::VisitUnsafeCASObject(HInvoke* invoke) {
  GenCAS(Primitive::kPrimNot, invoke, codegen_);
}

void IntrinsicLocationsBuilderX86::VisitIntegerReverse(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());
}

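// Swaps the bit groups selected by `mask` with the groups `shift` bits above them:
// reg = ((reg >> shift) & mask) | ((reg & mask) << shift), using temp as scratch.
// E.g. shift = 1, mask = 0x55555555 exchanges each even bit with the odd bit above it.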
static void SwapBits(Register reg, Register temp, int32_t shift, int32_t mask,
                     X86Assembler* assembler) {
  Immediate imm_shift(shift);
  Immediate imm_mask(mask);
  __ movl(temp, reg);
  __ shrl(reg, imm_shift);
  __ andl(temp, imm_mask);
  __ andl(reg, imm_mask);
  __ shll(temp, imm_shift);
  __ orl(reg, temp);
}

void IntrinsicCodeGeneratorX86::VisitIntegerReverse(HInvoke* invoke) {
  X86Assembler* assembler =
    reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
  LocationSummary* locations = invoke->GetLocations();

  Register reg = locations->InAt(0).AsRegister<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();

  /*
   * Use one bswap instruction to reverse the byte order first, then use three rounds
   * of bit swapping to reverse the bits within each byte. Using bswap saves
   * instructions compared to the generic luni implementation, which needs five rounds
   * of bit swapping.
   * x = bswap x
   * x = (x & 0x55555555) << 1 | (x >> 1) & 0x55555555;
   * x = (x & 0x33333333) << 2 | (x >> 2) & 0x33333333;
   * x = (x & 0x0F0F0F0F) << 4 | (x >> 4) & 0x0F0F0F0F;
   */
  __ bswapl(reg);
  SwapBits(reg, temp, 1, 0x55555555, assembler);
  SwapBits(reg, temp, 2, 0x33333333, assembler);
  SwapBits(reg, temp, 4, 0x0f0f0f0f, assembler);
}

void IntrinsicLocationsBuilderX86::VisitLongReverse(HInvoke* invoke) {
  LocationSummary* locations = new (arena_) LocationSummary(invoke,
                                                            LocationSummary::kNoCall,
                                                            kIntrinsified);
  locations->SetInAt(0, Location::RequiresRegister());
  locations->SetOut(Location::SameAsFirstInput());
  locations->AddTemp(Location::RequiresRegister());
}

void IntrinsicCodeGeneratorX86::VisitLongReverse(HInvoke* invoke) {
  X86Assembler* assembler =
    reinterpret_cast<X86Assembler*>(codegen_->GetAssembler());
  LocationSummary* locations = invoke->GetLocations();

  Register reg_low = locations->InAt(0).AsRegisterPairLow<Register>();
  Register reg_high = locations->InAt(0).AsRegisterPairHigh<Register>();
  Register temp = locations->GetTemp(0).AsRegister<Register>();

  // Reversing 64 bits swaps the high and low words, then reverses each word exactly as
  // in the 32-bit case.
  // Exchange high and low.
  __ movl(temp, reg_low);
  __ movl(reg_low, reg_high);
  __ movl(reg_high, temp);

  // bit-reverse low
  __ bswapl(reg_low);
  SwapBits(reg_low, temp, 1, 0x55555555, assembler);
  SwapBits(reg_low, temp, 2, 0x33333333, assembler);
  SwapBits(reg_low, temp, 4, 0x0f0f0f0f, assembler);

  // bit-reverse high
  __ bswapl(reg_high);
  SwapBits(reg_high, temp, 1, 0x55555555, assembler);
  SwapBits(reg_high, temp, 2, 0x33333333, assembler);
  SwapBits(reg_high, temp, 4, 0x0f0f0f0f, assembler);
}

// Unimplemented intrinsics.

#define UNIMPLEMENTED_INTRINSIC(Name)                                                 \
void IntrinsicLocationsBuilderX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {  \
}                                                                                     \
void IntrinsicCodeGeneratorX86::Visit ## Name(HInvoke* invoke ATTRIBUTE_UNUSED) {     \
}

UNIMPLEMENTED_INTRINSIC(MathRoundDouble)
UNIMPLEMENTED_INTRINSIC(StringGetCharsNoCheck)
UNIMPLEMENTED_INTRINSIC(SystemArrayCopyChar)
UNIMPLEMENTED_INTRINSIC(ReferenceGetReferent)

}  // namespace x86
}  // namespace art