/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm64.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"
#include "utils.h"

namespace art {
namespace arm64 {

#ifdef ___
#error "ARM64 Assembler macro already defined."
#else
#define ___   vixl_masm_->
#endif

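// Emit the out-of-line exception poll slow paths recorded by ExceptionPoll(), then
// finalize the VIXL code buffer.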
void Arm64Assembler::EmitSlowPaths() {
  if (!exception_blocks_.empty()) {
    for (size_t i = 0; i < exception_blocks_.size(); i++) {
      EmitExceptionPoll(exception_blocks_.at(i));
    }
  }
  ___ FinalizeCode();
}

size_t Arm64Assembler::CodeSize() const {
  return ___ SizeOfCodeGenerated();
}

void Arm64Assembler::FinalizeInstructions(const MemoryRegion& region) {
  // Copy the instructions from the buffer.
  MemoryRegion from(reinterpret_cast<void*>(vixl_buf_), CodeSize());
  region.CopyFrom(0, from);
}

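// The current Thread* is kept in ETR; copy it into a register or spill it to the frame.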
void Arm64Assembler::GetCurrentThread(ManagedRegister tr) {
  ___ Mov(reg_x(tr.AsArm64().AsCoreRegister()), reg_x(ETR));
}

void Arm64Assembler::GetCurrentThread(FrameOffset offset, ManagedRegister /* scratch */) {
  StoreToOffset(ETR, SP, offset.Int32Value());
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, -adjust);
}

// See Arm64 PCS Section 5.2.2.1.
void Arm64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kStackAlignment);
  AddConstant(SP, adjust);
}

void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
  AddConstant(rd, rd, value, cond);
}

void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                 Condition cond) {
  if ((cond == AL) || (cond == NV)) {
    // VIXL macro-assembler handles all variants.
    ___ Add(reg_x(rd), reg_x(rn), value);
  } else {
    // temp = rn + value
    // rd = cond ? temp : rd
    vixl::UseScratchRegisterScope temps(vixl_masm_);
    temps.Exclude(reg_x(rd), reg_x(rn));
    vixl::Register temp = temps.AcquireX();
    ___ Add(temp, reg_x(rn), value);
    ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
  }
}

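// Store routines.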
void Arm64Assembler::StoreWToOffset(StoreOperandType type, WRegister source,
                                    Register base, int32_t offset) {
  switch (type) {
    case kStoreByte:
      ___ Strb(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreHalfword:
      ___ Strh(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    case kStoreWord:
      ___ Str(reg_w(source), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Arm64Assembler::StoreToOffset(Register source, Register base, int32_t offset) {
  CHECK_NE(source, SP);
  ___ Str(reg_x(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreSToOffset(SRegister source, Register base, int32_t offset) {
  ___ Str(reg_s(source), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::StoreDToOffset(DRegister source, Register base, int32_t offset) {
  ___ Str(reg_d(source), MEM_OP(reg_x(base), offset));
}

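// Spill a managed register to the stack at |offs|, dispatching on the register kind.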
void Arm64Assembler::Store(FrameOffset offs, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister src = m_src.AsArm64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsWRegister()) {
    CHECK_EQ(4u, size);
    StoreWToOffset(kStoreWord, src.AsWRegister(), SP, offs.Int32Value());
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(8u, size);
    StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, offs.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, offs.Int32Value());
  }
}

void Arm64Assembler::StoreRef(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreWToOffset(kStoreWord, src.AsOverlappingCoreRegisterLow(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreRawPtr(FrameOffset offs, ManagedRegister m_src) {
  Arm64ManagedRegister src = m_src.AsArm64();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(src.AsCoreRegister(), SP, offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToFrame(FrameOffset offs, uint32_t imm,
                                           ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP,
                 offs.Int32Value());
}

void Arm64Assembler::StoreImmediateToThread64(ThreadOffset<8> offs, uint32_t imm,
                                            ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(scratch.AsCoreRegister(), ETR, offs.Int32Value());
}

void Arm64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> tr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
}

void Arm64Assembler::StoreStackPointerToThread64(ThreadOffset<8> tr_offs) {
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  vixl::Register temp = temps.AcquireX();
  ___ Mov(temp, reg_x(SP));
  ___ Str(temp, MEM_OP(reg_x(ETR), tr_offs.Int32Value()));
}

void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_source,
                                   FrameOffset in_off, ManagedRegister m_scratch) {
  Arm64ManagedRegister source = m_source.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  StoreToOffset(source.AsCoreRegister(), SP, dest_off.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, dest_off.Int32Value() + 8);
}

// Load routines.
void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
                                   Condition cond) {
  if ((cond == AL) || (cond == NV)) {
    ___ Mov(reg_x(dest), value);
  } else {
    // temp = value
    // rd = cond ? temp : rd
    if (value != 0) {
      vixl::UseScratchRegisterScope temps(vixl_masm_);
      temps.Exclude(reg_x(dest));
      vixl::Register temp = temps.AcquireX();
      ___ Mov(temp, value);
      ___ Csel(reg_x(dest), temp, reg_x(dest), COND_OP(cond));
    } else {
      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
    }
  }
}

void Arm64Assembler::LoadWFromOffset(LoadOperandType type, WRegister dest,
                                     Register base, int32_t offset) {
  switch (type) {
    case kLoadSignedByte:
      ___ Ldrsb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadSignedHalfword:
      ___ Ldrsh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedByte:
      ___ Ldrb(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadUnsignedHalfword:
      ___ Ldrh(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    case kLoadWord:
      ___ Ldr(reg_w(dest), MEM_OP(reg_x(base), offset));
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

// Note: We can extend this member by adding load type info - see
// sign extended A64 load variants.
void Arm64Assembler::LoadFromOffset(Register dest, Register base,
                                    int32_t offset) {
  CHECK_NE(dest, SP);
  ___ Ldr(reg_x(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadSFromOffset(SRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_s(dest), MEM_OP(reg_x(base), offset));
}

void Arm64Assembler::LoadDFromOffset(DRegister dest, Register base,
                                     int32_t offset) {
  ___ Ldr(reg_d(dest), MEM_OP(reg_x(base), offset));
}

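// Load into a managed register from [base + offset], dispatching on the register kind
// and |size|.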
void Arm64Assembler::Load(Arm64ManagedRegister dest, Register base,
                          int32_t offset, size_t size) {
  if (dest.IsNoRegister()) {
    CHECK_EQ(0u, size) << dest;
  } else if (dest.IsWRegister()) {
    CHECK_EQ(4u, size) << dest;
    ___ Ldr(reg_w(dest.AsWRegister()), MEM_OP(reg_x(base), offset));
  } else if (dest.IsCoreRegister()) {
    CHECK_NE(dest.AsCoreRegister(), SP) << dest;
    if (size == 4u) {
      ___ Ldr(reg_w(dest.AsOverlappingCoreRegisterLow()), MEM_OP(reg_x(base), offset));
    } else {
      CHECK_EQ(8u, size) << dest;
      ___ Ldr(reg_x(dest.AsCoreRegister()), MEM_OP(reg_x(base), offset));
    }
  } else if (dest.IsSRegister()) {
    ___ Ldr(reg_s(dest.AsSRegister()), MEM_OP(reg_x(base), offset));
  } else {
    CHECK(dest.IsDRegister()) << dest;
    ___ Ldr(reg_d(dest.AsDRegister()), MEM_OP(reg_x(base), offset));
  }
}

void Arm64Assembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return Load(m_dst.AsArm64(), SP, src.Int32Value(), size);
}

void Arm64Assembler::LoadFromThread64(ManagedRegister m_dst, ThreadOffset<8> src, size_t size) {
  return Load(m_dst.AsArm64(), ETR, src.Int32Value(), size);
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, FrameOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), SP, offs.Int32Value());
}

void Arm64Assembler::LoadRef(ManagedRegister m_dst, ManagedRegister m_base,
                             MemberOffset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  LoadWFromOffset(kLoadWord, dst.AsOverlappingCoreRegisterLow(), base.AsCoreRegister(),
                  offs.Int32Value());
}

void Arm64Assembler::LoadRawPtr(ManagedRegister m_dst, ManagedRegister m_base, Offset offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister base = m_base.AsArm64();
  CHECK(dst.IsCoreRegister() && base.IsCoreRegister());
  // Remove dst and base from the temp list - higher level API uses IP1, IP0.
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(dst.AsCoreRegister()), reg_x(base.AsCoreRegister()));
  ___ Ldr(reg_x(dst.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
}

void Arm64Assembler::LoadRawPtrFromThread64(ManagedRegister m_dst, ThreadOffset<8> offs) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(dst.AsCoreRegister(), ETR, offs.Int32Value());
}

// Copying routines.
void Arm64Assembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t size) {
  Arm64ManagedRegister dst = m_dst.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      if (size == 4) {
        CHECK(src.IsWRegister());
        ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
      } else {
        if (src.IsCoreRegister()) {
          ___ Mov(reg_x(dst.AsCoreRegister()), reg_x(src.AsCoreRegister()));
        } else {
          ___ Mov(reg_x(dst.AsCoreRegister()), reg_w(src.AsWRegister()));
        }
      }
    } else if (dst.IsWRegister()) {
      CHECK(src.IsWRegister()) << src;
      ___ Mov(reg_w(dst.AsWRegister()), reg_w(src.AsWRegister()));
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      ___ Fmov(reg_s(dst.AsSRegister()), reg_s(src.AsSRegister()));
    } else {
      CHECK(dst.IsDRegister()) << dst;
      CHECK(src.IsDRegister()) << src;
      ___ Fmov(reg_d(dst.AsDRegister()), reg_d(src.AsDRegister()));
    }
  }
}

void Arm64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                          ThreadOffset<8> tr_offs,
                                          ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
}

void Arm64Assembler::CopyRawPtrToThread64(ThreadOffset<8> tr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(scratch.AsCoreRegister(), ETR, tr_offs.Int32Value());
}

void Arm64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                             ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(),
                  SP, src.Int32Value());
  StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(),
                 SP, dest.Int32Value());
}

void Arm64Assembler::Copy(FrameOffset dest, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = src_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), base.AsCoreRegister(),
                   src_offset.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(ManagedRegister m_dest_base, Offset dest_offs, FrameOffset src,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister base = m_dest_base.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadWFromOffset(kLoadWord, scratch.AsWRegister(), SP, src.Int32Value());
    StoreWToOffset(kStoreWord, scratch.AsWRegister(), base.AsCoreRegister(),
                   dest_offs.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), dest_offs.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                          ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

void Arm64Assembler::Copy(ManagedRegister m_dest, Offset dest_offset,
                          ManagedRegister m_src, Offset src_offset,
                          ManagedRegister m_scratch, size_t size) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64ManagedRegister src = m_src.AsArm64();
  Arm64ManagedRegister dest = m_dest.AsArm64();
  CHECK(dest.IsCoreRegister()) << dest;
  CHECK(src.IsCoreRegister()) << src;
  CHECK(scratch.IsCoreRegister() || scratch.IsWRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    if (scratch.IsWRegister()) {
      LoadWFromOffset(kLoadWord, scratch.AsWRegister(), src.AsCoreRegister(),
                    src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsWRegister(), dest.AsCoreRegister(),
                   dest_offset.Int32Value());
    } else {
      LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), src.AsCoreRegister(),
                    src_offset.Int32Value());
      StoreWToOffset(kStoreWord, scratch.AsOverlappingCoreRegisterLow(), dest.AsCoreRegister(),
                   dest_offset.Int32Value());
    }
  } else if (size == 8) {
    LoadFromOffset(scratch.AsCoreRegister(), src.AsCoreRegister(), src_offset.Int32Value());
    StoreToOffset(scratch.AsCoreRegister(), dest.AsCoreRegister(), dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Arm64Assembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/,
                          FrameOffset /*src*/, Offset /*src_offset*/,
                          ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Copy() variant";
}

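// Emit a full data memory barrier (inner-shareable domain) on SMP builds; a no-op otherwise.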
void Arm64Assembler::MemoryBarrier(ManagedRegister m_scratch) {
  // TODO: Should we check that m_scratch is IP? - see arm.
#if ANDROID_SMP != 0
  ___ Dmb(vixl::InnerShareable, vixl::BarrierAll);
#endif
}

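// Sign- and zero-extension of 1- and 2-byte values, operating on W registers in place.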
void Arm64Assembler::SignExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ sxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ sxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64Assembler::ZeroExtend(ManagedRegister mreg, size_t size) {
  Arm64ManagedRegister reg = mreg.AsArm64();
  CHECK(size == 1 || size == 2) << size;
  CHECK(reg.IsWRegister()) << reg;
  if (size == 1) {
    ___ uxtb(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  } else {
    ___ uxth(reg_w(reg.AsWRegister()), reg_w(reg.AsWRegister()));
  }
}

void Arm64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void Arm64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

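// Call routines: load the code pointer through a scratch register, then branch (and link) to it.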
void Arm64Assembler::Call(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(scratch.AsCoreRegister(), base.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::JumpTo(ManagedRegister m_base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister base = m_base.AsArm64();
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Remove base and scratch from the temp list - higher level API uses IP1, IP0.
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(base.AsCoreRegister()), reg_x(scratch.AsCoreRegister()));
  ___ Ldr(reg_x(scratch.AsCoreRegister()), MEM_OP(reg_x(base.AsCoreRegister()), offs.Int32Value()));
  ___ Br(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::Call(FrameOffset base, Offset offs, ManagedRegister m_scratch) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP, base.Int32Value());
  LoadFromOffset(scratch.AsCoreRegister(), scratch.AsCoreRegister(), offs.Int32Value());
  ___ Blr(reg_x(scratch.AsCoreRegister()));
}

void Arm64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL) << "Unimplemented Call() variant";
}

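// Handle scope entries: an entry is either null or the address of the handle scope slot
// (SP + handle_scope_offset) holding the reference.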
void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffset handle_scope_offs,
                                     ManagedRegister m_in_reg, bool null_allowed) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  // For now we only hold stale handle scope entries in x registers.
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadWFromOffset(kLoadWord, out_reg.AsOverlappingCoreRegisterLow(), SP,
                      handle_scope_offs.Int32Value());
      in_reg = out_reg;
    }
    ___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
    if (!out_reg.Equals(in_reg)) {
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
  }
}

void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset handle_scope_offset,
                                     ManagedRegister m_scratch, bool null_allowed) {
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadWFromOffset(kLoadWord, scratch.AsOverlappingCoreRegisterLow(), SP,
                    handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    ___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
    // TODO: Move this logic into AddConstant with flags.
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
  StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
                                           ManagedRegister m_in_reg) {
  Arm64ManagedRegister out_reg = m_out_reg.AsArm64();
  Arm64ManagedRegister in_reg = m_in_reg.AsArm64();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  vixl::Label exit;
  if (!out_reg.Equals(in_reg)) {
    // FIXME: Who sets the flags here?
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
  }
  ___ Cbz(reg_x(in_reg.AsCoreRegister()), &exit);
  LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
  ___ Bind(&exit);
}

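// Exception polling: check the thread-local exception field and, if an exception is
// pending, branch to a slow path that is emitted later by EmitSlowPaths().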
void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
  CHECK_ALIGNED(stack_adjust, kStackAlignment);
  Arm64ManagedRegister scratch = m_scratch.AsArm64();
  Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
  exception_blocks_.push_back(current_exception);
  LoadFromOffset(scratch.AsCoreRegister(), ETR, Thread::ExceptionOffset<8>().Int32Value());
  ___ Cbnz(reg_x(scratch.AsCoreRegister()), current_exception->Entry());
}

void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
  vixl::UseScratchRegisterScope temps(vixl_masm_);
  temps.Exclude(reg_x(exception->scratch_.AsCoreRegister()));
  vixl::Register temp = temps.AcquireX();

  // Bind exception poll entry.
  ___ Bind(exception->Entry());
  if (exception->stack_adjust_ != 0) {  // Fix up the frame.
    DecreaseFrameSize(exception->stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving X0 as this won't return.
  ___ Mov(reg_x(X0), reg_x(exception->scratch_.AsCoreRegister()));
  ___ Ldr(temp, MEM_OP(reg_x(ETR), QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value()));

  // Move ETR(Callee saved) back to TR(Caller saved) reg. We use ETR on calls
  // to external functions that might trash TR. We do not need the original
  // ETR(X21) saved in BuildFrame().
  ___ Mov(reg_x(TR), reg_x(ETR));

  ___ Blr(temp);
  // Call should never return.
  ___ Brk();
}

constexpr size_t kFramePointerSize = 8;

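// Build the managed-to-native stub frame: grow the stack, spill the callee-saved core
// registers, copy TR into ETR, store the StackReference<Method> at SP, and write out
// the entry spills.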
void Arm64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                        const std::vector<ManagedRegister>& callee_save_regs,
                        const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK(X0 == method_reg.AsArm64().AsCoreRegister());

  // TODO: *create APCS FP - end of FP chain;
  //       *add support for saving a different set of callee regs.
  // For now we check that the size of callee regs vector is 11.
  CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
  // Increase frame to required size - must be at least space to push StackReference<Method>.
  CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);
  IncreaseFrameSize(frame_size);

  // TODO: Ugly hard code...
  // Should generate these according to the spill mask automatically.
  // TUNING: Use stp.
  // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
  size_t reg_offset = frame_size;
  reg_offset -= 8;
  StoreToOffset(LR, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X29, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X28, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X27, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X26, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X25, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X24, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X23, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X22, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X21, SP, reg_offset);
  reg_offset -= 8;
  StoreToOffset(X20, SP, reg_offset);

  // Move TR(Caller saved) to ETR(Callee saved). The original (ETR)X21 has been saved on stack.
  // This way we make sure that TR is not trashed by native code.
  ___ Mov(reg_x(ETR), reg_x(TR));

  // Write StackReference<Method>.
  DCHECK_EQ(4U, sizeof(StackReference<mirror::ArtMethod>));
  StoreWToOffset(StoreOperandType::kStoreWord, W0, SP, 0);

  // Write out entry spills
  int32_t offset = frame_size + sizeof(StackReference<mirror::ArtMethod>);
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Arm64ManagedRegister reg = entry_spills.at(i).AsArm64();
    if (reg.IsNoRegister()) {
      // only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsCoreRegister()) {
      StoreToOffset(reg.AsCoreRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsWRegister()) {
      StoreWToOffset(kStoreWord, reg.AsWRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    }
  }
}

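// Tear down the frame built by BuildFrame: restore TR from ETR, reload the callee-saved
// registers, drop the frame, and return.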
void Arm64Assembler::RemoveFrame(size_t frame_size, const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // For now we only check that the size of the frame is greater than the spill size.
  CHECK_EQ(callee_save_regs.size(), kJniRefSpillRegsSize);
  CHECK_GT(frame_size, kJniRefSpillRegsSize * kFramePointerSize);

  // We move ETR(aapcs64 callee saved) back to TR(aapcs64 caller saved) which might have
  // been trashed in the native call. The original ETR(X21) is restored from stack.
  ___ Mov(reg_x(TR), reg_x(ETR));

  // TODO: Ugly hard code...
  // Should generate these according to the spill mask automatically.
  // TUNING: Use ldp.
  // Note: Must match Arm64JniCallingConvention::CoreSpillMask().
  size_t reg_offset = frame_size;
  reg_offset -= 8;
  LoadFromOffset(LR, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X29, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X28, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X27, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X26, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X25, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X24, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X23, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X22, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X21, SP, reg_offset);
  reg_offset -= 8;
  LoadFromOffset(X20, SP, reg_offset);

  // Decrease frame size to start of callee saved regs.
  DecreaseFrameSize(frame_size);

  // Pop callee saved and return to LR.
  ___ Ret();
}

}  // namespace arm64
}  // namespace art