/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_mips64.h"

#include "base/bit_utils.h"
#include "base/casts.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "memory_region.h"
#include "thread.h"

namespace art {
namespace mips64 {

void Mips64Assembler::Emit(uint32_t value) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  buffer_.Emit<uint32_t>(value);
}

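// Instruction field layout assumed by the Emit* helpers below (standard MIPS
// encoding; the k*Shift constants are defined in assembler_mips64.h):
//   R-type: opcode[31:26] rs[25:21] rt[20:16] rd[15:11] shamt[10:6] funct[5:0]
//   I-type: opcode[31:26] rs[25:21] rt[20:16] imm[15:0]
//   J-type: opcode[31:26] addr[25:0]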
void Mips64Assembler::EmitR(int opcode, GpuRegister rs, GpuRegister rt, GpuRegister rd,
                            int shamt, int funct) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  CHECK_NE(rd, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      static_cast<uint32_t>(rt) << kRtShift |
                      static_cast<uint32_t>(rd) << kRdShift |
                      shamt << kShamtShift |
                      funct;
  Emit(encoding);
}

void Mips64Assembler::EmitI(int opcode, GpuRegister rs, GpuRegister rt, uint16_t imm) {
  CHECK_NE(rs, kNoGpuRegister);
  CHECK_NE(rt, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      static_cast<uint32_t>(rt) << kRtShift |
                      imm;
  Emit(encoding);
}

void Mips64Assembler::EmitI21(int opcode, GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, kNoGpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      static_cast<uint32_t>(rs) << kRsShift |
                      (imm21 & 0x1FFFFF);
  Emit(encoding);
}

void Mips64Assembler::EmitJ(int opcode, uint32_t addr26) {
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      (addr26 & 0x3FFFFFF);
  Emit(encoding);
}

void Mips64Assembler::EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd,
                             int funct) {
  CHECK_NE(ft, kNoFpuRegister);
  CHECK_NE(fs, kNoFpuRegister);
  CHECK_NE(fd, kNoFpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      fmt << kFmtShift |
                      static_cast<uint32_t>(ft) << kFtShift |
                      static_cast<uint32_t>(fs) << kFsShift |
                      static_cast<uint32_t>(fd) << kFdShift |
                      funct;
  Emit(encoding);
}

void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister ft, uint16_t imm) {
  CHECK_NE(ft, kNoFpuRegister);
  uint32_t encoding = static_cast<uint32_t>(opcode) << kOpcodeShift |
                      fmt << kFmtShift |
                      static_cast<uint32_t>(ft) << kFtShift |
                      imm;
  Emit(encoding);
}

void Mips64Assembler::Add(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x20);
}

void Mips64Assembler::Addi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x8, rs, rt, imm16);
}

void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x21);
}

void Mips64Assembler::Addiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x9, rs, rt, imm16);
}

void Mips64Assembler::Daddu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2d);
}

void Mips64Assembler::Daddiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x19, rs, rt, imm16);
}

void Mips64Assembler::Sub(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x22);
}

void Mips64Assembler::Subu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x23);
}

void Mips64Assembler::Dsubu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2f);
}

void Mips64Assembler::MultR2(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x18);
}

void Mips64Assembler::MultuR2(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x19);
}

void Mips64Assembler::DivR2(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1a);
}

void Mips64Assembler::DivuR2(GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, static_cast<GpuRegister>(0), 0, 0x1b);
}

void Mips64Assembler::MulR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0x1c, rs, rt, rd, 0, 2);
}

void Mips64Assembler::DivR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  DivR2(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::ModR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  DivR2(rs, rt);
  Mfhi(rd);
}

void Mips64Assembler::DivuR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  DivuR2(rs, rt);
  Mflo(rd);
}

void Mips64Assembler::ModuR2(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  DivuR2(rs, rt);
  Mfhi(rd);
}

void Mips64Assembler::MulR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x18);
}

void Mips64Assembler::DivR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1a);
}

void Mips64Assembler::ModR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1a);
}

void Mips64Assembler::DivuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1b);
}

void Mips64Assembler::ModuR6(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1b);
}

void Mips64Assembler::Dmul(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1c);
}

void Mips64Assembler::Ddiv(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1e);
}

void Mips64Assembler::Dmod(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1e);
}

void Mips64Assembler::Ddivu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 2, 0x1f);
}

void Mips64Assembler::Dmodu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 3, 0x1f);
}

void Mips64Assembler::And(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x24);
}

void Mips64Assembler::Andi(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xc, rs, rt, imm16);
}

void Mips64Assembler::Or(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x25);
}

void Mips64Assembler::Ori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xd, rs, rt, imm16);
}

void Mips64Assembler::Xor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x26);
}

void Mips64Assembler::Xori(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xe, rs, rt, imm16);
}

void Mips64Assembler::Nor(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x27);
}

void Mips64Assembler::Seb(GpuRegister rd, GpuRegister rt) {
  EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x10, 0x20);
}

void Mips64Assembler::Seh(GpuRegister rd, GpuRegister rt) {
  EmitR(0x1f, static_cast<GpuRegister>(0), rt, rd, 0x18, 0x20);
}

void Mips64Assembler::Dext(GpuRegister rt, GpuRegister rs, int pos, int size_less_one) {
  DCHECK(0 <= pos && pos < 32) << pos;
  DCHECK(0 <= size_less_one && size_less_one < 32) << size_less_one;
  EmitR(0x1f, rs, rt, static_cast<GpuRegister>(size_less_one), pos, 3);
}

void Mips64Assembler::Sll(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x00);
}

void Mips64Assembler::Srl(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x02);
}

void Mips64Assembler::Sra(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x03);
}

void Mips64Assembler::Sllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x04);
}

void Mips64Assembler::Srlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x06);
}

void Mips64Assembler::Srav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x07);
}

void Mips64Assembler::Dsll(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x38);
}

void Mips64Assembler::Dsrl(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3a);
}

void Mips64Assembler::Dsra(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3b);
}

void Mips64Assembler::Dsll32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3c);
}

void Mips64Assembler::Dsrl32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3e);
}

void Mips64Assembler::Dsra32(GpuRegister rd, GpuRegister rt, int shamt) {
  EmitR(0, static_cast<GpuRegister>(0), rt, rd, shamt, 0x3f);
}

void Mips64Assembler::Dsllv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x14);
}

void Mips64Assembler::Dsrlv(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x16);
}

void Mips64Assembler::Dsrav(GpuRegister rd, GpuRegister rt, GpuRegister rs) {
  EmitR(0, rs, rt, rd, 0, 0x17);
}

void Mips64Assembler::Lb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x20, rs, rt, imm16);
}

void Mips64Assembler::Lh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x21, rs, rt, imm16);
}

void Mips64Assembler::Lw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x23, rs, rt, imm16);
}

void Mips64Assembler::Ld(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x37, rs, rt, imm16);
}

void Mips64Assembler::Lbu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x24, rs, rt, imm16);
}

void Mips64Assembler::Lhu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x25, rs, rt, imm16);
}

void Mips64Assembler::Lwu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x27, rs, rt, imm16);
}

void Mips64Assembler::Lui(GpuRegister rt, uint16_t imm16) {
  EmitI(0xf, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Dahi(GpuRegister rs, uint16_t imm16) {
  EmitI(1, rs, static_cast<GpuRegister>(6), imm16);
}

void Mips64Assembler::Dati(GpuRegister rs, uint16_t imm16) {
  EmitI(1, rs, static_cast<GpuRegister>(0x1e), imm16);
}

void Mips64Assembler::Sync(uint32_t stype) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
           static_cast<GpuRegister>(0), stype & 0x1f, 0xf);
}

void Mips64Assembler::Mfhi(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x10);
}

void Mips64Assembler::Mflo(GpuRegister rd) {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0), rd, 0, 0x12);
}

void Mips64Assembler::Sb(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x28, rs, rt, imm16);
}

void Mips64Assembler::Sh(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x29, rs, rt, imm16);
}

void Mips64Assembler::Sw(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x2b, rs, rt, imm16);
}

void Mips64Assembler::Sd(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3f, rs, rt, imm16);
}

void Mips64Assembler::Slt(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2a);
}

void Mips64Assembler::Sltu(GpuRegister rd, GpuRegister rs, GpuRegister rt) {
  EmitR(0, rs, rt, rd, 0, 0x2b);
}

void Mips64Assembler::Slti(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xa, rs, rt, imm16);
}

void Mips64Assembler::Sltiu(GpuRegister rt, GpuRegister rs, uint16_t imm16) {
  EmitI(0xb, rs, rt, imm16);
}

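// Note: the pre-R6 branches and jumps below (BEQ, BNE, J, JAL, JALR) have a
// delay slot, which these helpers fill with a NOP; the compact R6 branches
// further down (B<cond>C, BEQZC, BNEZC, JIC, JIALC) have none.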
void Mips64Assembler::Beq(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  EmitI(0x4, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::Bne(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  EmitI(0x5, rs, rt, imm16);
  Nop();
}

void Mips64Assembler::J(uint32_t addr26) {
  EmitJ(0x2, addr26);
  Nop();
}

void Mips64Assembler::Jal(uint32_t addr26) {
  EmitJ(0x3, addr26);
  Nop();
}

void Mips64Assembler::Jalr(GpuRegister rd, GpuRegister rs) {
  EmitR(0, rs, static_cast<GpuRegister>(0), rd, 0, 0x09);
  Nop();
}

void Mips64Assembler::Jalr(GpuRegister rs) {
  Jalr(RA, rs);
}

void Mips64Assembler::Jr(GpuRegister rs) {
  Jalr(ZERO, rs);
}

void Mips64Assembler::Auipc(GpuRegister rs, uint16_t imm16) {
  EmitI(0x3B, rs, static_cast<GpuRegister>(0x1E), imm16);
}

void Mips64Assembler::Jic(GpuRegister rt, uint16_t imm16) {
  EmitI(0x36, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Jialc(GpuRegister rt, uint16_t imm16) {
  EmitI(0x3E, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x17, rs, rt, imm16);
}

void Mips64Assembler::Bltzc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x17, rt, rt, imm16);
}

void Mips64Assembler::Bgtzc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x17, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x16, rs, rt, imm16);
}

void Mips64Assembler::Bgezc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x16, rt, rt, imm16);
}

void Mips64Assembler::Blezc(GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rt, ZERO);
  EmitI(0x16, static_cast<GpuRegister>(0), rt, imm16);
}

void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x7, rs, rt, imm16);
}

void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  EmitI(0x6, rs, rt, imm16);
}

void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  // The R6 BEQC encoding requires rs < rt (other register orderings encode
  // different instructions); equality is symmetric, so order the operands.
  EmitI(0x8, (rs < rt) ? rs : rt, (rs < rt) ? rt : rs, imm16);
}

void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, uint16_t imm16) {
  CHECK_NE(rs, ZERO);
  CHECK_NE(rt, ZERO);
  CHECK_NE(rs, rt);
  // Likewise, BNEC requires rs < rt in its encoding.
  EmitI(0x18, (rs < rt) ? rs : rt, (rs < rt) ? rt : rs, imm16);
}

void Mips64Assembler::Beqzc(GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, ZERO);
  EmitI21(0x36, rs, imm21);
}

void Mips64Assembler::Bnezc(GpuRegister rs, uint32_t imm21) {
  CHECK_NE(rs, ZERO);
  EmitI21(0x3E, rs, imm21);
}

void Mips64Assembler::AddS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x0);
}

void Mips64Assembler::SubS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x1);
}

void Mips64Assembler::MulS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x2);
}

void Mips64Assembler::DivS(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x10, ft, fs, fd, 0x3);
}

void Mips64Assembler::AddD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x0);
}

void Mips64Assembler::SubD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x1);
}

void Mips64Assembler::MulD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x2);
}

void Mips64Assembler::DivD(FpuRegister fd, FpuRegister fs, FpuRegister ft) {
  EmitFR(0x11, 0x11, ft, fs, fd, 0x3);
}

void Mips64Assembler::MovS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

void Mips64Assembler::MovD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x6);
}

void Mips64Assembler::NegS(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x7);
}

void Mips64Assembler::NegD(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x7);
}

void Mips64Assembler::Cvtsw(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x20);
}

void Mips64Assembler::Cvtdw(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x14, static_cast<FpuRegister>(0), fs, fd, 0x21);
}

void Mips64Assembler::Cvtsd(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x11, static_cast<FpuRegister>(0), fs, fd, 0x20);
}

void Mips64Assembler::Cvtds(FpuRegister fd, FpuRegister fs) {
  EmitFR(0x11, 0x10, static_cast<FpuRegister>(0), fs, fd, 0x21);
}

void Mips64Assembler::Mfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x00, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Mtc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x04, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Dmfc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x01, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Dmtc1(GpuRegister rt, FpuRegister fs) {
  EmitFR(0x11, 0x05, static_cast<FpuRegister>(rt), fs, static_cast<FpuRegister>(0), 0x0);
}

void Mips64Assembler::Lwc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x31, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Ldc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x35, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Swc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x39, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Sdc1(FpuRegister ft, GpuRegister rs, uint16_t imm16) {
  EmitI(0x3d, rs, static_cast<GpuRegister>(ft), imm16);
}

void Mips64Assembler::Break() {
  EmitR(0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0xD);
}

void Mips64Assembler::Nop() {
  EmitR(0x0, static_cast<GpuRegister>(0), static_cast<GpuRegister>(0),
        static_cast<GpuRegister>(0), 0, 0x0);
}

void Mips64Assembler::Move(GpuRegister rd, GpuRegister rs) {
  Or(rd, rs, ZERO);
}

void Mips64Assembler::Clear(GpuRegister rd) {
  Move(rd, ZERO);
}

void Mips64Assembler::Not(GpuRegister rd, GpuRegister rs) {
  Nor(rd, rs, ZERO);
}

void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) {
  if (IsUint<16>(value)) {
    // Use OR with (unsigned) immediate to encode 16b unsigned int.
    Ori(rd, ZERO, value);
  } else if (IsInt<16>(value)) {
    // Use ADD with (signed) immediate to encode 16b signed int.
    Addiu(rd, ZERO, value);
  } else {
    Lui(rd, value >> 16);
    if (value & 0xFFFF)
      Ori(rd, rd, value);
  }
}

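// Illustrative expansions of LoadConst32 (above) and LoadConst64 (below) --
// a sketch; the exact sequence is picked by the cases in the code:
//   LoadConst32(rd, 0x1234)        ->  ori   rd, zero, 0x1234
//   LoadConst32(rd, -8)            ->  addiu rd, zero, -8
//   LoadConst32(rd, 0x12345678)    ->  lui   rd, 0x1234
//                                      ori   rd, rd, 0x5678
//   LoadConst64(rd, 0x1234567800)  ->  lui   rd, 0x246   (0x1234567800 >> 11 == 0x2468acf)
//                                      ori   rd, rd, 0x8acf
//                                      dsll  rd, rd, 11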
void Mips64Assembler::LoadConst64(GpuRegister rd, int64_t value) {
  int bit31 = (value & UINT64_C(0x80000000)) != 0;

  // Loads with 1 instruction.
  if (IsUint<16>(value)) {
    Ori(rd, ZERO, value);
  } else if (IsInt<16>(value)) {
    Daddiu(rd, ZERO, value);
  } else if ((value & 0xFFFF) == 0 && IsInt<16>(value >> 16)) {
    Lui(rd, value >> 16);
  } else if (IsInt<32>(value)) {
    // Loads with 2 instructions.
    Lui(rd, value >> 16);
    Ori(rd, rd, value);
  } else if ((value & 0xFFFF0000) == 0 && IsInt<16>(value >> 32)) {
    Ori(rd, ZERO, value);
    Dahi(rd, value >> 32);
  } else if ((value & UINT64_C(0xFFFFFFFF0000)) == 0) {
    Ori(rd, ZERO, value);
    Dati(rd, value >> 48);
  } else if ((value & 0xFFFF) == 0 &&
             (-32768 - bit31) <= (value >> 32) && (value >> 32) <= (32767 - bit31)) {
    Lui(rd, value >> 16);
    Dahi(rd, (value >> 32) + bit31);
  } else if ((value & 0xFFFF) == 0 && ((value >> 31) & 0x1FFFF) == ((0x20000 - bit31) & 0x1FFFF)) {
    Lui(rd, value >> 16);
    Dati(rd, (value >> 48) + bit31);
  } else {
    int shift_cnt = CTZ(value);
    int64_t tmp = value >> shift_cnt;
    if (IsUint<16>(tmp)) {
      Ori(rd, ZERO, tmp);
      if (shift_cnt < 32)
        Dsll(rd, rd, shift_cnt);
      else
        Dsll32(rd, rd, shift_cnt & 31);
    } else if (IsInt<16>(tmp)) {
      Daddiu(rd, ZERO, tmp);
      if (shift_cnt < 32)
        Dsll(rd, rd, shift_cnt);
      else
        Dsll32(rd, rd, shift_cnt & 31);
    } else if (IsInt<32>(tmp)) {
      // Loads with 3 instructions.
      Lui(rd, tmp >> 16);
      Ori(rd, rd, tmp);
      if (shift_cnt < 32)
        Dsll(rd, rd, shift_cnt);
      else
        Dsll32(rd, rd, shift_cnt & 31);
    } else {
      shift_cnt = 16 + CTZ(value >> 16);
      tmp = value >> shift_cnt;
      if (IsUint<16>(tmp)) {
        Ori(rd, ZERO, tmp);
        if (shift_cnt < 32)
          Dsll(rd, rd, shift_cnt);
        else
          Dsll32(rd, rd, shift_cnt & 31);
        Ori(rd, rd, value);
      } else if (IsInt<16>(tmp)) {
        Daddiu(rd, ZERO, tmp);
        if (shift_cnt < 32)
          Dsll(rd, rd, shift_cnt);
        else
          Dsll32(rd, rd, shift_cnt & 31);
        Ori(rd, rd, value);
      } else {
        // Loads with 3-4 instructions.
        uint64_t tmp2 = value;
        bool used_lui = false;
        if (((tmp2 >> 16) & 0xFFFF) != 0 || (tmp2 & 0xFFFFFFFF) == 0) {
          Lui(rd, tmp2 >> 16);
          used_lui = true;
        }
        if ((tmp2 & 0xFFFF) != 0) {
          if (used_lui)
            Ori(rd, rd, tmp2);
          else
            Ori(rd, ZERO, tmp2);
        }
        if (bit31) {
          tmp2 += UINT64_C(0x100000000);
        }
        if (((tmp2 >> 32) & 0xFFFF) != 0) {
          Dahi(rd, tmp2 >> 32);
        }
        if (tmp2 & UINT64_C(0x800000000000)) {
          tmp2 += UINT64_C(0x1000000000000);
        }
        if ((tmp2 >> 48) != 0) {
          Dati(rd, tmp2 >> 48);
        }
      }
    }
  }
}

void Mips64Assembler::Addiu32(GpuRegister rt, GpuRegister rs, int32_t value, GpuRegister rtmp) {
  if (IsInt<16>(value)) {
    Addiu(rt, rs, value);
  } else {
    LoadConst32(rtmp, value);
    Addu(rt, rs, rtmp);
  }
}

void Mips64Assembler::Daddiu64(GpuRegister rt, GpuRegister rs, int64_t value, GpuRegister rtmp) {
  if (IsInt<16>(value)) {
    Daddiu(rt, rs, value);
  } else {
    LoadConst64(rtmp, value);
    Daddu(rt, rs, rtmp);
  }
}

//
// MIPS64R6 branches
//
//
// Unconditional (pc + 32-bit signed offset):
//
//   auipc    at, ofs_high
//   jic      at, ofs_low
//   // no delay/forbidden slot
//
//
// Conditional (pc + 32-bit signed offset):
//
//   b<cond>c   reg, +2      // skip next 2 instructions
//   auipc      at, ofs_high
//   jic        at, ofs_low
//   // no delay/forbidden slot
//
//
// Unconditional (pc + 32-bit signed offset) and link:
//
//   auipc    reg, ofs_high
//   daddiu   reg, ofs_low
//   jialc    reg, 0
//   // no delay/forbidden slot
//
//
// TODO: use shorter instruction sequences whenever possible.
//
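// The ofs_high/ofs_low split must compensate for the sign extension that
// jic/daddiu apply to their 16-bit immediate; a sketch of the computation
// used by Bind() and B() below:
//
//   offset += (offset & 0x8000) << 1;  // add carry when ofs_low is negative
//   ofs_high = offset >> 16;           // auipc  at, ofs_high
//   ofs_low  = offset & 0xFFFF;        // jic    at, ofs_low
//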

void Mips64Assembler::Bind(Label* label) {
  CHECK(!label->IsBound());
  int32_t bound_pc = buffer_.Size();

  // Walk the list of branches (auipc + jic pairs) referring to and preceding this label.
  // Embed the previously unknown pc-relative addresses in them.
  while (label->IsLinked()) {
    int32_t position = label->Position();
    // Extract the branch (instruction pair).
    uint32_t auipc = buffer_.Load<uint32_t>(position);
    uint32_t jic = buffer_.Load<uint32_t>(position + 4);  // actually, jic or daddiu

    // Extract the location of the previous pair in the list (walking the list backwards;
    // the previous pair's location was stored in the immediate operands of the instructions).
    int32_t prev = (auipc << 16) | (jic & 0xFFFF);

    // Get the pc-relative address.
    uint32_t offset = bound_pc - position;
    offset += (offset & 0x8000) << 1;  // account for sign extension in jic/daddiu

    // Embed it in the two instructions.
    auipc = (auipc & 0xFFFF0000) | (offset >> 16);
    jic = (jic & 0xFFFF0000) | (offset & 0xFFFF);

    // Save the adjusted instructions.
    buffer_.Store<uint32_t>(position, auipc);
    buffer_.Store<uint32_t>(position + 4, jic);

    // On to the previous branch in the list...
    label->position_ = prev;
  }

  // Now make the label object contain its own location
  // (it will be used by the branches referring to and following this label).
  label->BindTo(bound_pc);
}

void Mips64Assembler::B(Label* label) {
  if (label->IsBound()) {
    // Branch backwards (to a preceding label), distance is known
    uint32_t offset = label->Position() - buffer_.Size();
    CHECK_LE(static_cast<int32_t>(offset), 0);
    offset += (offset & 0x8000) << 1;  // account for sign extension in jic
    Auipc(AT, offset >> 16);
    Jic(AT, offset);
  } else {
    // Branch forward (to a following label), distance is unknown
    int32_t position = buffer_.Size();
    // The first branch forward will have 0 in its pc-relative address (copied from label's
    // position). It will be the terminator of the list of forward-reaching branches.
    uint32_t prev = label->position_;
    Auipc(AT, prev >> 16);
    Jic(AT, prev);
    // Now make the link object point to the location of this branch
    // (this forms a linked list of branches preceding this label)
    label->LinkTo(position);
  }
}

void Mips64Assembler::Jalr(Label* label, GpuRegister indirect_reg) {
  if (label->IsBound()) {
    // Branch backwards (to a preceding label), distance is known
    uint32_t offset = label->Position() - buffer_.Size();
    CHECK_LE(static_cast<int32_t>(offset), 0);
    offset += (offset & 0x8000) << 1;  // account for sign extension in daddiu
    Auipc(indirect_reg, offset >> 16);
    Daddiu(indirect_reg, indirect_reg, offset);
    Jialc(indirect_reg, 0);
  } else {
    // Branch forward (to a following label), distance is unknown
    int32_t position = buffer_.Size();
    // The first branch forward will have 0 in its pc-relative address (copied from label's
    // position). It will be the terminator of the list of forward-reaching branches.
    uint32_t prev = label->position_;
    Auipc(indirect_reg, prev >> 16);
    Daddiu(indirect_reg, indirect_reg, prev);
    Jialc(indirect_reg, 0);
    // Now make the link object point to the location of this branch
    // (this forms a linked list of branches preceding this label)
    label->LinkTo(position);
  }
}

void Mips64Assembler::Bltc(GpuRegister rs, GpuRegister rt, Label* label) {
  Bgec(rs, rt, 2);
  B(label);
}

void Mips64Assembler::Bltzc(GpuRegister rt, Label* label) {
  Bgezc(rt, 2);
  B(label);
}

void Mips64Assembler::Bgtzc(GpuRegister rt, Label* label) {
  Blezc(rt, 2);
  B(label);
}

void Mips64Assembler::Bgec(GpuRegister rs, GpuRegister rt, Label* label) {
  Bltc(rs, rt, 2);
  B(label);
}

void Mips64Assembler::Bgezc(GpuRegister rt, Label* label) {
  Bltzc(rt, 2);
  B(label);
}

void Mips64Assembler::Blezc(GpuRegister rt, Label* label) {
  Bgtzc(rt, 2);
  B(label);
}

void Mips64Assembler::Bltuc(GpuRegister rs, GpuRegister rt, Label* label) {
  Bgeuc(rs, rt, 2);
  B(label);
}

void Mips64Assembler::Bgeuc(GpuRegister rs, GpuRegister rt, Label* label) {
  Bltuc(rs, rt, 2);
  B(label);
}

void Mips64Assembler::Beqc(GpuRegister rs, GpuRegister rt, Label* label) {
  Bnec(rs, rt, 2);
  B(label);
}

void Mips64Assembler::Bnec(GpuRegister rs, GpuRegister rt, Label* label) {
  Beqc(rs, rt, 2);
  B(label);
}

void Mips64Assembler::Beqzc(GpuRegister rs, Label* label) {
  Bnezc(rs, 2);
  B(label);
}

void Mips64Assembler::Bnezc(GpuRegister rs, Label* label) {
  Beqzc(rs, 2);
  B(label);
}

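// The load/store helpers below accept arbitrary 32-bit offsets; an offset that
// does not fit the signed 16-bit immediate is first materialized in AT, e.g.
// (a sketch):
//   LoadFromOffset(kLoadWord, V0, S0, 0x12340)
//     ->  lui   at, 0x1
//         ori   at, at, 0x2340
//         daddu at, at, s0
//         lw    v0, 0(at)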
void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base,
                                     int32_t offset) {
  if (!IsInt<16>(offset)) {
    LoadConst32(AT, offset);
    Daddu(AT, AT, base);
    base = AT;
    offset = 0;
  }

  switch (type) {
    case kLoadSignedByte:
      Lb(reg, base, offset);
      break;
    case kLoadUnsignedByte:
      Lbu(reg, base, offset);
      break;
    case kLoadSignedHalfword:
      Lh(reg, base, offset);
      break;
    case kLoadUnsignedHalfword:
      Lhu(reg, base, offset);
      break;
    case kLoadWord:
      Lw(reg, base, offset);
      break;
    case kLoadUnsignedWord:
      Lwu(reg, base, offset);
      break;
    case kLoadDoubleword:
      Ld(reg, base, offset);
      break;
  }
}

void Mips64Assembler::LoadFpuFromOffset(LoadOperandType type, FpuRegister reg, GpuRegister base,
                                        int32_t offset) {
  if (!IsInt<16>(offset)) {
    LoadConst32(AT, offset);
    Daddu(AT, AT, base);
    base = AT;
    offset = 0;
  }

  switch (type) {
    case kLoadWord:
      Lwc1(reg, base, offset);
      break;
    case kLoadDoubleword:
      Ldc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset,
                               size_t size) {
  Mips64ManagedRegister dst = m_dst.AsMips64();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsGpuRegister()) {
    if (size == 4) {
      LoadFromOffset(kLoadWord, dst.AsGpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFromOffset(kLoadDoubleword, dst.AsGpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  } else if (dst.IsFpuRegister()) {
    if (size == 4) {
      CHECK_EQ(4u, size) << dst;
      LoadFpuFromOffset(kLoadWord, dst.AsFpuRegister(), src_register, src_offset);
    } else if (size == 8) {
      CHECK_EQ(8u, size) << dst;
      LoadFpuFromOffset(kLoadDoubleword, dst.AsFpuRegister(), src_register, src_offset);
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Load() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreToOffset(StoreOperandType type, GpuRegister reg, GpuRegister base,
                                    int32_t offset) {
  if (!IsInt<16>(offset)) {
    LoadConst32(AT, offset);
    Daddu(AT, AT, base);
    base = AT;
    offset = 0;
  }

  switch (type) {
    case kStoreByte:
      Sb(reg, base, offset);
      break;
    case kStoreHalfword:
      Sh(reg, base, offset);
      break;
    case kStoreWord:
      Sw(reg, base, offset);
      break;
    case kStoreDoubleword:
      Sd(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

void Mips64Assembler::StoreFpuToOffset(StoreOperandType type, FpuRegister reg, GpuRegister base,
                                       int32_t offset) {
  if (!IsInt<16>(offset)) {
    LoadConst32(AT, offset);
    Daddu(AT, AT, base);
    base = AT;
    offset = 0;
  }

  switch (type) {
    case kStoreWord:
      Swc1(reg, base, offset);
      break;
    case kStoreDoubleword:
      Sdc1(reg, base, offset);
      break;
    default:
      LOG(FATAL) << "UNREACHABLE";
  }
}

static dwarf::Reg DWARFReg(GpuRegister reg) {
  return dwarf::Reg::Mips64Core(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = 8;

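// Sketch of the frame laid out by BuildFrame() below (offsets from the new SP,
// after IncreaseFrameSize; the stack grows down):
//
//   SP + frame_size + 8 ...   entry spills (written into the caller's frame)
//   SP + frame_size - 8       RA
//   SP + frame_size - 16 ...  callee saves (last register in the highest slot)
//   SP + 0                    Method*
//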
void Mips64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                 const std::vector<ManagedRegister>& callee_save_regs,
                                 const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_ALIGNED(frame_size, kStackAlignment);

  // Increase frame to required size.
  IncreaseFrameSize(frame_size);

  // Push callee saves and return address
  int stack_offset = frame_size - kFramePointerSize;
  StoreToOffset(kStoreDoubleword, RA, SP, stack_offset);
  cfi_.RelOffset(DWARFReg(RA), stack_offset);
  for (int i = callee_save_regs.size() - 1; i >= 0; --i) {
    stack_offset -= kFramePointerSize;
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    StoreToOffset(kStoreDoubleword, reg, SP, stack_offset);
    cfi_.RelOffset(DWARFReg(reg), stack_offset);
  }

  // Write out Method*.
  StoreToOffset(kStoreDoubleword, method_reg.AsMips64().AsGpuRegister(), SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + kFramePointerSize;
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    Mips64ManagedRegister reg = entry_spills.at(i).AsMips64();
    ManagedRegisterSpill spill = entry_spills.at(i);
    int32_t size = spill.getSize();
    if (reg.IsNoRegister()) {
      // only increment stack offset.
      offset += size;
    } else if (reg.IsFpuRegister()) {
      StoreFpuToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
          reg.AsFpuRegister(), SP, offset);
      offset += size;
    } else if (reg.IsGpuRegister()) {
      StoreToOffset((size == 4) ? kStoreWord : kStoreDoubleword,
          reg.AsGpuRegister(), SP, offset);
      offset += size;
    }
  }
}

void Mips64Assembler::RemoveFrame(size_t frame_size,
                                  const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi_.RememberState();

  // Pop callee saves and return address
  int stack_offset = frame_size - (callee_save_regs.size() * kFramePointerSize) - kFramePointerSize;
  for (size_t i = 0; i < callee_save_regs.size(); ++i) {
    GpuRegister reg = callee_save_regs.at(i).AsMips64().AsGpuRegister();
    LoadFromOffset(kLoadDoubleword, reg, SP, stack_offset);
    cfi_.Restore(DWARFReg(reg));
    stack_offset += kFramePointerSize;
  }
  LoadFromOffset(kLoadDoubleword, RA, SP, stack_offset);
  cfi_.Restore(DWARFReg(RA));

  // Decrease frame to required size.
  DecreaseFrameSize(frame_size);

  // Then jump to the return address.
  Jr(RA);

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}

void Mips64Assembler::IncreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kFramePointerSize);
  Daddiu64(SP, SP, static_cast<int32_t>(-adjust));
  cfi_.AdjustCFAOffset(adjust);
}

void Mips64Assembler::DecreaseFrameSize(size_t adjust) {
  CHECK_ALIGNED(adjust, kFramePointerSize);
  Daddiu64(SP, SP, static_cast<int32_t>(adjust));
  cfi_.AdjustCFAOffset(-adjust);
}

void Mips64Assembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister src = msrc.AsMips64();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsGpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  } else if (src.IsFpuRegister()) {
    CHECK(size == 4 || size == 8) << size;
    if (size == 8) {
      StoreFpuToOffset(kStoreDoubleword, src.AsFpuRegister(), SP, dest.Int32Value());
    } else if (size == 4) {
      StoreFpuToOffset(kStoreWord, src.AsFpuRegister(), SP, dest.Int32Value());
    } else {
      UNIMPLEMENTED(FATAL) << "We only support Store() of size 4 and 8";
    }
  }
}

void Mips64Assembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreWord, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  Mips64ManagedRegister src = msrc.AsMips64();
  CHECK(src.IsGpuRegister());
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadConst32(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::StoreImmediateToThread64(ThreadOffset<8> dest, uint32_t imm,
                                               ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  // TODO: it's unclear whether 32 or 64 bits need to be stored (Arm64 and x86/x64 disagree?).
  // Is this function even referenced anywhere else in the code?
  LoadConst32(scratch.AsGpuRegister(), imm);
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, dest.Int32Value());
}

void Mips64Assembler::StoreStackOffsetToThread64(ThreadOffset<8> thr_offs,
                                                 FrameOffset fr_offs,
                                                 ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  Daddiu64(scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreStackPointerToThread64(ThreadOffset<8> thr_offs) {
  StoreToOffset(kStoreDoubleword, SP, S1, thr_offs.Int32Value());
}

void Mips64Assembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                    FrameOffset in_off, ManagedRegister mscratch) {
  Mips64ManagedRegister src = msrc.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  StoreToOffset(kStoreDoubleword, src.AsGpuRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value() + 8);
}

void Mips64Assembler::Load(ManagedRegister mdest, FrameOffset src, size_t size) {
  return EmitLoad(mdest, SP, src.Int32Value(), size);
}

void Mips64Assembler::LoadFromThread64(ManagedRegister mdest, ThreadOffset<8> src, size_t size) {
  return EmitLoad(mdest, S1, src.Int32Value(), size);
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(), SP, src.Int32Value());
}

void Mips64Assembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
                              bool poison_reference) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
  LoadFromOffset(kLoadUnsignedWord, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
  if (kPoisonHeapReferences && poison_reference) {
    // TODO: review
    // Negate the 32-bit ref
    Dsubu(dest.AsGpuRegister(), ZERO, dest.AsGpuRegister());
    // And constrain it to 32 bits (zero-extend into bits 32 through 63) as on Arm64 and x86/64
    Dext(dest.AsGpuRegister(), dest.AsGpuRegister(), 0, 31);
  }
}

void Mips64Assembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                                 Offset offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister() && base.AsMips64().IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(),
                 base.AsMips64().AsGpuRegister(), offs.Int32Value());
}

void Mips64Assembler::LoadRawPtrFromThread64(ManagedRegister mdest,
                                             ThreadOffset<8> offs) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  CHECK(dest.IsGpuRegister());
  LoadFromOffset(kLoadDoubleword, dest.AsGpuRegister(), S1, offs.Int32Value());
}

void Mips64Assembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for mips64";
}

void Mips64Assembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for mips64";
}

void Mips64Assembler::Move(ManagedRegister mdest, ManagedRegister msrc, size_t size) {
  Mips64ManagedRegister dest = mdest.AsMips64();
  Mips64ManagedRegister src = msrc.AsMips64();
  if (!dest.Equals(src)) {
    if (dest.IsGpuRegister()) {
      CHECK(src.IsGpuRegister()) << src;
      Move(dest.AsGpuRegister(), src.AsGpuRegister());
    } else if (dest.IsFpuRegister()) {
      CHECK(src.IsFpuRegister()) << src;
      if (size == 4) {
        MovS(dest.AsFpuRegister(), src.AsFpuRegister());
      } else if (size == 8) {
        MovD(dest.AsFpuRegister(), src.AsFpuRegister());
      } else {
        UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
      }
    }
  }
}

void Mips64Assembler::CopyRef(FrameOffset dest, FrameOffset src,
                              ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
}

void Mips64Assembler::CopyRawPtrFromThread64(FrameOffset fr_offs,
                                             ThreadOffset<8> thr_offs,
                                             ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), S1, thr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, fr_offs.Int32Value());
}

void Mips64Assembler::CopyRawPtrToThread64(ThreadOffset<8> thr_offs,
                                           FrameOffset fr_offs,
                                           ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(),
                S1, thr_offs.Int32Value());
}

void Mips64Assembler::Copy(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}
1345 
void Mips64Assembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src_base.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, SP, dest.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, SP, src.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest_base.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::Copy(ManagedRegister dest, Offset dest_offset,
                           ManagedRegister src, Offset src_offset,
                           ManagedRegister mscratch, size_t size) {
  GpuRegister scratch = mscratch.AsMips64().AsGpuRegister();
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch, src.AsMips64().AsGpuRegister(), src_offset.Int32Value());
    StoreToOffset(kStoreWord, scratch, dest.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadDoubleword, scratch, src.AsMips64().AsGpuRegister(),
                   src_offset.Int32Value());
    StoreToOffset(kStoreDoubleword, scratch, dest.AsMips64().AsGpuRegister(),
                  dest_offset.Int32Value());
  } else {
    UNIMPLEMENTED(FATAL) << "We only support Copy() of size 4 and 8";
  }
}

void Mips64Assembler::Copy(FrameOffset /*dest*/, Offset /*dest_offset*/,
                           FrameOffset /*src*/, Offset /*src_offset*/,
                           ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

void Mips64Assembler::MemoryBarrier(ManagedRegister) {
  // TODO: sync?
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

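// A handle scope entry is either null (for a null reference) or the stack address of
// the slot holding the reference. The first overload materializes the entry in a
// register; the second writes it to a stack slot.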
void Mips64Assembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister min_reg,
                                             bool null_allowed) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(in_reg.IsNoRegister() || in_reg.IsGpuRegister()) << in_reg;
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  if (null_allowed) {
    Label null_arg;
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadUnsignedWord, out_reg.AsGpuRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
    if (!out_reg.Equals(in_reg)) {
      LoadConst32(out_reg.AsGpuRegister(), 0);
    }
    Beqzc(in_reg.AsGpuRegister(), &null_arg);
    Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg);
  } else {
    Daddiu64(out_reg.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
}

void Mips64Assembler::CreateHandleScopeEntry(FrameOffset out_off,
                                             FrameOffset handle_scope_offset,
                                             ManagedRegister mscratch,
                                             bool null_allowed) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  if (null_allowed) {
    Label null_arg;
    LoadFromOffset(kLoadUnsignedWord, scratch.AsGpuRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    Beqzc(scratch.AsGpuRegister(), &null_arg);
    Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
    Bind(&null_arg);
  } else {
    Daddiu64(scratch.AsGpuRegister(), SP, handle_scope_offset.Int32Value());
  }
  StoreToOffset(kStoreDoubleword, scratch.AsGpuRegister(), SP, out_off.Int32Value());
}

// Given a handle scope entry, load the associated reference.
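// A null (zero) entry yields a null reference without dereferencing the entry.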
void Mips64Assembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                   ManagedRegister min_reg) {
  Mips64ManagedRegister out_reg = mout_reg.AsMips64();
  Mips64ManagedRegister in_reg = min_reg.AsMips64();
  CHECK(out_reg.IsGpuRegister()) << out_reg;
  CHECK(in_reg.IsGpuRegister()) << in_reg;
  Label null_arg;
  if (!out_reg.Equals(in_reg)) {
    LoadConst32(out_reg.AsGpuRegister(), 0);
  }
  Beqzc(in_reg.AsGpuRegister(), &null_arg);
  LoadFromOffset(kLoadDoubleword, out_reg.AsGpuRegister(),
                 in_reg.AsGpuRegister(), 0);
  Bind(&null_arg);
}

void Mips64Assembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

void Mips64Assembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references
}

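// Load the code pointer from base+offset into the scratch register and call through it.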
void Mips64Assembler::Call(ManagedRegister mbase, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister base = mbase.AsMips64();
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(base.IsGpuRegister()) << base;
  CHECK(scratch.IsGpuRegister()) << scratch;
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 base.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::Call(FrameOffset base, Offset offset, ManagedRegister mscratch) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  CHECK(scratch.IsGpuRegister()) << scratch;
  // Call *(*(SP + base) + offset)
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 scratch.AsGpuRegister(), offset.Int32Value());
  Jalr(scratch.AsGpuRegister());
  // TODO: place reference map on call
}

void Mips64Assembler::CallFromThread64(ThreadOffset<8> /*offset*/, ManagedRegister /*mscratch*/) {
  UNIMPLEMENTED(FATAL) << "no mips64 implementation";
}

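// The current Thread* lives in S1 under the calling convention used here.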
void Mips64Assembler::GetCurrentThread(ManagedRegister tr) {
  Move(tr.AsMips64().AsGpuRegister(), S1);
}

void Mips64Assembler::GetCurrentThread(FrameOffset offset,
                                       ManagedRegister /*mscratch*/) {
  StoreToOffset(kStoreDoubleword, S1, SP, offset.Int32Value());
}

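// Poll for a pending exception: load Thread::Current()->exception_ and, if it is
// non-null, branch to an out-of-line slow path that delivers it.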
void Mips64Assembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  Mips64ManagedRegister scratch = mscratch.AsMips64();
  Mips64ExceptionSlowPath* slow = new Mips64ExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadDoubleword, scratch.AsGpuRegister(),
                 S1, Thread::ExceptionOffset<8>().Int32Value());
  Bnezc(scratch.AsGpuRegister(), slow->Entry());
}

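// Slow path for exception delivery: fix up the frame, pass the exception object in A0,
// and jump to pDeliverException via T9. The runtime call never returns.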
void Mips64ExceptionSlowPath::Emit(Assembler* sasm) {
  Mips64Assembler* sp_asm = down_cast<Mips64Assembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving A0 as this call won't return.
  __ Move(A0, scratch_.AsGpuRegister());
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadDoubleword, T9, S1,
                    QUICK_ENTRYPOINT_OFFSET(8, pDeliverException).Int32Value());
  // TODO: check T9 usage.
  __ Jr(T9);
  // Call never returns.
  __ Break();
#undef __
}

}  // namespace mips64
}  // namespace art