1 /* libs/pixelflinger/codeflinger/MIPS64Assembler.cpp
2 **
3 ** Copyright 2015, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 
/* MIPS64 assembler and ARM->MIPS64 assembly translator
**
** The approach is to utilize the MIPSAssembler generator, using an inherited
** MIPS64Assembler that overrides just the specific MIPS64r6 instructions.
** For now, ArmToMips64Assembler is copied over from the ArmToMipsAssembler
** class, changing some MIPS64r6-related details.
**
*/
27 
28 
29 #define LOG_TAG "MIPS64Assembler"
30 
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <cutils/log.h>
34 #include <cutils/properties.h>
35 
36 #if defined(WITH_LIB_HARDWARE)
37 #include <hardware_legacy/qemu_tracing.h>
38 #endif
39 
40 #include <private/pixelflinger/ggl_context.h>
41 
42 #include "MIPS64Assembler.h"
43 #include "CodeCache.h"
44 #include "mips64_disassem.h"
45 
46 
47 #define NOT_IMPLEMENTED()  LOG_ALWAYS_FATAL("Arm instruction %s not yet implemented\n", __func__)
48 
49 
50 // ----------------------------------------------------------------------------
51 
52 namespace android {
53 
54 // ----------------------------------------------------------------------------
55 #if 0
56 #pragma mark -
57 #pragma mark ArmToMips64Assembler...
58 #endif
59 
// Construct a translator that also captures an ARM-side disassembly listing:
// 'abuf' receives up to 'instr_count' lines of at most 'linesz' chars each.
ArmToMips64Assembler::ArmToMips64Assembler(const sp<Assembly>& assembly,
                                           char *abuf, int linesz, int instr_count)
    :   ARMAssemblerInterface(),
        mArmDisassemblyBuffer(abuf),
        mArmLineLength(linesz),
        mArmInstrCount(instr_count),
        mInum(0),
        mAssembly(assembly)
{
    mMips = new MIPS64Assembler(assembly, this);
    // One slot per translated ARM instruction: records the MIPS PC at which
    // each ARM instruction's expansion begins.
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}
73 
ArmToMips64Assembler(void * assembly)74 ArmToMips64Assembler::ArmToMips64Assembler(void* assembly)
75     :   ARMAssemblerInterface(),
76         mArmDisassemblyBuffer(NULL),
77         mInum(0),
78         mAssembly(NULL)
79 {
80     mMips = new MIPS64Assembler(assembly, this);
81     mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
82     init_conditional_labels();
83 }
84 
ArmToMips64Assembler::~ArmToMips64Assembler()
{
    delete mMips;           // owned: created in the constructors
    free((void *) mArmPC);  // malloc'd in the constructors, so free()
}
90 
// Current output position of the underlying MIPS64 assembler.
uint32_t* ArmToMips64Assembler::pc() const
{
    return mMips->pc();
}
95 
// Start of the generated MIPS64 code buffer.
uint32_t* ArmToMips64Assembler::base() const
{
    return mMips->base();
}
100 
reset()101 void ArmToMips64Assembler::reset()
102 {
103     cond.labelnum = 0;
104     mInum = 0;
105     mMips->reset();
106 }
107 
// Report which architecture this code generator targets.
int ArmToMips64Assembler::getCodegenArch()
{
    return CODEGEN_ARCH_MIPS64;
}
112 
// Forward a listing comment to the MIPS assembler.
void ArmToMips64Assembler::comment(const char* string)
{
    mMips->comment(string);
}
117 
// Define a branch-target label at the current MIPS output position.
void ArmToMips64Assembler::label(const char* theLabel)
{
    mMips->label(theLabel);
}
122 
// Emit a disassembly listing of the generated code under the given name.
void ArmToMips64Assembler::disassemble(const char* name)
{
    mMips->disassemble(name);
}
127 
// Pre-format the pool of label names ("cond_0" .. "cond_98") used to skip
// over conditionally-executed instructions (see dataProcessing() and B()).
// NOTE(review): only indices 0..98 are initialized; assumes cond.label[]
// holds at least 99 entries of sufficient size -- confirm against the
// header declaration.
void ArmToMips64Assembler::init_conditional_labels()
{
    int i;
    for (i=0;i<99; ++i) {
        sprintf(cond.label[i], "cond_%d", i);
    }
}
135 
136 
137 
138 #if 0
139 #pragma mark -
140 #pragma mark Prolog/Epilog & Generate...
141 #endif
142 
// Function prologue for generated code: allocate a 5-slot (8 bytes each)
// stack frame, save callee-saved s0-s4, and copy the incoming argument
// (MIPS a0) into v0, which stands in for ARM r0 in the translated code.
void ArmToMips64Assembler::prolog()
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->DADDIU(R_sp, R_sp, -(5 * 8));
    mMips->SD(R_s0, R_sp, 0);
    mMips->SD(R_s1, R_sp, 8);
    mMips->SD(R_s2, R_sp, 16);
    mMips->SD(R_s3, R_sp, 24);
    mMips->SD(R_s4, R_sp, 32);
    mMips->MOVE(R_v0, R_a0);    // move context * passed in a0 to v0 (arm r0)
}
155 
// Function epilogue: restore s0-s4, pop the frame created by prolog(), and
// return. 'touched' (the set of registers the caller modified) is ignored
// here -- all five callee-saved registers are restored unconditionally.
void ArmToMips64Assembler::epilog(uint32_t touched)
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->LD(R_s0, R_sp, 0);
    mMips->LD(R_s1, R_sp, 8);
    mMips->LD(R_s2, R_sp, 16);
    mMips->LD(R_s3, R_sp, 24);
    mMips->LD(R_s4, R_sp, 32);
    mMips->DADDIU(R_sp, R_sp, (5 * 8));
    mMips->JR(R_ra);

}
169 
// Finalize and emit the generated code under the given name.
int ArmToMips64Assembler::generate(const char* name)
{
    return mMips->generate(name);
}
174 
// Resolve label references in branches emitted so far.
void ArmToMips64Assembler::fix_branches()
{
    mMips->fix_branches();
}
179 
// Look up the MIPS PC recorded for a previously-defined label.
uint32_t* ArmToMips64Assembler::pcForLabel(const char* label)
{
    return mMips->pcForLabel(label);
}
184 
set_condition(int mode,int R1,int R2)185 void ArmToMips64Assembler::set_condition(int mode, int R1, int R2) {
186     if (mode == 2) {
187         cond.type = SBIT_COND;
188     } else {
189         cond.type = CMP_COND;
190     }
191     cond.r1 = R1;
192     cond.r2 = R2;
193 }
194 
195 //----------------------------------------------------------
196 
197 #if 0
198 #pragma mark -
199 #pragma mark Addressing modes & shifters...
200 #endif
201 
202 
203 // do not need this for MIPS, but it is in the Interface (virtual)
// ARM-interface hook for splitting an immediate into a rotate/imm pair.
// MIPS can materialize any 32-bit constant, so no rotation is ever needed:
// report rot=0 and pass the value through unchanged.
int ArmToMips64Assembler::buildImmediate(
        uint32_t immediate, uint32_t& rot, uint32_t& imm)
{
    // for MIPS, any 32-bit immediate is OK
    rot = 0;
    imm = immediate;
    return 0;
}
212 
213 // shifters...
214 
// Every 32-bit immediate is representable on MIPS (via LUI/ORI if needed),
// so this check never fails.
bool ArmToMips64Assembler::isValidImmediate(uint32_t immediate)
{
    // for MIPS, any 32-bit immediate is OK
    return true;
}
220 
// Record an immediate operand; the returned token is later decoded by
// dataProcAdrModes(), which reads amode.value back.
uint32_t ArmToMips64Assembler::imm(uint32_t immediate)
{
    amode.value = immediate;
    return AMODE_IMM;
}
226 
reg_imm(int Rm,int type,uint32_t shift)227 uint32_t ArmToMips64Assembler::reg_imm(int Rm, int type, uint32_t shift)
228 {
229     amode.reg = Rm;
230     amode.stype = type;
231     amode.value = shift;
232     return AMODE_REG_IMM;
233 }
234 
// Rotate-right-with-extend operand: unsupported (never produced by the
// current GGLAssembler code paths).
uint32_t ArmToMips64Assembler::reg_rrx(int Rm)
{
    // reg_rrx mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}
240 
// Register-shifted-by-register operand: unsupported (never produced by the
// current GGLAssembler code paths).
uint32_t ArmToMips64Assembler::reg_reg(int Rm, int type, int Rs)
{
    // reg_reg mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}
246 
247 
248 // addressing modes...
249 // LDR(B)/STR(B)/PLD (immediate and Rm can be negative, which indicate U=0)
// Pre-indexed immediate addressing for LDR(B)/STR(B)/PLD. The ARM imm12
// encoding this mirrors only allows magnitudes below 0x800, hence the
// check. W != 0 requests base-register writeback after the access.
uint32_t ArmToMips64Assembler::immed12_pre(int32_t immed12, int W)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);
    amode.value = immed12;
    amode.writeback = W;
    return AMODE_IMM_12_PRE;
}
259 
// Post-indexed immediate addressing for LDR(B)/STR(B)/PLD: access at the
// base, then advance the base by immed12 (writeback is implicit).
uint32_t ArmToMips64Assembler::immed12_post(int32_t immed12)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);

    amode.value = immed12;
    return AMODE_IMM_12_POST;
}
269 
// Pre-indexed register addressing. Only the simple base+index form is
// supported: any writeback, shift type, or shift amount aborts.
uint32_t ArmToMips64Assembler::reg_scale_pre(int Rm, int type,
        uint32_t shift, int W)
{
    LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");

    amode.reg = Rm;
    // amode.stype = type;      // more advanced modes not used in GGLAssembler yet
    // amode.value = shift;
    // amode.writeback = W;
    return AMODE_REG_SCALE_PRE;
}
281 
// Post-indexed scaled-register addressing: not implemented; always aborts.
uint32_t ArmToMips64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)
{
    LOG_ALWAYS_FATAL("adr mode reg_scale_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}
287 
288 // LDRH/LDRSB/LDRSH/STRH (immediate and Rm can be negative, which indicate U=0)
// Pre-indexed 8-bit immediate addressing for LDRH/LDRSB/LDRSH/STRH:
// not implemented. The unconditional fatal on entry means the range check
// and return below are currently unreachable placeholders.
uint32_t ArmToMips64Assembler::immed8_pre(int32_t immed8, int W)
{
    LOG_ALWAYS_FATAL("adr mode immed8_pre not yet implemented\n");

    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    return AMODE_IMM_8_PRE;
}
298 
// Post-indexed 8-bit immediate addressing for LDRH/LDRSB/LDRSH/STRH:
// access at the base, then advance it by immed8 (writeback is implicit).
uint32_t ArmToMips64Assembler::immed8_post(int32_t immed8)
{
    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    amode.value = immed8;
    return AMODE_IMM_8_POST;
}
307 
// Pre-indexed register addressing (base + Rm). Writeback is not supported.
uint32_t ArmToMips64Assembler::reg_pre(int Rm, int W)
{
    LOG_ALWAYS_FATAL_IF(W, "reg_pre writeback not yet implemented");
    amode.reg = Rm;
    return AMODE_REG_PRE;
}
314 
// Post-indexed register addressing: not implemented; always aborts.
uint32_t ArmToMips64Assembler::reg_post(int Rm)
{
    LOG_ALWAYS_FATAL("adr mode reg_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}
320 
321 
322 
323 // ----------------------------------------------------------------------------
324 
325 #if 0
326 #pragma mark -
327 #pragma mark Data Processing...
328 #endif
329 
330 
// Mnemonics for the 16 ARM data-processing opcodes, indexed by opcode
// number (opAND..opMVN).
// NOTE(review): not referenced within this chunk -- presumably used by
// disassembly/debug code elsewhere in the file; confirm before removing.
static const char * const dpOpNames[] = {
    "AND", "EOR", "SUB", "RSB", "ADD", "ADC", "SBC", "RSC",
    "TST", "TEQ", "CMP", "CMN", "ORR", "MOV", "BIC", "MVN"
};
335 
336 // check if the operand registers from a previous CMP or S-bit instruction
337 // would be overwritten by this instruction. If so, move the value to a
338 // safe register.
339 // Note that we cannot tell at _this_ instruction time if a future (conditional)
340 // instruction will _also_ use this value (a defect of the simple 1-pass, one-
341 // instruction-at-a-time translation). Therefore we must be conservative and
342 // save the value before it is overwritten. This costs an extra MOVE instr.
343 
// If writing Rd would clobber an operand of the pending condition (set by
// a previous CMP or S-bit instruction), copy that operand to a dedicated
// temp register first so later conditional instructions can still test it.
void ArmToMips64Assembler::protectConditionalOperands(int Rd)
{
    if (Rd == cond.r1) {
        mMips->MOVE(R_cmp, cond.r1);
        cond.r1 = R_cmp;
    }
    // r2 only matters for CMP-style conditions (SBIT compares against zero)
    if (cond.type == CMP_COND && Rd == cond.r2) {
        mMips->MOVE(R_cmp2, cond.r2);
        cond.r2 = R_cmp2;
    }
}
355 
356 
357 // interprets the addressing mode, and generates the common code
358 // used by the majority of data-processing ops. Many MIPS instructions
359 // have a register-based form and a different immediate form. See
360 // opAND below for an example. (this could be inlined)
361 //
362 // this works with the imm(), reg_imm() methods above, which are directly
363 // called by the GLLAssembler.
364 // note: _signed parameter defaults to false (un-signed)
365 // note: tmpReg parameter defaults to 1, MIPS register AT
// Decode an ARM Op2 operand token for a data-processing instruction.
// Returns SRC_REG with 'source' holding a register number, or SRC_IMM with
// 'source' holding a 16-bit-representable immediate. Large immediates are
// materialized into tmpReg via LUI/ORI; shifted-register operands are
// computed into tmpReg with the matching MIPS shift/rotate.
// note: _signed parameter defaults to false (un-signed)
// note: tmpReg parameter defaults to 1, MIPS register AT
int ArmToMips64Assembler::dataProcAdrModes(int op, int& source, bool _signed, int tmpReg)
{
    if (op < AMODE_REG) {
        // op is itself a register number
        source = op;
        return SRC_REG;
    } else if (op == AMODE_IMM) {
        // immediate too wide for a 16-bit instruction field? build it in tmpReg
        if ((!_signed && amode.value > 0xffff)
                || (_signed && ((int)amode.value < -32768 || (int)amode.value > 32767) )) {
            mMips->LUI(tmpReg, (amode.value >> 16));
            if (amode.value & 0x0000ffff) {
                mMips->ORI(tmpReg, tmpReg, (amode.value & 0x0000ffff));
            }
            source = tmpReg;
            return SRC_REG;
        } else {
            source = amode.value;
            return SRC_IMM;
        }
    } else if (op == AMODE_REG_IMM) {
        // apply the recorded shift to amode.reg, leaving the result in tmpReg
        switch (amode.stype) {
            case LSL: mMips->SLL(tmpReg, amode.reg, amode.value); break;
            case LSR: mMips->SRL(tmpReg, amode.reg, amode.value); break;
            case ASR: mMips->SRA(tmpReg, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(tmpReg, amode.reg, amode.value); break;
        }
        source = tmpReg;
        return SRC_REG;
    } else {  // adr mode RRX is not used in GGL Assembler at this time
        // we are screwed, this should be exception, assert-fail or something
        LOG_ALWAYS_FATAL("adr mode reg_rrx not yet implemented\n");
        return SRC_ERROR;
    }
}
399 
400 
// Translate one ARM data-processing instruction into MIPS64.
// opcode: opAND..opMVN (plus local opADD64/opSUB64 extensions);
// cc: ARM condition code (AL = always); s: ARM S bit; Rd/Rn: destination
// and first-operand registers; Op2: a plain register number or an
// addressing-mode token produced by imm()/reg_imm().
// Conditional execution is emulated by branching past the instruction on
// the inverted condition.
void ArmToMips64Assembler::dataProcessing(int opcode, int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
    int src;    // src is modified by dataProcAdrModes() - passed as int&

    if (cc != AL) {
        protectConditionalOperands(Rd);
        // the branch tests register(s) set by prev CMP or instr with 'S' bit set
        // inverse the condition to jump past this conditional instruction
        ArmToMips64Assembler::B(cc^1, cond.label[++cond.labelnum]);
    } else {
        mArmPC[mInum++] = pc();  // save starting PC for this instr
    }

    switch (opcode) {
    case opAND:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->AND(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->ANDI(Rd, Rn, src);
        }
        break;

    case opADD:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->ADDU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->ADDIU(Rd, Rn, src);
        }
        break;

    case opSUB:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->SUBU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->SUBIU(Rd, Rn, src);
        }
        break;

    case opADD64:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DADDU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->DADDIU(Rd, Rn, src);
        }
        break;

    case opSUB64:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DSUBU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->DSUBIU(Rd, Rn, src);
        }
        break;

    case opEOR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->XOR(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->XORI(Rd, Rn, src);
        }
        break;

    case opORR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->OR(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->ORI(Rd, Rn, src);
        }
        break;

    case opBIC:
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if we are 16-bit immediate, load to AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        // Rd = Rn & ~src
        mMips->NOT(R_at, src);
        mMips->AND(Rd, Rn, R_at);
        break;

    case opRSB:
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if we are 16-bit immediate, load to AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        mMips->SUBU(Rd, src, Rn);   // subu with the parameters reversed
        break;

    case opMOV:
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->MOVE(Rd, Op2);
        } else if (Op2 == AMODE_IMM) {
            if (amode.value > 0xffff) {
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
             } else {
                mMips->ORI(Rd, 0, amode.value);
            }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
            case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
            case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
            case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        }
        else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        break;

    case opMVN:     // this is a 1's complement: NOT
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->NOR(Rd, Op2, 0);     // NOT is NOR with 0
            break;
        } else if (Op2 == AMODE_IMM) {
            if (amode.value > 0xffff) {
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
             } else {
                mMips->ORI(Rd, 0, amode.value);
             }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
            case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
            case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
            case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        }
        else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        // invert the value materialized in Rd above
        mMips->NOR(Rd, Rd, 0);     // NOT is NOR with 0
        break;

    case opCMP:
        // Either operand of a CMP instr could get overwritten by a subsequent
        // conditional instruction, which is ok, _UNLESS_ there is a _second_
        // conditional instruction. Under MIPS, this requires doing the comparison
        // again (SLT), and the original operands must be available. (and this
        // pattern of multiple conditional instructions from same CMP _is_ used
        // in GGL-Assembler)
        //
        // For now, if a conditional instr overwrites the operands, we will
        // move them to dedicated temp regs. This is ugly, and inefficient,
        // and should be optimized.
        //
        // WARNING: making an _Assumption_ that CMP operand regs will NOT be
        // trashed by intervening NON-conditional instructions. In the general
        // case this is legal, but it is NOT currently done in GGL-Assembler.

        cond.type = CMP_COND;
        cond.r1 = Rn;
        if (dataProcAdrModes(Op2, src, false, R_cmp2) == SRC_REG) {
            cond.r2 = src;
        } else {                        // adr mode was SRC_IMM
            mMips->ORI(R_cmp2, R_zero, src);
            cond.r2 = R_cmp2;
        }

        break;


    case opTST:
    case opTEQ:
    case opCMN:
    case opADC:
    case opSBC:
    case opRSC:
        mMips->UNIMPL(); // currently unused in GGL Assembler code
        break;
    }

    if (cc != AL) {
        // land here when the inverted-condition branch above was taken
        mMips->label(cond.label[cond.labelnum]);
    }
    if (s && opcode != opCMP) {
        // remember Rd so a following conditional branch can test it
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
595 
596 
597 
598 #if 0
599 #pragma mark -
600 #pragma mark Multiply...
601 #endif
602 
603 // multiply, accumulate
// Multiply-accumulate: Rd = Rm * Rs + Rn (cc is ignored; uses AT as temp).
void ArmToMips64Assembler::MLA(int cc, int s,
        int Rd, int Rm, int Rs, int Rn) {

    //ALOGW("MLA");
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->MUL(R_at, Rm, Rs);
    mMips->ADDU(Rd, R_at, Rn);
    if (s) {
        // record Rd for a subsequent conditional branch
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
617 
// Multiply: Rd = Rm * Rs (cc is ignored).
void ArmToMips64Assembler::MUL(int cc, int s,
        int Rd, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUL(Rd, Rm, Rs);
    if (s) {
        // record Rd for a subsequent conditional branch
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
627 
// Unsigned 32x32->64 multiply: high half to RdHi (MUH), low half to RdLo
// (MUL). A condition on the 64-bit result cannot be represented here, so
// s != 0 aborts.
void ArmToMips64Assembler::UMULL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUH(RdHi, Rm, Rs);
    mMips->MUL(RdLo, Rm, Rs);

    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}
640 
UMUAL(int cc,int s,int RdLo,int RdHi,int Rm,int Rs)641 void ArmToMips64Assembler::UMUAL(int cc, int s,
642         int RdLo, int RdHi, int Rm, int Rs) {
643     LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
644                         "UMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
645     // *mPC++ =    (cc<<28) | (1<<23) | (1<<21) | (s<<20) |
646     //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
647     mArmPC[mInum++] = pc();
648     mMips->NOP2();
649     NOT_IMPLEMENTED();
650     if (s) {
651         cond.type = SBIT_COND;
652         cond.r1 = RdHi;     // BUG...
653         LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
654     }
655 }
656 
// Signed multiply-long: not implemented -- emits a NOP pair and aborts via
// NOT_IMPLEMENTED(). Register-overlap restrictions mirror the ARM encoding
// constraints.
void ArmToMips64Assembler::SMULL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                        "SMULL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<22) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on SMULL must be on 64-bit result\n");
    }
}
// Signed multiply-accumulate-long: not implemented -- emits a NOP pair and
// aborts via NOT_IMPLEMENTED(). Register-overlap restrictions mirror the
// ARM encoding constraints.
void ArmToMips64Assembler::SMUAL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                        "SMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<22) | (1<<21) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on SMUAL must be on 64-bit result\n");
    }
}
687 
688 
689 
690 #if 0
691 #pragma mark -
692 #pragma mark Branches...
693 #endif
694 
695 // branches...
696 
// Conditional branch to a label: map the ARM condition code to a MIPS
// compare-and-branch on the recorded operands (cond.r1 vs cond.r2).
// For S-bit conditions the comparison is against zero. NV emits nothing;
// overflow conditions (VS/VC) are unsupported and abort.
void ArmToMips64Assembler::B(int cc, const char* label)
{
    mArmPC[mInum++] = pc();
    if (cond.type == SBIT_COND) { cond.r2 = R_zero; }

    switch(cc) {
        case EQ: mMips->BEQ(cond.r1, cond.r2, label); break;
        case NE: mMips->BNE(cond.r1, cond.r2, label); break;
        case HS: mMips->BGEU(cond.r1, cond.r2, label); break;
        case LO: mMips->BLTU(cond.r1, cond.r2, label); break;
        // MI/PL are realized as signed </>= of the compare operands
        case MI: mMips->BLT(cond.r1, cond.r2, label); break;
        case PL: mMips->BGE(cond.r1, cond.r2, label); break;

        case HI: mMips->BGTU(cond.r1, cond.r2, label); break;
        case LS: mMips->BLEU(cond.r1, cond.r2, label); break;
        case GE: mMips->BGE(cond.r1, cond.r2, label); break;
        case LT: mMips->BLT(cond.r1, cond.r2, label); break;
        case GT: mMips->BGT(cond.r1, cond.r2, label); break;
        case LE: mMips->BLE(cond.r1, cond.r2, label); break;
        case AL: mMips->B(label); break;
        case NV: /* B Never - no instruction */ break;

        case VS:
        case VC:
        default:
            LOG_ALWAYS_FATAL("Unsupported cc: %02x\n", cc);
            break;
    }
}
726 
// Branch-and-link to a label: not supported; always aborts.
void ArmToMips64Assembler::BL(int cc, const char* label)
{
    LOG_ALWAYS_FATAL("branch-and-link not supported yet\n");
    mArmPC[mInum++] = pc();
}
732 
733 // no use for Branches with integer PC, but they're in the Interface class ....
// Branch to an absolute PC: not supported; always aborts (use a label).
void ArmToMips64Assembler::B(int cc, uint32_t* to_pc)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
739 
// Branch-and-link to an absolute PC: not supported; always aborts.
void ArmToMips64Assembler::BL(int cc, uint32_t* to_pc)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
745 
// Branch-exchange to a register: not supported; always aborts.
void ArmToMips64Assembler::BX(int cc, int Rn)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
751 
752 
753 
754 #if 0
755 #pragma mark -
756 #pragma mark Data Transfer...
757 #endif
758 
759 // data transfer...
// Load word: translate ARM LDR (pre/post-indexed immediate, or simple
// base+index) into MIPS LW, with explicit DADDIU for any writeback.
// cc is ignored; an unrecognized offset token is treated as immed12_pre(0).
void ArmToMips64Assembler::LDR(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LW(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LW via Mips SP
            }
            // post-index: load at base, then advance base (always writes back)
            mMips->LW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LW(Rd, R_at, 0);
            break;
    }
}
793 
// Load byte (zero-extended): translate ARM LDRB into MIPS LBU, with
// explicit DADDIU for any writeback. cc is ignored; an unrecognized offset
// token is treated as immed12_pre(0).
void ArmToMips64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->LBU(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            // post-index: load at base, then advance base (always writes back)
            mMips->LBU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LBU(Rd, R_at, 0);
            break;
    }

}
822 
// Store word: translate ARM STR into MIPS SW, with explicit DADDIU for any
// writeback. For pre-index-with-writeback the base is updated BEFORE the
// store so a stack push stores to the new (decremented) address. cc is
// ignored; an unrecognized offset token is treated as immed12_pre(0).
void ArmToMips64Assembler::STR(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SW(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SW(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SW(Rd, R_at, 0);
            break;
    }
}
858 
// Store byte: translate ARM STRB into MIPS SB, with explicit DADDIU for
// any writeback. cc is ignored; an unrecognized offset token is treated
// as immed12_pre(0).
void ArmToMips64Assembler::STRB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->SB(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            // post-index: store at base, then advance base (always writes back)
            mMips->SB(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SB(Rd, R_at, 0);
            break;
    }
}
886 
// Load halfword (zero-extended): translate ARM LDRH into MIPS LHU.
// cc is ignored; an unrecognized offset token is treated as immed8_pre(0).
// Pre-index writeback is not supported for halfword loads.
void ArmToMips64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:      // no support yet for writeback
            mMips->LHU(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            // post-index: load at base, then advance base (always writes back)
            mMips->LHU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->LHU(Rd, R_at, 0);
            break;
    }
}
914 
// ARM LDRSB (load signed byte): unimplemented. Emits a NOP2 placeholder
// so instruction accounting stays consistent, then aborts at runtime
// via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
921 
// ARM LDRSH (load signed halfword): unimplemented placeholder; aborts
// via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
928 
// Translate ARM STRH (store halfword) into MIPS SH.
// 'cc' is ignored; the store is emitted unconditionally.
void ArmToMips64Assembler::STRH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();     // record MIPS pc for this ARM instruction
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:      // no support yet for writeback
            mMips->SH(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            // post-index: store at base, then write back the offset
            mMips->SH(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            // NOTE(review): negative amode.reg means "subtract index";
            // abs() recovers the register number — confirm against reg_pre().
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->SH(Rd, R_at, 0);
            break;
    }
}
956 
957 
958 
959 #if 0
960 #pragma mark -
961 #pragma mark Block Data Transfer...
962 #endif
963 
964 // block data transfer...
// ARM LDM (load multiple): unimplemented. The commented-out lines show
// the original ARM encoding for reference. Aborts via NOT_IMPLEMENTED()
// (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::LDM(int cc, int dir,
        int Rn, int W, uint32_t reg_list)
{   //                        ED FD EA FA      IB IA DB DA
    // const uint8_t P[8] = { 1, 0, 1, 0,      1, 0, 1, 0 };
    // const uint8_t U[8] = { 1, 1, 0, 0,      1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //         (uint32_t(U[dir])<<23) | (1<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
976 
// ARM STM (store multiple): unimplemented. The commented-out lines show
// the original ARM encoding for reference. Aborts via NOT_IMPLEMENTED()
// (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::STM(int cc, int dir,
        int Rn, int W, uint32_t reg_list)
{   //                        FA EA FD ED      IB IA DB DA
    // const uint8_t P[8] = { 0, 1, 0, 1,      1, 0, 1, 0 };
    // const uint8_t U[8] = { 0, 0, 1, 1,      1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //         (uint32_t(U[dir])<<23) | (0<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
988 
989 
990 
991 #if 0
992 #pragma mark -
993 #pragma mark Special...
994 #endif
995 
996 // special...
// ARM SWP (swap word): unimplemented placeholder; aborts via
// NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::SWP(int cc, int Rn, int Rd, int Rm) {
    // *mPC++ = (cc<<28) | (2<<23) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1003 
// ARM SWPB (swap byte): unimplemented placeholder; aborts via
// NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::SWPB(int cc, int Rn, int Rd, int Rm) {
    // *mPC++ = (cc<<28) | (2<<23) | (1<<22) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1010 
// ARM SWI (software interrupt): unimplemented placeholder; aborts via
// NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::SWI(int cc, uint32_t comment) {
    // *mPC++ = (cc<<28) | (0xF<<24) | comment;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1017 
1018 
1019 #if 0
1020 #pragma mark -
1021 #pragma mark DSP instructions...
1022 #endif
1023 
1024 // DSP instructions...
// ARM PLD (prefetch): unimplemented. Validates the only supported ARM
// form (P=1, W=0 bits of the offset word) then aborts via
// NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::PLD(int Rn, uint32_t offset) {
    LOG_ALWAYS_FATAL_IF(!((offset&(1<<24)) && !(offset&(1<<21))),
                        "PLD only P=1, W=0");
    // *mPC++ = 0xF550F000 | (Rn<<16) | offset;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1033 
// ARM CLZ (count leading zeros of Rm into Rd): maps directly onto the
// MIPS CLZ instruction. 'cc' is ignored.
void ArmToMips64Assembler::CLZ(int cc, int Rd, int Rm)
{
    mArmPC[mInum++] = pc();
    mMips->CLZ(Rd, Rm);
}
1039 
// ARM QADD (saturating add): unimplemented placeholder; aborts via
// NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::QADD(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1000050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1047 
// ARM QDADD (saturating double-and-add): unimplemented placeholder;
// aborts via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::QDADD(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1400050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1055 
// ARM QSUB (saturating subtract): unimplemented placeholder; aborts via
// NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::QSUB(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1200050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1063 
// ARM QDSUB (saturating double-and-subtract): unimplemented placeholder;
// aborts via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::QDSUB(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1600050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1071 
1072 // 16 x 16 signed multiply (like SMLAxx without the accumulate)
// ARM SMULxy: 16 x 16 signed multiply of selected half-registers.
// Emits: extract Rm half into R_at, extract Rs half into R_at2,
// then MUL Rd, R_at, R_at2. 'cc' is ignored.
void ArmToMips64Assembler::SMUL(int cc, int xy,
                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }
    mMips->MUL(Rd, R_at, R_at2);
}
1100 
1101 // signed 32b x 16b multiple, save top 32-bits of 48-bit result
// ARM SMULWy: signed 32b x 16b multiply keeping the top 32 bits of the
// 48-bit product. Implemented by shifting the selected Rs halfword into
// the high half of R_at, then taking MUH (high word of 64-bit product).
void ArmToMips64Assembler::SMULW(int cc, int y,
                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the selector yT or yB refers to reg Rs
    if (y & yT) {
        // zero the bottom 16-bits, with 2 shifts, it can affect result
        mMips->SRL(R_at, Rs, 16);
        mMips->SLL(R_at, R_at, 16);

    } else {
        // move low 16-bit half, to high half
        mMips->SLL(R_at, Rs, 16);
    }
    mMips->MUH(Rd, Rm, R_at);
}
1119 
1120 // 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn
// ARM SMLAxy: 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn.
// Same half-register selection as SMUL, followed by MUL into R_at and
// a 32-bit ADDU with the accumulator Rn. 'cc' is ignored.
void ArmToMips64Assembler::SMLA(int cc, int xy,
                int Rd, int Rm, int Rs, int Rn)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }

    mMips->MUL(R_at, R_at, R_at2);
    mMips->ADDU(Rd, R_at, Rn);
}
1150 
// ARM SMLALxy (64-bit multiply-accumulate): unimplemented placeholder;
// aborts via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::SMLAL(int cc, int xy,
                int RdHi, int RdLo, int Rs, int Rm)
{
    // *mPC++ = (cc<<28) | 0x1400080 | (RdHi<<16) | (RdLo<<12) | (Rs<<8) | (xy<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1159 
// ARM SMLAWy (32x16 multiply-accumulate): unimplemented placeholder;
// aborts via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::SMLAW(int cc, int y,
                int Rd, int Rm, int Rs, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1168 
1169 // used by ARMv6 version of GGLAssembler::filter32
// ARM UXTB16: rotate Rm right by 8*rotate, then zero-extend bytes 0 and 2.
// The mask 0x00FF00FF is materialized in R_at via LUI (0x00FF0000) + ORI
// (0x000000FF), then ANDed with the rotated value. 'cc' is ignored.
void ArmToMips64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
{
    mArmPC[mInum++] = pc();

    //Rd[31:16] := ZeroExtend((Rm ROR (8 * sh))[23:16]),
    //Rd[15:0] := ZeroExtend((Rm ROR (8 * sh))[7:0]). sh 0-3.

    mMips->ROTR(R_at2, Rm, rotate * 8);
    mMips->LUI(R_at, 0xFF);         // R_at = 0x00FF0000
    mMips->ORI(R_at, R_at, 0xFF);   // R_at = 0x00FF00FF
    mMips->AND(Rd, R_at2, R_at);
}
1182 
// ARM UBFX (unsigned bit-field extract): unimplemented placeholder;
// aborts via NOT_IMPLEMENTED() (LOG_ALWAYS_FATAL) if ever reached.
void ArmToMips64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
{
     /* Placeholder for UBFX */
     mArmPC[mInum++] = pc();

     mMips->NOP2();
     NOT_IMPLEMENTED();
}
1191 
1192 // ----------------------------------------------------------------------------
1193 // Address Processing...
1194 // ----------------------------------------------------------------------------
1195 
// Address (pointer-width) add: delegates to dataProcessing with the
// 64-bit add opcode so pointer arithmetic uses full-width DADDU forms.
void ArmToMips64Assembler::ADDR_ADD(int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
//    if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
//    if(s  != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opADD64, cc, s, Rd, Rn, Op2);
}
1203 
// Address (pointer-width) subtract: delegates to dataProcessing with the
// 64-bit subtract opcode.
void ArmToMips64Assembler::ADDR_SUB(int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
//    if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
//    if(s  != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opSUB64, cc, s, Rd, Rn, Op2);
}
1211 
// Pointer-width load: like LDR but emits 64-bit LD, and maps the ARM SP
// register onto the MIPS SP. 'cc' is ignored.
void ArmToMips64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LD(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert STR thru Arm SP to STR thru Mips SP
            }
            // post-index: load at base, then always write back the offset
            mMips->LD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LD(Rd, R_at, 0);
            break;
    }
}
1244 
// Pointer-width store: like STR but emits 64-bit SD, and maps the ARM SP
// register onto the MIPS SP. 'cc' is ignored.
void ArmToMips64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SD(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SD(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SD(Rd, R_at, 0);
            break;
    }
}
1279 
1280 #if 0
1281 #pragma mark -
1282 #pragma mark MIPS Assembler...
1283 #endif
1284 
1285 
1286 //**************************************************************************
1287 //**************************************************************************
1288 //**************************************************************************
1289 
1290 
1291 /* MIPS64 assembler
1292 ** this is a subset of mips64r6, targeted specifically at ARM instruction
1293 ** replacement in the pixelflinger/codeflinger code.
1294 **
1295 ** This class is extended from MIPSAssembler class and overrides only
1296 ** MIPS64r6 specific stuff.
1297 */
1298 
MIPS64Assembler(const sp<Assembly> & assembly,ArmToMips64Assembler * parent)1299 MIPS64Assembler::MIPS64Assembler(const sp<Assembly>& assembly, ArmToMips64Assembler *parent)
1300     : mParent(parent),
1301     MIPSAssembler::MIPSAssembler(assembly, NULL)
1302 {
1303 }
1304 
MIPS64Assembler(void * assembly,ArmToMips64Assembler * parent)1305 MIPS64Assembler::MIPS64Assembler(void* assembly, ArmToMips64Assembler *parent)
1306     : mParent(parent),
1307     MIPSAssembler::MIPSAssembler(assembly)
1308 {
1309 }
1310 
// Trivial destructor: this class owns no resources of its own; buffer
// cleanup is handled by the MIPSAssembler base class.
MIPS64Assembler::~MIPS64Assembler()
{
}
1314 
reset()1315 void MIPS64Assembler::reset()
1316 {
1317     if (mAssembly != NULL) {
1318         mBase = mPC = (uint32_t *)mAssembly->base();
1319     } else {
1320         mPC = mBase = base();
1321     }
1322     mBranchTargets.clear();
1323     mLabels.clear();
1324     mLabelsInverseMapping.clear();
1325     mComments.clear();
1326 }
1327 
1328 
disassemble(const char * name)1329 void MIPS64Assembler::disassemble(const char* name)
1330 {
1331     char di_buf[140];
1332 
1333     bool arm_disasm_fmt = (mParent->mArmDisassemblyBuffer == NULL) ? false : true;
1334 
1335     typedef char dstr[40];
1336     dstr *lines = (dstr *)mParent->mArmDisassemblyBuffer;
1337 
1338     if (mParent->mArmDisassemblyBuffer != NULL) {
1339         for (int i=0; i<mParent->mArmInstrCount; ++i) {
1340             string_detab(lines[i]);
1341         }
1342     }
1343 
1344     // iArm is an index to Arm instructions 1...n for this assembly sequence
1345     // mArmPC[iArm] holds the value of the Mips-PC for the first MIPS
1346     // instruction corresponding to that Arm instruction number
1347 
1348     int iArm = 0;
1349     size_t count = pc()-base();
1350     uint32_t* mipsPC = base();
1351 
1352     while (count--) {
1353         ssize_t label = mLabelsInverseMapping.indexOfKey(mipsPC);
1354         if (label >= 0) {
1355             ALOGW("%s:\n", mLabelsInverseMapping.valueAt(label));
1356         }
1357         ssize_t comment = mComments.indexOfKey(mipsPC);
1358         if (comment >= 0) {
1359             ALOGW("; %s\n", mComments.valueAt(comment));
1360         }
1361         ::mips_disassem(mipsPC, di_buf, arm_disasm_fmt);
1362         string_detab(di_buf);
1363         string_pad(di_buf, 30);
1364         ALOGW("%08lx:    %08x    %s", uintptr_t(mipsPC), uint32_t(*mipsPC), di_buf);
1365         mipsPC++;
1366     }
1367 }
1368 
fix_branches()1369 void MIPS64Assembler::fix_branches()
1370 {
1371     // fixup all the branches
1372     size_t count = mBranchTargets.size();
1373     while (count--) {
1374         const branch_target_t& bt = mBranchTargets[count];
1375         uint32_t* target_pc = mLabels.valueFor(bt.label);
1376         LOG_ALWAYS_FATAL_IF(!target_pc,
1377                 "error resolving branch targets, target_pc is null");
1378         int32_t offset = int32_t(target_pc - (bt.pc+1));
1379         *bt.pc |= offset & 0x00FFFF;
1380     }
1381 }
1382 
DADDU(int Rd,int Rs,int Rt)1383 void MIPS64Assembler::DADDU(int Rd, int Rs, int Rt)
1384 {
1385     *mPC++ = (spec_op<<OP_SHF) | (daddu_fn<<FUNC_SHF)
1386                     | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
1387 }
1388 
DADDIU(int Rt,int Rs,int16_t imm)1389 void MIPS64Assembler::DADDIU(int Rt, int Rs, int16_t imm)
1390 {
1391     *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1392 }
1393 
DSUBU(int Rd,int Rs,int Rt)1394 void MIPS64Assembler::DSUBU(int Rd, int Rs, int Rt)
1395 {
1396     *mPC++ = (spec_op<<OP_SHF) | (dsubu_fn<<FUNC_SHF) |
1397                         (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1398 }
1399 
DSUBIU(int Rt,int Rs,int16_t imm)1400 void MIPS64Assembler::DSUBIU(int Rt, int Rs, int16_t imm)   // really addiu(d, s, -j)
1401 {
1402     *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | ((-imm) & MSK_16);
1403 }
1404 
MUL(int Rd,int Rs,int Rt)1405 void MIPS64Assembler::MUL(int Rd, int Rs, int Rt)
1406 {
1407     *mPC++ = (spec_op<<OP_SHF) | (mul_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
1408                         (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1409 }
1410 
MUH(int Rd,int Rs,int Rt)1411 void MIPS64Assembler::MUH(int Rd, int Rs, int Rt)
1412 {
1413     *mPC++ = (spec_op<<OP_SHF) | (muh_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
1414                         (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1415 }
1416 
CLO(int Rd,int Rs)1417 void MIPS64Assembler::CLO(int Rd, int Rs)
1418 {
1419     *mPC++ = (spec_op<<OP_SHF) | (17<<FUNC_SHF) |
1420                         (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
1421 }
1422 
CLZ(int Rd,int Rs)1423 void MIPS64Assembler::CLZ(int Rd, int Rs)
1424 {
1425     *mPC++ = (spec_op<<OP_SHF) | (16<<FUNC_SHF) |
1426                         (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
1427 }
1428 
LD(int Rt,int Rbase,int16_t offset)1429 void MIPS64Assembler::LD(int Rt, int Rbase, int16_t offset)
1430 {
1431     *mPC++ = (ld_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1432 }
1433 
SD(int Rt,int Rbase,int16_t offset)1434 void MIPS64Assembler::SD(int Rt, int Rbase, int16_t offset)
1435 {
1436     *mPC++ = (sd_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1437 }
1438 
LUI(int Rt,int16_t offset)1439 void MIPS64Assembler::LUI(int Rt, int16_t offset)
1440 {
1441     *mPC++ = (aui_op<<OP_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1442 }
1443 
1444 
JR(int Rs)1445 void MIPS64Assembler::JR(int Rs)
1446 {
1447         *mPC++ = (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (jalr_fn << FUNC_SHF);
1448         MIPS64Assembler::NOP();
1449 }
1450 
1451 }; // namespace android:
1452