1 /* libs/pixelflinger/codeflinger/MIPS64Assembler.cpp
2 **
3 ** Copyright 2015, The Android Open Source Project
4 **
5 ** Licensed under the Apache License, Version 2.0 (the "License");
6 ** you may not use this file except in compliance with the License.
7 ** You may obtain a copy of the License at
8 **
9 **     http://www.apache.org/licenses/LICENSE-2.0
10 **
11 ** Unless required by applicable law or agreed to in writing, software
12 ** distributed under the License is distributed on an "AS IS" BASIS,
13 ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 ** See the License for the specific language governing permissions and
15 ** limitations under the License.
16 */
17 
18 
19 /* MIPS64 assembler and ARM->MIPS64 assembly translator
20 **
21 ** The approach is utilize MIPSAssembler generator, using inherited MIPS64Assembler
22 ** that overrides just the specific MIPS64r6 instructions.
23 ** For now ArmToMips64Assembler is copied over from ArmToMipsAssembler class,
24 ** changing some MIPS64r6 related stuff.
25 **
26 */
27 
28 #define LOG_TAG "MIPS64Assembler"
29 
30 #include <stdio.h>
31 #include <stdlib.h>
32 
33 #include <cutils/properties.h>
34 #include <log/log.h>
35 #include <private/pixelflinger/ggl_context.h>
36 
37 #include "MIPS64Assembler.h"
38 #include "CodeCache.h"
39 #include "mips64_disassem.h"
40 
41 #define NOT_IMPLEMENTED()  LOG_ALWAYS_FATAL("Arm instruction %s not yet implemented\n", __func__)
42 
43 // ----------------------------------------------------------------------------
44 
45 namespace android {
46 
47 // ----------------------------------------------------------------------------
48 #if 0
49 #pragma mark -
50 #pragma mark ArmToMips64Assembler...
51 #endif
52 
// Construct a translator that records ARM-level disassembly alongside the
// generated MIPS64 code. 'abuf' (caller-owned) receives the ARM disassembly
// text, 'linesz' is the per-line size of that buffer, 'instr_count' the
// number of lines it can hold.
ArmToMips64Assembler::ArmToMips64Assembler(const sp<Assembly>& assembly,
                                           char *abuf, int linesz, int instr_count)
    :   ARMAssemblerInterface(),
        mArmDisassemblyBuffer(abuf),
        mArmLineLength(linesz),
        mArmInstrCount(instr_count),
        mInum(0),
        mAssembly(assembly)
{
    // underlying MIPS64 code generator; back-pointer lets it query this parent
    mMips = new MIPS64Assembler(assembly, this);
    // per-ARM-instruction starting PC, used to line up ARM vs MIPS disassembly
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}
66 
// Construct a translator that emits directly to a raw code buffer, with no
// ARM disassembly capture and no Assembly object.
ArmToMips64Assembler::ArmToMips64Assembler(void* assembly)
    :   ARMAssemblerInterface(),
        mArmDisassemblyBuffer(NULL),
        mInum(0),
        mAssembly(NULL)
{
    mMips = new MIPS64Assembler(assembly, this);
    mArmPC = (uint32_t **) malloc(ARM_MAX_INSTUCTIONS * sizeof(uint32_t *));
    init_conditional_labels();
}
77 
~ArmToMips64Assembler()78 ArmToMips64Assembler::~ArmToMips64Assembler()
79 {
80     delete mMips;
81     free((void *) mArmPC);
82 }
83 
// Current emission PC, delegated to the underlying MIPS64 assembler.
uint32_t* ArmToMips64Assembler::pc() const
{
    return mMips->pc();
}
88 
// Start of the generated code buffer, delegated to the MIPS64 assembler.
uint32_t* ArmToMips64Assembler::base() const
{
    return mMips->base();
}
93 
// Reset translator state so a new fragment can be generated: clears the
// conditional-label counter, the ARM instruction count, and the MIPS emitter.
void ArmToMips64Assembler::reset()
{
    cond.labelnum = 0;
    mInum = 0;
    mMips->reset();
}
100 
// Identify the target architecture of this code generator.
int ArmToMips64Assembler::getCodegenArch()
{
    return CODEGEN_ARCH_MIPS64;
}
105 
// Attach a free-form comment to the output stream (delegated).
void ArmToMips64Assembler::comment(const char* string)
{
    mMips->comment(string);
}
110 
// Define a branch-target label at the current PC (delegated).
void ArmToMips64Assembler::label(const char* theLabel)
{
    mMips->label(theLabel);
}
115 
// Disassemble the generated MIPS code under the given fragment name
// (delegated).
void ArmToMips64Assembler::disassemble(const char* name)
{
    mMips->disassemble(name);
}
120 
init_conditional_labels()121 void ArmToMips64Assembler::init_conditional_labels()
122 {
123     int i;
124     for (i=0;i<99; ++i) {
125         sprintf(cond.label[i], "cond_%d", i);
126     }
127 }
128 
129 
130 
131 #if 0
132 #pragma mark -
133 #pragma mark Prolog/Epilog & Generate...
134 #endif
135 
// Emit the function prologue: allocate a 5-slot stack frame, save callee-saved
// registers s0-s4, and move the context pointer from the MIPS argument
// register into the register standing in for ARM r0.
void ArmToMips64Assembler::prolog()
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->DADDIU(R_sp, R_sp, -(5 * 8));
    mMips->SD(R_s0, R_sp, 0);
    mMips->SD(R_s1, R_sp, 8);
    mMips->SD(R_s2, R_sp, 16);
    mMips->SD(R_s3, R_sp, 24);
    mMips->SD(R_s4, R_sp, 32);
    mMips->MOVE(R_v0, R_a0);    // move context * passed in a0 to v0 (arm r0)
}
148 
// Emit the function epilogue: restore s0-s4, release the stack frame, and
// return. The 'touched' register mask is ignored here; all five saved
// registers are restored unconditionally (mirroring prolog()).
void ArmToMips64Assembler::epilog(uint32_t touched)
{
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->LD(R_s0, R_sp, 0);
    mMips->LD(R_s1, R_sp, 8);
    mMips->LD(R_s2, R_sp, 16);
    mMips->LD(R_s3, R_sp, 24);
    mMips->LD(R_s4, R_sp, 32);
    mMips->DADDIU(R_sp, R_sp, (5 * 8));
    mMips->JR(R_ra);

}
162 
// Finalize and publish the generated code fragment (delegated).
int ArmToMips64Assembler::generate(const char* name)
{
    return mMips->generate(name);
}
167 
// Resolve forward-branch fixups recorded during emission (delegated).
void ArmToMips64Assembler::fix_branches()
{
    mMips->fix_branches();
}
172 
// Look up the PC bound to a previously defined label (delegated).
uint32_t* ArmToMips64Assembler::pcForLabel(const char* label)
{
    return mMips->pcForLabel(label);
}
177 
set_condition(int mode,int R1,int R2)178 void ArmToMips64Assembler::set_condition(int mode, int R1, int R2) {
179     if (mode == 2) {
180         cond.type = SBIT_COND;
181     } else {
182         cond.type = CMP_COND;
183     }
184     cond.r1 = R1;
185     cond.r2 = R2;
186 }
187 
188 //----------------------------------------------------------
189 
190 #if 0
191 #pragma mark -
192 #pragma mark Addressing modes & shifters...
193 #endif
194 
195 
196 // do not need this for MIPS, but it is in the Interface (virtual)
// ARM requires immediates to be encodable as an 8-bit value rotated by 'rot';
// MIPS has no such restriction, so every immediate passes through unchanged
// with rotation 0. Always returns 0 (success).
int ArmToMips64Assembler::buildImmediate(
        uint32_t immediate, uint32_t& rot, uint32_t& imm)
{
    // for MIPS, any 32-bit immediate is OK
    rot = 0;
    imm = immediate;
    return 0;
}
205 
206 // shifters...
207 
// Every 32-bit immediate is representable on MIPS, so this always succeeds
// (the parameter is deliberately unused).
bool ArmToMips64Assembler::isValidImmediate(uint32_t immediate)
{
    // for MIPS, any 32-bit immediate is OK
    return true;
}
213 
// Operand-2 builder: stash an immediate value in 'amode' and return the
// mode token consumed later by dataProcAdrModes().
uint32_t ArmToMips64Assembler::imm(uint32_t immediate)
{
    amode.value = immediate;
    return AMODE_IMM;
}
219 
// Operand-2 builder: register Rm shifted by a constant amount.
// 'type' is the ARM shift kind (LSL/LSR/ASR/ROR).
uint32_t ArmToMips64Assembler::reg_imm(int Rm, int type, uint32_t shift)
{
    amode.reg = Rm;
    amode.stype = type;
    amode.value = shift;
    return AMODE_REG_IMM;
}
227 
// Rotate-right-with-extend addressing: unsupported (unused by GGLAssembler).
uint32_t ArmToMips64Assembler::reg_rrx(int Rm)
{
    // reg_rrx mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}
233 
// Register-shifted-by-register addressing: unsupported (unused by
// GGLAssembler).
uint32_t ArmToMips64Assembler::reg_reg(int Rm, int type, int Rs)
{
    // reg_reg mode is not used in the GGLAssembler code at this time
    return AMODE_UNSUPPORTED;
}
239 
240 
241 // addressing modes...
242 // LDR(B)/STR(B)/PLD (immediate and Rm can be negative, which indicate U=0)
// Pre-indexed 12-bit immediate addressing for LDR(B)/STR(B)/PLD.
// Aborts if the (signed) offset exceeds the MIPS 16-bit load/store range
// actually allowed here (+/-0x800 mirrors the ARM 12-bit field with sign).
// W requests optional base-register writeback.
uint32_t ArmToMips64Assembler::immed12_pre(int32_t immed12, int W)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);
    amode.value = immed12;
    amode.writeback = W;
    return AMODE_IMM_12_PRE;
}
252 
// Post-indexed 12-bit immediate addressing for LDR(B)/STR(B)/PLD:
// access at the base, then advance the base by immed12.
uint32_t ArmToMips64Assembler::immed12_post(int32_t immed12)
{
    LOG_ALWAYS_FATAL_IF(abs(immed12) >= 0x800,
                        "LDR(B)/STR(B)/PLD immediate too big (%08x)",
                        immed12);

    amode.value = immed12;
    return AMODE_IMM_12_POST;
}
262 
// Pre-indexed register-offset addressing. Only the simple base+index form is
// supported: any writeback, shift type, or shift amount aborts.
uint32_t ArmToMips64Assembler::reg_scale_pre(int Rm, int type,
        uint32_t shift, int W)
{
    // bitwise-OR of the three "advanced" parameters: fatal if any is nonzero
    LOG_ALWAYS_FATAL_IF(W | type | shift, "reg_scale_pre adv modes not yet implemented");

    amode.reg = Rm;
    // amode.stype = type;      // more advanced modes not used in GGLAssembler yet
    // amode.value = shift;
    // amode.writeback = W;
    return AMODE_REG_SCALE_PRE;
}
274 
// Post-indexed scaled-register addressing: not implemented (always fatal).
uint32_t ArmToMips64Assembler::reg_scale_post(int Rm, int type, uint32_t shift)
{
    LOG_ALWAYS_FATAL("adr mode reg_scale_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}
280 
281 // LDRH/LDRSB/LDRSH/STRH (immediate and Rm can be negative, which indicate U=0)
// Pre-indexed 8-bit immediate addressing for LDRH/LDRSB/LDRSH/STRH:
// not implemented. NOTE(review): the unconditional fatal below makes the
// range check after it unreachable, and amode is never populated — if this
// mode is ever implemented, both need revisiting.
uint32_t ArmToMips64Assembler::immed8_pre(int32_t immed8, int W)
{
    LOG_ALWAYS_FATAL("adr mode immed8_pre not yet implemented\n");

    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    return AMODE_IMM_8_PRE;
}
291 
// Post-indexed 8-bit immediate addressing for LDRH/LDRSB/LDRSH/STRH:
// access at the base, then advance the base by immed8.
uint32_t ArmToMips64Assembler::immed8_post(int32_t immed8)
{
    LOG_ALWAYS_FATAL_IF(abs(immed8) >= 0x100,
                        "LDRH/LDRSB/LDRSH/STRH immediate too big (%08x)",
                        immed8);
    amode.value = immed8;
    return AMODE_IMM_8_POST;
}
300 
// Pre-indexed register-offset addressing (base + Rm). Writeback (W) is not
// supported and aborts.
uint32_t ArmToMips64Assembler::reg_pre(int Rm, int W)
{
    LOG_ALWAYS_FATAL_IF(W, "reg_pre writeback not yet implemented");
    amode.reg = Rm;
    return AMODE_REG_PRE;
}
307 
// Post-indexed register-offset addressing: not implemented (always fatal).
uint32_t ArmToMips64Assembler::reg_post(int Rm)
{
    LOG_ALWAYS_FATAL("adr mode reg_post not yet implemented\n");
    return AMODE_UNSUPPORTED;
}
313 
314 
315 
316 // ----------------------------------------------------------------------------
317 
318 #if 0
319 #pragma mark -
320 #pragma mark Data Processing...
321 #endif
322 
323 
// ARM data-processing mnemonics indexed by the 4-bit ARM opcode value
// (opAND..opMVN); kept for disassembly/debug output.
static const char * const dpOpNames[] = {
    "AND", "EOR", "SUB", "RSB", "ADD", "ADC", "SBC", "RSC",
    "TST", "TEQ", "CMP", "CMN", "ORR", "MOV", "BIC", "MVN"
};
328 
329 // check if the operand registers from a previous CMP or S-bit instruction
330 // would be overwritten by this instruction. If so, move the value to a
331 // safe register.
332 // Note that we cannot tell at _this_ instruction time if a future (conditional)
333 // instruction will _also_ use this value (a defect of the simple 1-pass, one-
334 // instruction-at-a-time translation). Therefore we must be conservative and
335 // save the value before it is overwritten. This costs an extra MOVE instr.
336 
protectConditionalOperands(int Rd)337 void ArmToMips64Assembler::protectConditionalOperands(int Rd)
338 {
339     if (Rd == cond.r1) {
340         mMips->MOVE(R_cmp, cond.r1);
341         cond.r1 = R_cmp;
342     }
343     if (cond.type == CMP_COND && Rd == cond.r2) {
344         mMips->MOVE(R_cmp2, cond.r2);
345         cond.r2 = R_cmp2;
346     }
347 }
348 
349 
350 // interprets the addressing mode, and generates the common code
351 // used by the majority of data-processing ops. Many MIPS instructions
352 // have a register-based form and a different immediate form. See
353 // opAND below for an example. (this could be inlined)
354 //
355 // this works with the imm(), reg_imm() methods above, which are directly
356 // called by the GLLAssembler.
357 // note: _signed parameter defaults to false (un-signed)
358 // note: tmpReg parameter defaults to 1, MIPS register AT
// Decode an operand-2 token (a plain register number, or an AMODE_* value
// referring to 'amode') into a MIPS source operand. On return 'source' holds
// either a register number (result SRC_REG) or a 16-bit-encodable immediate
// (result SRC_IMM). Oversized immediates and shifted registers are
// materialized into tmpReg.
int ArmToMips64Assembler::dataProcAdrModes(int op, int& source, bool _signed, int tmpReg)
{
    if (op < AMODE_REG) {
        // token below AMODE_REG is a plain register number
        source = op;
        return SRC_REG;
    } else if (op == AMODE_IMM) {
        // immediate too wide for the 16-bit instruction field (unsigned or
        // signed interpretation, per the op): build it in tmpReg via LUI/ORI
        if ((!_signed && amode.value > 0xffff)
                || (_signed && ((int)amode.value < -32768 || (int)amode.value > 32767) )) {
            mMips->LUI(tmpReg, (amode.value >> 16));
            if (amode.value & 0x0000ffff) {
                mMips->ORI(tmpReg, tmpReg, (amode.value & 0x0000ffff));
            }
            source = tmpReg;
            return SRC_REG;
        } else {
            source = amode.value;
            return SRC_IMM;
        }
    } else if (op == AMODE_REG_IMM) {
        // register shifted by a constant: compute the shifted value in tmpReg
        switch (amode.stype) {
            case LSL: mMips->SLL(tmpReg, amode.reg, amode.value); break;
            case LSR: mMips->SRL(tmpReg, amode.reg, amode.value); break;
            case ASR: mMips->SRA(tmpReg, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(tmpReg, amode.reg, amode.value); break;
        }
        source = tmpReg;
        return SRC_REG;
    } else {  // adr mode RRX is not used in GGL Assembler at this time
        // we are screwed, this should be exception, assert-fail or something
        LOG_ALWAYS_FATAL("adr mode reg_rrx not yet implemented\n");
        return SRC_ERROR;
    }
}
392 
393 
// Translate one ARM data-processing instruction (AND/ADD/SUB/.../CMP/MOV...)
// into MIPS64 code. A non-AL condition is handled by emitting an inverted
// conditional branch around the instruction body. If the S bit is set (and
// the op is not CMP), the destination register is recorded so a later
// conditional branch can test it against zero.
void ArmToMips64Assembler::dataProcessing(int opcode, int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
    int src;    // src is modified by dataProcAdrModes() - passed as int&

    if (cc != AL) {
        protectConditionalOperands(Rd);
        // the branch tests register(s) set by prev CMP or instr with 'S' bit set
        // inverse the condition to jump past this conditional instruction
        ArmToMips64Assembler::B(cc^1, cond.label[++cond.labelnum]);
    } else {
        mArmPC[mInum++] = pc();  // save starting PC for this instr
    }

    switch (opcode) {
    case opAND:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->AND(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->ANDI(Rd, Rn, src);
        }
        break;

    case opADD:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->ADDU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->ADDIU(Rd, Rn, src);
        }
        break;

    case opSUB:
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->SUBU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->SUBIU(Rd, Rn, src);
        }
        break;

    case opADD64:
        // 64-bit add: same shape as opADD but using doubleword ops
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DADDU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->DADDIU(Rd, Rn, src);
        }
        break;

    case opSUB64:
        // 64-bit subtract: doubleword counterpart of opSUB
        // set "signed" to true for adr modes
        if (dataProcAdrModes(Op2, src, true) == SRC_REG) {
            mMips->DSUBU(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->DSUBIU(Rd, Rn, src);
        }
        break;

    case opEOR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->XOR(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->XORI(Rd, Rn, src);
        }
        break;

    case opORR:
        if (dataProcAdrModes(Op2, src) == SRC_REG) {
            mMips->OR(Rd, Rn, src);
        } else {                        // adr mode was SRC_IMM
            mMips->ORI(Rd, Rn, src);
        }
        break;

    case opBIC:
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if we are 16-bit immediate, load to AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        // BIC Rd, Rn, Op2  ==>  Rd = Rn & ~Op2
        mMips->NOT(R_at, src);
        mMips->AND(Rd, Rn, R_at);
        break;

    case opRSB:
        if (dataProcAdrModes(Op2, src) == SRC_IMM) {
            // if we are 16-bit immediate, load to AT reg
            mMips->ORI(R_at, 0, src);
            src = R_at;
        }
        mMips->SUBU(Rd, src, Rn);   // subu with the parameters reversed
        break;

    case opMOV:
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->MOVE(Rd, Op2);
        } else if (Op2 == AMODE_IMM) {
            // wide immediates are built with LUI/ORI; small ones with ORI
            if (amode.value > 0xffff) {
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
             } else {
                mMips->ORI(Rd, 0, amode.value);
            }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
            case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
            case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
            case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        }
        else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        break;

    case opMVN:     // this is a 1's complement: NOT
        if (Op2 < AMODE_REG) {  // op2 is reg # in this case
            mMips->NOR(Rd, Op2, 0);     // NOT is NOR with 0
            break;
        } else if (Op2 == AMODE_IMM) {
            // build the value in Rd first; it is inverted by the NOR below
            if (amode.value > 0xffff) {
                mMips->LUI(Rd, (amode.value >> 16));
                if (amode.value & 0x0000ffff) {
                    mMips->ORI(Rd, Rd, (amode.value & 0x0000ffff));
                }
             } else {
                mMips->ORI(Rd, 0, amode.value);
             }
        } else if (Op2 == AMODE_REG_IMM) {
            switch (amode.stype) {
            case LSL: mMips->SLL(Rd, amode.reg, amode.value); break;
            case LSR: mMips->SRL(Rd, amode.reg, amode.value); break;
            case ASR: mMips->SRA(Rd, amode.reg, amode.value); break;
            case ROR: mMips->ROTR(Rd, amode.reg, amode.value); break;
            }
        }
        else {
            // adr mode RRX is not used in GGL Assembler at this time
            mMips->UNIMPL();
        }
        mMips->NOR(Rd, Rd, 0);     // NOT is NOR with 0
        break;

    case opCMP:
        // Either operand of a CMP instr could get overwritten by a subsequent
        // conditional instruction, which is ok, _UNLESS_ there is a _second_
        // conditional instruction. Under MIPS, this requires doing the comparison
        // again (SLT), and the original operands must be available. (and this
        // pattern of multiple conditional instructions from same CMP _is_ used
        // in GGL-Assembler)
        //
        // For now, if a conditional instr overwrites the operands, we will
        // move them to dedicated temp regs. This is ugly, and inefficient,
        // and should be optimized.
        //
        // WARNING: making an _Assumption_ that CMP operand regs will NOT be
        // trashed by intervening NON-conditional instructions. In the general
        // case this is legal, but it is NOT currently done in GGL-Assembler.

        cond.type = CMP_COND;
        cond.r1 = Rn;
        if (dataProcAdrModes(Op2, src, false, R_cmp2) == SRC_REG) {
            cond.r2 = src;
        } else {                        // adr mode was SRC_IMM
            // immediate operand: park it in R_cmp2 so the branch can test it
            mMips->ORI(R_cmp2, R_zero, src);
            cond.r2 = R_cmp2;
        }

        break;


    case opTST:
    case opTEQ:
    case opCMN:
    case opADC:
    case opSBC:
    case opRSC:
        mMips->UNIMPL(); // currently unused in GGL Assembler code
        break;
    }

    if (cc != AL) {
        // bind the skip-label emitted for the inverted condition above
        mMips->label(cond.label[cond.labelnum]);
    }
    if (s && opcode != opCMP) {
        // S bit: subsequent conditional instructions test Rd against zero
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
588 
589 
590 
591 #if 0
592 #pragma mark -
593 #pragma mark Multiply...
594 #endif
595 
596 // multiply, accumulate
// Multiply-accumulate: Rd = Rm * Rs + Rn. The condition code 'cc' is ignored
// here (translation assumes AL); R_at is used as the product scratch register.
void ArmToMips64Assembler::MLA(int cc, int s,
        int Rd, int Rm, int Rs, int Rn) {

    //ALOGW("MLA");
    mArmPC[mInum++] = pc();  // save starting PC for this instr

    mMips->MUL(R_at, Rm, Rs);
    mMips->ADDU(Rd, R_at, Rn);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
610 
// 32-bit multiply: Rd = Rm * Rs. 'cc' is ignored (translation assumes AL);
// the S bit records Rd for a later conditional branch.
void ArmToMips64Assembler::MUL(int cc, int s,
        int Rd, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUL(Rd, Rm, Rs);
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = Rd;
    }
}
620 
// Unsigned long multiply: RdHi:RdLo = Rm * Rs, via the MIPS64r6 MUH/MUL pair.
// NOTE(review): the high half is computed first; assumes RdHi does not alias
// Rm or Rs (no check here, unlike UMUAL) — confirm against callers.
void ArmToMips64Assembler::UMULL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    mArmPC[mInum++] = pc();
    mMips->MUH(RdHi, Rm, Rs);
    mMips->MUL(RdLo, Rm, Rs);

    if (s) {
        // an S-bit condition on a 64-bit result cannot be represented with
        // the single-register scheme used here
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}
633 
// Unsigned multiply-accumulate long: not implemented — emits a NOP pair and
// aborts via NOT_IMPLEMENTED(). Register-aliasing constraints are still
// checked up front.
void ArmToMips64Assembler::UMUAL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                        "UMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<21) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on UMULL must be on 64-bit result\n");
    }
}
649 
// Signed long multiply: not implemented — emits a NOP pair and aborts via
// NOT_IMPLEMENTED(). Register-aliasing constraints are checked up front.
void ArmToMips64Assembler::SMULL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                        "SMULL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<22) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on SMULL must be on 64-bit result\n");
    }
}
// Signed multiply-accumulate long: not implemented — emits a NOP pair and
// aborts via NOT_IMPLEMENTED(). Register-aliasing constraints are checked
// up front.
void ArmToMips64Assembler::SMUAL(int cc, int s,
        int RdLo, int RdHi, int Rm, int Rs) {
    LOG_FATAL_IF(RdLo==Rm || RdHi==Rm || RdLo==RdHi,
                        "SMUAL(r%u,r%u,r%u,r%u)", RdLo,RdHi,Rm,Rs);
    // *mPC++ =    (cc<<28) | (1<<23) | (1<<22) | (1<<21) | (s<<20) |
    //             (RdHi<<16) | (RdLo<<12) | (Rs<<8) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
    if (s) {
        cond.type = SBIT_COND;
        cond.r1 = RdHi;     // BUG...
        LOG_ALWAYS_FATAL("Condition on SMUAL must be on 64-bit result\n");
    }
}
680 
681 
682 
683 #if 0
684 #pragma mark -
685 #pragma mark Branches...
686 #endif
687 
688 // branches...
689 
// Conditional branch to a named label: maps the ARM condition code to the
// matching MIPS compare-and-branch on the registers recorded in 'cond'.
// For an S-bit condition the second operand is the zero register.
// VS/VC (overflow) are unsupported; NV emits nothing.
void ArmToMips64Assembler::B(int cc, const char* label)
{
    mArmPC[mInum++] = pc();
    if (cond.type == SBIT_COND) { cond.r2 = R_zero; }

    switch(cc) {
        case EQ: mMips->BEQ(cond.r1, cond.r2, label); break;
        case NE: mMips->BNE(cond.r1, cond.r2, label); break;
        case HS: mMips->BGEU(cond.r1, cond.r2, label); break;
        case LO: mMips->BLTU(cond.r1, cond.r2, label); break;
        case MI: mMips->BLT(cond.r1, cond.r2, label); break;
        case PL: mMips->BGE(cond.r1, cond.r2, label); break;

        case HI: mMips->BGTU(cond.r1, cond.r2, label); break;
        case LS: mMips->BLEU(cond.r1, cond.r2, label); break;
        case GE: mMips->BGE(cond.r1, cond.r2, label); break;
        case LT: mMips->BLT(cond.r1, cond.r2, label); break;
        case GT: mMips->BGT(cond.r1, cond.r2, label); break;
        case LE: mMips->BLE(cond.r1, cond.r2, label); break;
        case AL: mMips->B(label); break;
        case NV: /* B Never - no instruction */ break;

        case VS:
        case VC:
        default:
            LOG_ALWAYS_FATAL("Unsupported cc: %02x\n", cc);
            break;
    }
}
719 
// Branch-and-link to a label: not supported (always fatal).
void ArmToMips64Assembler::BL(int cc, const char* label)
{
    LOG_ALWAYS_FATAL("branch-and-link not supported yet\n");
    mArmPC[mInum++] = pc();
}
725 
726 // no use for Branches with integer PC, but they're in the Interface class ....
// Branch to an absolute PC: not supported — callers must use labels.
void ArmToMips64Assembler::B(int cc, uint32_t* to_pc)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
732 
// Branch-and-link to an absolute PC: not supported — callers must use labels.
void ArmToMips64Assembler::BL(int cc, uint32_t* to_pc)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
738 
// Branch-exchange to a register-held address: not supported.
void ArmToMips64Assembler::BX(int cc, int Rn)
{
    LOG_ALWAYS_FATAL("branch to absolute PC not supported, use Label\n");
    mArmPC[mInum++] = pc();
}
744 
745 
746 
747 #if 0
748 #pragma mark -
749 #pragma mark Data Transfer...
750 #endif
751 
752 // data transfer...
// Load word. 'offset' is an addressing-mode token from the builders above;
// a raw value above AMODE_UNSUPPORTED is treated as the ARM default
// immed12_pre(0). 'cc' is ignored (translation assumes AL).
void ArmToMips64Assembler::LDR(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LW via Mips SP
            }
            mMips->LW(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert STR thru Arm SP to STR thru Mips SP
            }
            // load at base, then post-increment the base register
            mMips->LW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LW(Rd, R_at, 0);
            break;
    }
}
786 
// Load unsigned byte (ARM LDRB -> MIPS LBU). 'offset' handling mirrors LDR;
// 'cc' is ignored (translation assumes AL).
void ArmToMips64Assembler::LDRB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->LBU(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            // load at base, then post-increment the base register
            mMips->LBU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LBU(Rd, R_at, 0);
            break;
    }

}
815 
// Store word. 'offset' handling mirrors LDR; 'cc' is ignored (translation
// assumes AL). With pre-index writeback the base is updated BEFORE the store
// so the stack-push idiom stores at the new top-of-stack.
void ArmToMips64Assembler::STR(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SW thru Mips SP
            }
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SW(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SW(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SW(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SW(Rd, R_at, 0);
            break;
    }
}
851 
// Store byte (ARM STRB -> MIPS SB). 'offset' handling mirrors LDR; 'cc' is
// ignored (translation assumes AL). Note: unlike STR, pre-index writeback
// here stores first, then updates the base.
void ArmToMips64Assembler::STRB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            mMips->SB(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SB(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SB(Rd, R_at, 0);
            break;
    }
}
879 
// Load unsigned halfword (ARM LDRH -> MIPS LHU). 'offset' is an 8-bit-mode
// token; a raw value above AMODE_UNSUPPORTED is treated as the ARM default
// immed8_pre(0). 'cc' is ignored (translation assumes AL).
void ArmToMips64Assembler::LDRH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:      // no support yet for writeback
            mMips->LHU(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->LHU(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            // NOTE(review): amode.reg holds a register number here; the
            // negative branch implies it can encode a negated index —
            // confirm against reg_pre()'s callers.
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->LHU(Rd, R_at, 0);
            break;
    }
}
907 
// Arm LDRSB is not translated: emit a placeholder NOP pair so the
// Arm->MIPS PC map stays consistent, then abort fatally if ever reached.
void ArmToMips64Assembler::LDRSB(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
914 
// Arm LDRSH is not translated: placeholder NOP pair keeps the Arm->MIPS
// PC map consistent; NOT_IMPLEMENTED() aborts if this path is executed.
void ArmToMips64Assembler::LDRSH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
921 
// Translate Arm STRH (store halfword) into MIPS64 SH.
// 'offset' carries an AMODE_* code or a plain value; 'cc' is ignored.
void ArmToMips64Assembler::STRH(int cc, int Rd, int Rn, uint32_t offset)
{
    mArmPC[mInum++] = pc();     // record first MIPS instr for this Arm instr
    // work-around for ARM default address mode of immed8_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            // fall thru to next case ....
        case AMODE_IMM_8_PRE:      // no support yet for writeback
            mMips->SH(Rd, Rn, amode.value);
            break;
        case AMODE_IMM_8_POST:
            mMips->SH(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post-index writeback
            break;
        case AMODE_REG_PRE:
            // we only support simple base +/- index
            // amode.reg < 0 presumably encodes a subtracted index register;
            // abs() recovers the register number -- TODO confirm.
            if (amode.reg >= 0) {
                mMips->DADDU(R_at, Rn, amode.reg);
            } else {
                mMips->DSUBU(R_at, Rn, abs(amode.reg));
            }
            mMips->SH(Rd, R_at, 0);
            break;
    }
}
949 
950 
951 
952 #if 0
953 #pragma mark -
954 #pragma mark Block Data Transfer...
955 #endif
956 
957 // block data transfer...
// Arm LDM (load multiple) is not translated. The commented-out lines below
// preserve the original Arm encoding for reference. A NOP pair keeps the
// Arm->MIPS PC map in step; NOT_IMPLEMENTED() aborts if ever executed.
void ArmToMips64Assembler::LDM(int cc, int dir,
        int Rn, int W, uint32_t reg_list)
{   //                        ED FD EA FA      IB IA DB DA
    // const uint8_t P[8] = { 1, 0, 1, 0,      1, 0, 1, 0 };
    // const uint8_t U[8] = { 1, 1, 0, 0,      1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //         (uint32_t(U[dir])<<23) | (1<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
969 
// Arm STM (store multiple) is not translated; see LDM. Reference encoding
// kept in comments; placeholder NOPs preserve the PC map; fatal if reached.
void ArmToMips64Assembler::STM(int cc, int dir,
        int Rn, int W, uint32_t reg_list)
{   //                        FA EA FD ED      IB IA DB DA
    // const uint8_t P[8] = { 0, 1, 0, 1,      1, 0, 1, 0 };
    // const uint8_t U[8] = { 0, 0, 1, 1,      1, 1, 0, 0 };
    // *mPC++ = (cc<<28) | (4<<25) | (uint32_t(P[dir])<<24) |
    //         (uint32_t(U[dir])<<23) | (0<<20) | (W<<21) | (Rn<<16) | reg_list;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
981 
982 
983 
984 #if 0
985 #pragma mark -
986 #pragma mark Special...
987 #endif
988 
989 // special...
// Arm SWP (swap word) is not translated; placeholder NOPs + fatal abort.
void ArmToMips64Assembler::SWP(int cc, int Rn, int Rd, int Rm) {
    // *mPC++ = (cc<<28) | (2<<23) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
996 
// Arm SWPB (swap byte) is not translated; placeholder NOPs + fatal abort.
void ArmToMips64Assembler::SWPB(int cc, int Rn, int Rd, int Rm) {
    // *mPC++ = (cc<<28) | (2<<23) | (1<<22) | (Rn<<16) | (Rd << 12) | 0x90 | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1003 
// Arm SWI (software interrupt) is not translated; placeholder + fatal abort.
void ArmToMips64Assembler::SWI(int cc, uint32_t comment) {
    // *mPC++ = (cc<<28) | (0xF<<24) | comment;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1010 
1011 
1012 #if 0
1013 #pragma mark -
1014 #pragma mark DSP instructions...
1015 #endif
1016 
1017 // DSP instructions...
// Arm PLD (preload hint). Only the P=1, W=0 form is accepted (checked
// against the raw offset bits); the translation itself is still a
// placeholder that aborts fatally if executed.
void ArmToMips64Assembler::PLD(int Rn, uint32_t offset) {
    LOG_ALWAYS_FATAL_IF(!((offset&(1<<24)) && !(offset&(1<<21))),
                        "PLD only P=1, W=0");
    // *mPC++ = 0xF550F000 | (Rn<<16) | offset;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1026 
// Arm CLZ (count leading zeros) maps 1:1 onto the MIPS64r6 CLZ
// instruction; 'cc' is ignored.
void ArmToMips64Assembler::CLZ(int cc, int Rd, int Rm)
{
    mArmPC[mInum++] = pc();
    mMips->CLZ(Rd, Rm);
}
1032 
// Arm QADD (saturating add) is not translated; placeholder + fatal abort.
void ArmToMips64Assembler::QADD(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1000050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1040 
// Arm QDADD (saturating double-and-add) not translated; fatal if reached.
void ArmToMips64Assembler::QDADD(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1400050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1048 
// Arm QSUB (saturating subtract) not translated; fatal if reached.
void ArmToMips64Assembler::QSUB(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1200050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1056 
// Arm QDSUB (saturating double-and-subtract) not translated; fatal if reached.
void ArmToMips64Assembler::QDSUB(int cc,  int Rd, int Rm, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1600050 | (Rn<<16) | (Rd<<12) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1064 
1065 // 16 x 16 signed multiply (like SMLAxx without the accumulate)
// Arm SMULxy: 16x16 signed multiply of selected register halves.
// Clobbers the R_at and R_at2 scratch registers; 'cc' is ignored.
void ArmToMips64Assembler::SMUL(int cc, int xy,
                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);   // arithmetic shift preserves the sign
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }
    // signed 16x16 product of the selected halves
    mMips->MUL(Rd, R_at, R_at2);
}
1093 
1094 // signed 32b x 16b multiple, save top 32-bits of 48-bit result
// Arm SMULWy: signed 32b x 16b multiply keeping the top 32 bits of the
// 48-bit result. Clobbers R_at; 'cc' is ignored.
void ArmToMips64Assembler::SMULW(int cc, int y,
                int Rd, int Rm, int Rs)
{
    mArmPC[mInum++] = pc();

    // the selector yT or yB refers to reg Rs
    if (y & yT) {
        // zero the bottom 16-bits, with 2 shifts, it can affect result
        mMips->SRL(R_at, Rs, 16);
        mMips->SLL(R_at, R_at, 16);

    } else {
        // move low 16-bit half, to high half
        mMips->SLL(R_at, Rs, 16);
    }
    // with the 16-bit operand pre-shifted into the upper half, the 32x32
    // high-word multiply yields the wanted top 32 bits of the product
    mMips->MUH(Rd, Rm, R_at);
}
1112 
1113 // 16 x 16 signed multiply, accumulate: Rd = Rm{16} * Rs{16} + Rn
// Arm SMLAxy: 16x16 signed multiply-accumulate, Rd = Rm{16} * Rs{16} + Rn.
// Clobbers R_at and R_at2; 'cc' is ignored. No Q-flag saturation handling.
void ArmToMips64Assembler::SMLA(int cc, int xy,
                int Rd, int Rm, int Rs, int Rn)
{
    mArmPC[mInum++] = pc();

    // the 16 bits may be in the top or bottom half of 32-bit source reg,
    // as defined by the codes BB, BT, TB, TT (compressed param xy)
    // where x corresponds to Rm and y to Rs

    // select half-reg for Rm
    if (xy & xyTB) {
        // use top 16-bits
        mMips->SRA(R_at, Rm, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at, Rm);
    }
    // select half-reg for Rs
    if (xy & xyBT) {
        // use top 16-bits
        mMips->SRA(R_at2, Rs, 16);
    } else {
        // use bottom 16, but sign-extend to 32
        mMips->SEH(R_at2, Rs);
    }

    mMips->MUL(R_at, R_at, R_at2);  // product into scratch
    mMips->ADDU(Rd, R_at, Rn);      // then accumulate
}
1143 
// Arm SMLALxy (64-bit multiply-accumulate) not translated; fatal if reached.
void ArmToMips64Assembler::SMLAL(int cc, int xy,
                int RdHi, int RdLo, int Rs, int Rm)
{
    // *mPC++ = (cc<<28) | 0x1400080 | (RdHi<<16) | (RdLo<<12) | (Rs<<8) | (xy<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1152 
// Arm SMLAWy (32x16 multiply-accumulate) not translated; fatal if reached.
void ArmToMips64Assembler::SMLAW(int cc, int y,
                int Rd, int Rm, int Rs, int Rn)
{
    // *mPC++ = (cc<<28) | 0x1200080 | (Rd<<16) | (Rn<<12) | (Rs<<8) | (y<<4) | Rm;
    mArmPC[mInum++] = pc();
    mMips->NOP2();
    NOT_IMPLEMENTED();
}
1161 
1162 // used by ARMv6 version of GGLAssembler::filter32
// used by ARMv6 version of GGLAssembler::filter32
// Arm UXTB16: rotate, then zero-extend bytes 0 and 2 into the two
// halfwords of Rd. Clobbers R_at and R_at2; 'cc' is ignored.
void ArmToMips64Assembler::UXTB16(int cc, int Rd, int Rm, int rotate)
{
    mArmPC[mInum++] = pc();

    //Rd[31:16] := ZeroExtend((Rm ROR (8 * sh))[23:16]),
    //Rd[15:0] := ZeroExtend((Rm ROR (8 * sh))[7:0]). sh 0-3.

    mMips->ROTR(R_at2, Rm, rotate * 8);  // rotate source by 0/8/16/24 bits
    mMips->LUI(R_at, 0xFF);              // R_at = 0x00FF0000
    mMips->ORI(R_at, R_at, 0xFF);        // R_at = 0x00FF00FF byte-lane mask
    mMips->AND(Rd, R_at2, R_at);         // keep bytes 0 and 2, zero the rest
}
1175 
// Arm UBFX (unsigned bit-field extract) not translated; fatal if reached.
void ArmToMips64Assembler::UBFX(int cc, int Rd, int Rn, int lsb, int width)
{
     /* Placeholder for UBFX */
     mArmPC[mInum++] = pc();

     mMips->NOP2();
     NOT_IMPLEMENTED();
}
1184 
1185 // ----------------------------------------------------------------------------
1186 // Address Processing...
1187 // ----------------------------------------------------------------------------
1188 
// 64-bit pointer-arithmetic add: delegates to dataProcessing with the
// MIPS64-specific opADD64 pseudo-op; cc/s handling lives there.
void ArmToMips64Assembler::ADDR_ADD(int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
//    if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
//    if(s  != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opADD64, cc, s, Rd, Rn, Op2);
}
1196 
// 64-bit pointer-arithmetic subtract: delegates to dataProcessing with the
// MIPS64-specific opSUB64 pseudo-op; cc/s handling lives there.
void ArmToMips64Assembler::ADDR_SUB(int cc,
        int s, int Rd, int Rn, uint32_t Op2)
{
//    if(cc != AL){ NOT_IMPLEMENTED(); return;} //Not required
//    if(s  != 0) { NOT_IMPLEMENTED(); return;} //Not required
    dataProcessing(opSUB64, cc, s, Rd, Rn, Op2);
}
1204 
// 64-bit address load: like LDR but emits the doubleword LD, and maps the
// Arm stack pointer onto the MIPS stack pointer. 'cc' is ignored.
void ArmToMips64Assembler::ADDR_LDR(int cc, int Rd, int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LD via Mips SP
            }
            mMips->LD(Rd, Rn, amode.value);
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                mMips->DADDIU(Rn, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;      // convert LDR via Arm SP to LD via Mips SP
            }
            mMips->LD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post-index writeback
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->LD(Rd, R_at, 0);
            break;
    }
}
1237 
// 64-bit address store: like STR but emits the doubleword SD, and maps the
// Arm stack pointer onto the MIPS stack pointer. 'cc' is ignored.
void ArmToMips64Assembler::ADDR_STR(int cc, int Rd, int Rn, uint32_t offset) {
    mArmPC[mInum++] = pc();
    // work-around for ARM default address mode of immed12_pre(0)
    if (offset > AMODE_UNSUPPORTED) offset = 0;
    switch (offset) {
        case 0:
            amode.value = 0;
            amode.writeback = 0;
            // fall thru to next case ....
        case AMODE_IMM_12_PRE:
            if (Rn == ARMAssemblerInterface::SP) {
                Rn = R_sp;  // convert STR thru Arm SP to SD thru Mips SP
            }
            if (amode.writeback) {      // OPTIONAL writeback on pre-index mode
                // If we will writeback, then update the index reg, then store.
                // This correctly handles stack-push case.
                mMips->DADDIU(Rn, Rn, amode.value);
                mMips->SD(Rd, Rn, 0);
            } else {
                // No writeback so store offset by value
                mMips->SD(Rd, Rn, amode.value);
            }
            break;
        case AMODE_IMM_12_POST:
            mMips->SD(Rd, Rn, 0);
            mMips->DADDIU(Rn, Rn, amode.value);  // post index always writes back
            break;
        case AMODE_REG_SCALE_PRE:
            // we only support simple base + index, no advanced modes for this one yet
            mMips->DADDU(R_at, Rn, amode.reg);
            mMips->SD(Rd, R_at, 0);
            break;
    }
}
1272 
1273 #if 0
1274 #pragma mark -
1275 #pragma mark MIPS Assembler...
1276 #endif
1277 
1278 
1279 //**************************************************************************
1280 //**************************************************************************
1281 //**************************************************************************
1282 
1283 
1284 /* MIPS64 assembler
1285 ** this is a subset of mips64r6, targeted specifically at ARM instruction
1286 ** replacement in the pixelflinger/codeflinger code.
1287 **
1288 ** This class is extended from MIPSAssembler class and overrides only
1289 ** MIPS64r6 specific stuff.
1290 */
1291 
MIPS64Assembler(const sp<Assembly> & assembly,ArmToMips64Assembler * parent)1292 MIPS64Assembler::MIPS64Assembler(const sp<Assembly>& assembly, ArmToMips64Assembler *parent)
1293     : mParent(parent),
1294     MIPSAssembler::MIPSAssembler(assembly, NULL)
1295 {
1296 }
1297 
MIPS64Assembler(void * assembly,ArmToMips64Assembler * parent)1298 MIPS64Assembler::MIPS64Assembler(void* assembly, ArmToMips64Assembler *parent)
1299     : mParent(parent),
1300     MIPSAssembler::MIPSAssembler(assembly)
1301 {
1302 }
1303 
// No MIPS64-specific state to release.
MIPS64Assembler::~MIPS64Assembler()
{
}
1307 
reset()1308 void MIPS64Assembler::reset()
1309 {
1310     if (mAssembly != NULL) {
1311         mBase = mPC = (uint32_t *)mAssembly->base();
1312     } else {
1313         mPC = mBase = base();
1314     }
1315     mBranchTargets.clear();
1316     mLabels.clear();
1317     mLabelsInverseMapping.clear();
1318     mComments.clear();
1319 }
1320 
1321 
// Dump the generated MIPS64 code through ALOGW, one instruction per line,
// preceded by any label or comment bound to that address. ('name' is
// currently unused.)
void MIPS64Assembler::disassemble(const char* name)
{
    char di_buf[140];

    // arm_disasm_fmt selects the translator-friendly output mode of
    // mips_disassem when the parent supplied an Arm disassembly buffer.
    bool arm_disasm_fmt = (mParent->mArmDisassemblyBuffer == NULL) ? false : true;

    typedef char dstr[40];
    dstr *lines = (dstr *)mParent->mArmDisassemblyBuffer;

    if (mParent->mArmDisassemblyBuffer != NULL) {
        for (int i=0; i<mParent->mArmInstrCount; ++i) {
            string_detab(lines[i]);     // normalize tabs in the Arm text in place
        }
    }

    // iArm is an index to Arm instructions 1...n for this assembly sequence
    // mArmPC[iArm] holds the value of the Mips-PC for the first MIPS
    // instruction corresponding to that Arm instruction number
    // NOTE(review): iArm is never consulted below -- the Arm source-line
    // interleaving suggested by the comment above appears to have been
    // dropped in this MIPS64 version; confirm whether that was intentional.

    int iArm = 0;
    size_t count = pc()-base();     // number of emitted 32-bit instructions
    uint32_t* mipsPC = base();

    while (count--) {
        // print any label bound to this address
        ssize_t label = mLabelsInverseMapping.indexOfKey(mipsPC);
        if (label >= 0) {
            ALOGW("%s:\n", mLabelsInverseMapping.valueAt(label));
        }
        // print any comment recorded for this address
        ssize_t comment = mComments.indexOfKey(mipsPC);
        if (comment >= 0) {
            ALOGW("; %s\n", mComments.valueAt(comment));
        }
        ::mips_disassem(mipsPC, di_buf, arm_disasm_fmt);
        string_detab(di_buf);
        string_pad(di_buf, 30);
        // NOTE(review): %08lx with a uintptr_t argument assumes
        // unsigned long == uintptr_t; PRIxPTR would be portable.
        ALOGW("%08lx:    %08x    %s", uintptr_t(mipsPC), uint32_t(*mipsPC), di_buf);
        mipsPC++;
    }
}
1361 
fix_branches()1362 void MIPS64Assembler::fix_branches()
1363 {
1364     // fixup all the branches
1365     size_t count = mBranchTargets.size();
1366     while (count--) {
1367         const branch_target_t& bt = mBranchTargets[count];
1368         uint32_t* target_pc = mLabels.valueFor(bt.label);
1369         LOG_ALWAYS_FATAL_IF(!target_pc,
1370                 "error resolving branch targets, target_pc is null");
1371         int32_t offset = int32_t(target_pc - (bt.pc+1));
1372         *bt.pc |= offset & 0x00FFFF;
1373     }
1374 }
1375 
DADDU(int Rd,int Rs,int Rt)1376 void MIPS64Assembler::DADDU(int Rd, int Rs, int Rt)
1377 {
1378     *mPC++ = (spec_op<<OP_SHF) | (daddu_fn<<FUNC_SHF)
1379                     | (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF);
1380 }
1381 
DADDIU(int Rt,int Rs,int16_t imm)1382 void MIPS64Assembler::DADDIU(int Rt, int Rs, int16_t imm)
1383 {
1384     *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | (imm & MSK_16);
1385 }
1386 
DSUBU(int Rd,int Rs,int Rt)1387 void MIPS64Assembler::DSUBU(int Rd, int Rs, int Rt)
1388 {
1389     *mPC++ = (spec_op<<OP_SHF) | (dsubu_fn<<FUNC_SHF) |
1390                         (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1391 }
1392 
DSUBIU(int Rt,int Rs,int16_t imm)1393 void MIPS64Assembler::DSUBIU(int Rt, int Rs, int16_t imm)   // really addiu(d, s, -j)
1394 {
1395     *mPC++ = (daddiu_op<<OP_SHF) | (Rt<<RT_SHF) | (Rs<<RS_SHF) | ((-imm) & MSK_16);
1396 }
1397 
MUL(int Rd,int Rs,int Rt)1398 void MIPS64Assembler::MUL(int Rd, int Rs, int Rt)
1399 {
1400     *mPC++ = (spec_op<<OP_SHF) | (mul_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
1401                         (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1402 }
1403 
MUH(int Rd,int Rs,int Rt)1404 void MIPS64Assembler::MUH(int Rd, int Rs, int Rt)
1405 {
1406     *mPC++ = (spec_op<<OP_SHF) | (muh_fn<<RE_SHF) | (sop30_fn<<FUNC_SHF) |
1407                         (Rs<<RS_SHF) | (Rt<<RT_SHF) | (Rd<<RD_SHF) ;
1408 }
1409 
CLO(int Rd,int Rs)1410 void MIPS64Assembler::CLO(int Rd, int Rs)
1411 {
1412     *mPC++ = (spec_op<<OP_SHF) | (17<<FUNC_SHF) |
1413                         (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
1414 }
1415 
CLZ(int Rd,int Rs)1416 void MIPS64Assembler::CLZ(int Rd, int Rs)
1417 {
1418     *mPC++ = (spec_op<<OP_SHF) | (16<<FUNC_SHF) |
1419                         (Rd<<RD_SHF) | (Rs<<RS_SHF) | (1<<RE_SHF);
1420 }
1421 
LD(int Rt,int Rbase,int16_t offset)1422 void MIPS64Assembler::LD(int Rt, int Rbase, int16_t offset)
1423 {
1424     *mPC++ = (ld_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1425 }
1426 
SD(int Rt,int Rbase,int16_t offset)1427 void MIPS64Assembler::SD(int Rt, int Rbase, int16_t offset)
1428 {
1429     *mPC++ = (sd_op<<OP_SHF) | (Rbase<<RS_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1430 }
1431 
LUI(int Rt,int16_t offset)1432 void MIPS64Assembler::LUI(int Rt, int16_t offset)
1433 {
1434     *mPC++ = (aui_op<<OP_SHF) | (Rt<<RT_SHF) | (offset & MSK_16);
1435 }
1436 
1437 
JR(int Rs)1438 void MIPS64Assembler::JR(int Rs)
1439 {
1440         *mPC++ = (spec_op<<OP_SHF) | (Rs<<RS_SHF) | (jalr_fn << FUNC_SHF);
1441         MIPS64Assembler::NOP();
1442 }
1443 
}; // namespace android
1445