/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <dirent.h>
#include <fstream>
#include <sys/types.h>
#include <map>

#include "gtest/gtest.h"
#include "utils/arm/assembler_thumb2.h"
#include "base/hex_dump.h"
#include "common_runtime_test.h"

namespace art {
namespace arm {

// Include the expected-results file (generated manually).
#include "assembler_thumb_test_expected.cc.inc"

#ifndef HAVE_ANDROID_OS
// This controls whether the results are printed to the
// screen or compared against the expected output.
// To generate new expected output, set this to true and
// copy the output into the .cc.inc file in the form
// of the other results.
//
// When this is false, the results are not printed to the
// output, but are compared against the expected results
// in the .cc.inc file.
static constexpr bool kPrintResults = false;
#endif

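// Ensure ANDROID_DATA is set, defaulting it to /tmp when the environment
// does not provide one; the runtime test infrastructure expects it.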
void SetAndroidData() {
  const char* data = getenv("ANDROID_DATA");
  if (data == nullptr) {
    setenv("ANDROID_DATA", "/tmp", 1);
  }
}

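// Compare two strings, treating any run of whitespace as insignificant so
// that differences in disassembly column alignment do not cause spurious
// mismatches. Returns 0 if the strings are equal under that rule.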
int CompareIgnoringSpace(const char* s1, const char* s2) {
  while (*s1 != '\0') {
    while (isspace(*s1)) ++s1;
    while (isspace(*s2)) ++s2;
    if (*s1 == '\0' || *s1 != *s2) {
      break;
    }
    ++s1;
    ++s2;
  }
  return *s1 - *s2;
}

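// Assemble the given code buffer with the target toolchain, disassemble it,
// and either print the disassembly (when kPrintResults is set) or compare it
// line by line against the expected output registered for 'testname'.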
void dump(std::vector<uint8_t>& code, const char* testname) {
  // This will only work on the host.  There is no as, objcopy or objdump on the
  // device.
#ifndef HAVE_ANDROID_OS
  static bool results_ok = false;
  static std::string toolsdir;

  if (!results_ok) {
    setup_results();
    toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
    SetAndroidData();
    results_ok = true;
  }

  ScratchFile file;

  const char* filename = file.GetFilename().c_str();

  std::ofstream out(filename);
  if (out) {
    out << ".section \".text\"\n";
    out << ".syntax unified\n";
    out << ".arch armv7-a\n";
    out << ".thumb\n";
    out << ".thumb_func\n";
    out << ".type " << testname << ", #function\n";
    out << ".global " << testname << "\n";
    out << testname << ":\n";
    out << ".fnstart\n";

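    // Emit the code buffer as raw .byte directives so that 'as' reproduces
    // exactly the bytes the Thumb2Assembler generated.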
    for (uint32_t i = 0; i < code.size(); ++i) {
      out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
    }
    out << ".fnend\n";
    out << ".size " << testname << ", .-" << testname << "\n";
  }
  out.close();

  char cmd[1024];

  // Assemble the .S
  snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
  system(cmd);

  // Remove the $d symbols to prevent the disassembler dumping the instructions
  // as .word
  snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
  system(cmd);

  // Disassemble.
  snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^  *[0-9a-f][0-9a-f]*:'",
    toolsdir.c_str(), filename);
  if (kPrintResults) {
    // Print the results only, don't check. This is used to generate new output for inserting
    // into the .inc file.
    system(cmd);
  } else {
    // Check the results match the appropriate results in the .inc file.
    FILE* fp = popen(cmd, "r");
    ASSERT_TRUE(fp != nullptr);

    std::map<std::string, const char**>::iterator results = test_results.find(testname);
    ASSERT_NE(results, test_results.end());

    uint32_t lineindex = 0;

    while (!feof(fp)) {
      char testline[256];
      char* s = fgets(testline, sizeof(testline), fp);
      if (s == nullptr) {
        break;
      }
      ASSERT_TRUE(results->second[lineindex] != nullptr) << "Too many lines of output";
      if (CompareIgnoringSpace(results->second[lineindex], testline) != 0) {
        LOG(FATAL) << "Output is not as expected at line " << lineindex
                   << ": expected \"" << results->second[lineindex]
                   << "\", got \"" << testline << "\"";
      }
      ++lineindex;
    }
    // Check that we are at the end.
    ASSERT_TRUE(results->second[lineindex] == nullptr);
    pclose(fp);
  }

  char buf[FILENAME_MAX];
  snprintf(buf, sizeof(buf), "%s.o", filename);
  unlink(buf);

  snprintf(buf, sizeof(buf), "%s.oo", filename);
  unlink(buf);
#endif
}

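// Shorthand so the tests below read like assembly listings:
// '__ mov(...)' expands to 'assembler->mov(...)'.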
#define __ assembler->

TEST(Thumb2AssemblerTest, SimpleMov) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  __ mov(R0, ShifterOperand(1));
  __ mov(R8, ShifterOperand(9));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMov");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleMov32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
  assembler->Force32Bit();

  __ mov(R0, ShifterOperand(R1));
  __ mov(R8, ShifterOperand(R9));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMov32");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleMovAdd) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(R1));
  __ add(R0, R1, ShifterOperand(R2));
  __ add(R0, R1, ShifterOperand());

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleMovAdd");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingRegister) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(R1));
  __ mvn(R0, ShifterOperand(R1));

  // 32 bit variants.
  __ add(R0, R1, ShifterOperand(R2));
  __ sub(R0, R1, ShifterOperand(R2));
  __ and_(R0, R1, ShifterOperand(R2));
  __ orr(R0, R1, ShifterOperand(R2));
  __ eor(R0, R1, ShifterOperand(R2));
  __ bic(R0, R1, ShifterOperand(R2));
  __ adc(R0, R1, ShifterOperand(R2));
  __ sbc(R0, R1, ShifterOperand(R2));
  __ rsb(R0, R1, ShifterOperand(R2));

  // 16 bit variants.
  __ add(R0, R1, ShifterOperand());
  __ sub(R0, R1, ShifterOperand());
  __ and_(R0, R0, ShifterOperand(R1));
  __ orr(R0, R0, ShifterOperand(R1));
  __ eor(R0, R0, ShifterOperand(R1));
  __ bic(R0, R0, ShifterOperand(R1));
  __ adc(R0, R0, ShifterOperand(R1));
  __ sbc(R0, R0, ShifterOperand(R1));
  __ rsb(R0, R0, ShifterOperand(R1));

  __ tst(R0, ShifterOperand(R1));
  __ teq(R0, ShifterOperand(R1));
  __ cmp(R0, ShifterOperand(R1));
  __ cmn(R0, ShifterOperand(R1));

  __ movs(R0, ShifterOperand(R1));
  __ mvns(R0, ShifterOperand(R1));

  // 32 bit variant due to high register (R12).
  __ add(R12, R1, ShifterOperand(R0));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingRegister");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingImmediate) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(0x55));
  __ mvn(R0, ShifterOperand(0x55));
  __ add(R0, R1, ShifterOperand(0x55));
  __ sub(R0, R1, ShifterOperand(0x55));
  __ and_(R0, R1, ShifterOperand(0x55));
  __ orr(R0, R1, ShifterOperand(0x55));
  __ eor(R0, R1, ShifterOperand(0x55));
  __ bic(R0, R1, ShifterOperand(0x55));
  __ adc(R0, R1, ShifterOperand(0x55));
  __ sbc(R0, R1, ShifterOperand(0x55));
  __ rsb(R0, R1, ShifterOperand(0x55));

  __ tst(R0, ShifterOperand(0x55));
  __ teq(R0, ShifterOperand(0x55));
  __ cmp(R0, ShifterOperand(0x55));
  __ cmn(R0, ShifterOperand(0x55));

  __ add(R0, R1, ShifterOperand(5));
  __ sub(R0, R1, ShifterOperand(5));

  __ movs(R0, ShifterOperand(0x55));
  __ mvns(R0, ShifterOperand(0x55));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingImmediate");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R0, ShifterOperand(0x550055));
  __ mvn(R0, ShifterOperand(0x550055));
  __ add(R0, R1, ShifterOperand(0x550055));
  __ sub(R0, R1, ShifterOperand(0x550055));
  __ and_(R0, R1, ShifterOperand(0x550055));
  __ orr(R0, R1, ShifterOperand(0x550055));
  __ eor(R0, R1, ShifterOperand(0x550055));
  __ bic(R0, R1, ShifterOperand(0x550055));
  __ adc(R0, R1, ShifterOperand(0x550055));
  __ sbc(R0, R1, ShifterOperand(0x550055));
  __ rsb(R0, R1, ShifterOperand(0x550055));

  __ tst(R0, ShifterOperand(0x550055));
  __ teq(R0, ShifterOperand(0x550055));
  __ cmp(R0, ShifterOperand(0x550055));
  __ cmn(R0, ShifterOperand(0x550055));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingModifiedImmediate");
  delete assembler;
}


TEST(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

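  // Thumb2 modified immediates encode an 8-bit value either rotated into
  // position or replicated across the word (0x00XY00XY, 0xXY00XY00,
  // 0xXYXYXYXY); each constant below exercises one of these encodings.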
  __ mov(R0, ShifterOperand(0x550055));
  __ mov(R0, ShifterOperand(0x55005500));
  __ mov(R0, ShifterOperand(0x55555555));
  __ mov(R0, ShifterOperand(0xd5000000));       // rotated to first position
  __ mov(R0, ShifterOperand(0x6a000000));       // rotated to second position
  __ mov(R0, ShifterOperand(0x350));            // rotated to 2nd last position
  __ mov(R0, ShifterOperand(0x1a8));            // rotated to last position

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingModifiedImmediates");
  delete assembler;
}

TEST(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mov(R3, ShifterOperand(R4, LSL, 4));
  __ mov(R3, ShifterOperand(R4, LSR, 5));
  __ mov(R3, ShifterOperand(R4, ASR, 6));
  __ mov(R3, ShifterOperand(R4, ROR, 7));
  __ mov(R3, ShifterOperand(R4, ROR));

  // 32 bit variants.
  __ mov(R8, ShifterOperand(R4, LSL, 4));
  __ mov(R8, ShifterOperand(R4, LSR, 5));
  __ mov(R8, ShifterOperand(R4, ASR, 6));
  __ mov(R8, ShifterOperand(R4, ROR, 7));
  __ mov(R8, ShifterOperand(R4, RRX));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "DataProcessingShiftedRegister");
  delete assembler;
}


TEST(Thumb2AssemblerTest, BasicLoad) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R3, Address(R4, 24));
  __ ldrb(R3, Address(R4, 24));
  __ ldrh(R3, Address(R4, 24));
  __ ldrsb(R3, Address(R4, 24));
  __ ldrsh(R3, Address(R4, 24));

  __ ldr(R3, Address(SP, 24));

  // 32 bit variants.
  __ ldr(R8, Address(R4, 24));
  __ ldrb(R8, Address(R4, 24));
  __ ldrh(R8, Address(R4, 24));
  __ ldrsb(R8, Address(R4, 24));
  __ ldrsh(R8, Address(R4, 24));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicLoad");
  delete assembler;
}


TEST(Thumb2AssemblerTest, BasicStore) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R3, Address(R4, 24));
  __ strb(R3, Address(R4, 24));
  __ strh(R3, Address(R4, 24));

  __ str(R3, Address(SP, 24));

  // 32 bit variants.
  __ str(R8, Address(R4, 24));
  __ strb(R8, Address(R4, 24));
  __ strh(R8, Address(R4, 24));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicStore");
  delete assembler;
}

TEST(Thumb2AssemblerTest, ComplexLoad) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexLoad");
  delete assembler;
}


TEST(Thumb2AssemblerTest, ComplexStore) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R3, Address(R4, 24, Address::Mode::Offset));
  __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ strb(R3, Address(R4, 24, Address::Mode::Offset));
  __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  __ strh(R3, Address(R4, 24, Address::Mode::Offset));
  __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
  __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
  __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexStore");
  delete assembler;
}

TEST(Thumb2AssemblerTest, NegativeLoadStore) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ str(R3, Address(R4, -24, Address::Mode::Offset));
  __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ strb(R3, Address(R4, -24, Address::Mode::Offset));
  __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  __ strh(R3, Address(R4, -24, Address::Mode::Offset));
  __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
  __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
  __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "NegativeLoadStore");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleLoadStoreDual) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleLoadStoreDual");
  delete assembler;
}

TEST(Thumb2AssemblerTest, ComplexLoadStoreDual) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ strd(R2, Address(R0, 24, Address::Mode::Offset));
  __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
  __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
  __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));

  __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
  __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "ComplexLoadStoreDual");
  delete assembler;
}

TEST(Thumb2AssemblerTest, NegativeLoadStoreDual) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ strd(R2, Address(R0, -24, Address::Mode::Offset));
  __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
  __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
  __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));

  __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
  __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
  __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "NegativeLoadStoreDual");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SimpleBranch) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);
  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SimpleBranch");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LongBranch) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));
  assembler->Force32Bit();
  // 32 bit branches.
  Label l1;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(1));
  __ b(&l1);

  Label l2;
  __ b(&l2);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l2);
  __ mov(R0, ShifterOperand(3));

  Label l3;
  __ mov(R0, ShifterOperand(2));
  __ Bind(&l3);
  __ mov(R1, ShifterOperand(1));
  __ b(&l3, EQ);

  Label l4;
  __ b(&l4, EQ);
  __ mov(R1, ShifterOperand(2));
  __ Bind(&l4);
  __ mov(R0, ShifterOperand(3));

  // 2 linked labels.
  Label l5;
  __ b(&l5);
  __ mov(R1, ShifterOperand(4));
  __ b(&l5);
  __ mov(R1, ShifterOperand(5));
  __ Bind(&l5);
  __ mov(R0, ShifterOperand(6));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LongBranch");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadMultiple) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

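  // ldm/stm transfer a register set given as a bit mask. DB_W and IA_W name
  // the addressing mode (decrement-before / increment-after), with the _W
  // suffix requesting base-register writeback.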
  // 16 bit.
  __ ldm(DB_W, R4, (1 << R0 | 1 << R3));

  // 32 bit.
  __ ldm(DB_W, R4, (1 << LR | 1 << R11));
  __ ldm(DB, R4, (1 << LR | 1 << R11));

  // Single reg is converted to ldr.
  __ ldm(DB_W, R4, (1 << R5));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadMultiple");
  delete assembler;
}

TEST(Thumb2AssemblerTest, StoreMultiple) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ stm(IA_W, R4, (1 << R0 | 1 << R3));

  // 32 bit.
  __ stm(IA_W, R4, (1 << LR | 1 << R11));
  __ stm(IA, R4, (1 << LR | 1 << R11));

  // Single reg is converted to str.
  __ stm(IA_W, R4, (1 << R5));
  __ stm(IA, R4, (1 << R5));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StoreMultiple");
  delete assembler;
}

TEST(Thumb2AssemblerTest, MovWMovT) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ movw(R4, 0);         // 16 bit.
  __ movw(R4, 0x34);      // 16 bit.
  __ movw(R9, 0x34);      // 32 bit due to high register.
  __ movw(R3, 0x1234);    // 32 bit due to large value.
  __ movw(R9, 0xffff);    // 32 bit due to large value and high register.

  // Always 32 bit.
  __ movt(R0, 0);
  __ movt(R0, 0x1234);
  __ movt(R1, 0xffff);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "MovWMovT");
  delete assembler;
}

TEST(Thumb2AssemblerTest, SpecialAddSub) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ add(R2, SP, ShifterOperand(0x50));   // 16 bit.
  __ add(SP, SP, ShifterOperand(0x50));   // 16 bit.
  __ add(R8, SP, ShifterOperand(0x50));   // 32 bit.

  __ add(R2, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.
  __ add(SP, SP, ShifterOperand(0xf00));  // 32 bit due to imm size.

  __ sub(SP, SP, ShifterOperand(0x50));     // 16 bit.
  __ sub(R0, SP, ShifterOperand(0x50));     // 32 bit.
  __ sub(R8, SP, ShifterOperand(0x50));     // 32 bit.

  __ sub(SP, SP, ShifterOperand(0xf00));   // 32 bit due to imm size.

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "SpecialAddSub");
  delete assembler;
}

TEST(Thumb2AssemblerTest, StoreToOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

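  // StoreToOffset is a macro-instruction: when the immediate offset is too
  // large to encode directly (see the 0x2000 case below), the value is
  // expected to be materialized via a scratch register first.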
  __ StoreToOffset(kStoreWord, R2, R4, 12);     // Simple.
  __ StoreToOffset(kStoreWord, R2, R4, 0x2000);     // Offset too big.
  __ StoreToOffset(kStoreWord, R0, R12, 12);
  __ StoreToOffset(kStoreHalfword, R0, R12, 12);
  __ StoreToOffset(kStoreByte, R2, R12, 12);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StoreToOffset");
  delete assembler;
}


TEST(Thumb2AssemblerTest, IfThen) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

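  // An IT (If-Then) instruction makes up to four following instructions
  // conditional. kItThen/kItElse spell out the T/E pattern, and each
  // instruction in the block must carry the matching condition (the base
  // condition for T slots, its inverse for E slots).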
  __ it(EQ);
  __ mov(R1, ShifterOperand(1), EQ);

  __ it(EQ, kItThen);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);

  __ it(EQ, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);

  __ it(EQ, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItElse, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), NE);
  __ mov(R3, ShifterOperand(3), NE);

  __ it(EQ, kItThen, kItThen, kItElse);
  __ mov(R1, ShifterOperand(1), EQ);
  __ mov(R2, ShifterOperand(2), EQ);
  __ mov(R3, ShifterOperand(3), EQ);
  __ mov(R4, ShifterOperand(4), NE);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "IfThen");
  delete assembler;
}

TEST(Thumb2AssemblerTest, CbzCbnz) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

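  // cbz/cbnz compare a low register against zero and branch forward only;
  // both are 16-bit instructions with a maximum reach of 126 bytes.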
  Label l1;
  __ cbz(R2, &l1);
  __ mov(R1, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l1);
  __ mov(R2, ShifterOperand(4));

  Label l2;
  __ cbnz(R2, &l2);
  __ mov(R8, ShifterOperand(3));
  __ mov(R2, ShifterOperand(3));
  __ Bind(&l2);
  __ mov(R2, ShifterOperand(4));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "CbzCbnz");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Multiply) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ mul(R0, R1, R0);
  __ mul(R0, R1, R2);
  __ mul(R8, R9, R8);
  __ mul(R8, R9, R10);

  __ mla(R0, R1, R2, R3);
  __ mla(R8, R9, R8, R9);

  __ mls(R0, R1, R2, R3);
  __ mls(R8, R9, R8, R9);

  __ umull(R0, R1, R2, R3);
  __ umull(R8, R9, R10, R11);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Multiply");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Divide) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ sdiv(R0, R1, R2);
  __ sdiv(R8, R9, R10);

  __ udiv(R0, R1, R2);
  __ udiv(R8, R9, R10);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Divide");
  delete assembler;
}

TEST(Thumb2AssemblerTest, VMov) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

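  // vmov with a floating-point immediate only accepts constants that fit the
  // 8-bit VFP immediate encoding, of which 1.0 is one.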
  __ vmovs(S1, 1.0);
  __ vmovd(D1, 1.0);

  __ vmovs(S1, S2);
  __ vmovd(D1, D2);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "VMov");
  delete assembler;
}


TEST(Thumb2AssemblerTest, BasicFloatingPoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vadds(S0, S1, S2);
  __ vsubs(S0, S1, S2);
  __ vmuls(S0, S1, S2);
  __ vmlas(S0, S1, S2);
  __ vmlss(S0, S1, S2);
  __ vdivs(S0, S1, S2);
  __ vabss(S0, S1);
  __ vnegs(S0, S1);
  __ vsqrts(S0, S1);

  __ vaddd(D0, D1, D2);
  __ vsubd(D0, D1, D2);
  __ vmuld(D0, D1, D2);
  __ vmlad(D0, D1, D2);
  __ vmlsd(D0, D1, D2);
  __ vdivd(D0, D1, D2);
  __ vabsd(D0, D1);
  __ vnegd(D0, D1);
  __ vsqrtd(D0, D1);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "BasicFloatingPoint");
  delete assembler;
}

TEST(Thumb2AssemblerTest, FloatingPointConversions) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vcvtsd(S2, D2);
  __ vcvtds(D2, S2);

  __ vcvtis(S1, S2);
  __ vcvtsi(S1, S2);

  __ vcvtid(S1, D2);
  __ vcvtdi(D1, S2);

  __ vcvtus(S1, S2);
  __ vcvtsu(S1, S2);

  __ vcvtud(S1, D2);
  __ vcvtdu(D1, S2);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "FloatingPointConversions");
  delete assembler;
}

TEST(Thumb2AssemblerTest, FloatingPointComparisons) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vcmps(S0, S1);
  __ vcmpd(D0, D1);

  __ vcmpsz(S2);
  __ vcmpdz(D2);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "FloatingPointComparisons");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Calls) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ blx(LR);
  __ bx(LR);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Calls");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Breakpoint) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ bkpt(0);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Breakpoint");
  delete assembler;
}

TEST(Thumb2AssemblerTest, StrR1) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ str(R1, Address(SP, 68));
  __ str(R1, Address(SP, 1068));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "StrR1");
  delete assembler;
}

TEST(Thumb2AssemblerTest, VPushPop) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ vpushs(S2, 4);
  __ vpushd(D2, 4);

  __ vpops(S2, 4);
  __ vpopd(D2, 4);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "VPushPop");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Max16BitBranch) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

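  // A 16-bit unconditional branch encodes a signed 11-bit halfword offset,
  // reaching at most 2046 bytes forward of the PC; the loop below emits
  // exactly enough 16-bit instructions for the branch to sit at that limit.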
  Label l1;
  __ b(&l1);
  for (int i = 0; i < (1 << 11); i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Max16BitBranch");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Branch32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

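  // Two bytes more padding than Max16BitBranch: the forward branch no longer
  // fits the 16-bit encoding and must be relocated to the 32-bit form.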
  Label l1;
  __ b(&l1);
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Branch32");
  delete assembler;
}

TEST(Thumb2AssemblerTest, CompareAndBranchMax) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

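  // cbz encodes a 6-bit halfword offset; the loop below places the label at
  // its maximum forward reach of 126 bytes from the PC.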
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 7); i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "CompareAndBranchMax");
  delete assembler;
}

TEST(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

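  // Two bytes past the cbz limit: the cbz is expected to be relocated into a
  // compare against zero followed by a 16-bit conditional branch.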
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 7) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "CompareAndBranchRelocation16");
  delete assembler;
}

TEST(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

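  // Far past both the cbz limit and the 16-bit conditional branch range, so
  // the relocated branch needs the 32-bit encoding.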
  Label l1;
  __ cbz(R4, &l1);
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "CompareAndBranchRelocation32");
  delete assembler;
}

TEST(Thumb2AssemblerTest, MixedBranch32) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

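  // Exercise a forward and a backward branch across a region large enough to
  // force the 32-bit branch encoding in both directions.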
  Label l1;
  Label l2;
  __ b(&l1);      // Forwards.
  __ Bind(&l2);

  // Space to force relocation.
  for (int i = 0; i < (1 << 11) + 2; i += 2) {
    __ mov(R3, ShifterOperand(i & 0xff));
  }
  __ b(&l2);      // Backwards.
  __ Bind(&l1);
  __ mov(R1, ShifterOperand(R2));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "MixedBranch32");
  delete assembler;
}

TEST(Thumb2AssemblerTest, Shifts) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ Lsl(R0, R1, 5);
  __ Lsr(R0, R1, 5);
  __ Asr(R0, R1, 5);

  __ Lsl(R0, R0, R1);
  __ Lsr(R0, R0, R1);
  __ Asr(R0, R0, R1);

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5);
  __ Lsr(R0, R8, 5);
  __ Asr(R8, R1, 5);
  __ Ror(R0, R8, 5);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2);
  __ Lsr(R0, R1, R2);
  __ Asr(R0, R1, R2);
  __ Ror(R0, R1, R2);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2);
  __ Lsr(R0, R8, R2);
  __ Asr(R0, R1, R8);

  // S bit (all 32 bit).

  // 32 bit due to high registers.
  __ Lsl(R8, R1, 5, true);
  __ Lsr(R0, R8, 5, true);
  __ Asr(R8, R1, 5, true);
  __ Ror(R0, R8, 5, true);

  // 32 bit due to different Rd and Rn.
  __ Lsl(R0, R1, R2, true);
  __ Lsr(R0, R1, R2, true);
  __ Asr(R0, R1, R2, true);
  __ Ror(R0, R1, R2, true);

  // 32 bit due to use of high registers.
  __ Lsl(R8, R1, R2, true);
  __ Lsr(R0, R8, R2, true);
  __ Asr(R0, R1, R8, true);

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "Shifts");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadStoreRegOffset) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  // 16 bit.
  __ ldr(R0, Address(R1, R2));
  __ str(R0, Address(R1, R2));

  // 32 bit due to shift.
  __ ldr(R0, Address(R1, R2, LSL, 1));
  __ str(R0, Address(R1, R2, LSL, 1));

  __ ldr(R0, Address(R1, R2, LSL, 3));
  __ str(R0, Address(R1, R2, LSL, 3));

  // 32 bit due to high register use.
  __ ldr(R8, Address(R1, R2));
  __ str(R8, Address(R1, R2));

  __ ldr(R1, Address(R8, R2));
  __ str(R2, Address(R8, R2));

  __ ldr(R0, Address(R1, R8));
  __ str(R0, Address(R1, R8));

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreRegOffset");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadStoreLiteral) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

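  // An Address built from an offset alone (no base register) denotes a
  // PC-relative literal access.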
  __ ldr(R0, Address(4));
  __ str(R0, Address(4));

  __ ldr(R0, Address(-8));
  __ str(R0, Address(-8));

  // Limits.
  __ ldr(R0, Address(0x3ff));       // 10 bits (16 bit).
  __ ldr(R0, Address(0x7ff));       // 11 bits (32 bit).
  __ str(R0, Address(0x3ff));       // 32 bit (no 16 bit str(literal)).
  __ str(R0, Address(0x7ff));       // 11 bits (32 bit).

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreLiteral");
  delete assembler;
}

TEST(Thumb2AssemblerTest, LoadStoreLimits) {
  arm::Thumb2Assembler* assembler = static_cast<arm::Thumb2Assembler*>(Assembler::Create(kThumb2));

  __ ldr(R0, Address(R4, 124));     // 16 bit.
  __ ldr(R0, Address(R4, 128));     // 32 bit.

  __ ldrb(R0, Address(R4, 31));     // 16 bit.
  __ ldrb(R0, Address(R4, 32));     // 32 bit.

  __ ldrh(R0, Address(R4, 62));     // 16 bit.
  __ ldrh(R0, Address(R4, 64));     // 32 bit.

  __ ldrsb(R0, Address(R4, 31));     // 32 bit.
  __ ldrsb(R0, Address(R4, 32));     // 32 bit.

  __ ldrsh(R0, Address(R4, 62));     // 32 bit.
  __ ldrsh(R0, Address(R4, 64));     // 32 bit.

  __ str(R0, Address(R4, 124));     // 16 bit.
  __ str(R0, Address(R4, 128));     // 32 bit.

  __ strb(R0, Address(R4, 31));     // 16 bit.
  __ strb(R0, Address(R4, 32));     // 32 bit.

  __ strh(R0, Address(R4, 62));     // 16 bit.
  __ strh(R0, Address(R4, 64));     // 32 bit.

  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);
  dump(managed_code, "LoadStoreLimits");
  delete assembler;
}

#undef __
}  // namespace arm
}  // namespace art