1 /*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <dirent.h>
18 #include <errno.h>
19 #include <fstream>
20 #include <map>
21 #include <string.h>
22 #include <sys/types.h>
23
24 #include "gtest/gtest.h"
25 #include "utils/arm/assembler_thumb2.h"
26
27 #include "jni/quick/calling_convention.h"
28 #include "utils/arm/jni_macro_assembler_arm_vixl.h"
29
30 #include "base/hex_dump.h"
31 #include "common_runtime_test.h"
32
33 namespace art {
34 namespace arm {
35
36 // Include results file (generated manually)
37 #include "assembler_thumb_test_expected.cc.inc"
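// It is expected to define setup_results() and the test_results map used by the helpers below.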
38
39 #ifndef ART_TARGET_ANDROID
40 // This controls whether the results are printed to the
41 // screen or compared against the expected output.
42 // To generate new expected output, set this to true and
43 // copy the output into the .cc.inc file in the form
44 // of the other results.
45 //
46 // When this is false, the results are not printed to the
47 // output, but are compared against the expected results
48 // in the .cc.inc file.
49 static constexpr bool kPrintResults = false;
50 #endif
51
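// Ensure ANDROID_DATA is set, defaulting to /tmp if the environment does not provide it.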
52 void SetAndroidData() {
53 const char* data = getenv("ANDROID_DATA");
54 if (data == nullptr) {
55 setenv("ANDROID_DATA", "/tmp", 1);
56 }
57 }
58
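// Compare two strings while ignoring whitespace; returns 0 if they match.
// Used to compare the disassembled output against the expected lines.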
59 int CompareIgnoringSpace(const char* s1, const char* s2) {
60 while (*s1 != '\0') {
61 while (isspace(*s1)) ++s1;
62 while (isspace(*s2)) ++s2;
63 if (*s1 == '\0' || *s1 != *s2) {
64 break;
65 }
66 ++s1;
67 ++s2;
68 }
69 return *s1 - *s2;
70 }
71
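// Populate the expected-results map (from the generated .cc.inc file) on first use.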
72 void InitResults() {
73 if (test_results.empty()) {
74 setup_results();
75 }
76 }
77
78 std::string GetToolsDir() {
79 #ifndef ART_TARGET_ANDROID
80 // This will only work on the host. There is no as, objcopy or objdump on the device.
81 static std::string toolsdir;
82
83 if (toolsdir.empty()) {
84 setup_results();
85 toolsdir = CommonRuntimeTest::GetAndroidTargetToolsDir(kThumb2);
86 SetAndroidData();
87 }
88
89 return toolsdir;
90 #else
91 return std::string();
92 #endif
93 }
94
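// Write the code bytes into a scratch assembly file, assemble and disassemble it with the
// target toolchain, then either print the disassembly (kPrintResults) or compare it against
// the expected lines for this test.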
95 void DumpAndCheck(std::vector<uint8_t>& code, const char* testname, const char* const* results) {
96 #ifndef ART_TARGET_ANDROID
97 static std::string toolsdir = GetToolsDir();
98
99 ScratchFile file;
100
101 const char* filename = file.GetFilename().c_str();
102
103 std::ofstream out(filename);
104 if (out) {
105 out << ".section \".text\"\n";
106 out << ".syntax unified\n";
107 out << ".arch armv7-a\n";
108 out << ".thumb\n";
109 out << ".thumb_func\n";
110 out << ".type " << testname << ", #function\n";
111 out << ".global " << testname << "\n";
112 out << testname << ":\n";
113 out << ".fnstart\n";
114
115 for (uint32_t i = 0 ; i < code.size(); ++i) {
116 out << ".byte " << (static_cast<int>(code[i]) & 0xff) << "\n";
117 }
118 out << ".fnend\n";
119 out << ".size " << testname << ", .-" << testname << "\n";
120 }
121 out.close();
122
123 char cmd[1024];
124
125 // Assemble the .S
126 snprintf(cmd, sizeof(cmd), "%sas %s -o %s.o", toolsdir.c_str(), filename, filename);
127 int cmd_result = system(cmd);
128 ASSERT_EQ(cmd_result, 0) << strerror(errno);
129
130 // Remove the $d symbols to prevent the disassembler dumping the instructions
131 // as .word
132 snprintf(cmd, sizeof(cmd), "%sobjcopy -N '$d' %s.o %s.oo", toolsdir.c_str(), filename, filename);
133 int cmd_result2 = system(cmd);
134 ASSERT_EQ(cmd_result2, 0) << strerror(errno);
135
136 // Disassemble.
137
138 snprintf(cmd, sizeof(cmd), "%sobjdump -d %s.oo | grep '^ *[0-9a-f][0-9a-f]*:'",
139 toolsdir.c_str(), filename);
140 if (kPrintResults) {
141 // Print the results only, don't check. This is used to generate new output for inserting
142 // into the .inc file, so let's add the appropriate prefix/suffix needed in the C++ code.
143 strcat(cmd, " | sed '-es/^/ \"/' | sed '-es/$/\\\\n\",/'");
144 int cmd_result3 = system(cmd);
145 ASSERT_EQ(cmd_result3, 0) << strerror(errno);
146 } else {
147 // Check the results match the appropriate results in the .inc file.
148 FILE *fp = popen(cmd, "r");
149 ASSERT_TRUE(fp != nullptr);
150
151 uint32_t lineindex = 0;
152
153 while (!feof(fp)) {
154 char testline[256];
155 char *s = fgets(testline, sizeof(testline), fp);
156 if (s == nullptr) {
157 break;
158 }
159 if (CompareIgnoringSpace(results[lineindex], testline) != 0) {
160 LOG(FATAL) << "Output is not as expected at line " << lineindex
161 << ": expected '" << results[lineindex] << "', got '" << testline << "', test name: " << testname;
162 }
163 ++lineindex;
164 }
165 // Check that we are at the end.
166 ASSERT_TRUE(results[lineindex] == nullptr);
167 pclose(fp);  // Streams opened with popen() must be closed with pclose().
168 }
169
170 char buf[FILENAME_MAX];
171 snprintf(buf, sizeof(buf), "%s.o", filename);
172 unlink(buf);
173
174 snprintf(buf, sizeof(buf), "%s.oo", filename);
175 unlink(buf);
176 #endif // ART_TARGET_ANDROID
177 }
178
179 #define __ assembler->
180
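// Finalize the assembled code into a buffer and run it through DumpAndCheck.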
181 void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname,
182 const char* const* results) {
183 __ FinalizeCode();
184 size_t cs = __ CodeSize();
185 std::vector<uint8_t> managed_code(cs);
186 MemoryRegion code(&managed_code[0], managed_code.size());
187 __ FinalizeInstructions(code);
188
189 DumpAndCheck(managed_code, testname, results);
190 }
191
192 void EmitAndCheck(arm::Thumb2Assembler* assembler, const char* testname) {
193 InitResults();
194 std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
195 ASSERT_NE(results, test_results.end());
196
197 EmitAndCheck(assembler, testname, results->second);
198 }
199
200 #undef __
201
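// Test fixture providing a fresh arena-backed Thumb2Assembler for each test.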
202 class Thumb2AssemblerTest : public ::testing::Test {
203 public:
204 Thumb2AssemblerTest() : pool(), arena(&pool), assembler(&arena) { }
205
206 ArenaPool pool;
207 ArenaAllocator arena;
208 arm::Thumb2Assembler assembler;
209 };
210
211 #define __ assembler.
212
213 TEST_F(Thumb2AssemblerTest, SimpleMov) {
214 __ movs(R0, ShifterOperand(R1));
215 __ mov(R0, ShifterOperand(R1));
216 __ mov(R8, ShifterOperand(R9));
217
218 __ mov(R0, ShifterOperand(1));
219 __ mov(R8, ShifterOperand(9));
220
221 EmitAndCheck(&assembler, "SimpleMov");
222 }
223
224 TEST_F(Thumb2AssemblerTest, SimpleMov32) {
225 __ Force32Bit();
226
227 __ mov(R0, ShifterOperand(R1));
228 __ mov(R8, ShifterOperand(R9));
229
230 EmitAndCheck(&assembler, "SimpleMov32");
231 }
232
233 TEST_F(Thumb2AssemblerTest, SimpleMovAdd) {
234 __ mov(R0, ShifterOperand(R1));
235 __ adds(R0, R1, ShifterOperand(R2));
236 __ add(R0, R1, ShifterOperand(0));
237
238 EmitAndCheck(&assembler, "SimpleMovAdd");
239 }
240
241 TEST_F(Thumb2AssemblerTest, DataProcessingRegister) {
242 // 32 bit variants using low registers.
243 __ mvn(R0, ShifterOperand(R1), AL, kCcKeep);
244 __ add(R0, R1, ShifterOperand(R2), AL, kCcKeep);
245 __ sub(R0, R1, ShifterOperand(R2), AL, kCcKeep);
246 __ and_(R0, R1, ShifterOperand(R2), AL, kCcKeep);
247 __ orr(R0, R1, ShifterOperand(R2), AL, kCcKeep);
248 __ orn(R0, R1, ShifterOperand(R2), AL, kCcKeep);
249 __ eor(R0, R1, ShifterOperand(R2), AL, kCcKeep);
250 __ bic(R0, R1, ShifterOperand(R2), AL, kCcKeep);
251 __ adc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
252 __ sbc(R0, R1, ShifterOperand(R2), AL, kCcKeep);
253 __ rsb(R0, R1, ShifterOperand(R2), AL, kCcKeep);
254 __ teq(R0, ShifterOperand(R1));
255
256 // 16 bit variants using low registers.
257 __ movs(R0, ShifterOperand(R1));
258 __ mov(R0, ShifterOperand(R1), AL, kCcKeep);
259 __ mvns(R0, ShifterOperand(R1));
260 __ add(R0, R0, ShifterOperand(R1), AL, kCcKeep);
261 __ adds(R0, R1, ShifterOperand(R2));
262 __ subs(R0, R1, ShifterOperand(R2));
263 __ adcs(R0, R0, ShifterOperand(R1));
264 __ sbcs(R0, R0, ShifterOperand(R1));
265 __ ands(R0, R0, ShifterOperand(R1));
266 __ orrs(R0, R0, ShifterOperand(R1));
267 __ eors(R0, R0, ShifterOperand(R1));
268 __ bics(R0, R0, ShifterOperand(R1));
269 __ tst(R0, ShifterOperand(R1));
270 __ cmp(R0, ShifterOperand(R1));
271 __ cmn(R0, ShifterOperand(R1));
272
273 // 16-bit variants using high registers.
274 __ mov(R1, ShifterOperand(R8), AL, kCcKeep);
275 __ mov(R9, ShifterOperand(R0), AL, kCcKeep);
276 __ mov(R8, ShifterOperand(R9), AL, kCcKeep);
277 __ add(R1, R1, ShifterOperand(R8), AL, kCcKeep);
278 __ add(R9, R9, ShifterOperand(R0), AL, kCcKeep);
279 __ add(R8, R8, ShifterOperand(R9), AL, kCcKeep);
280 __ cmp(R0, ShifterOperand(R9));
281 __ cmp(R8, ShifterOperand(R1));
282 __ cmp(R9, ShifterOperand(R8));
283
284 // The 16-bit RSBS Rd, Rn, #0 (also known as NEGS Rd, Rn) is specified with
285 // an immediate (0) but encoded without one, so we test it here.
286 __ rsbs(R0, R1, ShifterOperand(0));
287 __ rsbs(R0, R0, ShifterOperand(0)); // Check Rd == Rn code path.
288
289 // 32 bit variants using high registers that would be 16-bit if using low registers.
290 __ movs(R0, ShifterOperand(R8));
291 __ mvns(R0, ShifterOperand(R8));
292 __ add(R0, R1, ShifterOperand(R8), AL, kCcKeep);
293 __ adds(R0, R1, ShifterOperand(R8));
294 __ subs(R0, R1, ShifterOperand(R8));
295 __ adcs(R0, R0, ShifterOperand(R8));
296 __ sbcs(R0, R0, ShifterOperand(R8));
297 __ ands(R0, R0, ShifterOperand(R8));
298 __ orrs(R0, R0, ShifterOperand(R8));
299 __ eors(R0, R0, ShifterOperand(R8));
300 __ bics(R0, R0, ShifterOperand(R8));
301 __ tst(R0, ShifterOperand(R8));
302 __ cmn(R0, ShifterOperand(R8));
303 __ rsbs(R0, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit.
304 __ rsbs(R8, R8, ShifterOperand(0)); // Check that this is not emitted as 16-bit (Rd == Rn).
305
306 // 32-bit variants of instructions that would be 16-bit outside IT block.
307 __ it(arm::EQ);
308 __ mvns(R0, ShifterOperand(R1), arm::EQ);
309 __ it(arm::EQ);
310 __ adds(R0, R1, ShifterOperand(R2), arm::EQ);
311 __ it(arm::EQ);
312 __ subs(R0, R1, ShifterOperand(R2), arm::EQ);
313 __ it(arm::EQ);
314 __ adcs(R0, R0, ShifterOperand(R1), arm::EQ);
315 __ it(arm::EQ);
316 __ sbcs(R0, R0, ShifterOperand(R1), arm::EQ);
317 __ it(arm::EQ);
318 __ ands(R0, R0, ShifterOperand(R1), arm::EQ);
319 __ it(arm::EQ);
320 __ orrs(R0, R0, ShifterOperand(R1), arm::EQ);
321 __ it(arm::EQ);
322 __ eors(R0, R0, ShifterOperand(R1), arm::EQ);
323 __ it(arm::EQ);
324 __ bics(R0, R0, ShifterOperand(R1), arm::EQ);
325
326 // 16-bit variants of instructions that would be 32-bit outside IT block.
327 __ it(arm::EQ);
328 __ mvn(R0, ShifterOperand(R1), arm::EQ, kCcKeep);
329 __ it(arm::EQ);
330 __ add(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
331 __ it(arm::EQ);
332 __ sub(R0, R1, ShifterOperand(R2), arm::EQ, kCcKeep);
333 __ it(arm::EQ);
334 __ adc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
335 __ it(arm::EQ);
336 __ sbc(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
337 __ it(arm::EQ);
338 __ and_(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
339 __ it(arm::EQ);
340 __ orr(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
341 __ it(arm::EQ);
342 __ eor(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
343 __ it(arm::EQ);
344 __ bic(R0, R0, ShifterOperand(R1), arm::EQ, kCcKeep);
345
346 // 16 bit variants selected for the default kCcDontCare.
347 __ mov(R0, ShifterOperand(R1));
348 __ mvn(R0, ShifterOperand(R1));
349 __ add(R0, R0, ShifterOperand(R1));
350 __ add(R0, R1, ShifterOperand(R2));
351 __ sub(R0, R1, ShifterOperand(R2));
352 __ adc(R0, R0, ShifterOperand(R1));
353 __ sbc(R0, R0, ShifterOperand(R1));
354 __ and_(R0, R0, ShifterOperand(R1));
355 __ orr(R0, R0, ShifterOperand(R1));
356 __ eor(R0, R0, ShifterOperand(R1));
357 __ bic(R0, R0, ShifterOperand(R1));
358 __ mov(R1, ShifterOperand(R8));
359 __ mov(R9, ShifterOperand(R0));
360 __ mov(R8, ShifterOperand(R9));
361 __ add(R1, R1, ShifterOperand(R8));
362 __ add(R9, R9, ShifterOperand(R0));
363 __ add(R8, R8, ShifterOperand(R9));
364 __ rsb(R0, R1, ShifterOperand(0));
365 __ rsb(R0, R0, ShifterOperand(0));
366
367 // And an arbitrary 32-bit instruction using IP.
368 __ add(R12, R1, ShifterOperand(R0), AL, kCcKeep);
369
370 EmitAndCheck(&assembler, "DataProcessingRegister");
371 }
372
373 TEST_F(Thumb2AssemblerTest, DataProcessingImmediate) {
374 __ mov(R0, ShifterOperand(0x55));
375 __ mvn(R0, ShifterOperand(0x55));
376 __ add(R0, R1, ShifterOperand(0x55));
377 __ sub(R0, R1, ShifterOperand(0x55));
378 __ and_(R0, R1, ShifterOperand(0x55));
379 __ orr(R0, R1, ShifterOperand(0x55));
380 __ orn(R0, R1, ShifterOperand(0x55));
381 __ eor(R0, R1, ShifterOperand(0x55));
382 __ bic(R0, R1, ShifterOperand(0x55));
383 __ adc(R0, R1, ShifterOperand(0x55));
384 __ sbc(R0, R1, ShifterOperand(0x55));
385 __ rsb(R0, R1, ShifterOperand(0x55));
386
387 __ tst(R0, ShifterOperand(0x55));
388 __ teq(R0, ShifterOperand(0x55));
389 __ cmp(R0, ShifterOperand(0x55));
390 __ cmn(R0, ShifterOperand(0x55));
391
392 __ add(R0, R1, ShifterOperand(5));
393 __ sub(R0, R1, ShifterOperand(5));
394
395 __ movs(R0, ShifterOperand(0x55));
396 __ mvns(R0, ShifterOperand(0x55));
397
398 __ adds(R0, R1, ShifterOperand(5));
399 __ subs(R0, R1, ShifterOperand(5));
400
401 EmitAndCheck(&assembler, "DataProcessingImmediate");
402 }
403
404 TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediate) {
405 __ mov(R0, ShifterOperand(0x550055));
406 __ mvn(R0, ShifterOperand(0x550055));
407 __ add(R0, R1, ShifterOperand(0x550055));
408 __ sub(R0, R1, ShifterOperand(0x550055));
409 __ and_(R0, R1, ShifterOperand(0x550055));
410 __ orr(R0, R1, ShifterOperand(0x550055));
411 __ orn(R0, R1, ShifterOperand(0x550055));
412 __ eor(R0, R1, ShifterOperand(0x550055));
413 __ bic(R0, R1, ShifterOperand(0x550055));
414 __ adc(R0, R1, ShifterOperand(0x550055));
415 __ sbc(R0, R1, ShifterOperand(0x550055));
416 __ rsb(R0, R1, ShifterOperand(0x550055));
417
418 __ tst(R0, ShifterOperand(0x550055));
419 __ teq(R0, ShifterOperand(0x550055));
420 __ cmp(R0, ShifterOperand(0x550055));
421 __ cmn(R0, ShifterOperand(0x550055));
422
423 EmitAndCheck(&assembler, "DataProcessingModifiedImmediate");
424 }
425
426
427 TEST_F(Thumb2AssemblerTest, DataProcessingModifiedImmediates) {
428 __ mov(R0, ShifterOperand(0x550055));
429 __ mov(R0, ShifterOperand(0x55005500));
430 __ mov(R0, ShifterOperand(0x55555555));
431 __ mov(R0, ShifterOperand(0xd5000000)); // rotated to first position
432 __ mov(R0, ShifterOperand(0x6a000000)); // rotated to second position
433 __ mov(R0, ShifterOperand(0x350)); // rotated to 2nd last position
434 __ mov(R0, ShifterOperand(0x1a8)); // rotated to last position
435
436 EmitAndCheck(&assembler, "DataProcessingModifiedImmediates");
437 }
438
439 TEST_F(Thumb2AssemblerTest, DataProcessingShiftedRegister) {
440 // 16-bit variants.
441 __ movs(R3, ShifterOperand(R4, LSL, 4));
442 __ movs(R3, ShifterOperand(R4, LSR, 5));
443 __ movs(R3, ShifterOperand(R4, ASR, 6));
444
445 // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
446 __ movs(R3, ShifterOperand(R4, ROR, 7));
447
448 // 32-bit RRX because RRX has no 16-bit version.
449 __ movs(R3, ShifterOperand(R4, RRX));
450
451 // 32 bit variants (not setting condition codes).
452 __ mov(R3, ShifterOperand(R4, LSL, 4), AL, kCcKeep);
453 __ mov(R3, ShifterOperand(R4, LSR, 5), AL, kCcKeep);
454 __ mov(R3, ShifterOperand(R4, ASR, 6), AL, kCcKeep);
455 __ mov(R3, ShifterOperand(R4, ROR, 7), AL, kCcKeep);
456 __ mov(R3, ShifterOperand(R4, RRX), AL, kCcKeep);
457
458 // 32 bit variants (high registers).
459 __ movs(R8, ShifterOperand(R4, LSL, 4));
460 __ movs(R8, ShifterOperand(R4, LSR, 5));
461 __ movs(R8, ShifterOperand(R4, ASR, 6));
462 __ movs(R8, ShifterOperand(R4, ROR, 7));
463 __ movs(R8, ShifterOperand(R4, RRX));
464
465 EmitAndCheck(&assembler, "DataProcessingShiftedRegister");
466 }
467
468 TEST_F(Thumb2AssemblerTest, ShiftImmediate) {
469 // Note: This test produces the same results as DataProcessingShiftedRegister
470 // but it does so using shift functions instead of mov().
471
472 // 16-bit variants.
473 __ Lsl(R3, R4, 4);
474 __ Lsr(R3, R4, 5);
475 __ Asr(R3, R4, 6);
476
477 // 32-bit ROR because ROR immediate doesn't have the same 16-bit version as other shifts.
478 __ Ror(R3, R4, 7);
479
480 // 32-bit RRX because RRX has no 16-bit version.
481 __ Rrx(R3, R4);
482
483 // 32 bit variants (not setting condition codes).
484 __ Lsl(R3, R4, 4, AL, kCcKeep);
485 __ Lsr(R3, R4, 5, AL, kCcKeep);
486 __ Asr(R3, R4, 6, AL, kCcKeep);
487 __ Ror(R3, R4, 7, AL, kCcKeep);
488 __ Rrx(R3, R4, AL, kCcKeep);
489
490 // 32 bit variants (high registers).
491 __ Lsls(R8, R4, 4);
492 __ Lsrs(R8, R4, 5);
493 __ Asrs(R8, R4, 6);
494 __ Rors(R8, R4, 7);
495 __ Rrxs(R8, R4);
496
497 EmitAndCheck(&assembler, "ShiftImmediate");
498 }
499
500 TEST_F(Thumb2AssemblerTest, BasicLoad) {
501 __ ldr(R3, Address(R4, 24));
502 __ ldrb(R3, Address(R4, 24));
503 __ ldrh(R3, Address(R4, 24));
504 __ ldrsb(R3, Address(R4, 24));
505 __ ldrsh(R3, Address(R4, 24));
506
507 __ ldr(R3, Address(SP, 24));
508
509 // 32 bit variants
510 __ ldr(R8, Address(R4, 24));
511 __ ldrb(R8, Address(R4, 24));
512 __ ldrh(R8, Address(R4, 24));
513 __ ldrsb(R8, Address(R4, 24));
514 __ ldrsh(R8, Address(R4, 24));
515
516 EmitAndCheck(&assembler, "BasicLoad");
517 }
518
519
520 TEST_F(Thumb2AssemblerTest, BasicStore) {
521 __ str(R3, Address(R4, 24));
522 __ strb(R3, Address(R4, 24));
523 __ strh(R3, Address(R4, 24));
524
525 __ str(R3, Address(SP, 24));
526
527 // 32 bit variants.
528 __ str(R8, Address(R4, 24));
529 __ strb(R8, Address(R4, 24));
530 __ strh(R8, Address(R4, 24));
531
532 EmitAndCheck(&assembler, "BasicStore");
533 }
534
535 TEST_F(Thumb2AssemblerTest, ComplexLoad) {
536 __ ldr(R3, Address(R4, 24, Address::Mode::Offset));
537 __ ldr(R3, Address(R4, 24, Address::Mode::PreIndex));
538 __ ldr(R3, Address(R4, 24, Address::Mode::PostIndex));
539 __ ldr(R3, Address(R4, 24, Address::Mode::NegOffset));
540 __ ldr(R3, Address(R4, 24, Address::Mode::NegPreIndex));
541 __ ldr(R3, Address(R4, 24, Address::Mode::NegPostIndex));
542
543 __ ldrb(R3, Address(R4, 24, Address::Mode::Offset));
544 __ ldrb(R3, Address(R4, 24, Address::Mode::PreIndex));
545 __ ldrb(R3, Address(R4, 24, Address::Mode::PostIndex));
546 __ ldrb(R3, Address(R4, 24, Address::Mode::NegOffset));
547 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
548 __ ldrb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
549
550 __ ldrh(R3, Address(R4, 24, Address::Mode::Offset));
551 __ ldrh(R3, Address(R4, 24, Address::Mode::PreIndex));
552 __ ldrh(R3, Address(R4, 24, Address::Mode::PostIndex));
553 __ ldrh(R3, Address(R4, 24, Address::Mode::NegOffset));
554 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
555 __ ldrh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
556
557 __ ldrsb(R3, Address(R4, 24, Address::Mode::Offset));
558 __ ldrsb(R3, Address(R4, 24, Address::Mode::PreIndex));
559 __ ldrsb(R3, Address(R4, 24, Address::Mode::PostIndex));
560 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegOffset));
561 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
562 __ ldrsb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
563
564 __ ldrsh(R3, Address(R4, 24, Address::Mode::Offset));
565 __ ldrsh(R3, Address(R4, 24, Address::Mode::PreIndex));
566 __ ldrsh(R3, Address(R4, 24, Address::Mode::PostIndex));
567 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegOffset));
568 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
569 __ ldrsh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
570
571 EmitAndCheck(&assembler, "ComplexLoad");
572 }
573
574
575 TEST_F(Thumb2AssemblerTest, ComplexStore) {
576 __ str(R3, Address(R4, 24, Address::Mode::Offset));
577 __ str(R3, Address(R4, 24, Address::Mode::PreIndex));
578 __ str(R3, Address(R4, 24, Address::Mode::PostIndex));
579 __ str(R3, Address(R4, 24, Address::Mode::NegOffset));
580 __ str(R3, Address(R4, 24, Address::Mode::NegPreIndex));
581 __ str(R3, Address(R4, 24, Address::Mode::NegPostIndex));
582
583 __ strb(R3, Address(R4, 24, Address::Mode::Offset));
584 __ strb(R3, Address(R4, 24, Address::Mode::PreIndex));
585 __ strb(R3, Address(R4, 24, Address::Mode::PostIndex));
586 __ strb(R3, Address(R4, 24, Address::Mode::NegOffset));
587 __ strb(R3, Address(R4, 24, Address::Mode::NegPreIndex));
588 __ strb(R3, Address(R4, 24, Address::Mode::NegPostIndex));
589
590 __ strh(R3, Address(R4, 24, Address::Mode::Offset));
591 __ strh(R3, Address(R4, 24, Address::Mode::PreIndex));
592 __ strh(R3, Address(R4, 24, Address::Mode::PostIndex));
593 __ strh(R3, Address(R4, 24, Address::Mode::NegOffset));
594 __ strh(R3, Address(R4, 24, Address::Mode::NegPreIndex));
595 __ strh(R3, Address(R4, 24, Address::Mode::NegPostIndex));
596
597 EmitAndCheck(&assembler, "ComplexStore");
598 }
599
600 TEST_F(Thumb2AssemblerTest, NegativeLoadStore) {
601 __ ldr(R3, Address(R4, -24, Address::Mode::Offset));
602 __ ldr(R3, Address(R4, -24, Address::Mode::PreIndex));
603 __ ldr(R3, Address(R4, -24, Address::Mode::PostIndex));
604 __ ldr(R3, Address(R4, -24, Address::Mode::NegOffset));
605 __ ldr(R3, Address(R4, -24, Address::Mode::NegPreIndex));
606 __ ldr(R3, Address(R4, -24, Address::Mode::NegPostIndex));
607
608 __ ldrb(R3, Address(R4, -24, Address::Mode::Offset));
609 __ ldrb(R3, Address(R4, -24, Address::Mode::PreIndex));
610 __ ldrb(R3, Address(R4, -24, Address::Mode::PostIndex));
611 __ ldrb(R3, Address(R4, -24, Address::Mode::NegOffset));
612 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
613 __ ldrb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
614
615 __ ldrh(R3, Address(R4, -24, Address::Mode::Offset));
616 __ ldrh(R3, Address(R4, -24, Address::Mode::PreIndex));
617 __ ldrh(R3, Address(R4, -24, Address::Mode::PostIndex));
618 __ ldrh(R3, Address(R4, -24, Address::Mode::NegOffset));
619 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
620 __ ldrh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
621
622 __ ldrsb(R3, Address(R4, -24, Address::Mode::Offset));
623 __ ldrsb(R3, Address(R4, -24, Address::Mode::PreIndex));
624 __ ldrsb(R3, Address(R4, -24, Address::Mode::PostIndex));
625 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegOffset));
626 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
627 __ ldrsb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
628
629 __ ldrsh(R3, Address(R4, -24, Address::Mode::Offset));
630 __ ldrsh(R3, Address(R4, -24, Address::Mode::PreIndex));
631 __ ldrsh(R3, Address(R4, -24, Address::Mode::PostIndex));
632 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegOffset));
633 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
634 __ ldrsh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
635
636 __ str(R3, Address(R4, -24, Address::Mode::Offset));
637 __ str(R3, Address(R4, -24, Address::Mode::PreIndex));
638 __ str(R3, Address(R4, -24, Address::Mode::PostIndex));
639 __ str(R3, Address(R4, -24, Address::Mode::NegOffset));
640 __ str(R3, Address(R4, -24, Address::Mode::NegPreIndex));
641 __ str(R3, Address(R4, -24, Address::Mode::NegPostIndex));
642
643 __ strb(R3, Address(R4, -24, Address::Mode::Offset));
644 __ strb(R3, Address(R4, -24, Address::Mode::PreIndex));
645 __ strb(R3, Address(R4, -24, Address::Mode::PostIndex));
646 __ strb(R3, Address(R4, -24, Address::Mode::NegOffset));
647 __ strb(R3, Address(R4, -24, Address::Mode::NegPreIndex));
648 __ strb(R3, Address(R4, -24, Address::Mode::NegPostIndex));
649
650 __ strh(R3, Address(R4, -24, Address::Mode::Offset));
651 __ strh(R3, Address(R4, -24, Address::Mode::PreIndex));
652 __ strh(R3, Address(R4, -24, Address::Mode::PostIndex));
653 __ strh(R3, Address(R4, -24, Address::Mode::NegOffset));
654 __ strh(R3, Address(R4, -24, Address::Mode::NegPreIndex));
655 __ strh(R3, Address(R4, -24, Address::Mode::NegPostIndex));
656
657 EmitAndCheck(&assembler, "NegativeLoadStore");
658 }
659
660 TEST_F(Thumb2AssemblerTest, SimpleLoadStoreDual) {
661 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
662 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
663
664 EmitAndCheck(&assembler, "SimpleLoadStoreDual");
665 }
666
667 TEST_F(Thumb2AssemblerTest, ComplexLoadStoreDual) {
668 __ strd(R2, Address(R0, 24, Address::Mode::Offset));
669 __ strd(R2, Address(R0, 24, Address::Mode::PreIndex));
670 __ strd(R2, Address(R0, 24, Address::Mode::PostIndex));
671 __ strd(R2, Address(R0, 24, Address::Mode::NegOffset));
672 __ strd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
673 __ strd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
674
675 __ ldrd(R2, Address(R0, 24, Address::Mode::Offset));
676 __ ldrd(R2, Address(R0, 24, Address::Mode::PreIndex));
677 __ ldrd(R2, Address(R0, 24, Address::Mode::PostIndex));
678 __ ldrd(R2, Address(R0, 24, Address::Mode::NegOffset));
679 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPreIndex));
680 __ ldrd(R2, Address(R0, 24, Address::Mode::NegPostIndex));
681
682 EmitAndCheck(&assembler, "ComplexLoadStoreDual");
683 }
684
685 TEST_F(Thumb2AssemblerTest, NegativeLoadStoreDual) {
686 __ strd(R2, Address(R0, -24, Address::Mode::Offset));
687 __ strd(R2, Address(R0, -24, Address::Mode::PreIndex));
688 __ strd(R2, Address(R0, -24, Address::Mode::PostIndex));
689 __ strd(R2, Address(R0, -24, Address::Mode::NegOffset));
690 __ strd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
691 __ strd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
692
693 __ ldrd(R2, Address(R0, -24, Address::Mode::Offset));
694 __ ldrd(R2, Address(R0, -24, Address::Mode::PreIndex));
695 __ ldrd(R2, Address(R0, -24, Address::Mode::PostIndex));
696 __ ldrd(R2, Address(R0, -24, Address::Mode::NegOffset));
697 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPreIndex));
698 __ ldrd(R2, Address(R0, -24, Address::Mode::NegPostIndex));
699
700 EmitAndCheck(&assembler, "NegativeLoadStoreDual");
701 }
702
703 TEST_F(Thumb2AssemblerTest, SimpleBranch) {
704 Label l1;
705 __ mov(R0, ShifterOperand(2));
706 __ Bind(&l1);
707 __ mov(R1, ShifterOperand(1));
708 __ b(&l1);
709 Label l2;
710 __ b(&l2);
711 __ mov(R1, ShifterOperand(2));
712 __ Bind(&l2);
713 __ mov(R0, ShifterOperand(3));
714
715 Label l3;
716 __ mov(R0, ShifterOperand(2));
717 __ Bind(&l3);
718 __ mov(R1, ShifterOperand(1));
719 __ b(&l3, EQ);
720
721 Label l4;
722 __ b(&l4, EQ);
723 __ mov(R1, ShifterOperand(2));
724 __ Bind(&l4);
725 __ mov(R0, ShifterOperand(3));
726
727 // 2 linked labels.
728 Label l5;
729 __ b(&l5);
730 __ mov(R1, ShifterOperand(4));
731 __ b(&l5);
732 __ mov(R1, ShifterOperand(5));
733 __ Bind(&l5);
734 __ mov(R0, ShifterOperand(6));
735
736 EmitAndCheck(&assembler, "SimpleBranch");
737 }
738
739 TEST_F(Thumb2AssemblerTest, LongBranch) {
740 __ Force32Bit();
741 // 32 bit branches.
742 Label l1;
743 __ mov(R0, ShifterOperand(2));
744 __ Bind(&l1);
745 __ mov(R1, ShifterOperand(1));
746 __ b(&l1);
747
748 Label l2;
749 __ b(&l2);
750 __ mov(R1, ShifterOperand(2));
751 __ Bind(&l2);
752 __ mov(R0, ShifterOperand(3));
753
754 Label l3;
755 __ mov(R0, ShifterOperand(2));
756 __ Bind(&l3);
757 __ mov(R1, ShifterOperand(1));
758 __ b(&l3, EQ);
759
760 Label l4;
761 __ b(&l4, EQ);
762 __ mov(R1, ShifterOperand(2));
763 __ Bind(&l4);
764 __ mov(R0, ShifterOperand(3));
765
766 // 2 linked labels.
767 Label l5;
768 __ b(&l5);
769 __ mov(R1, ShifterOperand(4));
770 __ b(&l5);
771 __ mov(R1, ShifterOperand(5));
772 __ Bind(&l5);
773 __ mov(R0, ShifterOperand(6));
774
775 EmitAndCheck(&assembler, "LongBranch");
776 }
777
778 TEST_F(Thumb2AssemblerTest, LoadMultiple) {
779 // 16 bit.
780 __ ldm(DB_W, R4, (1 << R0 | 1 << R3));
781
782 // 32 bit.
783 __ ldm(DB_W, R4, (1 << LR | 1 << R11));
784 __ ldm(DB, R4, (1 << LR | 1 << R11));
785
786 // Single reg is converted to ldr
787 __ ldm(DB_W, R4, (1 << R5));
788
789 EmitAndCheck(&assembler, "LoadMultiple");
790 }
791
792 TEST_F(Thumb2AssemblerTest, StoreMultiple) {
793 // 16 bit.
794 __ stm(IA_W, R4, (1 << R0 | 1 << R3));
795
796 // 32 bit.
797 __ stm(IA_W, R4, (1 << LR | 1 << R11));
798 __ stm(IA, R4, (1 << LR | 1 << R11));
799
800 // Single reg is converted to str
801 __ stm(IA_W, R4, (1 << R5));
802 __ stm(IA, R4, (1 << R5));
803
804 EmitAndCheck(&assembler, "StoreMultiple");
805 }
806
807 TEST_F(Thumb2AssemblerTest, MovWMovT) {
808 // Always 32 bit.
809 __ movw(R4, 0);
810 __ movw(R4, 0x34);
811 __ movw(R9, 0x34);
812 __ movw(R3, 0x1234);
813 __ movw(R9, 0xffff);
814
815 // Always 32 bit.
816 __ movt(R0, 0);
817 __ movt(R0, 0x1234);
818 __ movt(R1, 0xffff);
819
820 EmitAndCheck(&assembler, "MovWMovT");
821 }
822
823 TEST_F(Thumb2AssemblerTest, SpecialAddSub) {
824 __ add(R2, SP, ShifterOperand(0x50)); // 16 bit.
825 __ add(SP, SP, ShifterOperand(0x50)); // 16 bit.
826 __ add(R8, SP, ShifterOperand(0x50)); // 32 bit.
827
828 __ add(R2, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
829 __ add(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size.
830 __ add(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
831
832 __ sub(SP, SP, ShifterOperand(0x50)); // 16 bit
833 __ sub(R0, SP, ShifterOperand(0x50)); // 32 bit
834 __ sub(R8, SP, ShifterOperand(0x50)); // 32 bit.
835
836 __ sub(SP, SP, ShifterOperand(0xf00)); // 32 bit due to imm size
837 __ sub(SP, SP, ShifterOperand(0xffc)); // 32 bit due to imm size; encoding T4.
838
839 EmitAndCheck(&assembler, "SpecialAddSub");
840 }
841
842 TEST_F(Thumb2AssemblerTest, LoadFromOffset) {
843 __ LoadFromOffset(kLoadWord, R2, R4, 12);
844 __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
845 __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
846 __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
847 __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
848 __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
849 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
850 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
851 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
852 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
853 __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
854 __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
855 __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
856 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
857 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
858 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
859 __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
860 __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);
861
862 __ LoadFromOffset(kLoadWord, R0, R12, 12); // 32-bit because of R12.
863 __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);
864
865 __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
866 __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
867 __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);
868
869 EmitAndCheck(&assembler, "LoadFromOffset");
870 }
871
872 TEST_F(Thumb2AssemblerTest, StoreToOffset) {
873 __ StoreToOffset(kStoreWord, R2, R4, 12);
874 __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
875 __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
876 __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
877 __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
878 __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
879 __ StoreToOffset(kStoreHalfword, R2, R4, 12);
880 __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
881 __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
882 __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
883 __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
884 __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
885 __ StoreToOffset(kStoreWordPair, R2, R4, 12);
886 __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
887 __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
888 __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
889 __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
890 __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);
891
892 __ StoreToOffset(kStoreWord, R0, R12, 12); // 32-bit because of R12.
893 __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);
894
895 __ StoreToOffset(kStoreByte, R2, R4, 12);
896
897 EmitAndCheck(&assembler, "StoreToOffset");
898 }
899
900 TEST_F(Thumb2AssemblerTest, IfThen) {
901 __ it(EQ);
902 __ mov(R1, ShifterOperand(1), EQ);
903
904 __ it(EQ, kItThen);
905 __ mov(R1, ShifterOperand(1), EQ);
906 __ mov(R2, ShifterOperand(2), EQ);
907
908 __ it(EQ, kItElse);
909 __ mov(R1, ShifterOperand(1), EQ);
910 __ mov(R2, ShifterOperand(2), NE);
911
912 __ it(EQ, kItThen, kItElse);
913 __ mov(R1, ShifterOperand(1), EQ);
914 __ mov(R2, ShifterOperand(2), EQ);
915 __ mov(R3, ShifterOperand(3), NE);
916
917 __ it(EQ, kItElse, kItElse);
918 __ mov(R1, ShifterOperand(1), EQ);
919 __ mov(R2, ShifterOperand(2), NE);
920 __ mov(R3, ShifterOperand(3), NE);
921
922 __ it(EQ, kItThen, kItThen, kItElse);
923 __ mov(R1, ShifterOperand(1), EQ);
924 __ mov(R2, ShifterOperand(2), EQ);
925 __ mov(R3, ShifterOperand(3), EQ);
926 __ mov(R4, ShifterOperand(4), NE);
927
928 EmitAndCheck(&assembler, "IfThen");
929 }
930
931 TEST_F(Thumb2AssemblerTest, CbzCbnz) {
932 Label l1;
933 __ cbz(R2, &l1);
934 __ mov(R1, ShifterOperand(3));
935 __ mov(R2, ShifterOperand(3));
936 __ Bind(&l1);
937 __ mov(R2, ShifterOperand(4));
938
939 Label l2;
940 __ cbnz(R2, &l2);
941 __ mov(R8, ShifterOperand(3));
942 __ mov(R2, ShifterOperand(3));
943 __ Bind(&l2);
944 __ mov(R2, ShifterOperand(4));
945
946 EmitAndCheck(&assembler, "CbzCbnz");
947 }
948
949 TEST_F(Thumb2AssemblerTest, Multiply) {
950 __ mul(R0, R1, R0);
951 __ mul(R0, R1, R2);
952 __ mul(R8, R9, R8);
953 __ mul(R8, R9, R10);
954
955 __ mla(R0, R1, R2, R3);
956 __ mla(R8, R9, R8, R9);
957
958 __ mls(R0, R1, R2, R3);
959 __ mls(R8, R9, R8, R9);
960
961 __ umull(R0, R1, R2, R3);
962 __ umull(R8, R9, R10, R11);
963
964 EmitAndCheck(&assembler, "Multiply");
965 }
966
967 TEST_F(Thumb2AssemblerTest, Divide) {
968 __ sdiv(R0, R1, R2);
969 __ sdiv(R8, R9, R10);
970
971 __ udiv(R0, R1, R2);
972 __ udiv(R8, R9, R10);
973
974 EmitAndCheck(&assembler, "Divide");
975 }
976
977 TEST_F(Thumb2AssemblerTest, VMov) {
978 __ vmovs(S1, 1.0);
979 __ vmovd(D1, 1.0);
980
981 __ vmovs(S1, S2);
982 __ vmovd(D1, D2);
983
984 EmitAndCheck(&assembler, "VMov");
985 }
986
987
988 TEST_F(Thumb2AssemblerTest, BasicFloatingPoint) {
989 __ vadds(S0, S1, S2);
990 __ vsubs(S0, S1, S2);
991 __ vmuls(S0, S1, S2);
992 __ vmlas(S0, S1, S2);
993 __ vmlss(S0, S1, S2);
994 __ vdivs(S0, S1, S2);
995 __ vabss(S0, S1);
996 __ vnegs(S0, S1);
997 __ vsqrts(S0, S1);
998
999 __ vaddd(D0, D1, D2);
1000 __ vsubd(D0, D1, D2);
1001 __ vmuld(D0, D1, D2);
1002 __ vmlad(D0, D1, D2);
1003 __ vmlsd(D0, D1, D2);
1004 __ vdivd(D0, D1, D2);
1005 __ vabsd(D0, D1);
1006 __ vnegd(D0, D1);
1007 __ vsqrtd(D0, D1);
1008
1009 EmitAndCheck(&assembler, "BasicFloatingPoint");
1010 }
1011
1012 TEST_F(Thumb2AssemblerTest, FloatingPointConversions) {
1013 __ vcvtsd(S2, D2);
1014 __ vcvtds(D2, S2);
1015
1016 __ vcvtis(S1, S2);
1017 __ vcvtsi(S1, S2);
1018
1019 __ vcvtid(S1, D2);
1020 __ vcvtdi(D1, S2);
1021
1022 __ vcvtus(S1, S2);
1023 __ vcvtsu(S1, S2);
1024
1025 __ vcvtud(S1, D2);
1026 __ vcvtdu(D1, S2);
1027
1028 EmitAndCheck(&assembler, "FloatingPointConversions");
1029 }
1030
1031 TEST_F(Thumb2AssemblerTest, FloatingPointComparisons) {
1032 __ vcmps(S0, S1);
1033 __ vcmpd(D0, D1);
1034
1035 __ vcmpsz(S2);
1036 __ vcmpdz(D2);
1037
1038 EmitAndCheck(&assembler, "FloatingPointComparisons");
1039 }
1040
1041 TEST_F(Thumb2AssemblerTest, Calls) {
1042 __ blx(LR);
1043 __ bx(LR);
1044
1045 EmitAndCheck(&assembler, "Calls");
1046 }
1047
1048 TEST_F(Thumb2AssemblerTest, Breakpoint) {
1049 __ bkpt(0);
1050
1051 EmitAndCheck(&assembler, "Breakpoint");
1052 }
1053
1054 TEST_F(Thumb2AssemblerTest, StrR1) {
1055 __ str(R1, Address(SP, 68));
1056 __ str(R1, Address(SP, 1068));
1057
1058 EmitAndCheck(&assembler, "StrR1");
1059 }
1060
1061 TEST_F(Thumb2AssemblerTest, VPushPop) {
1062 __ vpushs(S2, 4);
1063 __ vpushd(D2, 4);
1064
1065 __ vpops(S2, 4);
1066 __ vpopd(D2, 4);
1067
1068 EmitAndCheck(&assembler, "VPushPop");
1069 }
1070
1071 TEST_F(Thumb2AssemblerTest, Max16BitBranch) {
1072 Label l1;
1073 __ b(&l1);
1074 for (int i = 0 ; i < (1 << 11) ; i += 2) {
1075 __ mov(R3, ShifterOperand(i & 0xff));
1076 }
1077 __ Bind(&l1);
1078 __ mov(R1, ShifterOperand(R2));
1079
1080 EmitAndCheck(&assembler, "Max16BitBranch");
1081 }
1082
1083 TEST_F(Thumb2AssemblerTest, Branch32) {
1084 Label l1;
1085 __ b(&l1);
1086 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1087 __ mov(R3, ShifterOperand(i & 0xff));
1088 }
1089 __ Bind(&l1);
1090 __ mov(R1, ShifterOperand(R2));
1091
1092 EmitAndCheck(&assembler, "Branch32");
1093 }
1094
1095 TEST_F(Thumb2AssemblerTest, CompareAndBranchMax) {
1096 Label l1;
1097 __ cbz(R4, &l1);
1098 for (int i = 0 ; i < (1 << 7) ; i += 2) {
1099 __ mov(R3, ShifterOperand(i & 0xff));
1100 }
1101 __ Bind(&l1);
1102 __ mov(R1, ShifterOperand(R2));
1103
1104 EmitAndCheck(&assembler, "CompareAndBranchMax");
1105 }
1106
1107 TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation16) {
1108 Label l1;
1109 __ cbz(R4, &l1);
1110 for (int i = 0 ; i < (1 << 7) + 2 ; i += 2) {
1111 __ mov(R3, ShifterOperand(i & 0xff));
1112 }
1113 __ Bind(&l1);
1114 __ mov(R1, ShifterOperand(R2));
1115
1116 EmitAndCheck(&assembler, "CompareAndBranchRelocation16");
1117 }
1118
1119 TEST_F(Thumb2AssemblerTest, CompareAndBranchRelocation32) {
1120 Label l1;
1121 __ cbz(R4, &l1);
1122 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1123 __ mov(R3, ShifterOperand(i & 0xff));
1124 }
1125 __ Bind(&l1);
1126 __ mov(R1, ShifterOperand(R2));
1127
1128 EmitAndCheck(&assembler, "CompareAndBranchRelocation32");
1129 }
1130
1131 TEST_F(Thumb2AssemblerTest, MixedBranch32) {
1132 Label l1;
1133 Label l2;
1134 __ b(&l1); // Forwards.
1135 __ Bind(&l2);
1136
1137 // Space to force relocation.
1138 for (int i = 0 ; i < (1 << 11) + 2 ; i += 2) {
1139 __ mov(R3, ShifterOperand(i & 0xff));
1140 }
1141 __ b(&l2); // Backwards.
1142 __ Bind(&l1);
1143 __ mov(R1, ShifterOperand(R2));
1144
1145 EmitAndCheck(&assembler, "MixedBranch32");
1146 }
1147
1148 TEST_F(Thumb2AssemblerTest, Shifts) {
1149 // 16 bit selected for CcDontCare.
1150 __ Lsl(R0, R1, 5);
1151 __ Lsr(R0, R1, 5);
1152 __ Asr(R0, R1, 5);
1153
1154 __ Lsl(R0, R0, R1);
1155 __ Lsr(R0, R0, R1);
1156 __ Asr(R0, R0, R1);
1157 __ Ror(R0, R0, R1);
1158
1159 // 16 bit with kCcSet.
1160 __ Lsls(R0, R1, 5);
1161 __ Lsrs(R0, R1, 5);
1162 __ Asrs(R0, R1, 5);
1163
1164 __ Lsls(R0, R0, R1);
1165 __ Lsrs(R0, R0, R1);
1166 __ Asrs(R0, R0, R1);
1167 __ Rors(R0, R0, R1);
1168
1169 // 32-bit with kCcKeep.
1170 __ Lsl(R0, R1, 5, AL, kCcKeep);
1171 __ Lsr(R0, R1, 5, AL, kCcKeep);
1172 __ Asr(R0, R1, 5, AL, kCcKeep);
1173
1174 __ Lsl(R0, R0, R1, AL, kCcKeep);
1175 __ Lsr(R0, R0, R1, AL, kCcKeep);
1176 __ Asr(R0, R0, R1, AL, kCcKeep);
1177 __ Ror(R0, R0, R1, AL, kCcKeep);
1178
1179 // 32-bit because ROR immediate doesn't have a 16-bit version like the other shifts.
1180 __ Ror(R0, R1, 5);
1181 __ Rors(R0, R1, 5);
1182 __ Ror(R0, R1, 5, AL, kCcKeep);
1183
1184 // 32 bit due to high registers.
1185 __ Lsl(R8, R1, 5);
1186 __ Lsr(R0, R8, 5);
1187 __ Asr(R8, R1, 5);
1188 __ Ror(R0, R8, 5);
1189
1190 // 32 bit due to different Rd and Rn.
1191 __ Lsl(R0, R1, R2);
1192 __ Lsr(R0, R1, R2);
1193 __ Asr(R0, R1, R2);
1194 __ Ror(R0, R1, R2);
1195
1196 // 32 bit due to use of high registers.
1197 __ Lsl(R8, R1, R2);
1198 __ Lsr(R0, R8, R2);
1199 __ Asr(R0, R1, R8);
1200
1201 // S bit (all 32 bit)
1202
1203 // 32 bit due to high registers.
1204 __ Lsls(R8, R1, 5);
1205 __ Lsrs(R0, R8, 5);
1206 __ Asrs(R8, R1, 5);
1207 __ Rors(R0, R8, 5);
1208
1209 // 32 bit due to different Rd and Rn.
1210 __ Lsls(R0, R1, R2);
1211 __ Lsrs(R0, R1, R2);
1212 __ Asrs(R0, R1, R2);
1213 __ Rors(R0, R1, R2);
1214
1215 // 32 bit due to use of high registers.
1216 __ Lsls(R8, R1, R2);
1217 __ Lsrs(R0, R8, R2);
1218 __ Asrs(R0, R1, R8);
1219
1220 EmitAndCheck(&assembler, "Shifts");
1221 }
1222
1223 TEST_F(Thumb2AssemblerTest, LoadStoreRegOffset) {
1224 // 16 bit.
1225 __ ldr(R0, Address(R1, R2));
1226 __ str(R0, Address(R1, R2));
1227
1228 // 32 bit due to shift.
1229 __ ldr(R0, Address(R1, R2, LSL, 1));
1230 __ str(R0, Address(R1, R2, LSL, 1));
1231
1232 __ ldr(R0, Address(R1, R2, LSL, 3));
1233 __ str(R0, Address(R1, R2, LSL, 3));
1234
1235 // 32 bit due to high register use.
1236 __ ldr(R8, Address(R1, R2));
1237 __ str(R8, Address(R1, R2));
1238
1239 __ ldr(R1, Address(R8, R2));
1240 __ str(R2, Address(R8, R2));
1241
1242 __ ldr(R0, Address(R1, R8));
1243 __ str(R0, Address(R1, R8));
1244
1245 EmitAndCheck(&assembler, "LoadStoreRegOffset");
1246 }
1247
1248 TEST_F(Thumb2AssemblerTest, LoadStoreLimits) {
1249 __ ldr(R0, Address(R4, 124)); // 16 bit.
1250 __ ldr(R0, Address(R4, 128)); // 32 bit.
1251
1252 __ ldrb(R0, Address(R4, 31)); // 16 bit.
1253 __ ldrb(R0, Address(R4, 32)); // 32 bit.
1254
1255 __ ldrh(R0, Address(R4, 62)); // 16 bit.
1256 __ ldrh(R0, Address(R4, 64)); // 32 bit.
1257
1258 __ ldrsb(R0, Address(R4, 31)); // 32 bit.
1259 __ ldrsb(R0, Address(R4, 32)); // 32 bit.
1260
1261 __ ldrsh(R0, Address(R4, 62)); // 32 bit.
1262 __ ldrsh(R0, Address(R4, 64)); // 32 bit.
1263
1264 __ str(R0, Address(R4, 124)); // 16 bit.
1265 __ str(R0, Address(R4, 128)); // 32 bit.
1266
1267 __ strb(R0, Address(R4, 31)); // 16 bit.
1268 __ strb(R0, Address(R4, 32)); // 32 bit.
1269
1270 __ strh(R0, Address(R4, 62)); // 16 bit.
1271 __ strh(R0, Address(R4, 64)); // 32 bit.
1272
1273 EmitAndCheck(&assembler, "LoadStoreLimits");
1274 }
1275
1276 TEST_F(Thumb2AssemblerTest, CompareAndBranch) {
1277 Label label;
1278 __ CompareAndBranchIfZero(arm::R0, &label);
1279 __ CompareAndBranchIfZero(arm::R11, &label);
1280 __ CompareAndBranchIfNonZero(arm::R0, &label);
1281 __ CompareAndBranchIfNonZero(arm::R11, &label);
1282 __ Bind(&label);
1283
1284 EmitAndCheck(&assembler, "CompareAndBranch");
1285 }
1286
1287 TEST_F(Thumb2AssemblerTest, AddConstant) {
1288 // Low registers, Rd != Rn.
1289 __ AddConstant(R0, R1, 0); // MOV.
1290 __ AddConstant(R0, R1, 1); // 16-bit ADDS, encoding T1.
1291 __ AddConstant(R0, R1, 7); // 16-bit ADDS, encoding T1.
1292 __ AddConstant(R0, R1, 8); // 32-bit ADD, encoding T3.
1293 __ AddConstant(R0, R1, 255); // 32-bit ADD, encoding T3.
1294 __ AddConstant(R0, R1, 256); // 32-bit ADD, encoding T3.
1295 __ AddConstant(R0, R1, 257); // 32-bit ADD, encoding T4.
1296 __ AddConstant(R0, R1, 0xfff); // 32-bit ADD, encoding T4.
1297 __ AddConstant(R0, R1, 0x1000); // 32-bit ADD, encoding T3.
1298 __ AddConstant(R0, R1, 0x1001); // MVN+SUB.
1299 __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
1300 __ AddConstant(R0, R1, 0xffff); // MOVW+ADD.
1301 __ AddConstant(R0, R1, 0x10000); // 32-bit ADD, encoding T3.
1302 __ AddConstant(R0, R1, 0x10001); // 32-bit ADD, encoding T3.
1303 __ AddConstant(R0, R1, 0x10002); // MVN+SUB.
1304 __ AddConstant(R0, R1, 0x10003); // MOVW+MOVT+ADD.
1305 __ AddConstant(R0, R1, -1); // 16-bit SUBS.
1306 __ AddConstant(R0, R1, -7); // 16-bit SUBS.
1307 __ AddConstant(R0, R1, -8); // 32-bit SUB, encoding T3.
1308 __ AddConstant(R0, R1, -255); // 32-bit SUB, encoding T3.
1309 __ AddConstant(R0, R1, -256); // 32-bit SUB, encoding T3.
1310 __ AddConstant(R0, R1, -257); // 32-bit SUB, encoding T4.
1311 __ AddConstant(R0, R1, -0xfff); // 32-bit SUB, encoding T4.
1312 __ AddConstant(R0, R1, -0x1000); // 32-bit SUB, encoding T3.
1313 __ AddConstant(R0, R1, -0x1001); // MVN+ADD.
1314 __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
1315 __ AddConstant(R0, R1, -0xffff); // MOVW+SUB.
1316 __ AddConstant(R0, R1, -0x10000); // 32-bit SUB, encoding T3.
1317 __ AddConstant(R0, R1, -0x10001); // 32-bit SUB, encoding T3.
1318 __ AddConstant(R0, R1, -0x10002); // MVN+ADD.
1319 __ AddConstant(R0, R1, -0x10003); // MOVW+MOVT+ADD.
1320
1321 // Low registers, Rd == Rn.
1322 __ AddConstant(R0, R0, 0); // Nothing.
1323 __ AddConstant(R1, R1, 1); // 16-bit ADDS, encoding T2.
1324 __ AddConstant(R0, R0, 7); // 16-bit ADDS, encoding T2.
1325 __ AddConstant(R1, R1, 8); // 16-bit ADDS, encoding T2.
1326 __ AddConstant(R0, R0, 255); // 16-bit ADDS, encoding T2.
1327 __ AddConstant(R1, R1, 256); // 32-bit ADD, encoding T3.
1328 __ AddConstant(R0, R0, 257); // 32-bit ADD, encoding T4.
1329 __ AddConstant(R1, R1, 0xfff); // 32-bit ADD, encoding T4.
1330 __ AddConstant(R0, R0, 0x1000); // 32-bit ADD, encoding T3.
1331 __ AddConstant(R1, R1, 0x1001); // MVN+SUB.
1332 __ AddConstant(R0, R0, 0x1002); // MOVW+ADD.
1333 __ AddConstant(R1, R1, 0xffff); // MOVW+ADD.
1334 __ AddConstant(R0, R0, 0x10000); // 32-bit ADD, encoding T3.
1335 __ AddConstant(R1, R1, 0x10001); // 32-bit ADD, encoding T3.
1336 __ AddConstant(R0, R0, 0x10002); // MVN+SUB.
1337 __ AddConstant(R1, R1, 0x10003); // MOVW+MOVT+ADD.
1338 __ AddConstant(R0, R0, -1); // 16-bit SUBS, encoding T2.
1339 __ AddConstant(R1, R1, -7); // 16-bit SUBS, encoding T2.
1340 __ AddConstant(R0, R0, -8); // 16-bit SUBS, encoding T2.
1341 __ AddConstant(R1, R1, -255); // 16-bit SUBS, encoding T2.
1342 __ AddConstant(R0, R0, -256); // 32-bit SUB, encoding T3.
1343 __ AddConstant(R1, R1, -257); // 32-bit SUB, encoding T4.
1344 __ AddConstant(R0, R0, -0xfff); // 32-bit SUB, encoding T4.
1345 __ AddConstant(R1, R1, -0x1000); // 32-bit SUB, encoding T3.
1346 __ AddConstant(R0, R0, -0x1001); // MVN+ADD.
1347 __ AddConstant(R1, R1, -0x1002); // MOVW+SUB.
1348 __ AddConstant(R0, R0, -0xffff); // MOVW+SUB.
1349 __ AddConstant(R1, R1, -0x10000); // 32-bit SUB, encoding T3.
1350 __ AddConstant(R0, R0, -0x10001); // 32-bit SUB, encoding T3.
1351 __ AddConstant(R1, R1, -0x10002); // MVN+ADD.
1352 __ AddConstant(R0, R0, -0x10003); // MOVW+MOVT+ADD.
1353
1354 // High registers.
1355 __ AddConstant(R8, R8, 0); // Nothing.
1356 __ AddConstant(R8, R1, 1); // 32-bit ADD, encoding T3.
1357 __ AddConstant(R0, R8, 7); // 32-bit ADD, encoding T3.
1358 __ AddConstant(R8, R8, 8); // 32-bit ADD, encoding T3.
1359 __ AddConstant(R8, R1, 255); // 32-bit ADD, encoding T3.
1360 __ AddConstant(R0, R8, 256); // 32-bit ADD, encoding T3.
1361 __ AddConstant(R8, R8, 257); // 32-bit ADD, encoding T4.
1362 __ AddConstant(R8, R1, 0xfff); // 32-bit ADD, encoding T4.
1363 __ AddConstant(R0, R8, 0x1000); // 32-bit ADD, encoding T3.
1364 __ AddConstant(R8, R8, 0x1001); // MVN+SUB.
1365 __ AddConstant(R0, R1, 0x1002); // MOVW+ADD.
1366 __ AddConstant(R0, R8, 0xffff); // MOVW+ADD.
1367 __ AddConstant(R8, R8, 0x10000); // 32-bit ADD, encoding T3.
1368 __ AddConstant(R8, R1, 0x10001); // 32-bit ADD, encoding T3.
1369 __ AddConstant(R0, R8, 0x10002); // MVN+SUB.
1370 __ AddConstant(R0, R8, 0x10003); // MOVW+MOVT+ADD.
1371 __ AddConstant(R8, R8, -1); // 32-bit ADD, encoding T3.
1372 __ AddConstant(R8, R1, -7); // 32-bit SUB, encoding T3.
1373 __ AddConstant(R0, R8, -8); // 32-bit SUB, encoding T3.
1374 __ AddConstant(R8, R8, -255); // 32-bit SUB, encoding T3.
1375 __ AddConstant(R8, R1, -256); // 32-bit SUB, encoding T3.
1376 __ AddConstant(R0, R8, -257); // 32-bit SUB, encoding T4.
1377 __ AddConstant(R8, R8, -0xfff); // 32-bit SUB, encoding T4.
1378 __ AddConstant(R8, R1, -0x1000); // 32-bit SUB, encoding T3.
1379 __ AddConstant(R0, R8, -0x1001); // MVN+ADD.
1380 __ AddConstant(R0, R1, -0x1002); // MOVW+SUB.
1381 __ AddConstant(R8, R1, -0xffff); // MOVW+SUB.
1382 __ AddConstant(R0, R8, -0x10000); // 32-bit SUB, encoding T3.
1383 __ AddConstant(R8, R8, -0x10001); // 32-bit SUB, encoding T3.
1384 __ AddConstant(R8, R1, -0x10002); // MVN+SUB.
1385 __ AddConstant(R0, R8, -0x10003); // MOVW+MOVT+ADD.
1386
1387 // Low registers, Rd != Rn, kCcKeep.
1388 __ AddConstant(R0, R1, 0, AL, kCcKeep); // MOV.
1389 __ AddConstant(R0, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1390 __ AddConstant(R0, R1, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
1391 __ AddConstant(R0, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
1392 __ AddConstant(R0, R1, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
1393 __ AddConstant(R0, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
1394 __ AddConstant(R0, R1, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
1395 __ AddConstant(R0, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
1396 __ AddConstant(R0, R1, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1397 __ AddConstant(R0, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
1398 __ AddConstant(R0, R1, 0x1002, AL, kCcKeep); // MOVW+ADD.
1399 __ AddConstant(R0, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
1400 __ AddConstant(R0, R1, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1401 __ AddConstant(R0, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
1402 __ AddConstant(R0, R1, 0x10002, AL, kCcKeep); // MVN+SUB.
1403 __ AddConstant(R0, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1404 __ AddConstant(R0, R1, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1405 __ AddConstant(R0, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
1406 __ AddConstant(R0, R1, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
1407 __ AddConstant(R0, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
1408 __ AddConstant(R0, R1, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
1409 __ AddConstant(R0, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
1410 __ AddConstant(R0, R1, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
1411 __ AddConstant(R0, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1412 __ AddConstant(R0, R1, -0x1001, AL, kCcKeep); // MVN+ADD.
1413 __ AddConstant(R0, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
1414 __ AddConstant(R0, R1, -0xffff, AL, kCcKeep); // MOVW+SUB.
1415 __ AddConstant(R0, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1416 __ AddConstant(R0, R1, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
1417 __ AddConstant(R0, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
1418 __ AddConstant(R0, R1, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1419
1420 // Low registers, Rd == Rn, kCcKeep.
1421 __ AddConstant(R0, R0, 0, AL, kCcKeep); // Nothing.
1422 __ AddConstant(R1, R1, 1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1423 __ AddConstant(R0, R0, 7, AL, kCcKeep); // 32-bit ADD, encoding T3.
1424 __ AddConstant(R1, R1, 8, AL, kCcKeep); // 32-bit ADD, encoding T3.
1425 __ AddConstant(R0, R0, 255, AL, kCcKeep); // 32-bit ADD, encoding T3.
1426 __ AddConstant(R1, R1, 256, AL, kCcKeep); // 32-bit ADD, encoding T3.
1427 __ AddConstant(R0, R0, 257, AL, kCcKeep); // 32-bit ADD, encoding T4.
1428 __ AddConstant(R1, R1, 0xfff, AL, kCcKeep); // 32-bit ADD, encoding T4.
1429 __ AddConstant(R0, R0, 0x1000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1430 __ AddConstant(R1, R1, 0x1001, AL, kCcKeep); // MVN+SUB.
1431 __ AddConstant(R0, R0, 0x1002, AL, kCcKeep); // MOVW+ADD.
1432 __ AddConstant(R1, R1, 0xffff, AL, kCcKeep); // MOVW+ADD.
1433 __ AddConstant(R0, R0, 0x10000, AL, kCcKeep); // 32-bit ADD, encoding T3.
1434 __ AddConstant(R1, R1, 0x10001, AL, kCcKeep); // 32-bit ADD, encoding T3.
1435 __ AddConstant(R0, R0, 0x10002, AL, kCcKeep); // MVN+SUB.
1436 __ AddConstant(R1, R1, 0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1437 __ AddConstant(R0, R0, -1, AL, kCcKeep); // 32-bit ADD, encoding T3.
1438 __ AddConstant(R1, R1, -7, AL, kCcKeep); // 32-bit SUB, encoding T3.
1439 __ AddConstant(R0, R0, -8, AL, kCcKeep); // 32-bit SUB, encoding T3.
1440 __ AddConstant(R1, R1, -255, AL, kCcKeep); // 32-bit SUB, encoding T3.
1441 __ AddConstant(R0, R0, -256, AL, kCcKeep); // 32-bit SUB, encoding T3.
1442 __ AddConstant(R1, R1, -257, AL, kCcKeep); // 32-bit SUB, encoding T4.
1443 __ AddConstant(R0, R0, -0xfff, AL, kCcKeep); // 32-bit SUB, encoding T4.
1444 __ AddConstant(R1, R1, -0x1000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1445 __ AddConstant(R0, R0, -0x1001, AL, kCcKeep); // MVN+ADD.
1446 __ AddConstant(R1, R1, -0x1002, AL, kCcKeep); // MOVW+SUB.
1447 __ AddConstant(R0, R0, -0xffff, AL, kCcKeep); // MOVW+SUB.
1448 __ AddConstant(R1, R1, -0x10000, AL, kCcKeep); // 32-bit SUB, encoding T3.
1449 __ AddConstant(R0, R0, -0x10001, AL, kCcKeep); // 32-bit SUB, encoding T3.
1450 __ AddConstant(R1, R1, -0x10002, AL, kCcKeep); // MVN+ADD.
1451 __ AddConstant(R0, R0, -0x10003, AL, kCcKeep); // MOVW+MOVT+ADD.
1452
1453 // Low registers, Rd != Rn, kCcSet.
1454 __ AddConstant(R0, R1, 0, AL, kCcSet); // 16-bit ADDS.
1455 __ AddConstant(R0, R1, 1, AL, kCcSet); // 16-bit ADDS.
1456 __ AddConstant(R0, R1, 7, AL, kCcSet); // 16-bit ADDS.
1457 __ AddConstant(R0, R1, 8, AL, kCcSet); // 32-bit ADDS, encoding T3.
1458 __ AddConstant(R0, R1, 255, AL, kCcSet); // 32-bit ADDS, encoding T3.
1459 __ AddConstant(R0, R1, 256, AL, kCcSet); // 32-bit ADDS, encoding T3.
1460 __ AddConstant(R0, R1, 257, AL, kCcSet); // MVN+SUBS.
1461 __ AddConstant(R0, R1, 0xfff, AL, kCcSet); // MOVW+ADDS.
1462 __ AddConstant(R0, R1, 0x1000, AL, kCcSet); // 32-bit ADDS, encoding T3.
1463 __ AddConstant(R0, R1, 0x1001, AL, kCcSet); // MVN+SUBS.
1464 __ AddConstant(R0, R1, 0x1002, AL, kCcSet); // MOVW+ADDS.
1465 __ AddConstant(R0, R1, 0xffff, AL, kCcSet); // MOVW+ADDS.
1466 __ AddConstant(R0, R1, 0x10000, AL, kCcSet); // 32-bit ADDS, encoding T3.
1467 __ AddConstant(R0, R1, 0x10001, AL, kCcSet); // 32-bit ADDS, encoding T3.
1468 __ AddConstant(R0, R1, 0x10002, AL, kCcSet); // MVN+SUBS.
1469 __ AddConstant(R0, R1, 0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
1470 __ AddConstant(R0, R1, -1, AL, kCcSet); // 16-bit SUBS.
1471 __ AddConstant(R0, R1, -7, AL, kCcSet); // 16-bit SUBS.
1472 __ AddConstant(R0, R1, -8, AL, kCcSet); // 32-bit SUBS, encoding T3.
1473 __ AddConstant(R0, R1, -255, AL, kCcSet); // 32-bit SUBS, encoding T3.
1474 __ AddConstant(R0, R1, -256, AL, kCcSet); // 32-bit SUBS, encoding T3.
1475 __ AddConstant(R0, R1, -257, AL, kCcSet); // MVN+ADDS.
1476 __ AddConstant(R0, R1, -0xfff, AL, kCcSet); // MOVW+SUBS.
1477 __ AddConstant(R0, R1, -0x1000, AL, kCcSet); // 32-bit SUBS, encoding T3.
1478 __ AddConstant(R0, R1, -0x1001, AL, kCcSet); // MVN+ADDS.
1479 __ AddConstant(R0, R1, -0x1002, AL, kCcSet); // MOVW+SUBS.
1480 __ AddConstant(R0, R1, -0xffff, AL, kCcSet); // MOVW+SUBS.
1481 __ AddConstant(R0, R1, -0x10000, AL, kCcSet); // 32-bit SUBS, encoding T3.
1482 __ AddConstant(R0, R1, -0x10001, AL, kCcSet); // 32-bit SUBS, encoding T3.
1483 __ AddConstant(R0, R1, -0x10002, AL, kCcSet); // MVN+ADDS.
1484 __ AddConstant(R0, R1, -0x10003, AL, kCcSet); // MOVW+MOVT+ADDS.
1485
  // Low registers, Rd == Rn, kCcSet.
  __ AddConstant(R0, R0, 0, AL, kCcSet);  // 16-bit ADDS, encoding T2.
  __ AddConstant(R1, R1, 1, AL, kCcSet);  // 16-bit ADDS, encoding T2.
  __ AddConstant(R0, R0, 7, AL, kCcSet);  // 16-bit ADDS, encoding T2.
  __ AddConstant(R1, R1, 8, AL, kCcSet);  // 16-bit ADDS, encoding T2.
  __ AddConstant(R0, R0, 255, AL, kCcSet);  // 16-bit ADDS, encoding T2.
  __ AddConstant(R1, R1, 256, AL, kCcSet);  // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R0, 257, AL, kCcSet);  // MVN+SUBS.
  __ AddConstant(R1, R1, 0xfff, AL, kCcSet);  // MOVW+ADDS.
  __ AddConstant(R0, R0, 0x1000, AL, kCcSet);  // 32-bit ADDS, encoding T3.
  __ AddConstant(R1, R1, 0x1001, AL, kCcSet);  // MVN+SUBS.
  __ AddConstant(R0, R0, 0x1002, AL, kCcSet);  // MOVW+ADDS.
  __ AddConstant(R1, R1, 0xffff, AL, kCcSet);  // MOVW+ADDS.
  __ AddConstant(R0, R0, 0x10000, AL, kCcSet);  // 32-bit ADDS, encoding T3.
  __ AddConstant(R1, R1, 0x10001, AL, kCcSet);  // 32-bit ADDS, encoding T3.
  __ AddConstant(R0, R0, 0x10002, AL, kCcSet);  // MVN+SUBS.
  __ AddConstant(R1, R1, 0x10003, AL, kCcSet);  // MOVW+MOVT+ADDS.
  __ AddConstant(R0, R0, -1, AL, kCcSet);  // 16-bit SUBS, encoding T2.
  __ AddConstant(R1, R1, -7, AL, kCcSet);  // 16-bit SUBS, encoding T2.
  __ AddConstant(R0, R0, -8, AL, kCcSet);  // 16-bit SUBS, encoding T2.
  __ AddConstant(R1, R1, -255, AL, kCcSet);  // 16-bit SUBS, encoding T2.
  __ AddConstant(R0, R0, -256, AL, kCcSet);  // 32-bit SUBS, encoding T3.
  __ AddConstant(R1, R1, -257, AL, kCcSet);  // MVN+ADDS.
  __ AddConstant(R0, R0, -0xfff, AL, kCcSet);  // MOVW+SUBS.
  __ AddConstant(R1, R1, -0x1000, AL, kCcSet);  // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R0, -0x1001, AL, kCcSet);  // MVN+ADDS.
  __ AddConstant(R1, R1, -0x1002, AL, kCcSet);  // MOVW+SUBS.
  __ AddConstant(R0, R0, -0xffff, AL, kCcSet);  // MOVW+SUBS.
  __ AddConstant(R1, R1, -0x10000, AL, kCcSet);  // 32-bit SUBS, encoding T3.
  __ AddConstant(R0, R0, -0x10001, AL, kCcSet);  // 32-bit SUBS, encoding T3.
  __ AddConstant(R1, R1, -0x10002, AL, kCcSet);  // MVN+ADDS.
  __ AddConstant(R0, R0, -0x10003, AL, kCcSet);  // MOVW+MOVT+ADDS.

  __ it(EQ);
  __ AddConstant(R0, R1, 1, EQ, kCcSet);  // 32-bit ADDS, encoding T3.
  __ it(NE);
  __ AddConstant(R0, R1, 1, NE, kCcKeep);  // 16-bit ADDS, encoding T1.
  __ it(GE);
  __ AddConstant(R0, R0, 1, GE, kCcSet);  // 32-bit ADDS, encoding T3.
  __ it(LE);
  __ AddConstant(R0, R0, 1, LE, kCcKeep);  // 16-bit ADDS, encoding T2.

  EmitAndCheck(&assembler, "AddConstant");
}

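// CmpConstant() picks the cheapest sequence for the given immediate and register:
// a 16-bit CMP for a low register with an 8-bit immediate, a 32-bit CMP or CMN
// when the (possibly negated) value is a modified immediate, and MVN, MOVW or
// MOVW+MOVT plus CMP/CMN otherwise. The cases below probe those boundaries.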
TEST_F(Thumb2AssemblerTest, CmpConstant) {
  __ CmpConstant(R0, 0);  // 16-bit CMP.
  __ CmpConstant(R1, 1);  // 16-bit CMP.
  __ CmpConstant(R0, 7);  // 16-bit CMP.
  __ CmpConstant(R1, 8);  // 16-bit CMP.
  __ CmpConstant(R0, 255);  // 16-bit CMP.
  __ CmpConstant(R1, 256);  // 32-bit CMP.
  __ CmpConstant(R0, 257);  // MVN+CMN.
  __ CmpConstant(R1, 0xfff);  // MOVW+CMP.
  __ CmpConstant(R0, 0x1000);  // 32-bit CMP.
  __ CmpConstant(R1, 0x1001);  // MVN+CMN.
  __ CmpConstant(R0, 0x1002);  // MOVW+CMP.
  __ CmpConstant(R1, 0xffff);  // MOVW+CMP.
  __ CmpConstant(R0, 0x10000);  // 32-bit CMP.
  __ CmpConstant(R1, 0x10001);  // 32-bit CMP.
  __ CmpConstant(R0, 0x10002);  // MVN+CMN.
  __ CmpConstant(R1, 0x10003);  // MOVW+MOVT+CMP.
  __ CmpConstant(R0, -1);  // 32-bit CMP.
  __ CmpConstant(R1, -7);  // CMN.
  __ CmpConstant(R0, -8);  // CMN.
  __ CmpConstant(R1, -255);  // CMN.
  __ CmpConstant(R0, -256);  // CMN.
  __ CmpConstant(R1, -257);  // MVN+CMP.
  __ CmpConstant(R0, -0xfff);  // MOVW+CMN.
  __ CmpConstant(R1, -0x1000);  // CMN.
  __ CmpConstant(R0, -0x1001);  // MVN+CMP.
  __ CmpConstant(R1, -0x1002);  // MOVW+CMN.
  __ CmpConstant(R0, -0xffff);  // MOVW+CMN.
  __ CmpConstant(R1, -0x10000);  // CMN.
  __ CmpConstant(R0, -0x10001);  // CMN.
  __ CmpConstant(R1, -0x10002);  // MVN+CMP.
  __ CmpConstant(R0, -0x10003);  // MOVW+MOVT+CMP.

  __ CmpConstant(R8, 0);  // 32-bit CMP.
  __ CmpConstant(R9, 1);  // 32-bit CMP.
  __ CmpConstant(R8, 7);  // 32-bit CMP.
  __ CmpConstant(R9, 8);  // 32-bit CMP.
  __ CmpConstant(R8, 255);  // 32-bit CMP.
  __ CmpConstant(R9, 256);  // 32-bit CMP.
  __ CmpConstant(R8, 257);  // MVN+CMN.
  __ CmpConstant(R9, 0xfff);  // MOVW+CMP.
  __ CmpConstant(R8, 0x1000);  // 32-bit CMP.
  __ CmpConstant(R9, 0x1001);  // MVN+CMN.
  __ CmpConstant(R8, 0x1002);  // MOVW+CMP.
  __ CmpConstant(R9, 0xffff);  // MOVW+CMP.
  __ CmpConstant(R8, 0x10000);  // 32-bit CMP.
  __ CmpConstant(R9, 0x10001);  // 32-bit CMP.
  __ CmpConstant(R8, 0x10002);  // MVN+CMN.
  __ CmpConstant(R9, 0x10003);  // MOVW+MOVT+CMP.
  __ CmpConstant(R8, -1);  // 32-bit CMP.
  __ CmpConstant(R9, -7);  // CMN.
  __ CmpConstant(R8, -8);  // CMN.
  __ CmpConstant(R9, -255);  // CMN.
  __ CmpConstant(R8, -256);  // CMN.
  __ CmpConstant(R9, -257);  // MVN+CMP.
  __ CmpConstant(R8, -0xfff);  // MOVW+CMN.
  __ CmpConstant(R9, -0x1000);  // CMN.
  __ CmpConstant(R8, -0x1001);  // MVN+CMP.
  __ CmpConstant(R9, -0x1002);  // MOVW+CMN.
  __ CmpConstant(R8, -0xffff);  // MOVW+CMN.
  __ CmpConstant(R9, -0x10000);  // CMN.
  __ CmpConstant(R8, -0x10001);  // CMN.
  __ CmpConstant(R9, -0x10002);  // MVN+CMP.
  __ CmpConstant(R8, -0x10003);  // MOVW+MOVT+CMP.

  EmitAndCheck(&assembler, "CmpConstant");
}

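// ENABLE_VIXL_TEST and ARM_VIXL gate the VIXL-based tests below. Both are
// defined unconditionally here, so JniAssemblerType resolves to the VIXL JNI
// macro assembler and the #else branch is not compiled.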
#define ENABLE_VIXL_TEST

#ifdef ENABLE_VIXL_TEST

#define ARM_VIXL

#ifdef ARM_VIXL
typedef arm::ArmVIXLJNIMacroAssembler JniAssemblerType;
#else
typedef arm::Thumb2Assembler AssemblerType;
#endif

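// Test fixture that provides an arena-backed JNI macro assembler for each test.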
class ArmVIXLAssemblerTest : public ::testing::Test {
 public:
  ArmVIXLAssemblerTest() : pool(), arena(&pool), assembler(&arena) { }

  ArenaPool pool;
  ArenaAllocator arena;
  JniAssemblerType assembler;
};

#undef __
#define __ assembler->

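// Finalizes the code in the given assembler and compares the disassembled
// output against the expected results.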
void EmitAndCheck(JniAssemblerType* assembler, const char* testname,
                  const char* const* results) {
  __ FinalizeCode();
  size_t cs = __ CodeSize();
  std::vector<uint8_t> managed_code(cs);
  MemoryRegion code(&managed_code[0], managed_code.size());
  __ FinalizeInstructions(code);

  DumpAndCheck(managed_code, testname, results);
}

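// Looks up the expected output for `testname` and delegates to the overload above.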
void EmitAndCheck(JniAssemblerType* assembler, const char* testname) {
  InitResults();
  std::map<std::string, const char* const*>::iterator results = test_results.find(testname);
  ASSERT_NE(results, test_results.end());

  EmitAndCheck(assembler, testname, results->second);
}

#undef __
#define __ assembler.

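// Exercises the JNI macro assembler entry points used by the JNI compiler:
// frame setup and teardown, loads and stores at offsets that straddle encoding
// limits, handle scope entries, and an exception poll whose branch target is
// pushed out of range.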
TEST_F(ArmVIXLAssemblerTest, VixlJniHelpers) {
  const bool is_static = true;
  const bool is_synchronized = false;
  const bool is_critical_native = false;
  const char* shorty = "IIFII";

  ArenaPool pool;
  ArenaAllocator arena(&pool);

  std::unique_ptr<JniCallingConvention> jni_conv(
      JniCallingConvention::Create(&arena,
                                   is_static,
                                   is_synchronized,
                                   is_critical_native,
                                   shorty,
                                   kThumb2));
  std::unique_ptr<ManagedRuntimeCallingConvention> mr_conv(
      ManagedRuntimeCallingConvention::Create(&arena, is_static, is_synchronized, shorty, kThumb2));
  const int frame_size(jni_conv->FrameSize());
  ArrayRef<const ManagedRegister> callee_save_regs = jni_conv->CalleeSaveRegisters();

  const ManagedRegister method_register = ArmManagedRegister::FromCoreRegister(R0);
  const ManagedRegister scratch_register = ArmManagedRegister::FromCoreRegister(R12);

  __ BuildFrame(frame_size, mr_conv->MethodRegister(), callee_save_regs, mr_conv->EntrySpills());
  __ IncreaseFrameSize(32);

  // Loads
  __ IncreaseFrameSize(4096);
  __ Load(method_register, FrameOffset(32), 4);
  __ Load(method_register, FrameOffset(124), 4);
  __ Load(method_register, FrameOffset(132), 4);
  __ Load(method_register, FrameOffset(1020), 4);
  __ Load(method_register, FrameOffset(1024), 4);
  __ Load(scratch_register, FrameOffset(4092), 4);
  __ Load(scratch_register, FrameOffset(4096), 4);
  __ LoadRawPtrFromThread(scratch_register, ThreadOffset32(512));
  __ LoadRef(method_register, scratch_register, MemberOffset(128), /* unpoison_reference */ false);

  // Stores
  __ Store(FrameOffset(32), method_register, 4);
  __ Store(FrameOffset(124), method_register, 4);
  __ Store(FrameOffset(132), method_register, 4);
  __ Store(FrameOffset(1020), method_register, 4);
  __ Store(FrameOffset(1024), method_register, 4);
  __ Store(FrameOffset(4092), scratch_register, 4);
  __ Store(FrameOffset(4096), scratch_register, 4);
  __ StoreImmediateToFrame(FrameOffset(48), 0xFF, scratch_register);
  __ StoreImmediateToFrame(FrameOffset(48), 0xFFFFFF, scratch_register);
  __ StoreRawPtr(FrameOffset(48), scratch_register);
  __ StoreRef(FrameOffset(48), scratch_register);
  __ StoreSpanning(FrameOffset(48), method_register, FrameOffset(48), scratch_register);
  __ StoreStackOffsetToThread(ThreadOffset32(512), FrameOffset(4096), scratch_register);
  __ StoreStackPointerToThread(ThreadOffset32(512));

  // Other
  __ Call(method_register, FrameOffset(48), scratch_register);
  __ Copy(FrameOffset(48), FrameOffset(44), scratch_register, 4);
  __ CopyRawPtrFromThread(FrameOffset(44), ThreadOffset32(512), scratch_register);
  __ CopyRef(FrameOffset(48), FrameOffset(44), scratch_register);
  __ GetCurrentThread(method_register);
  __ GetCurrentThread(FrameOffset(48), scratch_register);
  __ Move(scratch_register, method_register, 4);
  __ VerifyObject(scratch_register, false);

  __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, true);
  __ CreateHandleScopeEntry(scratch_register, FrameOffset(48), scratch_register, false);
  __ CreateHandleScopeEntry(method_register, FrameOffset(48), scratch_register, true);
  __ CreateHandleScopeEntry(FrameOffset(48), FrameOffset(64), scratch_register, true);
  __ CreateHandleScopeEntry(method_register, FrameOffset(0), scratch_register, true);
  __ CreateHandleScopeEntry(method_register, FrameOffset(1025), scratch_register, true);
  __ CreateHandleScopeEntry(scratch_register, FrameOffset(1025), scratch_register, true);

  __ ExceptionPoll(scratch_register, 0);

  // Push the target out of range of the branch emitted by ExceptionPoll.
  for (int i = 0; i < 64; i++) {
    __ Store(FrameOffset(2047), scratch_register, 4);
  }

  __ DecreaseFrameSize(4096);
  __ DecreaseFrameSize(32);
  __ RemoveFrame(frame_size, callee_save_regs);

  EmitAndCheck(&assembler, "VixlJniHelpers");
}

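// For the remaining tests, R0/R2/R4/R12 name VIXL aarch32 registers and `__`
// expands to the underlying VIXL-based assembler (assembler.asm_).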
#ifdef ARM_VIXL
#define R0 vixl::aarch32::r0
#define R2 vixl::aarch32::r2
#define R4 vixl::aarch32::r4
#define R12 vixl::aarch32::r12
#undef __
#define __ assembler.asm_.
#endif

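// Exercises LoadFromOffset() offset handling: small offsets can be encoded
// directly, while larger ones require adjusting the base in a temporary first.
// Includes a negative offset and the case where the destination doubles as the
// base register.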
TEST_F(ArmVIXLAssemblerTest, VixlLoadFromOffset) {
  __ LoadFromOffset(kLoadWord, R2, R4, 12);
  __ LoadFromOffset(kLoadWord, R2, R4, 0xfff);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x1000a4);
  __ LoadFromOffset(kLoadWord, R2, R4, 0x101000);
  __ LoadFromOffset(kLoadWord, R4, R4, 0x101000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 12);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0xfff);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x1000a4);
  __ LoadFromOffset(kLoadUnsignedHalfword, R2, R4, 0x101000);
  __ LoadFromOffset(kLoadUnsignedHalfword, R4, R4, 0x101000);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 12);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x3fc);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x400a4);
  __ LoadFromOffset(kLoadWordPair, R2, R4, 0x40400);
  __ LoadFromOffset(kLoadWordPair, R4, R4, 0x40400);

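  // r12 normally sits in the VIXL scratch register pool; exclude it while it is
  // used as the base register below, then return it to the pool.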
  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
  temps.Exclude(R12);
  __ LoadFromOffset(kLoadWord, R0, R12, 12);  // 32-bit because of R12.
  temps.Include(R12);
  __ LoadFromOffset(kLoadWord, R2, R4, 0xa4 - 0x100000);

  __ LoadFromOffset(kLoadSignedByte, R2, R4, 12);
  __ LoadFromOffset(kLoadUnsignedByte, R2, R4, 12);
  __ LoadFromOffset(kLoadSignedHalfword, R2, R4, 12);

  EmitAndCheck(&assembler, "VixlLoadFromOffset");
}

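// Mirror of VixlLoadFromOffset for StoreToOffset(), covering the same offset
// ranges and access sizes.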
TEST_F(ArmVIXLAssemblerTest, VixlStoreToOffset) {
  __ StoreToOffset(kStoreWord, R2, R4, 12);
  __ StoreToOffset(kStoreWord, R2, R4, 0xfff);
  __ StoreToOffset(kStoreWord, R2, R4, 0x1000);
  __ StoreToOffset(kStoreWord, R2, R4, 0x1000a4);
  __ StoreToOffset(kStoreWord, R2, R4, 0x101000);
  __ StoreToOffset(kStoreWord, R4, R4, 0x101000);
  __ StoreToOffset(kStoreHalfword, R2, R4, 12);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0xfff);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x1000a4);
  __ StoreToOffset(kStoreHalfword, R2, R4, 0x101000);
  __ StoreToOffset(kStoreHalfword, R4, R4, 0x101000);
  __ StoreToOffset(kStoreWordPair, R2, R4, 12);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x3fc);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x400a4);
  __ StoreToOffset(kStoreWordPair, R2, R4, 0x40400);
  __ StoreToOffset(kStoreWordPair, R4, R4, 0x40400);

  vixl::aarch32::UseScratchRegisterScope temps(assembler.asm_.GetVIXLAssembler());
  temps.Exclude(R12);
  __ StoreToOffset(kStoreWord, R0, R12, 12);  // 32-bit because of R12.
  temps.Include(R12);
  __ StoreToOffset(kStoreWord, R2, R4, 0xa4 - 0x100000);

  __ StoreToOffset(kStoreByte, R2, R4, 12);

  EmitAndCheck(&assembler, "VixlStoreToOffset");
}

#undef __
#endif  // ENABLE_VIXL_TEST
}  // namespace arm
}  // namespace art