/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_mips.S"

#include "arch/quick_alloc_entrypoints.S"

    .set noreorder
    .balign 4

    /* Deliver the given exception */
    .extern artDeliverExceptionFromCode
    /* Deliver an exception pending on a thread */
    .extern artDeliverPendingExceptionFromCode

#define ARG_SLOT_SIZE 32    // space for a0-a3 plus 4 more words

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveAllCalleeSaves)
     * Callee-save: $s0-$s8 + $gp + $ra, 11 total + 1 word for Method*
     * Clobbers $t0 and $sp
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_SAVE_ALL_CALLEE_SAVES + ARG_SLOT_SIZE bytes on the stack
     */
.macro SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    addiu $sp, $sp, -96
    .cfi_adjust_cfa_offset 96

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVES != 96)
#error "FRAME_SIZE_SAVE_ALL_CALLEE_SAVES(MIPS) size not as expected."
#endif

    sw $ra, 92($sp)
    .cfi_rel_offset 31, 92
    sw $s8, 88($sp)
    .cfi_rel_offset 30, 88
    sw $gp, 84($sp)
    .cfi_rel_offset 28, 84
    sw $s7, 80($sp)
    .cfi_rel_offset 23, 80
    sw $s6, 76($sp)
    .cfi_rel_offset 22, 76
    sw $s5, 72($sp)
    .cfi_rel_offset 21, 72
    sw $s4, 68($sp)
    .cfi_rel_offset 20, 68
    sw $s3, 64($sp)
    .cfi_rel_offset 19, 64
    sw $s2, 60($sp)
    .cfi_rel_offset 18, 60
    sw $s1, 56($sp)
    .cfi_rel_offset 17, 56
    sw $s0, 52($sp)
    .cfi_rel_offset 16, 52

    SDu $f30, $f31, 44, $sp, $t1
    SDu $f28, $f29, 36, $sp, $t1
    SDu $f26, $f27, 28, $sp, $t1
    SDu $f24, $f25, 20, $sp, $t1
    SDu $f22, $f23, 12, $sp, $t1
    SDu $f20, $f21, 4, $sp, $t1

    # 1 word for holding Method*

    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
    lw $t0, 0($t0)
    lw $t0, RUNTIME_SAVE_ALL_CALLEE_SAVES_METHOD_OFFSET($t0)
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu $sp, $sp, -ARG_SLOT_SIZE                # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsOnly). Restoration assumes non-moving GC.
     * Does not include rSUSPEND or rSELF
     * callee-save: $s2-$s8 + $gp + $ra, 9 total + 2 words padding + 1 word to hold Method*
     * Clobbers $t0 and $sp
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_SAVE_REFS_ONLY + ARG_SLOT_SIZE bytes on the stack
     */
.macro SETUP_SAVE_REFS_ONLY_FRAME
    addiu $sp, $sp, -48
    .cfi_adjust_cfa_offset 48

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_ONLY != 48)
#error "FRAME_SIZE_SAVE_REFS_ONLY(MIPS) size not as expected."
#endif

    sw $ra, 44($sp)
    .cfi_rel_offset 31, 44
    sw $s8, 40($sp)
    .cfi_rel_offset 30, 40
    sw $gp, 36($sp)
    .cfi_rel_offset 28, 36
    sw $s7, 32($sp)
    .cfi_rel_offset 23, 32
    sw $s6, 28($sp)
    .cfi_rel_offset 22, 28
    sw $s5, 24($sp)
    .cfi_rel_offset 21, 24
    sw $s4, 20($sp)
    .cfi_rel_offset 20, 20
    sw $s3, 16($sp)
    .cfi_rel_offset 19, 16
    sw $s2, 12($sp)
    .cfi_rel_offset 18, 12
    # 2 words for alignment and bottom word will hold Method*

    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
    lw $t0, 0($t0)
    lw $t0, RUNTIME_SAVE_REFS_ONLY_METHOD_OFFSET($t0)
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu $sp, $sp, -ARG_SLOT_SIZE                # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm

.macro RESTORE_SAVE_REFS_ONLY_FRAME
    addiu $sp, $sp, ARG_SLOT_SIZE                 # remove argument slots on the stack
    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
    lw $ra, 44($sp)
    .cfi_restore 31
    lw $s8, 40($sp)
    .cfi_restore 30
    lw $gp, 36($sp)
    .cfi_restore 28
    lw $s7, 32($sp)
    .cfi_restore 23
    lw $s6, 28($sp)
    .cfi_restore 22
    lw $s5, 24($sp)
    .cfi_restore 21
    lw $s4, 20($sp)
    .cfi_restore 20
    lw $s3, 16($sp)
    .cfi_restore 19
    lw $s2, 12($sp)
    .cfi_restore 18
    addiu $sp, $sp, 48
    .cfi_adjust_cfa_offset -48
.endm

.macro RESTORE_SAVE_REFS_ONLY_FRAME_AND_RETURN
    RESTORE_SAVE_REFS_ONLY_FRAME
    jalr $zero, $ra
    nop
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs).
     * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
     *              (26 total + 1 word padding + method*)
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    addiu $sp, $sp, -112
    .cfi_adjust_cfa_offset 112

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_REFS_AND_ARGS != 112)
#error "FRAME_SIZE_SAVE_REFS_AND_ARGS(MIPS) size not as expected."
#endif

    sw $ra, 108($sp)
    .cfi_rel_offset 31, 108
    sw $s8, 104($sp)
    .cfi_rel_offset 30, 104
    sw $gp, 100($sp)
    .cfi_rel_offset 28, 100
    sw $s7, 96($sp)
    .cfi_rel_offset 23, 96
    sw $s6, 92($sp)
    .cfi_rel_offset 22, 92
    sw $s5, 88($sp)
    .cfi_rel_offset 21, 88
    sw $s4, 84($sp)
    .cfi_rel_offset 20, 84
    sw $s3, 80($sp)
    .cfi_rel_offset 19, 80
    sw $s2, 76($sp)
    .cfi_rel_offset 18, 76
    sw $t1, 72($sp)
    .cfi_rel_offset 9, 72
    sw $t0, 68($sp)
    .cfi_rel_offset 8, 68
    sw $a3, 64($sp)
    .cfi_rel_offset 7, 64
    sw $a2, 60($sp)
    .cfi_rel_offset 6, 60
    sw $a1, 56($sp)
    .cfi_rel_offset 5, 56
    SDu $f18, $f19, 48, $sp, $t8
    SDu $f16, $f17, 40, $sp, $t8
    SDu $f14, $f15, 32, $sp, $t8
    SDu $f12, $f13, 24, $sp, $t8
    SDu $f10, $f11, 16, $sp, $t8
    SDu $f8, $f9, 8, $sp, $t8
    # bottom will hold Method*
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
     * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
     *              (26 total + 1 word padding + method*)
     * Clobbers $t0 and $sp
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME
    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
    lw $t0, 0($t0)
    lw $t0, RUNTIME_SAVE_REFS_AND_ARGS_METHOD_OFFSET($t0)
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu $sp, $sp, -ARG_SLOT_SIZE                # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveRefsAndArgs). Restoration assumes non-moving GC.
     * callee-save: $a1-$a3, $t0-$t1, $s2-$s8, $gp, $ra, $f8-$f19
     *              (26 total + 1 word padding + method*)
     * Clobbers $sp
     * Uses $a0 as the Method* and loads it into the bottom of the stack.
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_SAVE_REFS_AND_ARGS + ARG_SLOT_SIZE bytes on the stack
     */
.macro SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
    SETUP_SAVE_REFS_AND_ARGS_FRAME_REGISTERS_ONLY
    sw $a0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu $sp, $sp, -ARG_SLOT_SIZE                # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm

.macro RESTORE_SAVE_REFS_AND_ARGS_FRAME
    addiu $sp, $sp, ARG_SLOT_SIZE                 # remove argument slots on the stack
    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE
    lw $ra, 108($sp)
    .cfi_restore 31
    lw $s8, 104($sp)
    .cfi_restore 30
    lw $gp, 100($sp)
    .cfi_restore 28
    lw $s7, 96($sp)
    .cfi_restore 23
    lw $s6, 92($sp)
    .cfi_restore 22
    lw $s5, 88($sp)
    .cfi_restore 21
    lw $s4, 84($sp)
    .cfi_restore 20
    lw $s3, 80($sp)
    .cfi_restore 19
    lw $s2, 76($sp)
    .cfi_restore 18
    lw $t1, 72($sp)
    .cfi_restore 9
    lw $t0, 68($sp)
    .cfi_restore 8
    lw $a3, 64($sp)
    .cfi_restore 7
    lw $a2, 60($sp)
    .cfi_restore 6
    lw $a1, 56($sp)
    .cfi_restore 5
    LDu $f18, $f19, 48, $sp, $t8
    LDu $f16, $f17, 40, $sp, $t8
    LDu $f14, $f15, 32, $sp, $t8
    LDu $f12, $f13, 24, $sp, $t8
    LDu $f10, $f11, 16, $sp, $t8
    LDu $f8, $f9, 8, $sp, $t8
    addiu $sp, $sp, 112                           # pop frame
    .cfi_adjust_cfa_offset -112
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything),
     * when the $sp has already been decremented by FRAME_SIZE_SAVE_EVERYTHING.
     * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp, $ra, $f0-$f31;
     *              28(GPR) + 32(FPR) + 3 words for padding and 1 word for Method*
     * Clobbers $t0 and $t1.
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
     * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
     */
.macro SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_EVERYTHING != 256)
#error "FRAME_SIZE_SAVE_EVERYTHING(MIPS) size not as expected."
#endif

    sw $ra, 252($sp)
    .cfi_rel_offset 31, 252
    sw $fp, 248($sp)
    .cfi_rel_offset 30, 248
    sw $gp, 244($sp)
    .cfi_rel_offset 28, 244
    sw $t9, 240($sp)
    .cfi_rel_offset 25, 240
    sw $t8, 236($sp)
    .cfi_rel_offset 24, 236
    sw $s7, 232($sp)
    .cfi_rel_offset 23, 232
    sw $s6, 228($sp)
    .cfi_rel_offset 22, 228
    sw $s5, 224($sp)
    .cfi_rel_offset 21, 224
    sw $s4, 220($sp)
    .cfi_rel_offset 20, 220
    sw $s3, 216($sp)
    .cfi_rel_offset 19, 216
    sw $s2, 212($sp)
    .cfi_rel_offset 18, 212
    sw $s1, 208($sp)
    .cfi_rel_offset 17, 208
    sw $s0, 204($sp)
    .cfi_rel_offset 16, 204
    sw $t7, 200($sp)
    .cfi_rel_offset 15, 200
    sw $t6, 196($sp)
    .cfi_rel_offset 14, 196
    sw $t5, 192($sp)
    .cfi_rel_offset 13, 192
    sw $t4, 188($sp)
    .cfi_rel_offset 12, 188
    sw $t3, 184($sp)
    .cfi_rel_offset 11, 184
    sw $t2, 180($sp)
    .cfi_rel_offset 10, 180
    sw $t1, 176($sp)
    .cfi_rel_offset 9, 176
    sw $t0, 172($sp)
    .cfi_rel_offset 8, 172
    sw $a3, 168($sp)
    .cfi_rel_offset 7, 168
    sw $a2, 164($sp)
    .cfi_rel_offset 6, 164
    sw $a1, 160($sp)
    .cfi_rel_offset 5, 160
    sw $a0, 156($sp)
    .cfi_rel_offset 4, 156
    sw $v1, 152($sp)
    .cfi_rel_offset 3, 152
    sw $v0, 148($sp)
    .cfi_rel_offset 2, 148

    // Set up $gp, clobbering $ra and using the branch delay slot for a useful instruction.
    bal 1f
    .set push
    .set noat
    sw $at, 144($sp)
    .cfi_rel_offset 1, 144
    .set pop
1:
    .cpload $ra

    SDu $f30, $f31, 136, $sp, $t1
    SDu $f28, $f29, 128, $sp, $t1
    SDu $f26, $f27, 120, $sp, $t1
    SDu $f24, $f25, 112, $sp, $t1
    SDu $f22, $f23, 104, $sp, $t1
    SDu $f20, $f21, 96, $sp, $t1
    SDu $f18, $f19, 88, $sp, $t1
    SDu $f16, $f17, 80, $sp, $t1
    SDu $f14, $f15, 72, $sp, $t1
    SDu $f12, $f13, 64, $sp, $t1
    SDu $f10, $f11, 56, $sp, $t1
    SDu $f8, $f9, 48, $sp, $t1
    SDu $f6, $f7, 40, $sp, $t1
    SDu $f4, $f5, 32, $sp, $t1
    SDu $f2, $f3, 24, $sp, $t1
    SDu $f0, $f1, 16, $sp, $t1

    # 3 words padding and 1 word for holding Method*

    lw $t0, %got(_ZN3art7Runtime9instance_E)($gp)
    lw $t0, 0($t0)
    lw $t0, RUNTIME_SAVE_EVERYTHING_METHOD_OFFSET($t0)
    sw $t0, 0($sp)                                # Place Method* at bottom of stack.
    sw $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)  # Place sp in Thread::Current()->top_quick_frame.
    addiu $sp, $sp, -ARG_SLOT_SIZE                # reserve argument slots on the stack
    .cfi_adjust_cfa_offset ARG_SLOT_SIZE
.endm

    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveEverything).
     * Callee-save: $at, $v0-$v1, $a0-$a3, $t0-$t7, $s0-$s7, $t8-$t9, $gp, $fp, $ra, $f0-$f31;
     *              28(GPR) + 32(FPR) + 3 words for padding and 1 word for Method*
     * Clobbers $t0 and $t1.
     * Allocates ARG_SLOT_SIZE bytes at the bottom of the stack for arg slots.
     * Reserves FRAME_SIZE_SAVE_EVERYTHING + ARG_SLOT_SIZE bytes on the stack.
     * This macro sets up $gp; entrypoints using it should start with ENTRY_NO_GP.
     */
.macro SETUP_SAVE_EVERYTHING_FRAME
    addiu $sp, $sp, -(FRAME_SIZE_SAVE_EVERYTHING)
    .cfi_adjust_cfa_offset (FRAME_SIZE_SAVE_EVERYTHING)
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
.endm

.macro RESTORE_SAVE_EVERYTHING_FRAME
    addiu $sp, $sp, ARG_SLOT_SIZE                 # remove argument slots on the stack
    .cfi_adjust_cfa_offset -ARG_SLOT_SIZE

    LDu $f30, $f31, 136, $sp, $t1
    LDu $f28, $f29, 128, $sp, $t1
    LDu $f26, $f27, 120, $sp, $t1
    LDu $f24, $f25, 112, $sp, $t1
    LDu $f22, $f23, 104, $sp, $t1
    LDu $f20, $f21, 96, $sp, $t1
    LDu $f18, $f19, 88, $sp, $t1
    LDu $f16, $f17, 80, $sp, $t1
    LDu $f14, $f15, 72, $sp, $t1
    LDu $f12, $f13, 64, $sp, $t1
    LDu $f10, $f11, 56, $sp, $t1
    LDu $f8, $f9, 48, $sp, $t1
    LDu $f6, $f7, 40, $sp, $t1
    LDu $f4, $f5, 32, $sp, $t1
    LDu $f2, $f3, 24, $sp, $t1
    LDu $f0, $f1, 16, $sp, $t1

    lw $ra, 252($sp)
    .cfi_restore 31
    lw $fp, 248($sp)
    .cfi_restore 30
    lw $gp, 244($sp)
    .cfi_restore 28
    lw $t9, 240($sp)
    .cfi_restore 25
    lw $t8, 236($sp)
    .cfi_restore 24
    lw $s7, 232($sp)
    .cfi_restore 23
    lw $s6, 228($sp)
    .cfi_restore 22
    lw $s5, 224($sp)
    .cfi_restore 21
    lw $s4, 220($sp)
    .cfi_restore 20
    lw $s3, 216($sp)
    .cfi_restore 19
    lw $s2, 212($sp)
    .cfi_restore 18
    lw $s1, 208($sp)
    .cfi_restore 17
    lw $s0, 204($sp)
    .cfi_restore 16
    lw $t7, 200($sp)
    .cfi_restore 15
    lw $t6, 196($sp)
    .cfi_restore 14
    lw $t5, 192($sp)
    .cfi_restore 13
    lw $t4, 188($sp)
    .cfi_restore 12
    lw $t3, 184($sp)
    .cfi_restore 11
    lw $t2, 180($sp)
    .cfi_restore 10
    lw $t1, 176($sp)
    .cfi_restore 9
    lw $t0, 172($sp)
    .cfi_restore 8
    lw $a3, 168($sp)
    .cfi_restore 7
    lw $a2, 164($sp)
    .cfi_restore 6
    lw $a1, 160($sp)
    .cfi_restore 5
    lw $a0, 156($sp)
    .cfi_restore 4
    lw $v1, 152($sp)
    .cfi_restore 3
    lw $v0, 148($sp)
    .cfi_restore 2
    .set push
    .set noat
    lw $at, 144($sp)
    .cfi_restore 1
    .set pop

    addiu $sp, $sp, 256                           # pop frame
    .cfi_adjust_cfa_offset -256
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_.
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME             # save callee saves for throw
    la $t9, artDeliverPendingExceptionFromCode
    jalr $zero, $t9                               # artDeliverPendingExceptionFromCode(Thread*)
    move $a0, rSELF                               # pass Thread::Current
.endm

.macro RETURN_IF_NO_EXCEPTION
    lw $t0, THREAD_EXCEPTION_OFFSET(rSELF)        # load Thread::Current()->exception_
    RESTORE_SAVE_REFS_ONLY_FRAME
    bnez $t0, 1f                                  # deliver the exception if one is pending
    nop
    jalr $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_IF_ZERO
    RESTORE_SAVE_REFS_ONLY_FRAME
    bnez $v0, 1f                                  # success?
    nop
    jalr $zero, $ra                               # return on success
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
    RESTORE_SAVE_REFS_ONLY_FRAME
    beqz $v0, 1f                                  # success?
    nop
    jalr $zero, $ra                               # return on success
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm

    /*
     * On stack replacement stub.
     * On entry:
     *   a0 = stack to copy
     *   a1 = size of stack
     *   a2 = pc to call
     *   a3 = JValue* result
     *   [sp + 16] = shorty
     *   [sp + 20] = thread
     */
ENTRY art_quick_osr_stub
    // Save callee general purpose registers, RA and GP.
    addiu $sp, $sp, -48
    .cfi_adjust_cfa_offset 48
    sw $ra, 44($sp)
    .cfi_rel_offset 31, 44
    sw $s8, 40($sp)
    .cfi_rel_offset 30, 40
    sw $gp, 36($sp)
    .cfi_rel_offset 28, 36
    sw $s7, 32($sp)
    .cfi_rel_offset 23, 32
    sw $s6, 28($sp)
    .cfi_rel_offset 22, 28
    sw $s5, 24($sp)
    .cfi_rel_offset 21, 24
    sw $s4, 20($sp)
    .cfi_rel_offset 20, 20
    sw $s3, 16($sp)
    .cfi_rel_offset 19, 16
    sw $s2, 12($sp)
    .cfi_rel_offset 18, 12
    sw $s1, 8($sp)
    .cfi_rel_offset 17, 8
    sw $s0, 4($sp)
    .cfi_rel_offset 16, 4

    move $s8, $sp                   # Save the stack pointer
    move $s7, $a1                   # Save size of stack
    move $s6, $a2                   # Save the pc to call
    lw rSELF, 48+20($sp)            # Save managed thread pointer into rSELF
    addiu $t0, $sp, -12             # Reserve space for stack pointer,
                                    # JValue* result, and ArtMethod* slot.
    srl $t0, $t0, 4                 # Align stack pointer to 16 bytes
    sll $sp, $t0, 4                 # Update stack pointer
    sw $s8, 4($sp)                  # Save old stack pointer
    sw $a3, 8($sp)                  # Save JValue* result
    sw $zero, 0($sp)                # Store null for ArtMethod* at bottom of frame
    subu $sp, $a1                   # Reserve space for callee stack
    move $a2, $a1
    move $a1, $a0
    move $a0, $sp
    la $t9, memcpy
    jalr $t9                        # memcpy (dest a0, src a1, bytes a2)
    addiu $sp, $sp, -16             # make space for argument slots for memcpy
    bal .Losr_entry                 # Call the method
    addiu $sp, $sp, 16              # restore stack after memcpy
    lw $a2, 8($sp)                  # Restore JValue* result
    lw $sp, 4($sp)                  # Restore saved stack pointer
    lw $a0, 48+16($sp)              # load shorty
    lbu $a0, 0($a0)                 # load return type
    li $a1, 'D'                     # put char 'D' into a1
    beq $a0, $a1, .Losr_fp_result   # Test if result type char == 'D'
    li $a1, 'F'                     # put char 'F' into a1
    beq $a0, $a1, .Losr_fp_result   # Test if result type char == 'F'
    nop
    sw $v0, 0($a2)
    b .Losr_exit
    sw $v1, 4($a2)                  # store v0/v1 into result
.Losr_fp_result:
    SDu $f0, $f1, 0, $a2, $t0       # store f0/f1 into result
.Losr_exit:
    lw $ra, 44($sp)
    .cfi_restore 31
    lw $s8, 40($sp)
    .cfi_restore 30
    lw $gp, 36($sp)
    .cfi_restore 28
    lw $s7, 32($sp)
    .cfi_restore 23
    lw $s6, 28($sp)
    .cfi_restore 22
    lw $s5, 24($sp)
    .cfi_restore 21
    lw $s4, 20($sp)
    .cfi_restore 20
    lw $s3, 16($sp)
    .cfi_restore 19
    lw $s2, 12($sp)
    .cfi_restore 18
    lw $s1, 8($sp)
    .cfi_restore 17
    lw $s0, 4($sp)
    .cfi_restore 16
    jalr $zero, $ra
    addiu $sp, $sp, 48
    .cfi_adjust_cfa_offset -48
.Losr_entry:
    addiu $s7, $s7, -4
    addu $t0, $s7, $sp
    move $t9, $s6
    jalr $zero, $t9
    sw $ra, 0($t0)                  # Store RA per the compiler ABI
END art_quick_osr_stub

    /*
     * On entry $a0 is uint32_t* gprs_ and $a1 is uint32_t* fprs_
     * FIXME: just guessing about the shape of the jmpbuf. Where will pc be?
     */
ENTRY art_quick_do_long_jump
    LDu $f0, $f1, 0*8, $a1, $t1
    LDu $f2, $f3, 1*8, $a1, $t1
    LDu $f4, $f5, 2*8, $a1, $t1
    LDu $f6, $f7, 3*8, $a1, $t1
    LDu $f8, $f9, 4*8, $a1, $t1
    LDu $f10, $f11, 5*8, $a1, $t1
    LDu $f12, $f13, 6*8, $a1, $t1
    LDu $f14, $f15, 7*8, $a1, $t1
    LDu $f16, $f17, 8*8, $a1, $t1
    LDu $f18, $f19, 9*8, $a1, $t1
    LDu $f20, $f21, 10*8, $a1, $t1
    LDu $f22, $f23, 11*8, $a1, $t1
    LDu $f24, $f25, 12*8, $a1, $t1
    LDu $f26, $f27, 13*8, $a1, $t1
    LDu $f28, $f29, 14*8, $a1, $t1
    LDu $f30, $f31, 15*8, $a1, $t1

    .set push
    .set nomacro
    .set noat
    lw $at, 4($a0)
    .set pop
    lw $v0, 8($a0)
    lw $v1, 12($a0)
    lw $a1, 20($a0)
    lw $a2, 24($a0)
    lw $a3, 28($a0)
    lw $t0, 32($a0)
    lw $t1, 36($a0)
    lw $t2, 40($a0)
    lw $t3, 44($a0)
    lw $t4, 48($a0)
    lw $t5, 52($a0)
    lw $t6, 56($a0)
    lw $t7, 60($a0)
    lw $s0, 64($a0)
    lw $s1, 68($a0)
    lw $s2, 72($a0)
    lw $s3, 76($a0)
    lw $s4, 80($a0)
    lw $s5, 84($a0)
    lw $s6, 88($a0)
    lw $s7, 92($a0)
    lw $t8, 96($a0)
    lw $t9, 100($a0)
    lw $gp, 112($a0)
    lw $sp, 116($a0)
    lw $fp, 120($a0)
    lw $ra, 124($a0)
    lw $a0, 16($a0)
    move $v0, $zero                 # clear result registers v0 and v1 (in branch delay slot)
    jalr $zero, $t9                 # do long jump
    move $v1, $zero
END art_quick_do_long_jump

    /*
     * Called by managed code, saves most registers (forms basis of long jump context) and passes
     * the bottom of the stack. artDeliverExceptionFromCode will place the callee save Method* at
     * the bottom of the thread. On entry a0 holds Throwable*
     */
ENTRY art_quick_deliver_exception
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la $t9, artDeliverExceptionFromCode
    jalr $zero, $t9                 # artDeliverExceptionFromCode(Throwable*, Thread*)
    move $a1, rSELF                 # pass Thread::Current
END art_quick_deliver_exception

    /*
     * Called by managed code to create and deliver a NullPointerException
     */
    .extern artThrowNullPointerExceptionFromCode
ENTRY_NO_GP art_quick_throw_null_pointer_exception
    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
    SETUP_SAVE_EVERYTHING_FRAME
    la $t9, artThrowNullPointerExceptionFromCode
    jalr $zero, $t9                 # artThrowNullPointerExceptionFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current
END art_quick_throw_null_pointer_exception


    /*
     * Call installed by a signal handler to create and deliver a NullPointerException.
     */
    .extern artThrowNullPointerExceptionFromSignal
ENTRY_NO_GP_CUSTOM_CFA art_quick_throw_null_pointer_exception_from_signal, FRAME_SIZE_SAVE_EVERYTHING
    SETUP_SAVE_EVERYTHING_FRAME_DECREMENTED_SP
    # Retrieve the fault address from the padding where the signal handler stores it.
    lw $a0, (ARG_SLOT_SIZE + __SIZEOF_POINTER__)($sp)
    la $t9, artThrowNullPointerExceptionFromSignal
    jalr $zero, $t9                 # artThrowNullPointerExceptionFromSignal(uintptr_t, Thread*)
    move $a1, rSELF                 # pass Thread::Current
END art_quick_throw_null_pointer_exception_from_signal

    /*
     * Called by managed code to create and deliver an ArithmeticException
     */
    .extern artThrowDivZeroFromCode
ENTRY_NO_GP art_quick_throw_div_zero
    SETUP_SAVE_EVERYTHING_FRAME
    la $t9, artThrowDivZeroFromCode
    jalr $zero, $t9                 # artThrowDivZeroFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current
END art_quick_throw_div_zero

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException
     */
    .extern artThrowArrayBoundsFromCode
ENTRY_NO_GP art_quick_throw_array_bounds
    // Note that setting up $gp does not rely on $t9 here, so branching here directly is OK,
    // even after clobbering any registers we don't need to preserve, such as $gp or $t0.
    SETUP_SAVE_EVERYTHING_FRAME
    la $t9, artThrowArrayBoundsFromCode
    jalr $zero, $t9                 # artThrowArrayBoundsFromCode(index, limit, Thread*)
    move $a2, rSELF                 # pass Thread::Current
END art_quick_throw_array_bounds

    /*
     * Called by managed code to create and deliver a StringIndexOutOfBoundsException
     * as if thrown from a call to String.charAt().
     */
    .extern artThrowStringBoundsFromCode
ENTRY_NO_GP art_quick_throw_string_bounds
    SETUP_SAVE_EVERYTHING_FRAME
    la $t9, artThrowStringBoundsFromCode
    jalr $zero, $t9                 # artThrowStringBoundsFromCode(index, limit, Thread*)
    move $a2, rSELF                 # pass Thread::Current
END art_quick_throw_string_bounds

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
    .extern artThrowStackOverflowFromCode
ENTRY art_quick_throw_stack_overflow
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la $t9, artThrowStackOverflowFromCode
    jalr $zero, $t9                 # artThrowStackOverflowFromCode(Thread*)
    move $a0, rSELF                 # pass Thread::Current
END art_quick_throw_stack_overflow

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/$a0 with the target Method*, arg0/$a0 will contain
     * the method_idx. This wrapper will save arg1-arg3, and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/$a1.
     *
     * The helper will attempt to locate the target and return a 64-bit result in $v0/$v1 consisting
     * of the target Method* in $v0 and method->code_ in $v1.
     *
     * If unsuccessful, the helper will return null/null. There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving $ra
     * pointing back to the original caller.
     */
.macro INVOKE_TRAMPOLINE_BODY cxx_name
    .extern \cxx_name
    SETUP_SAVE_REFS_AND_ARGS_FRAME  # save callee saves in case allocation triggers GC
    move $a2, rSELF                 # pass Thread::Current
    la $t9, \cxx_name
    jalr $t9                        # (method_idx, this, Thread*, $sp)
    addiu $a3, $sp, ARG_SLOT_SIZE   # pass $sp (remove arg slots)
    move $a0, $v0                   # save target Method*
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    beqz $v0, 1f
    move $t9, $v1                   # save $v0->code_
    jalr $zero, $t9
    nop
1:
    DELIVER_PENDING_EXCEPTION
.endm
.macro INVOKE_TRAMPOLINE c_name, cxx_name
ENTRY \c_name
    INVOKE_TRAMPOLINE_BODY \cxx_name
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck

// Each of the following macros expands into four instructions or 16 bytes.
// They are used to build indexable "tables" of code.

.macro LOAD_WORD_TO_REG reg, next_arg, index_reg, label
    lw $\reg, -4($\next_arg)        # next_arg points to argument after the current one (offset is 4)
    b \label
    addiu $\index_reg, 16
    .balign 16
.endm

.macro LOAD_LONG_TO_REG reg1, reg2, next_arg, index_reg, next_index, label
    lw $\reg1, -8($\next_arg)       # next_arg points to argument after the current one (offset is 8)
    lw $\reg2, -4($\next_arg)
    b \label
    li $\index_reg, \next_index
    .balign 16
.endm

.macro LOAD_FLOAT_TO_REG reg, next_arg, index_reg, label
    lwc1 $\reg, -4($\next_arg)      # next_arg points to argument after the current one (offset is 4)
    b \label
    addiu $\index_reg, 16
    .balign 16
.endm

#if defined(__mips_isa_rev) && __mips_isa_rev > 2
// LDu expands into 3 instructions for 64-bit FPU, so index_reg cannot be updated here.
.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
    .set reorder                    # force use of the branch delay slot
    LDu $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
                                               # (offset is 8)
    b \label
    .set noreorder
    .balign 16
.endm
#else
// LDu expands into 2 instructions for 32-bit FPU, so index_reg is updated here.
.macro LOAD_DOUBLE_TO_REG reg1, reg2, next_arg, index_reg, tmp, label
    LDu $\reg1, $\reg2, -8, $\next_arg, $\tmp  # next_arg points to argument after the current one
                                               # (offset is 8)
    b \label
    addiu $\index_reg, 16
    .balign 16
.endm
#endif

.macro LOAD_END index_reg, next_index, label
    b \label
    li $\index_reg, \next_index
    .balign 16
.endm

#define SPILL_SIZE 32

    /*
     * Invocation stub for quick code.
     * On entry:
     *   a0 = method pointer
     *   a1 = argument array or null for no argument methods
     *   a2 = size of argument array in bytes
     *   a3 = (managed) thread pointer
     *   [sp + 16] = JValue* result
     *   [sp + 20] = shorty
     */
ENTRY art_quick_invoke_stub
    sw $a0, 0($sp)                  # save out a0
    addiu $sp, $sp, -SPILL_SIZE     # spill s0, s1, fp, ra and gp
    .cfi_adjust_cfa_offset SPILL_SIZE
    sw $gp, 16($sp)
    sw $ra, 12($sp)
    .cfi_rel_offset 31, 12
    sw $fp, 8($sp)
    .cfi_rel_offset 30, 8
    sw $s1, 4($sp)
    .cfi_rel_offset 17, 4
    sw $s0, 0($sp)
    .cfi_rel_offset 16, 0
    move $fp, $sp                   # save sp in fp
    .cfi_def_cfa_register 30
    move $s1, $a3                   # move managed thread pointer into s1
    addiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset s0 to suspend check interval
    addiu $t0, $a2, 4               # create space for ArtMethod* in frame.
    subu $t0, $sp, $t0              # reserve & align *stack* to 16 bytes:
    srl $t0, $t0, 4                 # native calling convention only aligns to 8B,
    sll $sp, $t0, 4                 # so we have to ensure ART 16B alignment ourselves.
    addiu $a0, $sp, 4               # pass stack pointer + ArtMethod* as dest for memcpy
    la $t9, memcpy
    jalr $t9                        # (dest, src, bytes)
    addiu $sp, $sp, -16             # make space for argument slots for memcpy
    addiu $sp, $sp, 16              # restore stack after memcpy
    lw $gp, 16($fp)                 # restore $gp
    lw $a0, SPILL_SIZE($fp)         # restore ArtMethod*
    lw $a1, 4($sp)                  # a1 = this*
    addiu $t8, $sp, 8               # t8 = pointer to the current argument (skip ArtMethod* and this*)
    li $t6, 0                       # t6 = gpr_index = 0 (corresponds to A2; A0 and A1 are skipped)
    li $t7, 0                       # t7 = fp_index = 0
    lw $t9, 20 + SPILL_SIZE($fp)    # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
                                    # as the $fp is SPILL_SIZE bytes below the $sp on entry)
    addiu $t9, 1                    # t9 = shorty + 1 (skip 1 for return type)

    // Load the base addresses of tabInt ... tabDouble.
    // We will use the register indices (gpr_index, fp_index) to branch.
    // Note that the indices are scaled by 16, so they can be added to the bases directly.
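    // For example (derived from the tables at the end of this stub): with gpr_index
    // equal to 0, an integer argument jumps to the first tabInt entry and is loaded
    // into $a2, leaving gpr_index at 16; the next integer then lands in $a3, and so
    // on, until a LOAD_END entry marks the registers of that kind as exhausted and
    // later arguments are only picked up by the callee from the stack copy that
    // memcpy created above.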
#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
    lapc $t2, tabInt
    lapc $t3, tabLong
    lapc $t4, tabSingle
    lapc $t5, tabDouble
#else
    bltzal $zero, tabBase           # nal
    addiu $t2, $ra, %lo(tabInt - tabBase)
tabBase:
    addiu $t3, $ra, %lo(tabLong - tabBase)
    addiu $t4, $ra, %lo(tabSingle - tabBase)
    addiu $t5, $ra, %lo(tabDouble - tabBase)
#endif

loop:
    lbu $ra, 0($t9)                 # ra = shorty[i]
    beqz $ra, loopEnd               # finish getting args when shorty[i] == '\0'
    addiu $t9, 1

    addiu $ra, -'J'
    beqz $ra, isLong                # branch if arg type char == 'J'
    addiu $ra, 'J' - 'D'
    beqz $ra, isDouble              # branch if arg type char == 'D'
    addiu $ra, 'D' - 'F'
    beqz $ra, isSingle              # branch if arg type char == 'F'

    addu $ra, $t2, $t6
    jalr $zero, $ra
    addiu $t8, 4                    # next_arg = curr_arg + 4

isLong:
    addu $ra, $t3, $t6
    jalr $zero, $ra
    addiu $t8, 8                    # next_arg = curr_arg + 8

isSingle:
    addu $ra, $t4, $t7
    jalr $zero, $ra
    addiu $t8, 4                    # next_arg = curr_arg + 4

isDouble:
    addu $ra, $t5, $t7
#if defined(__mips_isa_rev) && __mips_isa_rev > 2
    addiu $t7, 16                   # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
#endif
    jalr $zero, $ra
    addiu $t8, 8                    # next_arg = curr_arg + 8

loopEnd:
    lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
    jalr $t9                        # call the method
    sw $zero, 0($sp)                # store null for ArtMethod* at bottom of frame
    move $sp, $fp                   # restore the stack
    lw $s0, 0($sp)
    .cfi_restore 16
    lw $s1, 4($sp)
    .cfi_restore 17
    lw $fp, 8($sp)
    .cfi_restore 30
    lw $ra, 12($sp)
    .cfi_restore 31
    addiu $sp, $sp, SPILL_SIZE
    .cfi_adjust_cfa_offset -SPILL_SIZE
    lw $t0, 16($sp)                 # get result pointer
    lw $t1, 20($sp)                 # get shorty
    lb $t1, 0($t1)                  # get result type char
    li $t2, 'D'                     # put char 'D' into t2
    beq $t1, $t2, 5f                # branch if result type char == 'D'
    li $t3, 'F'                     # put char 'F' into t3
    beq $t1, $t3, 5f                # branch if result type char == 'F'
    sw $v0, 0($t0)                  # store the result
    jalr $zero, $ra
    sw $v1, 4($t0)                  # store the other half of the result
5:
    SDu $f0, $f1, 0, $t0, $t1       # store floating point result
    jalr $zero, $ra
    nop

    // Note that gpr_index is kept within the range of tabInt and tabLong
    // and fp_index is kept within the range of tabSingle and tabDouble.
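    // Worked example (following the tables below): for a shorty of "VIJF" (void
    // return; int, long, float arguments) the loop loads the int into $a2
    // (gpr_index 0 -> 16), the long into the $t0/$t1 pair (the tabLong entry for
    // gpr_index 16 skips $a3 and keeps the pair together, gpr_index -> 4*16), and
    // the float into $f8 (fp_index 0 -> 16); $a1 already holds "this".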
    .balign 16
tabInt:
    LOAD_WORD_TO_REG a2, t8, t6, loop             # a2 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG a3, t8, t6, loop             # a3 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t0, t8, t6, loop             # t0 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t1, t8, t6, loop             # t1 = current argument, gpr_index += 16
    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
tabLong:
    LOAD_LONG_TO_REG a2, a3, t8, t6, 2*16, loop   # a2_a3 = curr_arg, gpr_index = 2*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop   # t0_t1 = curr_arg, gpr_index = 4*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 4*16, loop   # t0_t1 = curr_arg, gpr_index = 4*16
    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
    LOAD_END t6, 4*16, loop                       # no more GPR args, gpr_index = 4*16
tabSingle:
    LOAD_FLOAT_TO_REG f8, t8, t7, loop            # f8 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f10, t8, t7, loop           # f10 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f12, t8, t7, loop           # f12 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f14, t8, t7, loop           # f14 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f16, t8, t7, loop           # f16 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f18, t8, t7, loop           # f18 = curr_arg, fp_index += 16
    LOAD_END t7, 6*16, loop                       # no more FPR args, fp_index = 6*16
tabDouble:
    LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loop   # f8_f9 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loop # f10_f11 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loop # f12_f13 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loop # f14_f15 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loop # f16_f17 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loop # f18_f19 = curr_arg; if FPU32, fp_index += 16
    LOAD_END t7, 6*16, loop                       # no more FPR args, fp_index = 6*16
END art_quick_invoke_stub

    /*
     * Invocation static stub for quick code.
     * On entry:
     *   a0 = method pointer
     *   a1 = argument array or null for no argument methods
     *   a2 = size of argument array in bytes
     *   a3 = (managed) thread pointer
     *   [sp + 16] = JValue* result
     *   [sp + 20] = shorty
     */
ENTRY art_quick_invoke_static_stub
    sw $a0, 0($sp)                  # save out a0
    addiu $sp, $sp, -SPILL_SIZE     # spill s0, s1, fp, ra and gp
    .cfi_adjust_cfa_offset SPILL_SIZE
    sw $gp, 16($sp)
    sw $ra, 12($sp)
    .cfi_rel_offset 31, 12
    sw $fp, 8($sp)
    .cfi_rel_offset 30, 8
    sw $s1, 4($sp)
    .cfi_rel_offset 17, 4
    sw $s0, 0($sp)
    .cfi_rel_offset 16, 0
    move $fp, $sp                   # save sp in fp
    .cfi_def_cfa_register 30
    move $s1, $a3                   # move managed thread pointer into s1
    addiu $s0, $zero, SUSPEND_CHECK_INTERVAL  # reset s0 to suspend check interval
    addiu $t0, $a2, 4               # create space for ArtMethod* in frame.
    subu $t0, $sp, $t0              # reserve & align *stack* to 16 bytes:
    srl $t0, $t0, 4                 # native calling convention only aligns to 8B,
    sll $sp, $t0, 4                 # so we have to ensure ART 16B alignment ourselves.
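    # In effect: $sp = ($sp - ($a2 + 4)) & ~0xF, i.e. room for the copied argument
    # array plus the ArtMethod* slot, rounded down to a 16-byte boundary.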
    addiu $a0, $sp, 4               # pass stack pointer + ArtMethod* as dest for memcpy
    la $t9, memcpy
    jalr $t9                        # (dest, src, bytes)
    addiu $sp, $sp, -16             # make space for argument slots for memcpy
    addiu $sp, $sp, 16              # restore stack after memcpy
    lw $gp, 16($fp)                 # restore $gp
    lw $a0, SPILL_SIZE($fp)         # restore ArtMethod*
    addiu $t8, $sp, 4               # t8 = pointer to the current argument (skip ArtMethod*)
    li $t6, 0                       # t6 = gpr_index = 0 (corresponds to A1; A0 is skipped)
    li $t7, 0                       # t7 = fp_index = 0
    lw $t9, 20 + SPILL_SIZE($fp)    # get shorty (20 is offset from the $sp on entry + SPILL_SIZE
                                    # as the $fp is SPILL_SIZE bytes below the $sp on entry)
    addiu $t9, 1                    # t9 = shorty + 1 (skip 1 for return type)

    // Load the base addresses of tabIntS ... tabDoubleS.
    // We will use the register indices (gpr_index, fp_index) to branch.
    // Note that the indices are scaled by 16, so they can be added to the bases directly.
#if defined(__mips_isa_rev) && __mips_isa_rev >= 6
    lapc $t2, tabIntS
    lapc $t3, tabLongS
    lapc $t4, tabSingleS
    lapc $t5, tabDoubleS
#else
    bltzal $zero, tabBaseS          # nal
    addiu $t2, $ra, %lo(tabIntS - tabBaseS)
tabBaseS:
    addiu $t3, $ra, %lo(tabLongS - tabBaseS)
    addiu $t4, $ra, %lo(tabSingleS - tabBaseS)
    addiu $t5, $ra, %lo(tabDoubleS - tabBaseS)
#endif

loopS:
    lbu $ra, 0($t9)                 # ra = shorty[i]
    beqz $ra, loopEndS              # finish getting args when shorty[i] == '\0'
    addiu $t9, 1

    addiu $ra, -'J'
    beqz $ra, isLongS               # branch if arg type char == 'J'
    addiu $ra, 'J' - 'D'
    beqz $ra, isDoubleS             # branch if arg type char == 'D'
    addiu $ra, 'D' - 'F'
    beqz $ra, isSingleS             # branch if arg type char == 'F'

    addu $ra, $t2, $t6
    jalr $zero, $ra
    addiu $t8, 4                    # next_arg = curr_arg + 4

isLongS:
    addu $ra, $t3, $t6
    jalr $zero, $ra
    addiu $t8, 8                    # next_arg = curr_arg + 8

isSingleS:
    addu $ra, $t4, $t7
    jalr $zero, $ra
    addiu $t8, 4                    # next_arg = curr_arg + 4

isDoubleS:
    addu $ra, $t5, $t7
#if defined(__mips_isa_rev) && __mips_isa_rev > 2
    addiu $t7, 16                   # fp_index += 16 didn't fit into LOAD_DOUBLE_TO_REG
#endif
    jalr $zero, $ra
    addiu $t8, 8                    # next_arg = curr_arg + 8

loopEndS:
    lw $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)  # get pointer to the code
    jalr $t9                        # call the method
    sw $zero, 0($sp)                # store null for ArtMethod* at bottom of frame
    move $sp, $fp                   # restore the stack
    lw $s0, 0($sp)
    .cfi_restore 16
    lw $s1, 4($sp)
    .cfi_restore 17
    lw $fp, 8($sp)
    .cfi_restore 30
    lw $ra, 12($sp)
    .cfi_restore 31
    addiu $sp, $sp, SPILL_SIZE
    .cfi_adjust_cfa_offset -SPILL_SIZE
    lw $t0, 16($sp)                 # get result pointer
    lw $t1, 20($sp)                 # get shorty
    lb $t1, 0($t1)                  # get result type char
    li $t2, 'D'                     # put char 'D' into t2
    beq $t1, $t2, 6f                # branch if result type char == 'D'
    li $t3, 'F'                     # put char 'F' into t3
    beq $t1, $t3, 6f                # branch if result type char == 'F'
    sw $v0, 0($t0)                  # store the result
    jalr $zero, $ra
    sw $v1, 4($t0)                  # store the other half of the result
6:
    SDu $f0, $f1, 0, $t0, $t1       # store floating point result
    jalr $zero, $ra
    nop

    // Note that gpr_index is kept within the range of tabIntS and tabLongS
    // and fp_index is kept within the range of tabSingleS and tabDoubleS.
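    // Compared with art_quick_invoke_stub above, the only difference is that there
    // is no "this" to pre-load: integer arguments start at $a1 (five GPR entries in
    // tabIntS instead of four in tabInt), and the first long argument may occupy the
    // $a2/$a3 pair.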
    .balign 16
tabIntS:
    LOAD_WORD_TO_REG a1, t8, t6, loopS            # a1 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG a2, t8, t6, loopS            # a2 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG a3, t8, t6, loopS            # a3 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t0, t8, t6, loopS            # t0 = current argument, gpr_index += 16
    LOAD_WORD_TO_REG t1, t8, t6, loopS            # t1 = current argument, gpr_index += 16
    LOAD_END t6, 5*16, loopS                      # no more GPR args, gpr_index = 5*16
tabLongS:
    LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS  # a2_a3 = curr_arg, gpr_index = 3*16
    LOAD_LONG_TO_REG a2, a3, t8, t6, 3*16, loopS  # a2_a3 = curr_arg, gpr_index = 3*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS  # t0_t1 = curr_arg, gpr_index = 5*16
    LOAD_LONG_TO_REG t0, t1, t8, t6, 5*16, loopS  # t0_t1 = curr_arg, gpr_index = 5*16
    LOAD_END t6, 5*16, loopS                      # no more GPR args, gpr_index = 5*16
    LOAD_END t6, 5*16, loopS                      # no more GPR args, gpr_index = 5*16
tabSingleS:
    LOAD_FLOAT_TO_REG f8, t8, t7, loopS           # f8 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f10, t8, t7, loopS          # f10 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f12, t8, t7, loopS          # f12 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f14, t8, t7, loopS          # f14 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f16, t8, t7, loopS          # f16 = curr_arg, fp_index += 16
    LOAD_FLOAT_TO_REG f18, t8, t7, loopS          # f18 = curr_arg, fp_index += 16
    LOAD_END t7, 6*16, loopS                      # no more FPR args, fp_index = 6*16
tabDoubleS:
    LOAD_DOUBLE_TO_REG f8, f9, t8, t7, ra, loopS    # f8_f9 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f10, f11, t8, t7, ra, loopS  # f10_f11 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f12, f13, t8, t7, ra, loopS  # f12_f13 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f14, f15, t8, t7, ra, loopS  # f14_f15 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f16, f17, t8, t7, ra, loopS  # f16_f17 = curr_arg; if FPU32, fp_index += 16
    LOAD_DOUBLE_TO_REG f18, f19, t8, t7, ra, loopS  # f18_f19 = curr_arg; if FPU32, fp_index += 16
    LOAD_END t7, 6*16, loopS                      # no more FPR args, fp_index = 6*16
END art_quick_invoke_static_stub

#undef SPILL_SIZE

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
    .extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
    lw $a2, 0($sp)                  # pass referrer's Method*
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case exception allocation triggers GC
    la $t9, artHandleFillArrayDataFromCode
    jalr $t9                        # (payload offset, Array*, method, Thread*)
    move $a3, rSELF                 # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_handle_fill_data

    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC.
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    beqz $a0, art_quick_throw_null_pointer_exception
    nop
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case we block
    la $t9, artLockObjectFromCode
    jalr $t9                        # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_lock_object

ENTRY art_quick_lock_object_no_inline
    beqz $a0, art_quick_throw_null_pointer_exception
    nop
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case we block
    la $t9, artLockObjectFromCode
    jalr $t9                        # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_lock_object_no_inline

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    beqz $a0, art_quick_throw_null_pointer_exception
    nop
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case exception allocation triggers GC
    la $t9, artUnlockObjectFromCode
    jalr $t9                        # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_unlock_object

ENTRY art_quick_unlock_object_no_inline
    beqz $a0, art_quick_throw_null_pointer_exception
    nop
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case exception allocation triggers GC
    la $t9, artUnlockObjectFromCode
    jalr $t9                        # (Object* obj, Thread*)
    move $a1, rSELF                 # pass Thread::Current
    RETURN_IF_ZERO
END art_quick_unlock_object_no_inline

    /*
     * Entry from managed code that calls artInstanceOfFromCode and delivers exception on failure.
     */
    .extern artInstanceOfFromCode
    .extern artThrowClassCastExceptionForObject
ENTRY art_quick_check_instance_of
    addiu $sp, $sp, -32
    .cfi_adjust_cfa_offset 32
    sw $gp, 16($sp)
    sw $ra, 12($sp)
    .cfi_rel_offset 31, 12
    sw $t9, 8($sp)
    sw $a1, 4($sp)
    sw $a0, 0($sp)
    la $t9, artInstanceOfFromCode
    jalr $t9
    addiu $sp, $sp, -16             # reserve argument slots on the stack
    addiu $sp, $sp, 16
    lw $gp, 16($sp)
    beqz $v0, .Lthrow_class_cast_exception
    lw $ra, 12($sp)
    jalr $zero, $ra
    addiu $sp, $sp, 32
    .cfi_adjust_cfa_offset -32
.Lthrow_class_cast_exception:
    lw $t9, 8($sp)
    lw $a1, 4($sp)
    lw $a0, 0($sp)
    addiu $sp, $sp, 32
    .cfi_adjust_cfa_offset -32
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la $t9, artThrowClassCastExceptionForObject
    jalr $zero, $t9                 # artThrowClassCastExceptionForObject(Object*, Class*, Thread*)
    move $a2, rSELF                 # pass Thread::Current
END art_quick_check_instance_of

    /*
     * Restore rReg's value from offset($sp) if rReg is not the same as rExclude.
     * nReg is the register number for rReg.
     */
.macro POP_REG_NE rReg, nReg, offset, rExclude
    .ifnc \rReg, \rExclude
    lw \rReg, \offset($sp)          # restore rReg
    .cfi_restore \nReg
    .endif
.endm

    /*
     * Macro to insert read barrier, only used in art_quick_aput_obj.
     * rObj and rDest are registers, offset is a defined literal such as MIRROR_OBJECT_CLASS_OFFSET.
     * TODO: When read barrier has a fast path, add heap unpoisoning support for the fast path.
     */
.macro READ_BARRIER rDest, rObj, offset
#ifdef USE_READ_BARRIER
    # saved registers used in art_quick_aput_obj: a0-a2, t0-t1, t9, ra. 8 words for 16B alignment.
    addiu $sp, $sp, -32
    .cfi_adjust_cfa_offset 32
    sw $ra, 28($sp)
    .cfi_rel_offset 31, 28
    sw $t9, 24($sp)
    .cfi_rel_offset 25, 24
    sw $t1, 20($sp)
    .cfi_rel_offset 9, 20
    sw $t0, 16($sp)
    .cfi_rel_offset 8, 16
    sw $a2, 8($sp)                  # padding slot at offset 12 (padding can be any slot in the 32B)
    .cfi_rel_offset 6, 8
    sw $a1, 4($sp)
    .cfi_rel_offset 5, 4
    sw $a0, 0($sp)
    .cfi_rel_offset 4, 0

    # move $a0, \rRef                 # pass ref in a0 (no-op for now since parameter ref is unused)
    .ifnc \rObj, $a1
    move $a1, \rObj                 # pass rObj
    .endif
    addiu $a2, $zero, \offset       # pass offset
    la $t9, artReadBarrierSlow
    jalr $t9                        # artReadBarrierSlow(ref, rObj, offset)
    addiu $sp, $sp, -16             # Use branch delay slot to reserve argument slots on the stack
                                    # before the call to artReadBarrierSlow.
    addiu $sp, $sp, 16              # restore stack after call to artReadBarrierSlow
    # No need to unpoison return value in v0, artReadBarrierSlow() would do the unpoisoning.
    move \rDest, $v0                # save return value in rDest
                                    # (rDest cannot be v0 in art_quick_aput_obj)

    lw $a0, 0($sp)                  # restore registers except rDest
                                    # (rDest can only be t0 or t1 in art_quick_aput_obj)
    .cfi_restore 4
    lw $a1, 4($sp)
    .cfi_restore 5
    lw $a2, 8($sp)
    .cfi_restore 6
    POP_REG_NE $t0, 8, 16, \rDest
    POP_REG_NE $t1, 9, 20, \rDest
    lw $t9, 24($sp)
    .cfi_restore 25
    lw $ra, 28($sp)                 # restore $ra
    .cfi_restore 31
    addiu $sp, $sp, 32
    .cfi_adjust_cfa_offset -32
#else
    lw \rDest, \offset(\rObj)
    UNPOISON_HEAP_REF \rDest
#endif  // USE_READ_BARRIER
.endm

#ifdef USE_READ_BARRIER
    .extern artReadBarrierSlow
#endif
ENTRY art_quick_aput_obj
    beqz $a2, .Ldo_aput_null
    nop
    READ_BARRIER $t0, $a0, MIRROR_OBJECT_CLASS_OFFSET
    READ_BARRIER $t1, $a2, MIRROR_OBJECT_CLASS_OFFSET
    READ_BARRIER $t0, $t0, MIRROR_CLASS_COMPONENT_TYPE_OFFSET
    bne $t1, $t0, .Lcheck_assignability  # value's type == array's component type - trivial assignability
    nop
.Ldo_aput:
    sll $a1, $a1, 2
    add $t0, $a0, $a1
    POISON_HEAP_REF $a2
    sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
    lw $t0, THREAD_CARD_TABLE_OFFSET(rSELF)
    srl $t1, $a0, 7
    add $t1, $t1, $t0
    sb $t0, ($t1)
    jalr $zero, $ra
    nop
.Ldo_aput_null:
    sll $a1, $a1, 2
    add $t0, $a0, $a1
    sw $a2, MIRROR_OBJECT_ARRAY_DATA_OFFSET($t0)
    jalr $zero, $ra
    nop
.Lcheck_assignability:
    addiu $sp, $sp, -32
    .cfi_adjust_cfa_offset 32
    sw $ra, 28($sp)
    .cfi_rel_offset 31, 28
    sw $gp, 16($sp)
    sw $t9, 12($sp)
    sw $a2, 8($sp)
    sw $a1, 4($sp)
    sw $a0, 0($sp)
    move $a1, $t1
    move $a0, $t0
    la $t9, artIsAssignableFromCode
    jalr $t9                        # (Class*, Class*)
    addiu $sp, $sp, -16             # reserve argument slots on the stack
    addiu $sp, $sp, 16
    lw $ra, 28($sp)
    lw $gp, 16($sp)
    lw $t9, 12($sp)
    lw $a2, 8($sp)
    lw $a1, 4($sp)
    lw $a0, 0($sp)
    addiu $sp, 32
    .cfi_adjust_cfa_offset -32
    bnez $v0, .Ldo_aput
    nop
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    move $a1, $a2
    la $t9, artThrowArrayStoreException
    jalr $zero, $t9                 # artThrowArrayStoreException(Class*, Class*, Thread*)
    move $a2, rSELF                 # pass Thread::Current
END art_quick_aput_obj

// Macros taking advantage of code similarities for downcalls.
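// For orientation, each wrapper below just forwards its register arguments, adds
// Thread::Current() as the last argument, and interprets the C entrypoint's result
// via the \return macro.  As a hedged sketch only (the shape is inferred from the
// argument comments in the macros below, not from the actual declarations), a
// TWO_ARG_REF_DOWNCALL setter corresponds to a C entrypoint along the lines of:
//
//   extern "C" int artSet32StaticFromCompiledCode(uint32_t field_idx,
//                                                 uint32_t new_value,
//                                                 Thread* self);
//
// where a zero return means success (checked by RETURN_IF_ZERO), while getters
// return the value and signal failure through a pending exception
// (checked by RETURN_IF_NO_EXCEPTION).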
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9                        # (field_idx, Thread*)
    move $a1, rSELF                 # pass Thread::Current
    \return                         # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9                        # (field_idx, Object*, Thread*) or
                                    # (field_idx, new_val, Thread*)
    move $a2, rSELF                 # pass Thread::Current
    \return                         # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9                        # (field_idx, Object*, new_val, Thread*)
    move $a3, rSELF                 # pass Thread::Current
    \return                         # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm

.macro FOUR_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9                        # (field_idx, Object*, 64-bit new_val, Thread*) or
                                    # (field_idx, 64-bit new_val, Thread*)
                                    # Note that a 64-bit new_val needs to be aligned with
                                    # an even-numbered register, hence A1 may be skipped
                                    # for new_val to reside in A2-A3.
    sw rSELF, 16($sp)               # pass Thread::Current
    \return                         # RETURN_IF_NO_EXCEPTION or RETURN_IF_ZERO
END \name
.endm

    /*
     * Called by managed code to resolve a static/instance field and load/store a value.
     */
ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCompiledCode, RETURN_IF_NO_EXCEPTION
TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCompiledCode, RETURN_IF_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCompiledCode, RETURN_IF_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCompiledCode, RETURN_IF_ZERO
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCompiledCode, RETURN_IF_ZERO
FOUR_ARG_REF_DOWNCALL art_quick_set64_static, artSet64StaticFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCompiledCode, RETURN_IF_ZERO
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCompiledCode, RETURN_IF_ZERO
FOUR_ARG_REF_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCompiledCode, RETURN_IF_ZERO

// Macro to facilitate adding new allocation entrypoints.
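// Each expansion below is a complete ENTRY/END stub: it sets up the kSaveRefsOnly
// frame, loads Thread::Current() into the next free argument position (a register,
// or the first out-slot for the four-argument case) in the branch delay slot of the
// call through $t9, and then finishes with the supplied \return macro.  For example,
// ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode,
// RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER (used further below) expands to the
// art_quick_resolve_string stub.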
.macro ONE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9
    move $a1, rSELF                 # pass Thread::Current
    \return
END \name
.endm

.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9
    move $a2, rSELF                 # pass Thread::Current
    \return
END \name
.endm

.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9
    move $a3, rSELF                 # pass Thread::Current
    \return
END \name
.endm

.macro FOUR_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_SAVE_REFS_ONLY_FRAME      # save callee saves in case of GC
    la $t9, \entrypoint
    jalr $t9
    sw rSELF, 16($sp)               # pass Thread::Current
    \return
END \name
.endm

// Generate the allocation entrypoints for each allocator.
GENERATE_ALLOC_ENTRYPOINTS_FOR_EACH_ALLOCATOR

// A hand-written override for:
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc)
// GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc)
.macro ART_QUICK_ALLOC_OBJECT_ROSALLOC c_name, cxx_name
ENTRY \c_name
    # Fast path rosalloc allocation
    # a0: type
    # s1: Thread::Current
    # -----------------------------
    # t1: object size
    # t2: rosalloc run
    # t3: thread stack top offset
    # t4: thread stack bottom offset
    # v0: free list head
    #
    # t5, t6 : temps
    lw $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)  # Check if thread local allocation
    lw $t4, THREAD_LOCAL_ALLOC_STACK_END_OFFSET($s1)  # stack has any room left.
    bgeu $t3, $t4, .Lslow_path_\c_name

    lw $t1, MIRROR_CLASS_OBJECT_SIZE_ALLOC_FAST_PATH_OFFSET($a0)  # Load object size (t1).
    li $t5, ROSALLOC_MAX_THREAD_LOCAL_BRACKET_SIZE    # Check if size is for a thread local
                                                      # allocation. Also does the
                                                      # initialized and finalizable checks.
    bgtu $t1, $t5, .Lslow_path_\c_name

    # Compute the rosalloc bracket index from the size. Since the size is already aligned we can
    # combine the two shifts together.
    srl $t1, $t1, (ROSALLOC_BRACKET_QUANTUM_SIZE_SHIFT - POINTER_SIZE_SHIFT)

    addu $t2, $t1, $s1
    lw $t2, (THREAD_ROSALLOC_RUNS_OFFSET - __SIZEOF_POINTER__)($t2)  # Load rosalloc run (t2).

    # Load the free list head (v0).
    # NOTE: this will be the return val.
    lw $v0, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)
    beqz $v0, .Lslow_path_\c_name
    nop

    # Load the next pointer of the head and update the list head with the next pointer.
    lw $t5, ROSALLOC_SLOT_NEXT_OFFSET($v0)
    sw $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_HEAD_OFFSET)($t2)

    # Store the class pointer in the header. This also overwrites the first pointer. The offsets are
    # asserted to match.

#if ROSALLOC_SLOT_NEXT_OFFSET != MIRROR_OBJECT_CLASS_OFFSET
#error "Class pointer needs to overwrite next pointer."
#endif

    POISON_HEAP_REF $a0
    sw      $a0, MIRROR_OBJECT_CLASS_OFFSET($v0)

    # Push the new object onto the thread local allocation stack and increment the thread local
    # allocation stack top.
    sw      $v0, 0($t3)
    addiu   $t3, $t3, COMPRESSED_REFERENCE_SIZE
    sw      $t3, THREAD_LOCAL_ALLOC_STACK_TOP_OFFSET($s1)

    # Decrement the size of the free list.
    lw      $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)
    addiu   $t5, $t5, -1
    sw      $t5, (ROSALLOC_RUN_FREE_LIST_OFFSET + ROSALLOC_RUN_FREE_LIST_SIZE_OFFSET)($t2)

    sync                              # Fence.

    jalr    $zero, $ra
    nop

.Lslow_path_\c_name:
    SETUP_SAVE_REFS_ONLY_FRAME
    la      $t9, \cxx_name
    jalr    $t9
    move    $a1, $s1                  # Pass self as argument.
    RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER
END \c_name
.endm

ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_resolved_rosalloc, artAllocObjectFromCodeResolvedRosAlloc
ART_QUICK_ALLOC_OBJECT_ROSALLOC art_quick_alloc_object_initialized_rosalloc, artAllocObjectFromCodeInitializedRosAlloc

GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB)
GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_region_tlab, RegionTLAB)

    /*
     * Entry from managed code to resolve a string; this stub will allocate a String and deliver an
     * exception on error. On success the String is returned. A0 holds the string index. The
     * fast-path check for a hit in the strings cache has already been performed.
     */
ONE_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

    /*
     * Entry from managed code when static storage is uninitialized; this stub will run the class
     * initializer and deliver an exception on error. On success the static storage base is
     * returned.
     */
ONE_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

    /*
     * Entry from managed code when the dex cache misses for a type_idx.
     */
ONE_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

    /*
     * Entry from managed code when type_idx needs to be checked for access and the dex cache may
     * also miss.
     */
ONE_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO_OR_DELIVER

    /*
     * Called by managed code when the value in rSUSPEND has been decremented to 0.
     */
    .extern artTestSuspendFromCode
ENTRY_NO_GP art_quick_test_suspend
    lh      rSUSPEND, THREAD_FLAGS_OFFSET(rSELF)
    bnez    rSUSPEND, 1f
    addiu   rSUSPEND, $zero, SUSPEND_CHECK_INTERVAL         # reset rSUSPEND to SUSPEND_CHECK_INTERVAL
    jalr    $zero, $ra
    nop
1:
    SETUP_SAVE_EVERYTHING_FRAME       # save everything for stack crawl
    la      $t9, artTestSuspendFromCode
    jalr    $t9                       # (Thread*)
    move    $a0, rSELF
    RESTORE_SAVE_EVERYTHING_FRAME
    jalr    $zero, $ra
    nop
END art_quick_test_suspend

    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * a0 holds the proxy method; a1, a2 and a3 may contain arguments.
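     * The stub passes Thread::Current and the address of the saved frame to
     * artQuickProxyInvokeHandler and delivers any pending exception on return.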
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
    move    $a2, rSELF                # pass Thread::Current
    la      $t9, artQuickProxyInvokeHandler
    jalr    $t9                       # (Method* proxy method, receiver, Thread*, SP)
    addiu   $a3, $sp, ARG_SLOT_SIZE   # pass $sp (remove arg slots)
    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF)             # load Thread::Current()->exception_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    bnez    $t7, 1f
    # don't care if $v0 and/or $v1 are modified when the exception branch is taken
    MTD     $v0, $v1, $f0, $f1        # move float value to return value
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler

    /*
     * Called to resolve an imt conflict.
     * a0 is the conflict ArtMethod.
     * t7 is a hidden argument that holds the target interface method's dex method index.
     *
     * Note that this stub writes to a0, t7 and t8.
     */
ENTRY art_quick_imt_conflict_trampoline
    lw      $t8, 0($sp)                                     # Load referrer.
    lw      $t8, ART_METHOD_DEX_CACHE_METHODS_OFFSET_32($t8)  # Load dex cache methods array.
    sll     $t7, $t7, POINTER_SIZE_SHIFT                    # Calculate offset.
    addu    $t7, $t8, $t7                                   # Add offset to base.
    lw      $t7, 0($t7)                                     # Load interface method.
    lw      $a0, ART_METHOD_JNI_OFFSET_32($a0)              # Load ImtConflictTable.

.Limt_table_iterate:
    lw      $t8, 0($a0)                                     # Load next entry in ImtConflictTable.
    # Branch if found.
    beq     $t8, $t7, .Limt_table_found
    nop
    # If the entry is null, the interface method is not in the ImtConflictTable.
    beqz    $t8, .Lconflict_trampoline
    nop
    # Iterate over the entries of the ImtConflictTable.
    b       .Limt_table_iterate
    addiu   $a0, $a0, 2 * __SIZEOF_POINTER__                # Iterate to the next entry.

.Limt_table_found:
    # We successfully hit an entry in the table. Load the target method and jump to it.
    lw      $a0, __SIZEOF_POINTER__($a0)
    lw      $t9, ART_METHOD_QUICK_CODE_OFFSET_32($a0)
    jalr    $zero, $t9
    nop

.Lconflict_trampoline:
    # Call the runtime stub to populate the ImtConflictTable and jump to the resolved method.
    move    $a0, $t7                  # Load interface method.
    INVOKE_TRAMPOLINE_BODY artInvokeInterfaceTrampoline
END art_quick_imt_conflict_trampoline

    .extern artQuickResolutionTrampoline
ENTRY art_quick_resolution_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    move    $a2, rSELF                # pass Thread::Current
    la      $t9, artQuickResolutionTrampoline
    jalr    $t9                       # (Method* called, receiver, Thread*, SP)
    addiu   $a3, $sp, ARG_SLOT_SIZE   # pass $sp (remove arg slots)
    beqz    $v0, 1f
    lw      $a0, ARG_SLOT_SIZE($sp)   # load resolved method to $a0
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    move    $t9, $v0                  # code pointer must be in $t9 to generate the global pointer
    jalr    $zero, $t9                # tail call to method
    nop
1:
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline

    .extern artQuickGenericJniTrampoline
    .extern artQuickGenericJniEndTrampoline
ENTRY art_quick_generic_jni_trampoline
    SETUP_SAVE_REFS_AND_ARGS_FRAME_WITH_METHOD_IN_A0
    move    $s8, $sp                  # save $sp to $s8
    move    $s3, $gp                  # save $gp to $s3

    # prepare for call to artQuickGenericJniTrampoline(Thread*, SP)
    move    $a0, rSELF                # pass Thread::Current
    addiu   $a1, $sp, ARG_SLOT_SIZE   # pass $sp (remove arg slots)
    la      $t9, artQuickGenericJniTrampoline
    jalr    $t9                       # (Thread*, SP)
    addiu   $sp, $sp, -5120           # reserve space on the stack

    # The C call will have registered the complete save-frame on success.
    # The result of the call is:
    # v0: ptr to native code, 0 on error.
    # v1: ptr to the bottom of the used area of the alloca, can restore stack till here.
    beq     $v0, $zero, 2f            # check entry error
    move    $t9, $v0                  # save the code ptr
    move    $sp, $v1                  # release part of the alloca

    # Load parameters from stack into registers
    lw      $a0, 0($sp)
    lw      $a1, 4($sp)
    lw      $a2, 8($sp)
    lw      $a3, 12($sp)

    # artQuickGenericJniTrampoline sets bit 0 of the native code address to 1
    # when the first two arguments are both single precision floats. This lets
    # us extract them properly from the stack and load into floating point
    # registers.
    MTD     $a0, $a1, $f12, $f13
    andi    $t0, $t9, 1
    xor     $t9, $t9, $t0
    bnez    $t0, 1f
    mtc1    $a1, $f14
    MTD     $a2, $a3, $f14, $f15

1:
    jalr    $t9                       # native call
    nop
    addiu   $sp, $sp, 16              # remove arg slots

    move    $gp, $s3                  # restore $gp from $s3

    # result sign extension is handled in C code
    # prepare for call to artQuickGenericJniEndTrampoline(Thread*, result, result_f)
    move    $a0, rSELF                # pass Thread::Current
    move    $a2, $v0                  # pass result
    move    $a3, $v1
    addiu   $sp, $sp, -24             # reserve arg slots
    la      $t9, artQuickGenericJniEndTrampoline
    jalr    $t9
    s.d     $f0, 16($sp)              # pass result_f

    lw      $t0, THREAD_EXCEPTION_OFFSET(rSELF)             # load Thread::Current()->exception_
    bne     $t0, $zero, 2f            # check for pending exceptions

    move    $sp, $s8                  # tear down the alloca

    # tear down the callee-save frame
    RESTORE_SAVE_REFS_AND_ARGS_FRAME

    MTD     $v0, $v1, $f0, $f1        # move float value to return value
    jalr    $zero, $ra
    nop

2:
    lw      $sp, THREAD_TOP_QUICK_FRAME_OFFSET(rSELF)
    # This will create a new save-all frame, required by the runtime.
    DELIVER_PENDING_EXCEPTION
END art_quick_generic_jni_trampoline

    .extern artQuickToInterpreterBridge
ENTRY art_quick_to_interpreter_bridge
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    move    $a1, rSELF                # pass Thread::Current
    la      $t9, artQuickToInterpreterBridge
    jalr    $t9                       # (Method* method, Thread*, SP)
    addiu   $a2, $sp, ARG_SLOT_SIZE   # pass $sp (remove arg slots)
    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF)             # load Thread::Current()->exception_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    bnez    $t7, 1f
    # don't care if $v0 and/or $v1 are modified when the exception branch is taken
    MTD     $v0, $v1, $f0, $f1        # move float value to return value
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge

    .extern artInvokeObsoleteMethod
ENTRY art_invoke_obsolete_method_stub
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la      $t9, artInvokeObsoleteMethod
    jalr    $t9                       # (Method* method, Thread* self)
    move    $a1, rSELF                # pass Thread::Current
END art_invoke_obsolete_method_stub

    /*
     * Routine that intercepts method calls and returns.
     */
    .extern artInstrumentationMethodEntryFromCode
    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    sw      $a0, 28($sp)              # save arg0 in free arg slot
    move    $a3, $ra                  # pass $ra
    la      $t9, artInstrumentationMethodEntryFromCode
    jalr    $t9                       # (Method*, Object*, Thread*, LR)
    move    $a2, rSELF                # pass Thread::Current
    move    $t9, $v0                  # $t9 holds reference to code
    lw      $a0, 28($sp)              # restore arg0 from free arg slot
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    jalr    $t9                       # call method
    nop
END art_quick_instrumentation_entry
    /* intentional fallthrough */
    .global art_quick_instrumentation_exit
art_quick_instrumentation_exit:
    .cfi_startproc
    addiu   $t9, $ra, 4               # put current address into $t9 to rebuild $gp
    .cpload $t9
    move    $ra, $zero                # link register is to here, so clobber with 0 for later checks

    SETUP_SAVE_REFS_ONLY_FRAME
    addiu   $sp, $sp, -16             # allocate temp storage on the stack
    .cfi_adjust_cfa_offset 16
    sw      $v0, ARG_SLOT_SIZE+12($sp)
    .cfi_rel_offset 2, ARG_SLOT_SIZE+12
    sw      $v1, ARG_SLOT_SIZE+8($sp)
    .cfi_rel_offset 3, ARG_SLOT_SIZE+8
    s.d     $f0, ARG_SLOT_SIZE($sp)
    s.d     $f0, 16($sp)              # pass fpr result
    move    $a2, $v0                  # pass gpr result
    move    $a3, $v1
    addiu   $a1, $sp, ARG_SLOT_SIZE+16  # pass $sp (remove arg slots and temp storage)
    la      $t9, artInstrumentationMethodExitFromCode
    jalr    $t9                       # (Thread*, SP, gpr_res, fpr_res)
    move    $a0, rSELF                # pass Thread::Current
    move    $t9, $v0                  # set aside returned link register
    move    $ra, $v1                  # set link register for deoptimization
    lw      $v0, ARG_SLOT_SIZE+12($sp)  # restore return values
    lw      $v1, ARG_SLOT_SIZE+8($sp)
    l.d     $f0, ARG_SLOT_SIZE($sp)
    jalr    $zero, $t9                # return
    addiu   $sp, $sp, ARG_SLOT_SIZE+FRAME_SIZE_SAVE_REFS_ONLY+16  # restore stack
    .cfi_adjust_cfa_offset -(ARG_SLOT_SIZE+FRAME_SIZE_SAVE_REFS_ONLY+16)
END art_quick_instrumentation_exit

    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVES_FRAME
    la      $t9, artDeoptimize
    jalr    $t9                       # (Thread*)
    move    $a0, rSELF                # pass Thread::Current
END art_quick_deoptimize

    /*
     * Compiled code has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimizeFromCompiledCode
ENTRY art_quick_deoptimize_from_compiled_code
    SETUP_SAVE_EVERYTHING_FRAME
    la      $t9, artDeoptimizeFromCompiledCode
    jalr    $t9                       # (DeoptimizationKind, Thread*)
    move    $a1, rSELF                # pass Thread::Current
END art_quick_deoptimize_from_compiled_code

    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     */
ENTRY_NO_GP art_quick_shl_long
    /* shl-long vAA, vBB, vCC */
    sll     $v0, $a0, $a2             # rlo<- alo << (shift&31)
    not     $v1, $a2                  # rhi<- 31-shift (shift is 5b)
    srl     $a0, 1
    srl     $a0, $v1                  # alo<- alo >> (32-(shift&31))
    sll     $v1, $a1, $a2             # rhi<- ahi << (shift&31)
    andi    $a2, 0x20                 # shift<- shift & 0x20
    beqz    $a2, 1f
    or      $v1, $a0                  # rhi<- rhi | alo

    move    $v1, $v0                  # rhi<- rlo (if shift&0x20)
    move    $v0, $zero                # rlo<- 0 (if shift&0x20)

1:  jalr    $zero, $ra
    nop
END art_quick_shl_long

    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     */
ENTRY_NO_GP art_quick_shr_long
    sra     $v1, $a1, $a2             # rhi<- ahi >> (shift&31)
    srl     $v0, $a0, $a2             # rlo<- alo >> (shift&31)
    sra     $a3, $a1, 31              # $a3<- sign(ah)
    not     $a0, $a2                  # alo<- 31-shift (shift is 5b)
    sll     $a1, 1
    sll     $a1, $a0                  # ahi<- ahi << (32-(shift&31))
    andi    $a2, 0x20                 # shift & 0x20
    beqz    $a2, 1f
    or      $v0, $a1                  # rlo<- rlo | ahi

    move    $v0, $v1                  # rlo<- rhi (if shift&0x20)
    move    $v1, $a3                  # rhi<- sign(ahi) (if shift&0x20)

1:  jalr    $zero, $ra
    nop
END art_quick_shr_long

    /*
     * Long integer shift. This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit. Also, Dalvik requires us to ignore all but the low
     * 6 bits.
     * On entry:
     *   $a0: low word
     *   $a1: high word
     *   $a2: shift count
     */
    /* ushr-long vAA, vBB, vCC */
ENTRY_NO_GP art_quick_ushr_long
    srl     $v1, $a1, $a2             # rhi<- ahi >> (shift&31)
    srl     $v0, $a0, $a2             # rlo<- alo >> (shift&31)
    not     $a0, $a2                  # alo<- 31-shift (shift is 5b)
    sll     $a1, 1
    sll     $a1, $a0                  # ahi<- ahi << (32-(shift&31))
    andi    $a2, 0x20                 # shift & 0x20
    beqz    $a2, 1f
    or      $v0, $a1                  # rlo<- rlo | ahi

    move    $v0, $v1                  # rlo<- rhi (if shift&0x20)
    move    $v1, $zero                # rhi<- 0 (if shift&0x20)

1:  jalr    $zero, $ra
    nop
END art_quick_ushr_long

/* java.lang.String.indexOf(int ch, int fromIndex=0) */
ENTRY_NO_GP art_quick_indexof
/* $a0 holds address of "this" */
/* $a1 holds "ch" */
/* $a2 holds "fromIndex" */
#if (STRING_COMPRESSION_FEATURE)
    lw      $a3, MIRROR_STRING_COUNT_OFFSET($a0)            # 'count' field of this
#else
    lw      $t0, MIRROR_STRING_COUNT_OFFSET($a0)            # this.length()
#endif
    slt     $t1, $a2, $zero           # if fromIndex < 0
#if defined(_MIPS_ARCH_MIPS32R6)
    seleqz  $a2, $a2, $t1             #     fromIndex = 0;
#else
    movn    $a2, $zero, $t1           #     fromIndex = 0;
#endif

#if (STRING_COMPRESSION_FEATURE)
    srl     $t0, $a3, 1               # $a3 holds count (with flag) and $t0 holds actual length
#endif
    subu    $t0, $t0, $a2             # this.length() - fromIndex
    blez    $t0, 6f                   # if this.length()-fromIndex <= 0
    li      $v0, -1                   #     return -1;

#if (STRING_COMPRESSION_FEATURE)
    sll     $a3, $a3, 31              # Extract compression flag.
    beqz    $a3, .Lstring_indexof_compressed
    move    $t2, $a0                  # Save a copy in $t2 to later compute result (in branch delay slot).
#endif
    sll     $v0, $a2, 1               # $a0 += $a2 * 2
    addu    $a0, $a0, $v0             #  "  ditto  "
    move    $v0, $a2                  # Set i to fromIndex.
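    # Uncompressed scan: $a0 points at the current 16-bit char, $t0 counts the chars left
    # to examine, and $v0 tracks the current index (the pointer and index advance in the
    # branch delay slots of the loop below).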
1:
    lhu     $t3, MIRROR_STRING_VALUE_OFFSET($a0)            # if this.charAt(i) == ch
    beq     $t3, $a1, 6f                                    #     return i;
    addu    $a0, $a0, 2               # i++
    subu    $t0, $t0, 1               # this.length() - i
    bnez    $t0, 1b                   # while this.length() - i > 0
    addu    $v0, $v0, 1               # i++

    li      $v0, -1                   # if this.length() - i <= 0
                                      #     return -1;

6:
    j       $ra
    nop

#if (STRING_COMPRESSION_FEATURE)
.Lstring_indexof_compressed:
    addu    $a0, $a0, $a2             # $a0 += $a2

.Lstring_indexof_compressed_loop:
    lbu     $t3, MIRROR_STRING_VALUE_OFFSET($a0)
    beq     $t3, $a1, .Lstring_indexof_compressed_matched
    subu    $t0, $t0, 1
    bgtz    $t0, .Lstring_indexof_compressed_loop
    addu    $a0, $a0, 1

.Lstring_indexof_nomatch:
    jalr    $zero, $ra
    li      $v0, -1                   # return -1;

.Lstring_indexof_compressed_matched:
    jalr    $zero, $ra
    subu    $v0, $a0, $t2             # return (current - start);
#endif
END art_quick_indexof

/* java.lang.String.compareTo(String anotherString) */
ENTRY_NO_GP art_quick_string_compareto
/* $a0 holds address of "this" */
/* $a1 holds address of "anotherString" */
    beq     $a0, $a1, .Lstring_compareto_length_diff        # this and anotherString are the same object
    move    $a3, $a2                  # trick to return 0 (it returns a2 - a3)

#if (STRING_COMPRESSION_FEATURE)
    lw      $t0, MIRROR_STRING_COUNT_OFFSET($a0)            # 'count' field of this
    lw      $t1, MIRROR_STRING_COUNT_OFFSET($a1)            # 'count' field of anotherString
    sra     $a2, $t0, 1               # this.length()
    sra     $a3, $t1, 1               # anotherString.length()
#else
    lw      $a2, MIRROR_STRING_COUNT_OFFSET($a0)            # this.length()
    lw      $a3, MIRROR_STRING_COUNT_OFFSET($a1)            # anotherString.length()
#endif

    MINu    $t2, $a2, $a3
    # $t2 now holds min(this.length(),anotherString.length())

    # while min(this.length(),anotherString.length())-i != 0
    beqz    $t2, .Lstring_compareto_length_diff             # if $t2==0
    nop                               # return (this.length() - anotherString.length())

#if (STRING_COMPRESSION_FEATURE)
    # Differ cases:
    sll     $t3, $t0, 31
    beqz    $t3, .Lstring_compareto_this_is_compressed
    sll     $t3, $t1, 31              # In branch delay slot.
    beqz    $t3, .Lstring_compareto_that_is_compressed
    nop
    b       .Lstring_compareto_both_not_compressed
    nop

.Lstring_compareto_this_is_compressed:
    beqz    $t3, .Lstring_compareto_both_compressed
    nop
    /* If (this->IsCompressed() && that->IsCompressed() == false) */
.Lstring_compareto_loop_comparison_this_compressed:
    lbu     $t0, MIRROR_STRING_VALUE_OFFSET($a0)
    lhu     $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne     $t0, $t1, .Lstring_compareto_char_diff
    addiu   $a0, $a0, 1               # point at this.charAt(i++) - compressed
    subu    $t2, $t2, 1               # new value of min(this.length(),anotherString.length())-i
    bnez    $t2, .Lstring_compareto_loop_comparison_this_compressed
    addiu   $a1, $a1, 2               # point at anotherString.charAt(i++) - uncompressed
    jalr    $zero, $ra
    subu    $v0, $a2, $a3             # return (this.length() - anotherString.length())

.Lstring_compareto_that_is_compressed:
    lhu     $t0, MIRROR_STRING_VALUE_OFFSET($a0)
    lbu     $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne     $t0, $t1, .Lstring_compareto_char_diff
    addiu   $a0, $a0, 2               # point at this.charAt(i++) - uncompressed
    subu    $t2, $t2, 1               # new value of min(this.length(),anotherString.length())-i
    bnez    $t2, .Lstring_compareto_that_is_compressed
    addiu   $a1, $a1, 1               # point at anotherString.charAt(i++) - compressed
    jalr    $zero, $ra
    subu    $v0, $a2, $a3             # return (this.length() - anotherString.length())

.Lstring_compareto_both_compressed:
    lbu     $t0, MIRROR_STRING_VALUE_OFFSET($a0)
    lbu     $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne     $t0, $t1, .Lstring_compareto_char_diff
    addiu   $a0, $a0, 1               # point at this.charAt(i++) - compressed
    subu    $t2, $t2, 1               # new value of min(this.length(),anotherString.length())-i
    bnez    $t2, .Lstring_compareto_both_compressed
    addiu   $a1, $a1, 1               # point at anotherString.charAt(i++) - compressed
    jalr    $zero, $ra
    subu    $v0, $a2, $a3             # return (this.length() - anotherString.length())
#endif

.Lstring_compareto_both_not_compressed:
    lhu     $t0, MIRROR_STRING_VALUE_OFFSET($a0)            # while this.charAt(i) == anotherString.charAt(i)
    lhu     $t1, MIRROR_STRING_VALUE_OFFSET($a1)
    bne     $t0, $t1, .Lstring_compareto_char_diff          # if this.charAt(i) != anotherString.charAt(i)
                                                            #     return (this.charAt(i) - anotherString.charAt(i))
    addiu   $a0, $a0, 2               # point at this.charAt(i++)
    subu    $t2, $t2, 1               # new value of min(this.length(),anotherString.length())-i
    bnez    $t2, .Lstring_compareto_both_not_compressed
    addiu   $a1, $a1, 2               # point at anotherString.charAt(i++)

.Lstring_compareto_length_diff:
    jalr    $zero, $ra
    subu    $v0, $a2, $a3             # return (this.length() - anotherString.length())

.Lstring_compareto_char_diff:
    jalr    $zero, $ra
    subu    $v0, $t0, $t1             # return (this.charAt(i) - anotherString.charAt(i))
END art_quick_string_compareto

    /*
     * Create a function `name` calling the ReadBarrier::Mark routine,
     * getting its argument and returning its result through register
     * `reg`, saving and restoring all caller-save registers.
     */
.macro READ_BARRIER_MARK_REG name, reg
ENTRY \name
    /* TODO: optimizations: mark bit, forwarding. */
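    /* (Presumably this refers to a fast path that tests the lock word's mark bit and checks
     * for a forwarding address before falling back to the runtime call.) */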
    addiu   $sp, $sp, -160            # includes 16 bytes of space for argument registers a0-a3
    .cfi_adjust_cfa_offset 160

    sw      $ra, 156($sp)
    .cfi_rel_offset 31, 156
    sw      $t8, 152($sp)
    .cfi_rel_offset 24, 152
    sw      $t7, 148($sp)
    .cfi_rel_offset 15, 148
    sw      $t6, 144($sp)
    .cfi_rel_offset 14, 144
    sw      $t5, 140($sp)
    .cfi_rel_offset 13, 140
    sw      $t4, 136($sp)
    .cfi_rel_offset 12, 136
    sw      $t3, 132($sp)
    .cfi_rel_offset 11, 132
    sw      $t2, 128($sp)
    .cfi_rel_offset 10, 128
    sw      $t1, 124($sp)
    .cfi_rel_offset 9, 124
    sw      $t0, 120($sp)
    .cfi_rel_offset 8, 120
    sw      $a3, 116($sp)
    .cfi_rel_offset 7, 116
    sw      $a2, 112($sp)
    .cfi_rel_offset 6, 112
    sw      $a1, 108($sp)
    .cfi_rel_offset 5, 108
    sw      $a0, 104($sp)
    .cfi_rel_offset 4, 104
    sw      $v1, 100($sp)
    .cfi_rel_offset 3, 100
    sw      $v0, 96($sp)
    .cfi_rel_offset 2, 96

    la      $t9, artReadBarrierMark

    sdc1    $f18, 88($sp)
    sdc1    $f16, 80($sp)
    sdc1    $f14, 72($sp)
    sdc1    $f12, 64($sp)
    sdc1    $f10, 56($sp)
    sdc1    $f8, 48($sp)
    sdc1    $f6, 40($sp)
    sdc1    $f4, 32($sp)
    sdc1    $f2, 24($sp)

    .ifnc \reg, $a0
    move    $a0, \reg                 # pass obj from `reg` in a0
    .endif
    jalr    $t9                       # v0 <- artReadBarrierMark(obj)
    sdc1    $f0, 16($sp)              # in delay slot

    lw      $ra, 156($sp)
    .cfi_restore 31
    lw      $t8, 152($sp)
    .cfi_restore 24
    lw      $t7, 148($sp)
    .cfi_restore 15
    lw      $t6, 144($sp)
    .cfi_restore 14
    lw      $t5, 140($sp)
    .cfi_restore 13
    lw      $t4, 136($sp)
    .cfi_restore 12
    lw      $t3, 132($sp)
    .cfi_restore 11
    lw      $t2, 128($sp)
    .cfi_restore 10
    lw      $t1, 124($sp)
    .cfi_restore 9
    lw      $t0, 120($sp)
    .cfi_restore 8
    lw      $a3, 116($sp)
    .cfi_restore 7
    lw      $a2, 112($sp)
    .cfi_restore 6
    lw      $a1, 108($sp)
    .cfi_restore 5
    lw      $a0, 104($sp)
    .cfi_restore 4
    lw      $v1, 100($sp)
    .cfi_restore 3

    .ifnc \reg, $v0
    move    \reg, $v0                 # `reg` <- v0
    lw      $v0, 96($sp)
    .cfi_restore 2
    .endif

    ldc1    $f18, 88($sp)
    ldc1    $f16, 80($sp)
    ldc1    $f14, 72($sp)
    ldc1    $f12, 64($sp)
    ldc1    $f10, 56($sp)
    ldc1    $f8, 48($sp)
    ldc1    $f6, 40($sp)
    ldc1    $f4, 32($sp)
    ldc1    $f2, 24($sp)
    ldc1    $f0, 16($sp)

    jalr    $zero, $ra
    addiu   $sp, $sp, 160
    .cfi_adjust_cfa_offset -160
END \name
.endm

// Note that art_quick_read_barrier_mark_regXX corresponds to register XX+1.
// ZERO (register 0) is reserved.
// AT (register 1) is reserved as a temporary/scratch register.
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg01, $v0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg02, $v1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg03, $a0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg04, $a1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg05, $a2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg06, $a3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg07, $t0
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg08, $t1
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg09, $t2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg10, $t3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg11, $t4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg12, $t5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg13, $t6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg14, $t7
// S0 and S1 (registers 16 and 17) are reserved as the suspend check and thread registers.
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg17, $s2
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg18, $s3
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg19, $s4
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg20, $s5
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg21, $s6
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg22, $s7
// T8 and T9 (registers 24 and 25) are reserved as temporary/scratch registers.
// K0, K1, GP, SP (registers 26 - 29) are reserved.
READ_BARRIER_MARK_REG art_quick_read_barrier_mark_reg29, $s8
// RA (register 31) is reserved.

.extern artInvokePolymorphic
ENTRY art_quick_invoke_polymorphic
    SETUP_SAVE_REFS_AND_ARGS_FRAME
    move    $a2, rSELF                # Make $a2 an alias for the current Thread.
    addiu   $a3, $sp, ARG_SLOT_SIZE   # Make $a3 a pointer to the saved frame context.
    sw      $zero, 20($sp)            # Initialize JValue result.
    sw      $zero, 16($sp)
    la      $t9, artInvokePolymorphic
    jalr    $t9                       # (result, receiver, Thread*, context)
    addiu   $a0, $sp, 16              # Make $a0 a pointer to the JValue result
.macro MATCH_RETURN_TYPE c, handler
    li      $t0, \c
    beq     $v0, $t0, \handler
.endm
    MATCH_RETURN_TYPE 'V', .Lcleanup_and_return
    MATCH_RETURN_TYPE 'L', .Lstore_int_result
    MATCH_RETURN_TYPE 'I', .Lstore_int_result
    MATCH_RETURN_TYPE 'J', .Lstore_long_result
    MATCH_RETURN_TYPE 'B', .Lstore_int_result
    MATCH_RETURN_TYPE 'C', .Lstore_char_result
    MATCH_RETURN_TYPE 'D', .Lstore_double_result
    MATCH_RETURN_TYPE 'F', .Lstore_float_result
    MATCH_RETURN_TYPE 'S', .Lstore_int_result
    MATCH_RETURN_TYPE 'Z', .Lstore_boolean_result
.purgem MATCH_RETURN_TYPE
    nop
    b       .Lcleanup_and_return
    nop
.Lstore_boolean_result:
    b       .Lcleanup_and_return
    lbu     $v0, 16($sp)              # Move byte from JValue result to return value register.
.Lstore_char_result:
    b       .Lcleanup_and_return
    lhu     $v0, 16($sp)              # Move char from JValue result to return value register.
.Lstore_double_result:
.Lstore_float_result:
    LDu     $f0, $f1, 16, $sp, $t0    # Move double/float from JValue result to return value register.
    b       .Lcleanup_and_return
    nop
.Lstore_long_result:
    lw      $v1, 20($sp)              # Move upper bits from JValue result to return value register.
    // Fall-through for lower bits.
.Lstore_int_result:
    lw      $v0, 16($sp)              # Move lower bits from JValue result to return value register.
    // Fall-through to clean up and return.
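    # Common exit: check for a pending exception, tear down the frame, and either return
    # or deliver the exception.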
.Lcleanup_and_return:
    lw      $t7, THREAD_EXCEPTION_OFFSET(rSELF)             # Load Thread::Current()->exception_
    RESTORE_SAVE_REFS_AND_ARGS_FRAME
    bnez    $t7, 1f                   # Deliver the pending exception if there is one.
    nop
    jalr    $zero, $ra
    nop
1:
    DELIVER_PENDING_EXCEPTION
END art_quick_invoke_polymorphic