    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
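    /*
     * The instruction is format 23x: the first code unit is AA|op (the
     * opcode byte is already in rINST), and the second code unit, fetched
     * below, packs the two source registers as CC|BB.
     */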
    FETCH r0, 1                         @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG r0, r2                     @ r0<- vBB (array object)
    GET_VREG r1, r3                     @ r1<- vCC (requested index)
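    /*
     * vregs have parallel reference ("shadow") slots for precise GC; the
     * destination pair will hold raw 64-bit data, so its shadow slots are
     * zeroed up front so the collector never mistakes them for references.
     */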
    CLEAR_SHADOW_PAIR r9, r2, r3        @ Zero out the shadow regs
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
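    /*
     * The element address is computed before the bounds check so the add
     * overlaps the compare; this is safe because bcs diverts to the error
     * handler before the ldrd below ever dereferences r0.
     */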
    FETCH_ADVANCE_INST 2                @ advance rPC, load rINST
    ldrd    r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET]  @ r2/r3<- vBB[vCC]
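    @ Note: single-register ldrd is pre-UAL "divided" syntax; it loads the
    @ named register and its successor, i.e. r2 and r3 here.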
    VREG_INDEX_TO_ADDR r9, r9           @ r9<- &fp[AA]
    GET_INST_OPCODE ip                  @ extract opcode from rINST
    stmia   r9, {r2-r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE ip                      @ jump to next instruction
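    /*
     * For reference, an illustrative C-like sketch of the handler above
     * (field names here are descriptive, not the real ART offsets/symbols):
     *
     *   if (array == NULL) goto common_errNullObject;
     *   if ((uint32_t)index >= array->length) goto common_errArrayIndex;
     *   fp[AA] = ((uint64_t *)array->data)[index];  // 64-bit element copy
     *
     * GET_INST_OPCODE/GOTO_OPCODE implement threaded dispatch: control
     * jumps straight to the next opcode's handler, computed from the
     * opcode byte held in ip.
     */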
26