    /*
     * Signed 64-bit integer multiply.
     *         a1   a0
     *   x     a3   a2
     *   -------------
     *       a2a1 a2a0
     *       a3a0
     *  a3a1 (<= unused)
     *  ---------------
     *         v1   v0
     */
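    /*
     * A minimal C sketch of the same decomposition (hypothetical names,
     * not part of this file), assuming two's-complement 64-bit values
     * split into unsigned 32-bit halves.  The hi*hi product (a3a1) and
     * any carry out of bit 63 only affect bits >= 64, so they are
     * dropped:
     *
     *   #include <stdint.h>
     *
     *   int64_t mul64_sketch(int64_t vBB, int64_t vCC) {
     *       uint32_t b_lo = (uint32_t)vBB, b_hi = (uint32_t)((uint64_t)vBB >> 32);
     *       uint32_t c_lo = (uint32_t)vCC, c_hi = (uint32_t)((uint64_t)vCC >> 32);
     *       uint64_t lolo = (uint64_t)c_lo * b_lo;    // a2a0, full 64-bit product
     *       uint32_t hi   = (uint32_t)(lolo >> 32)    // hi(a2a0)
     *                     + c_hi * b_lo               // lo(a3a0)
     *                     + c_lo * b_hi;              // lo(a2a1)
     *       return (int64_t)(((uint64_t)hi << 32) | (uint32_t)lolo);
     *   }
     */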
    /* mul-long vAA, vBB, vCC */
    FETCH(a0, 1)                           #  a0 <- CCBB
    and       t0, a0, 255                  #  t0 <- BB
    srl       t1, a0, 8                    #  t1 <- CC
    EAS2(t0, rFP, t0)                      #  t0 <- &fp[BB]
    LOAD64(a0, a1, t0)                     #  a0/a1 <- vBB/vBB+1

    EAS2(t1, rFP, t1)                      #  t1 <- &fp[CC]
    LOAD64(a2, a3, t1)                     #  a2/a3 <- vCC/vCC+1
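
    /*
     * FETCH reads the 16-bit code unit at rPC+1; for this 23x-format
     * instruction its low byte is BB and its high byte is CC.  EAS2 and
     * LOAD64 are macros from the common mterp header (their semantics
     * assumed here from usage): EAS2 forms rFP + (index << 2), the
     * address of a 4-byte vreg slot, and LOAD64 fills a register pair
     * from two consecutive slots, low word first.
     */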

    mul       v1, a3, a0                   #  v1= a3a0
#ifdef MIPS32REVGE6
    mulu      v0, a2, a0                   #  v0= a2a0
    muhu      t1, a2, a0                   #  t1= hi(a2a0)
#else
    multu     a2, a0                       #  HI/LO <- a2a0 (full 64 bits)
    mfhi      t1                           #  t1= hi(a2a0)
    mflo      v0                           #  v0= a2a0
#endif
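    /*
     * MIPS32 Release 6 dropped the HI/LO accumulators together with
     * multu/mfhi/mflo, so the R6 path uses mulu/muhu, which write the
     * low and high halves of the unsigned product directly to GPRs.
     */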
    mul       t0, a2, a1                   #  t0= a2a1
    addu      v1, v1, t1                   #  v1+= hi(a2a0)
    addu      v1, v1, t0                   #  v1= a3a0 + a2a1 + hi(a2a0)

    GET_OPA(a0)                            #  a0 <- AA
    FETCH_ADVANCE_INST(2)                  #  advance rPC, load rINST
    b         .L${opcode}_finish
%break

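    /*
     * Per mterp convention (an assumption about the generator), code
     * after %break is emitted outside the fixed-size handler slot; the
     * main fragment above branches here, and SET_VREG64_GOTO folds the
     * dispatch jump through t0 into the 64-bit vreg store.
     */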
.L${opcode}_finish:
    GET_INST_OPCODE(t0)                    #  extract opcode from rINST
    SET_VREG64_GOTO(v0, v1, a0, t0)        #  vAA/vAA+1 <- v0(low)/v1(high)