/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/

D | filter_v6.asm |
      32  stmdb sp!, {r4 - r11, lr}
      34  ldr r11, [sp, #40] ; vp8_filter address
      45  ldr r4, [r11] ; load up packed filter coefficients
      46  ldr r5, [r11, #4]
      47  ldr r6, [r11, #8]
      60  ldrb r11, [r0, #-1]
      68  pkhbt r10, r10, r11, lsl #16 ; r11 | r10
      70  pkhbt r11, r11, r9, lsl #16 ; r9 | r11
      74  smlad r8, r11, r5, r8
      75  ldrb r11, [r0, #2]
      [all …]

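The PKHBT/SMLAD pairing above is the core idiom of these ARMv6 filters: PKHBT packs two 16-bit pixels into one 32-bit word, and SMLAD multiplies both halfwords against two packed filter taps and accumulates both products in a single instruction. A minimal scalar model of one SMLAD (the function name is illustrative, not from the source):

    #include <stdint.h>

    /* Model of "smlad r8, r11, r5, r8": dual signed 16x16 multiply of
     * the packed halfwords, both products added to the accumulator. */
    static int32_t smlad_model(uint32_t pixels, uint32_t taps, int32_t acc) {
        int16_t p0 = (int16_t)(pixels & 0xffff);  /* bottom halfword */
        int16_t p1 = (int16_t)(pixels >> 16);     /* top halfword */
        int16_t t0 = (int16_t)(taps & 0xffff);
        int16_t t1 = (int16_t)(taps >> 16);
        return acc + p0 * t0 + p1 * t1;
    }
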
D | loopfilter_v6.asm |
      64  stmdb sp!, {r4 - r11, lr}
      75  ldr r11, [src], pstep ; p1
      92  uqsub8 r8, r10, r11 ; p2 - p1
      93  uqsub8 r10, r11, r10 ; p1 - p2
      99  uqsub8 r6, r11, r12 ; p1 - p0
     101  uqsub8 r7, r12, r11 ; p0 - p1
     109  uqsub8 r6, r11, r10 ; p1 - q1
     110  uqsub8 r7, r10, r11 ; q1 - p1
     111  uqsub8 r11, r12, r9 ; p0 - q0
     115  orr r12, r11, r12 ; abs (p0-q0)
      [all …]

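The UQSUB8 pairs compute the per-byte absolute differences the loop-filter mask needs, without branches: an unsigned saturating subtract clamps negative lanes to zero, so OR-ing the two directions leaves |a - b| in every lane. One lane of that idiom, as a sketch:

    #include <stdint.h>

    static uint8_t abs_diff_u8(uint8_t a, uint8_t b) {
        uint8_t d0 = a > b ? (uint8_t)(a - b) : 0;  /* uqsub8 a, b (one lane) */
        uint8_t d1 = b > a ? (uint8_t)(b - a) : 0;  /* uqsub8 b, a (one lane) */
        return d0 | d1;  /* orr: at least one operand is zero */
    }
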
D | bilinearfilter_v6.asm |
      27  stmdb sp!, {r4 - r11, lr}
      29  ldr r11, [sp, #40] ; vp8_filter address
      39  ldr r5, [r11] ; load up filter coefficients
      44  mov r11, r1 ; save dst_ptr for each row
     102  add r11, r11, #2 ; move over to next column
     103  mov r1, r11
     107  ldmia sp!, {r4 - r11, pc}
     130  add r11, r11, #2 ; move over to next column
     131  mov r1, r11
     135  ldmia sp!, {r4 - r11, pc}
      [all …]

D | dequant_idct_v6.asm |
      21  stmdb sp!, {r4-r11, lr}
      70  smulwt r11, r3, r12
      77  pkhbt r9, r9, r11, lsl #16
      78  ldr r11, [r0], #4
      83  uadd16 r10, r11, r14
      84  usub16 r8, r11, r14
     107  pkhbt r11, r8, r6, lsl #16
     113  uadd16 r10, r11, lr
     114  usub16 lr, r11, lr
     119  smulwt r11, r4, r8
      [all …]

D | sixtappredict8x4_v6.asm |
      27  stmdb sp!, {r4 - r11, lr}
      68  smuad r11, r6, r3 ; vp8_filter[0], vp8_filter[1]
      73  smlad r11, r8, r4, r11 ; vp8_filter[2], vp8_filter[3]
      78  pkhbt r6, r6, r7, lsl #16 ; r11 | r10
      79  smlad r11, r10, r5, r11 ; vp8_filter[4], vp8_filter[5]
      84  add r11, r11, #0x40 ; round_shift_and_clamp
      86  usat r11, #8, r11, asr #7
      88  strh r11, [lr], #20 ; result is transposed and stored, which
      93  movne r11, r6
      99  movne r9, r11
      [all …]

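The "round_shift_and_clamp" lines are the usual six-tap epilogue: VP8's six-tap coefficients sum to 128, so the accumulated sum is biased by 64, shifted right by 7, and saturated to an 8-bit pixel (the ADD followed by USAT #8 with asr #7). A scalar sketch:

    #include <stdint.h>

    static uint8_t round_shift_and_clamp(int32_t sum) {
        sum = (sum + 0x40) >> 7;  /* add r11, r11, #0x40; asr #7 */
        if (sum < 0) sum = 0;     /* usat saturates to [0, 255] */
        if (sum > 255) sum = 255;
        return (uint8_t)sum;
    }
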
D | vp8_variance16x16_armv6.asm |
      33  mov r11, #0 ; initialize sse = 0
      61  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
      66  smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
      85  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
      90  smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
     109  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     114  smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
     135  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     136  smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
     146  str r11, [r6] ; store sse
      [all …]

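Here SMLAD squares two packed pixel differences per instruction to build the sum of squares, while the routine keeps a running sum of differences so the final variance can subtract the mean term. A scalar model of the whole computation (the closing formula matches the usual 16x16 variance in libvpx; the function name is illustrative):

    #include <stdint.h>

    static uint32_t variance16x16_model(const uint8_t *src, int src_stride,
                                        const uint8_t *ref, int ref_stride,
                                        uint32_t *sse) {
        int32_t sum = 0;
        uint32_t sq = 0;
        for (int r = 0; r < 16; r++) {
            for (int c = 0; c < 16; c++) {
                int d = src[r * src_stride + c] - ref[r * ref_stride + c];
                sum += d;
                sq += (uint32_t)(d * d);  /* smlad r11, d, d, r11 */
            }
        }
        *sse = sq;  /* str r11, [r6] */
        return sq - (uint32_t)(((int64_t)sum * sum) >> 8);  /* 256 pixels */
    }
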
D | idct_v6.asm |
      26  stmdb sp!, {r4-r11, lr}
      47  smulbt r11, r5, r12 ; (ip[13] * cospi8sqrt2minus1) >> 16
      58  pkhtb r9, r11, r9, asr #16 ; 13c | 12c
      59  ldr r11, [r0] ; i1 | i0
      65  uadd16 r10, r11, r14 ; a
      66  usub16 r8, r11, r14 ; b
      96  pkhbt r11, r6, r0, lsl #16 ; i0 | i4
     103  uadd16 r10, r11, r9 ; a
     104  usub16 r9, r11, r9 ; b
     110  smulwt r11, r4, r6 ; (ip[3] * sinpi8sqrt2) >> 16
      [all …]

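The comments reference the two fixed-point constants of the VP8 inverse DCT, cospi8sqrt2minus1 = 20091 and sinpi8sqrt2 = 35468, both Q16 (see the scalar reference in vp8/common/idctllm.c); the SMULWT/SMULBT variants perform these Q16 multiplies directly on packed halfwords. The scalar form:

    static const int cospi8sqrt2minus1 = 20091;  /* (cos(pi/8)*sqrt(2) - 1), Q16 */
    static const int sinpi8sqrt2 = 35468;        /* sin(pi/8)*sqrt(2), Q16 */

    static int mul_c(int x) { return x + ((x * cospi8sqrt2minus1) >> 16); }
    static int mul_s(int x) { return (x * sinpi8sqrt2) >> 16; }
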
D | vp8_variance_halfpixvar16x16_h_armv6.asm |
      34  mov r11, #0 ; initialize sse = 0
      66  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
      78  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
      97  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     109  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     128  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     140  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     161  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     162  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     171  str r11, [r6] ; store sse
      [all …]

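Only the SSE accumulation shows in this excerpt; before it, the "_h" variant builds a horizontal half-pel reference. Assuming the usual VP8 half-pel rule (bilinear taps 4,4), that amounts to a rounded per-byte average of each pixel and its right-hand neighbour:

    #include <stdint.h>

    static uint8_t half_pel_avg(uint8_t a, uint8_t b) {
        return (uint8_t)((a + b + 1) >> 1);  /* (4*a + 4*b + 4) >> 3 */
    }
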
D | vp8_variance_halfpixvar16x16_v_armv6.asm |
      34  mov r11, #0 ; initialize sse = 0
      67  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
      79  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
      98  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     110  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     129  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     141  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     162  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     163  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     173  str r11, [r6] ; store sse
      [all …]

D | intra4x4_predict_v6.asm |
      93  uxtb16 r11, r8, ror #8 ; a[3|1]
      95  ssub16 r11, r11, r9 ; a[3|1] - [tl|tl]
     103  sadd16 r2, r4, r11 ; l[0|0] + a[3|1] - [tl|tl]
     108  sadd16 r5, r5, r11 ; l[1|1] + a[3|1] - [tl|tl]
     117  sadd16 r2, r6, r11 ; l[2|2] + a[3|1] - [tl|tl]
     126  sadd16 r5, r7, r11 ; l[3|3] + a[3|1] - [tl|tl]
     142  ldr r11, c00FF00FF
     163  and r9, r11, r9, asr #2
     164  and r4, r11, r4, asr #2
     186  add r11, r6, r7 ; l[2] + l[3]
      [all …]

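The SADD16/SSUB16 lines implement TM prediction two pixels at a time: each output is left[row] + above[col] - top_left, computed in 16-bit lanes (the comments spell the formula out), and the full routine clamps the result back to 0..255. A scalar sketch:

    #include <stdint.h>

    static uint8_t clamp255(int v) { return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v; }

    static void tm_predict4x4(uint8_t *dst, int stride, const uint8_t *above,
                              const uint8_t *left, int top_left) {
        for (int r = 0; r < 4; r++)
            for (int c = 0; c < 4; c++)
                dst[r * stride + c] = clamp255(left[r] + above[c] - top_left);
    }
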
D | vp8_variance_halfpixvar16x16_hv_armv6.asm |
      34  mov r11, #0 ; initialize sse = 0
      77  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
      84  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     119  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     126  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     161  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     167  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     203  smlad r11, r5, r5, r11 ; dual signed multiply, add and accumulate (1)
     205  smlad r11, r7, r7, r11 ; dual signed multiply, add and accumulate (2)
     212  str r11, [r6] ; store sse
      [all …]

D | iwalsh_v6.asm |
      34  qadd16 r11, r4, r6 ; b1 [5+9 | 4+8]
      38  qadd16 r2, r10, r11 ; a1 + b1 [1 | 0]
      40  qsub16 r6, r10, r11 ; a1 - b1 [9 | 8]
      44  qadd16 r11, r5, r7 ; b1 [7+11 | 6+10]
      48  qadd16 r3, r10, r11 ; a1 + b1 [3 | 2]
      50  qsub16 r7, r10, r11 ; a1 - b1 [11 | 10]
      56  qaddsubx r11, r2, r3 ; [b1|d1] [1+2 | 0-3]
      60  qaddsubx r2, r10, r11 ; [b2|c2] [c1+d1 | a1-b1]
      61  qaddsubx r3, r11, r10 ; [a2|d2] [b1+a1 | d1-c1]

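The QADD16/QSUB16 butterflies run the inverse WHT column pass on two packed coefficients at once; the bracketed comments give the two column indices per lane. The same pass in scalar form (cf. the reference vp8_short_inv_walsh4x4_c):

    #include <stdint.h>

    static void iwalsh_columns(const int16_t *ip, int16_t *op) {
        for (int c = 0; c < 4; c++) {
            int a1 = ip[c] + ip[c + 12];
            int b1 = ip[c + 4] + ip[c + 8];  /* e.g. [5+9 | 4+8] in the asm */
            int c1 = ip[c + 4] - ip[c + 8];
            int d1 = ip[c] - ip[c + 12];
            op[c] = (int16_t)(a1 + b1);      /* a1 + b1 -> row 0 */
            op[c + 4] = (int16_t)(c1 + d1);
            op[c + 8] = (int16_t)(a1 - b1);  /* a1 - b1 -> row 2 */
            op[c + 12] = (int16_t)(d1 - c1);
        }
    }
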
D | vp8_sad16x16_armv6.asm |
      43  ldr r11, [r0, #0xC] ; load 4 src pixels (1B)
      58  usada8 r8, r11, lr, r8 ; calculate sad for 4 pixels
      68  ldr r11, [r0, #0xC] ; load 4 src pixels (2B)
      80  usada8 r8, r11, lr, r8 ; calculate sad for 4 pixels

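USADA8 is the whole inner loop in one instruction: four byte-wise absolute differences summed into the running SAD. A scalar model:

    #include <stdint.h>

    static uint32_t usada8_model(uint32_t src4, uint32_t ref4, uint32_t acc) {
        for (int i = 0; i < 4; i++) {
            int s = (src4 >> (8 * i)) & 0xff;
            int r = (ref4 >> (8 * i)) & 0xff;
            acc += (uint32_t)(s > r ? s - r : r - s);
        }
        return acc;
    }
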
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/arm/neon/

D | vp8_vpxyv12_copyframe_func_neon.asm |
      25  push {r4 - r11, lr}
      34  ldr r11, [r1, #yv12_buffer_config_v_buffer] ;srcptr1
      46  str r11, [sp, #12]
      55  add r11, r3, r7
      72  vst1.8 {q8, q9}, [r11]!
      74  vst1.8 {q10, q11}, [r11]!
      76  vst1.8 {q12, q13}, [r11]!
      78  vst1.8 {q14, q15}, [r11]!
      90  sub r11, r5, r10
     115  add r11, r3, r7
      [all …]

D | vp8_vpxyv12_copysrcframe_func_neon.asm |
      28  push {r4 - r11, lr}
      40  add r11, r3, r7 ;second row dst
      64  vst1.8 {q4, q5}, [r11]!
      66  vst1.8 {q6, q7}, [r11]!
      68  vst1.8 {q12, q13}, [r11]!
      70  vst1.8 {q14, q15}, [r11]!
      82  vst1.8 {d1}, [r11]!
      93  strb r8, [r11], #1
     101  add r11, r11, r7
     151  add r11, r3, r7 ;second row dst
      [all …]

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv6/

D | vp8_subtract_armv6.asm |
      81  stmfd sp!, {r4-r11}
      96  uxtb16 r11, r7, ror #8 ; [p3 | p1] (A)
      99  usub16 r7, r10, r11 ; [d3 | d1] (A)
     102  ldr r11, [r5, #4] ; upred (B)
     111  uxtb16 r9, r11 ; [p2 | p0] (B)
     113  uxtb16 r11, r11, ror #8 ; [p3 | p1] (B)
     116  usub16 r7, r10, r11 ; [d3 | d1] (B)
     141  uxtb16 r11, r7, ror #8 ; [p3 | p1] (A)
     144  usub16 r7, r10, r11 ; [d3 | d1] (A)
     147  ldr r11, [r5, #4] ; vpred (B)
      [all …]

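The UXTB16/USUB16 pattern unpacks four source and four prediction bytes into 16-bit lanes ([p2|p0] directly, [p3|p1] via ror #8) and subtracts per lane, so each iteration yields four 16-bit residuals. The scalar equivalent is simply:

    #include <stdint.h>

    static void subtract4(const uint8_t *src, const uint8_t *pred, int16_t *diff) {
        for (int i = 0; i < 4; i++)
            diff[i] = (int16_t)(src[i] - pred[i]);  /* usub16, two lanes per op */
    }
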
D | vp8_fast_quantize_b_armv6.asm |
      25  stmfd sp!, {r1, r4-r11, lr}
      45  ldr r11, [r4], #4 ; [q1 | q0]
      54  smulbb r0, r9, r11 ; [(x0+r0)*q0]
      55  smultt r9, r9, r11 ; [(x1+r1)*q1]
      59  ssat16 r11, #1, r12 ; [sz3 | sz2]
      60  eor r12, r12, r11 ; [z3 ^ sz3 | z2 ^ sz2]
      63  ssub16 r12, r12, r11 ; x = (z ^ sz) - sz
      81  eor r10, r10, r11 ; [(y3 ^ sz3) | (y2 ^ sz2)]
      82  ssub16 r10, r10, r11 ; x = (y ^ sz) - sz
      88  ldr r11, [r8], #4 ; [dq3 | dq2]
      [all …]

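The EOR/SSUB16 comments describe the classic sign-magnitude trick: sz is a per-lane sign mask, (z ^ sz) - sz is |z|, and the same xor/subtract restores the sign after the Q16 multiply by the quantizer. A per-coefficient sketch mirroring the C fast quantizer:

    #include <stdint.h>

    static int16_t fast_quantize_model(int16_t z, int16_t round,
                                       uint16_t quant /* Q16 */) {
        int sz = z >> 15;                     /* 0 or -1 */
        int x = (z ^ sz) - sz;                /* abs(z) */
        int y = ((x + round) * quant) >> 16;  /* smulbb/smultt: (x + r) * q */
        return (int16_t)((y ^ sz) - sz);      /* re-apply sign */
    }
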
D | walsh_v6.asm |
      25  stmdb sp!, {r4 - r11, lr}
      40  ldrd r10, r11, [r0]
      46  qadd16 r9, r10, r11 ; [d1|a1] [13+15 | 12+14]
      47  qsub16 r10, r10, r11 ; [c1|b1] [13-15 | 12-14]
      51  smuad r11, r3, lr ; A0 = a1<<2 + d1<<2
      52  addne r11, r11, #1 ; A0 += (a1!=0)
      58  add r0, r11, r12 ; a1_0 = A0 + C0
      59  sub r11, r11, r12 ; b1_0 = A0 - C0
      86  adds r2, r11, r12 ; b2 = b1_0 + c1_0
      89  subs r0, r11, r12 ; c2 = b1_0 - c1_0
      [all …]

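The SMUAD plus conditional ADDNE lines line up with the scalar forward WHT, vp8_short_walsh4x4_c: first-pass inputs are scaled by 4 and the first output gets a "+ (a1 != 0)" bias. One row of that first pass, as a sketch:

    #include <stdint.h>

    static void walsh_row(const int16_t *ip, int16_t *op) {
        int a1 = (ip[0] + ip[2]) << 2;
        int d1 = (ip[1] + ip[3]) << 2;
        int c1 = (ip[1] - ip[3]) << 2;
        int b1 = (ip[0] - ip[2]) << 2;
        op[0] = (int16_t)(a1 + d1 + (a1 != 0));  /* addne r11, r11, #1 */
        op[1] = (int16_t)(b1 + c1);
        op[2] = (int16_t)(b1 - c1);
        op[3] = (int16_t)(a1 - d1);
    }
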
D | vp8_short_fdct4x4_armv6.asm |
      29  ldr r11, c14500
      46  smlad r6, r7, r12, r11 ; o1 = (c1 * 2217 + d1 * 5352 + 14500)
      71  smlad r6, r7, r12, r11 ; o5 = (c1 * 2217 + d1 * 5352 + 14500)
      96  smlad r6, r7, r12, r11 ; o9 = (c1 * 2217 + d1 * 5352 + 14500)
     119  smlad r6, r7, r12, r11 ; o13 = (c1 * 2217 + d1 * 5352 + 14500)
     129  ldr r11, c12000
     140  add r0, r11, #0x10000 ; add (d!=0)
     166  addeq r8, r8, r11 ; c1_b*2217+d1_b*5352+12000 + (d==0)
     169  addeq r9, r9, r11 ; c1_t*2217+d1_t*5352+12000 + (d==0)
     220  addeq r8, r8, r11 ; c1_b*2217+d1_b*5352+12000 + (d==0)
      [all …]

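The constants in the comments are those of the scalar 4x4 FDCT, vp8_short_fdct4x4_c: the row pass uses the +14500/+7500 rounders with >> 12, the column pass +12000/+51000 with >> 16 plus a "(d1 != 0)" bias, handled here around the ADD #0x10000 / ADDEQ lines. The row pass in C:

    #include <stdint.h>

    static void fdct_row(const int16_t *ip, int16_t *op) {
        int a1 = (ip[0] + ip[3]) << 3;
        int b1 = (ip[1] + ip[2]) << 3;
        int c1 = (ip[1] - ip[2]) << 3;
        int d1 = (ip[0] - ip[3]) << 3;
        op[0] = (int16_t)(a1 + b1);
        op[2] = (int16_t)(a1 - b1);
        op[1] = (int16_t)((c1 * 2217 + d1 * 5352 + 14500) >> 12);
        op[3] = (int16_t)((c1 * 5352 - d1 * 2217 + 7500) >> 12);
    }
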
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv5te/

D | vp8_packtokens_partitions_armv5.asm |
      66  ldr r11, _VP8_COMP_bc_ ; load up vp8_writer out of cpi
      67  add r0, r0, r11
      69  mov r11, #0
      70  str r11, [sp, #28] ; i
      78  subs r5, r5, r11 ; move start point with each partition
     179  ldrb r11, [r7, r4]
     180  cmpge r11, #0xff
     194  add r11, r4, #1 ; w->pos++
     196  str r11, [r0, #vp8_writer_pos]
     199  VALIDATE_POS r10, r11 ; validate_buffer at pos
      [all …]

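The ldrb/cmpge #0xff lines in these token packers are carry propagation in the boolean encoder: a carry out of the low byte walks backwards over any 0xff bytes already written, zeroing them, and increments the first byte that can absorb it (cf. the C coder in vp8/encoder/boolhuff.h). A sketch:

    static void propagate_carry(unsigned char *buffer, int pos) {
        int x = pos - 1;
        while (x >= 0 && buffer[x] == 0xff)
            buffer[x--] = 0;  /* 0xff + carry rolls over to 0x00 */
        buffer[x] += 1;       /* first non-0xff byte absorbs the carry */
    }
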
D | vp8_packtokens_armv5.asm |
     129  ldrb r11, [r7, r4]
     130  cmpge r11, #0xff
     144  add r11, r4, #1 ; w->pos++
     146  str r11, [r0, #vp8_writer_pos]
     149  VALIDATE_POS r10, r11 ; validate_buffer at pos
     227  ldrb r11, [r7, r4]
     228  cmpge r11, #0xff
     242  add r11, r4, #1 ; w->pos++
     244  str r11, [r0, #vp8_writer_pos]
     247  VALIDATE_POS r10, r11 ; validate_buffer at pos

D | vp8_packtokens_mbrow_armv5.asm |
     150  ldrb r11, [r7, r4]
     151  cmpge r11, #0xff
     165  add r11, r4, #1 ; w->pos++
     167  str r11, [r0, #vp8_writer_pos]
     170  VALIDATE_POS r10, r11 ; validate_buffer at pos
     248  ldrb r11, [r7, r4]
     249  cmpge r11, #0xff
     263  add r11, r4, #1 ; w->pos++
     265  str r11, [r0, #vp8_writer_pos]
     268  VALIDATE_POS r10, r11 ; validate_buffer at pos

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/ppc/

D | encodemb_altivec.asm |
      21  mfspr r11, 256 ;# get old VRSAVE
      22  oris r12, r11, 0xf000
     108  mtspr 256, r11 ;# reset old VRSAVE
     117  mfspr r11, 256 ;# get old VRSAVE
     118  oris r12, r11, 0xf800
     151  mtspr 256, r11 ;# reset old VRSAVE

D | rdopt_altivec.asm |
      18  mfspr r11, 256 ;# get old VRSAVE
      19  oris r12, r11, 0xf800
      49  mtspr 256, r11 ;# reset old VRSAVE

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/

D | copy_altivec.asm |
      23  mfspr r11, 256 ;# get old VRSAVE
      24  oris r12, r11, 0xe000
      45  mtspr 256, r11 ;# reset old VRSAVE