
Searched refs:r10 (Results 1 – 25 of 46) sorted by relevance


/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/
loopfilter_v6.asm
73 ldr r10, [src], pstep ; p2
90 uqsub8 r6, r9, r10 ; p3 - p2
91 uqsub8 r7, r10, r9 ; p2 - p3
92 uqsub8 r8, r10, r11 ; p2 - p1
93 uqsub8 r10, r11, r10 ; p1 - p2
96 orr r8, r8, r10 ; abs (p2-p1)
103 ldr r10, [src], pstep ; q1
109 uqsub8 r6, r11, r10 ; p1 - q1
110 uqsub8 r7, r10, r11 ; q1 - p1
119 uqsub8 r7, r9, r10 ; q0 - q1
[all …]
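
The uqsub8/orr pairs above are the standard ARMv6 idiom for a per-byte absolute difference: unsigned saturating subtraction clamps the wrong-signed direction to zero, so ORing the two results leaves |a - b| in each byte lane. A minimal scalar sketch of the same idea (illustrative helper, not part of the source file):

    #include <stdint.h>

    /* Per-byte |a - b| from two saturating subtractions, as the
     * uqsub8/orr sequence above computes across four lanes at once. */
    static uint8_t abs_diff_u8(uint8_t a, uint8_t b)
    {
        uint8_t d0 = (a > b) ? (uint8_t)(a - b) : 0; /* uqsub8 a, b */
        uint8_t d1 = (b > a) ? (uint8_t)(b - a) : 0; /* uqsub8 b, a */
        return d0 | d1;                              /* orr: one term is zero */
    }
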
filter_v6.asm
56 ldrb r10, [r0], #2
63 pkhbt r8, r9, r10, lsl #16 ; r10 | r9
68 pkhbt r10, r10, r11, lsl #16 ; r11 | r10
72 smlad lr, r10, r5, lr
73 ldrb r10, [r0, #1]
79 pkhbt r9, r9, r10, lsl #16 ; r10 | r9
80 pkhbt r10, r10, r11, lsl #16 ; r11 | r10
83 smlad r11, r10, r6, r8
85 ands r10, r7, #0xff ; test loop counter
96 ldrneb r10, [r0], #2
[all …]
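
The pkhbt instructions above pack two neighbouring 16-bit values into one register so that each smlad can multiply both against a pair of filter taps and fold both products into the accumulator at once. A scalar sketch of what a single smlad contributes (sample/tap pairing assumed from the "r10 | r9" comments):

    #include <stdint.h>

    /* One SMLAD step: two 16-bit samples times two 16-bit taps,
     * both products added to the running accumulator. */
    static int32_t smlad_step(int32_t acc, int16_t s0, int16_t s1,
                              int16_t t0, int16_t t1)
    {
        return acc + (int32_t)s0 * t0 + (int32_t)s1 * t1;
    }
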
dequant_idct_v6.asm
67 smulwt r10, r4, r6
71 pkhbt r8, r8, r10, lsl #16
75 smulwb r10, r4, r12
79 pkhbt r10, r10, r7, lsl #16
82 uadd16 r6, r6, r10
83 uadd16 r10, r11, r14
85 uadd16 r9, r10, r6
86 usub16 r10, r10, r6
92 str r10, [r1, #24]
106 smulwt r10, r4, r8
[all …]
iwalsh_v6.asm
33 qadd16 r10, r2, r8 ; a1 [1+13 | 0+12]
38 qadd16 r2, r10, r11 ; a1 + b1 [1 | 0]
40 qsub16 r6, r10, r11 ; a1 - b1 [9 | 8]
43 qadd16 r10, r3, r9 ; a1 [3+15 | 2+14]
48 qadd16 r3, r10, r11 ; a1 + b1 [3 | 2]
50 qsub16 r7, r10, r11 ; a1 - b1 [11 | 10]
55 qsubaddx r10, r2, r3 ; [c1|a1] [1-2 | 0+3]
60 qaddsubx r2, r10, r11 ; [b2|c2] [c1+d1 | a1-b1]
61 qaddsubx r3, r11, r10 ; [a2|d2] [b1+a1 | d1-c1]
62 ldr r10, c0x00030003
[all …]
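
The qadd16/qsub16 comments trace the inverse Walsh-Hadamard butterfly on packed 16-bit coefficients: a1 is ip[0]+ip[12] (with ip[1]+ip[13] in the upper lane), and the outputs are a1+b1 and a1-b1, with c0x00030003 later supplying the +3 rounding bias. A scalar sketch of one column pass; only a1 and the a1+b1 / a1-b1 outputs are visible above, the b1/c1/d1 pairings are assumed from the reference C transform:

    #include <stdint.h>

    /* One column of the inverse WHT butterfly in scalar form. */
    static void iwalsh_col(const int16_t *ip, int16_t *op, int i)
    {
        int a1 = ip[i]     + ip[12 + i];  /* matches "a1 [0+12]" above */
        int b1 = ip[4 + i] + ip[8 + i];   /* assumed pairing */
        int c1 = ip[4 + i] - ip[8 + i];   /* assumed pairing */
        int d1 = ip[i]     - ip[12 + i];  /* assumed pairing */

        op[i]      = (int16_t)(a1 + b1);  /* "a1 + b1 [1 | 0]" */
        op[8 + i]  = (int16_t)(a1 - b1);  /* "a1 - b1 [9 | 8]" */
        op[4 + i]  = (int16_t)(c1 + d1);
        op[12 + i] = (int16_t)(d1 - c1);
    }
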
intra4x4_predict_v6.asm
92 uxtb16 r10, r8 ; a[2|0]
94 ssub16 r10, r10, r9 ; a[2|0] - [tl|tl]
102 sadd16 r1, r4, r10 ; l[0|0] + a[2|0] - [tl|tl]
107 sadd16 r4, r5, r10 ; l[1|1] + a[2|0] - [tl|tl]
116 sadd16 r1, r6, r10 ; l[2|2] + a[2|0] - [tl|tl]
125 sadd16 r4, r7, r10 ; l[3|3] + a[2|0] - [tl|tl]
144 ldrb r10, [r0, #4] ; a[4]
158 add r0, r0, r10, lsl #16 ;[a[4]+2 | 2]
185 add r10, r5, r6 ; l[1] + l[2]
191 add r4, r9, r10 ; l[0] + 2*l[1] + l[2]
[all …]
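
The sadd16/ssub16 comments describe TM-style prediction: each predicted pixel is left[row] + above[col] - top_left, evaluated two columns at a time in packed halfwords. A scalar sketch; the clamp to 0..255 is assumed, since it happens outside the lines shown:

    #include <stdint.h>

    static uint8_t clamp255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : (uint8_t)v); }

    /* pred[row][col] = left[row] + above[col] - top_left, per the
     * "l[r|r] + a[c|c] - [tl|tl]" comments above. */
    static void tm_predict_4x4(uint8_t pred[4][4], const uint8_t *above,
                               const uint8_t *left, uint8_t top_left)
    {
        for (int r = 0; r < 4; r++)
            for (int c = 0; c < 4; c++)
                pred[r][c] = clamp255(left[r] + above[c] - top_left);
    }
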
sixtappredict8x4_v6.asm
57 ldrb r10, [r0, #-1]
65 pkhbt r9, r9, r10, lsl #16 ; r10 | r9
77 pkhbt r10, r10, r6, lsl #16 ; r10 | r9
78 pkhbt r6, r6, r7, lsl #16 ; r11 | r10
79 smlad r11, r10, r5, r11 ; vp8_filter[4], vp8_filter[5]
98 movne r8, r10
100 movne r10, r12
150 smulbt r10, r3, r6
155 smladx r10, r12, r7, r10
160 smladx r10, r11, r8, r10
[all …]
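
Each smlad/smladx above folds one pair of the six filter taps into the running sum; three such pairs, a rounding add and a shift produce one filtered byte. A scalar sketch of a single output pixel; the Q7 tap scale (add 64, shift right by 7) and the src[-2]..src[3] window are assumptions recalled from the reference C filter, not visible in these hits:

    #include <stdint.h>

    /* One horizontally filtered pixel from six neighbouring source bytes. */
    static uint8_t sixtap_pixel(const uint8_t *src, const int16_t vp8_filter[6])
    {
        int sum = 0;
        for (int k = 0; k < 6; k++)
            sum += src[k - 2] * vp8_filter[k];  /* taps over src[-2]..src[3] */
        sum = (sum + 64) >> 7;                  /* assumed Q7 rounding */
        return sum < 0 ? 0 : (sum > 255 ? 255 : (uint8_t)sum);
    }
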
idct_v6.asm
44 smulwt r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16
49 pkhbt r8, r8, r10, lsl #16 ; 5s | 4s
54 smulwb r10, r4, r12 ; (ip[12] * sinpi8sqrt2) >> 16
60 pkhbt r10, r10, r7, lsl #16 ; 13s | 12s
64 uadd16 r6, r6, r10 ; d
65 uadd16 r10, r11, r14 ; a
68 uadd16 r9, r10, r6 ; a+d
69 usub16 r10, r10, r6 ; a-d
76 str r10,[r0, #(12*2)] ; o13|o12
93 smulwt r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16
[all …]
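
smulwb/smulwt implement the "(ip[n] * sinpi8sqrt2) >> 16" products named in the comments, i.e. a 16.16 fixed-point multiply that keeps only the integer part; the a and d terms are then combined into the a+d / a-d output pairs. A scalar sketch of the multiply itself; the constant's value is recalled from the reference C code and is an assumption here:

    #include <stdint.h>

    /* Assumed Q16 constant: roughly 2^16 * sqrt(2) * sin(pi/8). */
    static const int sinpi8sqrt2 = 35468;

    /* The "(x * sinpi8sqrt2) >> 16" product from the comments above. */
    static int mul_sinpi8sqrt2(int x)
    {
        return (x * sinpi8sqrt2) >> 16;
    }
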
vp8_variance8x8_armv6.asm
25 push {r4-r10, lr}
43 sel r10, r8, lr ; select bytes with positive difference
49 usad8 r6, r10, lr ; calculate sum of positive differences
51 orr r8, r8, r10 ; differences of all 4 pixels
58 uxtb16 r10, r8, ror #8 ; another two pixels to halfwords
64 smlad r5, r10, r10, r5 ; dual signed multiply, add and accumulate (2)
68 sel r10, r8, lr ; select bytes with positive difference
74 usad8 r6, r10, lr ; calculate sum of positive differences
76 orr r8, r8, r10 ; differences of all 4 pixels
84 uxtb16 r10, r8, ror #8 ; another two pixels to halfwords
[all …]
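
The sel/usad8 pair above accumulates the signed sum of pixel differences while uxtb16 + smlad accumulate the sum of squared differences; the block variance is then sse minus the squared sum over the pixel count. A scalar sketch for 8x8 (the final subtraction is assumed, it lies outside the lines shown):

    #include <stdint.h>

    /* Sum and sum-of-squares of src/ref differences for an 8x8 block;
     * variance = sse - sum*sum/64 (final step assumed, not shown above). */
    static unsigned variance8x8(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                unsigned *sse)
    {
        int sum = 0;
        unsigned s2 = 0;
        for (int r = 0; r < 8; r++) {
            for (int c = 0; c < 8; c++) {
                int d = src[c] - ref[c];
                sum += d;      /* usad8/sel part: sum of differences */
                s2  += d * d;  /* smlad part: sum of squared differences */
            }
            src += src_stride;
            ref += ref_stride;
        }
        *sse = s2;
        return s2 - (unsigned)((sum * sum) >> 6);  /* 64 pixels */
    }
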
vp8_variance16x16_armv6.asm
60 uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
66 smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
84 uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
90 smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
108 uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
114 smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
134 uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
136 smlad r11, r10, r10, r11 ; dual signed multiply, add and accumulate (2)
simpleloopfilter_v6.asm
174 pkhbt r10, r5, r6, lsl #16
176 ;transpose r7, r8, r9, r10 to r3, r4, r5, r6
177 TRANSPOSE_MATRIX r7, r8, r9, r10, r3, r4, r5, r6
182 uqsub8 r10, r5, r4 ; q0 - p0
184 orr r9, r9, r10 ; abs(p0 - q0)
189 mvn r10, #0 ; r10 == -1
192 sel lr, r10, r8 ; filter mask
255 ; load source data to r7, r8, r9, r10
vp8_variance_halfpixvar16x16_hv_armv6.asm
33 ldr r10, c80808080
48 eor r4, r4, r10
52 eor r5, r5, r10
57 eor r4, r4, r10
91 eor r4, r4, r10
95 eor r5, r5, r10
100 eor r4, r4, r10
133 eor r4, r4, r10
137 eor r5, r5, r10
142 eor r4, r4, r10
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
recon_altivec.asm
65 stvx v2, 0, r10 ;# 2 rows to dst from buf
66 lwz r0, 0(r10)
72 lwz r0, 4(r10)
74 lwz r0, 8(r10)
76 lwz r0, 12(r10)
95 la r10, -48(r1) ;# buf
110 stw r0, 0(r10)
112 stw r0, 4(r10)
114 stw r0, 8(r10)
116 stw r0, 12(r10)
[all …]
variance_altivec.asm
37 li r10, 16 ;# load offset and loop counter
75 load_aligned_16 v4, r3, r10
76 load_aligned_16 v5, r5, r10
108 load_aligned_16 v4, r3, r10
109 load_aligned_16 v5, r5, r10
116 load_aligned_16 v6, r3, r10
117 load_aligned_16 v0, r5, r10
184 mtctr r10
203 mtctr r10
207 load_aligned_16 v4, r3, r10
[all …]
sad_altivec.asm
34 li r10, 16 ;# load offset and loop counter
61 lvx v2, r10, r5
77 lvx v2, r10, r5
95 lvx v2, r10, r5
116 load_aligned_16 v4, r3, r10
117 load_aligned_16 v5, r5, r10
124 load_aligned_16 v6, r3, r10
125 load_aligned_16 v7, r5, r10
filter_altivec.asm
24 load_c \V0, HFilter, r5, r9, r10
27 lvx \V1, r5, r10
32 load_c v0, VFilter, r6, r3, r10
48 li r10, 16
50 lvx v11, r10, r9
53 lvx v13, r10, r9
152 lvx v20, r10, \RS
412 load_c v16, B_0123, 0, r9, r10
413 load_c v17, B_4567, 0, r9, r10
414 load_c v18, B_89AB, 0, r9, r10
[all …]
filter_bilinear_altivec.asm
24 load_c \V0, vfilter_b, r6, r9, r10
27 lvx \V1, r6, r10
35 li r10, 16
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv6/
vp8_subtract_armv6.asm
95 uxtb16 r10, r6, ror #8 ; [s3 | s1] (A)
99 usub16 r7, r10, r11 ; [d3 | d1] (A)
101 ldr r10, [r1, #4] ; usrc (B)
108 uxtb16 r8, r10 ; [s2 | s0] (B)
112 uxtb16 r10, r10, ror #8 ; [s3 | s1] (B)
116 usub16 r7, r10, r11 ; [d3 | d1] (B)
140 uxtb16 r10, r6, ror #8 ; [s3 | s1] (A)
144 usub16 r7, r10, r11 ; [d3 | d1] (A)
146 ldr r10, [r2, #4] ; vsrc (B)
153 uxtb16 r8, r10 ; [s2 | s0] (B)
[all …]
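
uxtb16 splits four packed source and prediction bytes into even/odd halfword lanes so that usub16 can form four differences per instruction; the net effect is simply diff = src - pred for every pixel. Scalar sketch:

    #include <stdint.h>

    /* diff[i] = src[i] - pred[i]: the per-pixel subtraction the
     * uxtb16/usub16 lanes above compute four at a time. */
    static void subtract_block(int16_t *diff, const uint8_t *src,
                               const uint8_t *pred, int n)
    {
        for (int i = 0; i < n; i++)
            diff[i] = (int16_t)(src[i] - pred[i]);
    }
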
vp8_fast_quantize_b_armv6.asm
44 ldr r10, [r5], #4 ; [r1 | r0]
50 sadd16 r9, r9, r10 ; [x1+r1 | x0+r0]
57 ldr r10, [r5], #4 ; [r3 | r2]
65 sadd16 r12, r12, r10 ; [x3+r3 | x2+r2]
69 smulbb r10, r12, r9 ; [(x2+r2)*q2]
80 pkhtb r10, r12, r10, asr #16 ; [y3 | y2]
81 eor r10, r10, r11 ; [(y3 ^ sz3) | (y2 ^ sz2)]
82 ssub16 r10, r10, r11 ; x = (y ^ sz) - sz
84 cmp r10, #0 ; check if zero
87 str r10, [r6], #4 ; *qcoeff++ = x
[all …]
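
The comments spell out the branch-free sign handling used by the fast quantizer: the rounded magnitude is multiplied by the quantizer, then x = (y ^ sz) - sz re-applies the original sign (sz being an all-ones or all-zeros mask). Scalar sketch for one coefficient; the >>16 scaling of the smulbb product is assumed from the usual fixed-point layout:

    #include <stdint.h>

    /* Quantize one coefficient: round, scale, then restore the sign
     * with the x = (y ^ sz) - sz trick from the comments above. */
    static int16_t fast_quantize(int16_t coeff, int16_t round, int16_t quant)
    {
        int sz = coeff < 0 ? -1 : 0;          /* sign mask */
        int x  = (coeff ^ sz) - sz;           /* |coeff| */
        int y  = ((x + round) * quant) >> 16; /* assumed Q16 scaling */
        return (int16_t)((y ^ sz) - sz);      /* re-apply the sign */
    }
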
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv5te/
vp8_packtokens_partitions_armv5.asm
76 ldr r10, [sp, #24] ; ptr
94 str r10, [r0, #vp8_writer_buffer]
127 ldr r10, [sp, #88] ; vp8_coef_tree
148 ldrsb lr, [r10, lr] ; i = vp8_coef_tree[i+bb]
173 mov r10, #0
174 strb r10, [r7, r4] ; w->buffer[x] =(unsigned char)0
184 ldrb r10, [r7, r4] ; w->buffer[x]
185 add r10, r10, #1
186 strb r10, [r7, r4] ; w->buffer[x] + 1
189 ldr r10, [r0, #vp8_writer_buffer]
[all …]
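
The "w->buffer[x] = 0" / "w->buffer[x] + 1" pair here (and in the two packtokens files below) is the boolean coder's carry propagation: when the arithmetic coder's low value overflows, trailing 0xff bytes already written are rolled back to 0 and the first earlier byte absorbs the carry. A sketch of that loop, assuming the usual writer state of a byte buffer and a write position:

    #include <stdint.h>

    /* Propagate a carry backwards through bytes already written by the
     * bool writer, per the buffer[x] = 0 / buffer[x] + 1 comments above. */
    static void propagate_carry(uint8_t *buffer, int pos)
    {
        int x = pos - 1;
        while (buffer[x] == 0xff) {  /* 0xff cannot absorb the carry */
            buffer[x] = 0;
            x--;
        }
        buffer[x] += 1;
    }
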
vp8_packtokens_armv5.asm
77 ldr r10, [sp, #60] ; vp8_coef_tree
98 ldrsb lr, [r10, lr] ; i = vp8_coef_tree[i+bb]
123 mov r10, #0
124 strb r10, [r7, r4] ; w->buffer[x] =(unsigned char)0
134 ldrb r10, [r7, r4] ; w->buffer[x]
135 add r10, r10, #1
136 strb r10, [r7, r4] ; w->buffer[x] + 1
139 ldr r10, [r0, #vp8_writer_buffer]
149 VALIDATE_POS r10, r11 ; validate_buffer at pos
151 strb r7, [r10, r4] ; w->buffer[w->pos++]
[all …]
vp8_packtokens_mbrow_armv5.asm
98 ldr r10, [sp, #64] ; vp8_coef_tree
119 ldrsb lr, [r10, lr] ; i = vp8_coef_tree[i+bb]
144 mov r10, #0
145 strb r10, [r7, r4] ; w->buffer[x] =(unsigned char)0
155 ldrb r10, [r7, r4] ; w->buffer[x]
156 add r10, r10, #1
157 strb r10, [r7, r4] ; w->buffer[x] + 1
160 ldr r10, [r0, #vp8_writer_buffer]
170 VALIDATE_POS r10, r11 ; validate_buffer at pos
172 strb r7, [r10, r4] ; w->buffer[w->pos++]
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/arm/neon/
vp8_vpxyv12_copysrcframe_func_neon.asm
39 add r10, r2, r6 ;second row src
54 vld1.8 {q4, q5}, [r10]!
56 vld1.8 {q6, q7}, [r10]!
58 vld1.8 {q12, q13}, [r10]!
60 vld1.8 {q14, q15}, [r10]!
78 vld1.8 {d1}, [r10]!
92 ldrb r8, [r10], #1
100 add r10, r10, r6
150 add r10, r2, r6 ;second row src
168 vld1.8 {q4, q5}, [r10]!
[all …]
vp8_vpxyv12_copyframe_func_neon.asm
33 ldr r10, [r0, #yv12_buffer_config_v_buffer] ;srcptr1
45 str r10, [sp, #8]
54 add r10, r2, r6
61 vld1.8 {q8, q9}, [r10]!
63 vld1.8 {q10, q11}, [r10]!
65 vld1.8 {q12, q13}, [r10]!
67 vld1.8 {q14, q15}, [r10]!
89 ands r10, r5, #0x7f ;check to see if extra copy is needed
90 sub r11, r5, r10
114 add r10, r2, r6
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/ppc/
fdct_altivec.asm
36 load_c v0, dct_tab, 0, r9, r10
37 lvx v1, r6, r10
38 addi r10, r10, 32
39 lvx v2, 0, r10
40 lvx v3, r6, r10
42 load_c v4, ppc_dctperm_tab, 0, r9, r10
43 load_c v5, ppc_dctperm_tab, r6, r9, r10
45 load_c v6, round_tab, 0, r10, r9
146 addi r10, r3, 0
163 addi r3, r10, 8
encodemb_altivec.asm
30 li r10, 16
65 stvx v3, r10, r3 ;# store out diff
102 stvx v3, r10, r3 ;# store out diff
121 li r10, 16
122 mtctr r10
145 stvx v3, r10, r3 ;# store out diff
