Home
last modified time | relevance | path

Searched refs:r6 (Results 1 – 25 of 55) sorted by relevance

123

/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/
Dvp8_variance_halfpixvar16x16_h_armv6.asm40 ldr r6, [r0, #1] ; load 4 src pixels with 1 byte offset
44 mvn r6, r6
45 uhsub8 r4, r4, r6
48 usub8 r6, r4, r5 ; calculate difference
50 sel r7, r6, lr ; select bytes with positive difference
51 usub8 r6, r5, r4 ; calculate difference with reversed operands
53 sel r6, r6, lr ; select bytes with negative difference
57 usad8 r5, r6, lr ; calculate sum of negative differences
58 orr r6, r6, r7 ; differences of all 4 pixels
64 uxtb16 r5, r6 ; byte (two pixels) to halfwords
[all …]
Dvp8_variance_halfpixvar16x16_v_armv6.asm41 ldr r6, [r9, #0] ; load 4 src pixels from next row
45 mvn r6, r6
46 uhsub8 r4, r4, r6
49 usub8 r6, r4, r5 ; calculate difference
51 sel r7, r6, lr ; select bytes with positive difference
52 usub8 r6, r5, r4 ; calculate difference with reversed operands
54 sel r6, r6, lr ; select bytes with negative difference
58 usad8 r5, r6, lr ; calculate sum of negative differences
59 orr r6, r6, r7 ; differences of all 4 pixels
65 uxtb16 r5, r6 ; byte (two pixels) to halfwords
[all …]
Dvp8_variance_halfpixvar16x16_hv_armv6.asm41 ldr r6, [r0, #1] ; load source pixels b, row N
46 mvn r6, r6
47 uhsub8 r4, r4, r6
59 usub8 r6, r4, r5 ; calculate difference
61 sel r7, r6, lr ; select bytes with positive difference
62 usub8 r6, r5, r4 ; calculate difference with reversed operands
64 sel r6, r6, lr ; select bytes with negative difference
68 usad8 r5, r6, lr ; calculate sum of negative differences
69 orr r6, r6, r7 ; differences of all 4 pixels
75 uxtb16 r5, r6 ; byte (two pixels) to halfwords
[all …]
Ddequant_idct_v6.asm32 smulbb r6, r4, r5
38 strh r6, [r0], #2
41 smulbb r6, r4, r5
49 strh r6, [r0], #2
60 ldr r6, [r0, #8]
65 smulwt r9, r3, r6
66 smulwb r7, r3, r6
67 smulwt r10, r4, r6
68 smulwb r8, r4, r6
72 uadd16 r6, r6, r7
[all …]
Didct_v6.asm38 ldr r6, [r0, #(4*2)] ; i5 | i4
42 smulbt r9, r5, r6 ; (ip[5] * cospi8sqrt2minus1) >> 16
43 smulbb r7, r5, r6 ; (ip[4] * cospi8sqrt2minus1) >> 16
44 smulwt r10, r4, r6 ; (ip[5] * sinpi8sqrt2) >> 16
45 smulwb r8, r4, r6 ; (ip[4] * sinpi8sqrt2) >> 16
50 uadd16 r6, r6, r7 ; 5c+5 | 4c+4
64 uadd16 r6, r6, r10 ; d
68 uadd16 r9, r10, r6 ; a+d
69 usub16 r10, r10, r6 ; a-d
70 uadd16 r6, r8, r7 ; b+c
[all …]
Dloopfilter_v6.asm68 ldr r6, [sp, #36] ; load thresh address
77 ldrb r3, [r6] ; thresh
90 uqsub8 r6, r9, r10 ; p3 - p2
95 orr r6, r6, r7 ; abs (p3-p2)
97 uqsub8 lr, r6, r2 ; compare to limit. lr: vp8_filter_mask
99 uqsub8 r6, r11, r12 ; p1 - p0
104 orr r6, r6, r7 ; abs (p1-p0)
105 uqsub8 r7, r6, r2 ; compare to limit
106 uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later
109 uqsub8 r6, r11, r10 ; p1 - q1
[all …]
Dvp8_variance16x16_armv6.asm43 usub8 r6, r4, r5 ; calculate difference
45 sel r7, r6, lr ; select bytes with positive difference
48 sel r6, r9, lr ; select bytes with negative difference
52 usad8 r5, r6, lr ; calculate sum of negative differences
53 orr r6, r6, r7 ; differences of all 4 pixels
59 uxtb16 r5, r6 ; byte (two pixels) to halfwords
60 uxtb16 r10, r6, ror #8 ; another two pixels to halfwords
68 usub8 r6, r4, r5 ; calculate difference
69 sel r7, r6, lr ; select bytes with positive difference
71 sel r6, r9, lr ; select bytes with negative difference
[all …]
Ddc_only_idct_add_v6.asm30 ldr r6, [r1], r2
37 uxtab16 r7, r0, r6
38 uxtab16 r6, r0, r6, ror #8
42 usat16 r6, #8, r6
44 orr r7, r7, r6, lsl #8
47 ldr r6, [r1]
52 uxtab16 r7, r0, r6
53 uxtab16 r6, r0, r6, ror #8
57 usat16 r6, #8, r6
59 orr r7, r7, r6, lsl #8
Dintra4x4_predict_v6.asm56 ldrb r6, [r1], r2 ; Left[2]
62 add r4, r4, r6
86 ldrb r6, [r1], r2 ; Left[2]
99 add r6, r6, r6, lsl #16 ; l[2|2]
116 sadd16 r1, r6, r10 ; l[2|2] + a[2|0] - [tl|tl]
117 sadd16 r2, r6, r11 ; l[2|2] + a[3|1] - [tl|tl]
180 ldrb r6, [r1], r2 ; Left[2]
185 add r10, r5, r6 ; l[1] + l[2]
186 add r11, r6, r7 ; l[2] + l[3]
193 add r6, r11, r7, lsl #1 ; l[2] + 2*l[3] + l[3]
[all …]
Dbilinearfilter_v6.asm50 ldrb r6, [r0] ; load source data
59 pkhbt r6, r6, r7, lsl #16 ; src[1] | src[0]
62 smuad r6, r6, r5 ; apply the filter
73 add r6, r6, #0x40 ; round_shift_and_clamp
75 usat r6, #16, r6, asr #7
78 strh r6, [r1], r3 ; result is transposed and stored
88 ldrneb r6, [r0] ; load source data
114 ldrb r6, [r0] ; load data
119 strh r6, [r1], r3 ; store it to immediate buffer
162 ldr r6, [r0] ; load the data
[all …]
Dcopymem16x16_v6.asm39 ldrb r6, [r0, #2]
47 strb r6, [r2, #2]
52 ldrb r6, [r0, #6]
59 strb r6, [r2, #6]
64 ldrb r6, [r0, #10]
69 strb r6, [r2, #10]
74 ldrb r6, [r0, #14]
81 strb r6, [r2, #14]
88 ldrneb r6, [r0, #2]
103 ldr r6, [r0, #8]
[all …]
Diwalsh_v6.asm28 ldr r6, [r0, #16] ; [9 | 8]
34 qadd16 r11, r4, r6 ; b1 [5+9 | 4+8]
35 qsub16 r12, r4, r6 ; c1 [5-9 | 4-8]
40 qsub16 r6, r10, r11 ; a1 - b1 [9 | 8]
93 qsubaddx r2, r6, r7 ; [c1|a1] [9-10 | 8+11]
94 qaddsubx r3, r6, r7 ; [b1|d1] [9+10 | 8-11]
98 qaddsubx r6, r2, r3 ; [b2|c2] [c1+d1 | a1-b1]
103 qadd16 r6, r6, r10 ; [b2+3|c2+3]
110 asr lr, r6, #19 ; [9]
112 sxth r6, r6
[all …]
Dsixtappredict8x4_v6.asm53 ldrb r6, [r0, #-5] ; load source data
61 pkhbt r6, r6, r7, lsl #16 ; r7 | r6
68 smuad r11, r6, r3 ; vp8_filter[0], vp8_filter[1]
71 ldrb r6, [r0], #1
77 pkhbt r10, r10, r6, lsl #16 ; r10 | r9
78 pkhbt r6, r6, r7, lsl #16 ; r11 | r10
80 smlad r12, r6, r5, r12
93 movne r11, r6
96 movne r6, r8
143 ldr r6, [sp] ; load the data
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vpx_scale/arm/neon/
Dvp8_vpxyv12_extendframeborders_neon.asm35 add r6, r1, r3 ; dest_ptr2 = src_ptr2 + 1 (src_ptr1 + plane_width)
36 sub r2, r6, #1 ; src_ptr2 = src_ptr1 + plane_width - 1
63 vst1.8 {q2, q3}, [r6], lr
65 vst1.8 {q6, q7}, [r6], lr
67 vst1.8 {q10, q11}, [r6], lr
69 vst1.8 {q14, q15}, [r6], lr
81 add r6, r1, r8 ; dest_ptr2 = src_ptr2 - plane_stride (src_ptr1 + (plane_height * plane_stride))
82 sub r2, r6, lr ; src_ptr2 = src_ptr1 + (plane_height * plane_stride) - plane_stride
102 vst1.8 {q8, q9}, [r6]!
104 vst1.8 {q10, q11}, [r6]!
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/
Dvp9_avg_neon.asm19 push {r4-r6, lr}
21 mov r6, r2
39 vld1.8 {q8-q9}, [r6@128]!
40 vld1.8 {q10-q11}, [r6@128], r4
49 pop {r4-r6, pc}
54 vld1.8 {q8-q9}, [r6@128], r3
55 vld1.8 {q10-q11}, [r6@128], r3
60 pld [r6]
62 pld [r6, r3]
68 pop {r4-r6, pc}
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/neon/
Dsubtract_neon.asm32 ldr r6, [r0, #vp8_block_src_stride]
36 vld1.8 {d0}, [r3], r6 ;load src
38 vld1.8 {d2}, [r3], r6
40 vld1.8 {d4}, [r3], r6
42 vld1.8 {d6}, [r3], r6
70 mov r6, #32 ; "diff" stride x2
92 vst1.16 {q8}, [r0], r6 ;store diff
93 vst1.16 {q9}, [r5], r6
94 vst1.16 {q10}, [r0], r6
95 vst1.16 {q11}, [r5], r6
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv6/
Dvp8_mse16x16_armv6.asm40 ldr r6, [r2, #0x0] ; load 4 ref pixels
44 usub8 r8, r5, r6 ; calculate difference
47 usub8 r9, r6, r5 ; calculate difference with reversed operands
53 usad8 r6, r8, lr ; calculate sum of negative differences
59 uxtb16 r6, r8 ; byte (two pixels) to halfwords
61 smlad r4, r6, r6, r4 ; dual signed multiply, add and accumulate (1)
64 ldr r6, [r2, #0x4] ; load 4 ref pixels
67 usub8 r8, r5, r6 ; calculate difference
69 usub8 r9, r6, r5 ; calculate difference with reversed operands
74 usad8 r6, r8, lr ; calculate sum of negative differences
[all …]
Dvp8_short_fdct4x4_armv6.asm34 qadd16 r6, r4, r5 ; [i1+i2 | i0+i3] = [b1 | a1] without shift
43 smuad r4, r6, lr ; o0 = (i1+i2)*8 + (i0+i3)*8
44 smusd r5, r6, lr ; o2 = (i1+i2)*8 - (i0+i3)*8
46 smlad r6, r7, r12, r11 ; o1 = (c1 * 2217 + d1 * 5352 + 14500)
51 pkhbt r3, r4, r6, lsl #4 ; [o1 | o0], keep in register for PART 2
52 pkhbt r6, r5, r7, lsl #4 ; [o3 | o2]
54 str r6, [r1, #4]
59 qadd16 r6, r8, r9 ; [i5+i6 | i4+i7] = [b1 | a1] without shift
68 smuad r9, r6, lr ; o4 = (i5+i6)*8 + (i4+i7)*8
69 smusd r8, r6, lr ; o6 = (i5+i6)*8 - (i4+i7)*8
[all …]
Dvp8_subtract_armv6.asm33 ldr r6, [r0, #vp8_block_src_diff]
60 str r0, [r6, #0] ; diff
61 str r1, [r6, #4] ; diff
63 add r6, r6, r2, lsl #1 ; update diff pointer
90 ldr r6, [r1] ; usrc (A)
93 uxtb16 r8, r6 ; [s2 | s0] (A)
95 uxtb16 r10, r6, ror #8 ; [s3 | s1] (A)
98 usub16 r6, r8, r9 ; [d2 | d0] (A)
104 pkhbt r8, r6, r7, lsl #16 ; [d1 | d0] (A)
105 pkhtb r9, r7, r6, asr #16 ; [d3 | d2] (A)
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv5te/
Dvp8_packtokens_mbrow_armv5.asm50 ldr r6, _VP8_COMMON_MBrows_
53 ldr r5, [r4, r6] ; load up mb_rows
81 ldrb r6, [r1, #tokenextra_token] ; t
84 add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
89 ldr r6, [r4, #vp8_token_value] ; v
101 lsl r12, r6, r4 ; r12 = v << 32 - n
112 mul r6, r4, r7 ; ((range-1) * pp[i>>1]))
120 add r4, r7, r6, lsr #8 ; 1 + (((range-1) * pp[i>>1]) >> 8)
126 clz r6, r4
127 sub r6, r6, #24 ; shift
[all …]
Dvp8_packtokens_armv5.asm60 ldrb r6, [r1, #tokenextra_token] ; t
63 add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
68 ldr r6, [r4, #vp8_token_value] ; v
80 lsl r12, r6, r4 ; r12 = v << 32 - n
91 mul r6, r4, r7 ; ((range-1) * pp[i>>1]))
99 add r4, r7, r6, lsr #8 ; 1 + (((range-1) * pp[i>>1]) >> 8)
105 clz r6, r4
106 sub r6, r6, #24 ; shift
110 adds r3, r3, r6 ; count += shift
111 lsl r5, r4, r6 ; range <<= shift
[all …]
Dvp8_packtokens_partitions_armv5.asm51 ldr r6, _VP8_COMMON_MBrows_
54 ldr r5, [r4, r6] ; load up mb_rows
110 ldrb r6, [r1, #tokenextra_token] ; t
113 add r4, r4, r6, lsl #3 ; a = vp8_coef_encodings + t
118 ldr r6, [r4, #vp8_token_value] ; v
130 lsl r12, r6, r4 ; r12 = v << 32 - n
141 mul r6, r4, r7 ; ((range-1) * pp[i>>1]))
149 add r4, r7, r6, lsr #8 ; 1 + (((range-1) * pp[i>>1]) >> 8)
155 clz r6, r4
156 sub r6, r6, #24 ; shift
[all …]
Dboolhuff_armv5te.asm71 mul r6, r4, r7 ; ((range-1) * probability)
74 add r4, r7, r6, lsr #8 ; 1 + (((range-1) * probability) >> 8)
80 clz r6, r4
81 sub r6, r6, #24 ; shift
85 adds r3, r3, r6 ; count += shift
86 lsl r5, r4, r6 ; range <<= shift
89 sub r6, r6, r3 ; offset = shift - count
90 sub r4, r6, #1 ; offset-1
113 rsb r4, r6, #24 ; 24-offset
117 lsl r2, r2, r6 ; lowvalue <<= offset
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/
Dbuildintrapredictorsmby_neon.asm58 sub r6, r0, r2
59 vld1.8 {q1}, [r6]
65 vmov.32 r6, d9[0]
67 add r12, r4, r6
82 ldrb r6, [r0], r2
87 add r12, r12, r6
92 ldrb r6, [r0], r2
97 add r12, r12, r6
102 ldrb r6, [r0], r2
107 add r12, r12, r6
[all …]
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/
Didctllm_altivec.asm138 lwz r6, 0(r1)
139 stw r6, 0(r4)
140 lwz r6, 4(r1)
141 stw r6, 4(r4)
145 lwz r6, 8(r1)
146 stw r6, 0(r4)
147 lwz r6, 12(r1)
148 stw r6, 4(r4)
153 lwz r6, 0(r1)
154 stw r6, 0(r4)
[all …]

123