
Searched refs:r8 (Results 1 – 25 of 706) sorted by relevance


/external/libhevc/common/arm/
ihevc_intra_pred_luma_mode_18_34.s
129 mov r8,r0
133 vld1.8 {d0},[r8],r6
135 vld1.8 {d1},[r8],r6
137 vld1.8 {d2},[r8],r6
138 vld1.8 {d3},[r8],r6
140 vld1.8 {d4},[r8],r6
141 vld1.8 {d5},[r8],r6
142 vld1.8 {d6},[r8],r6
144 vld1.8 {d7},[r8],r6
152 movne r8,r0
[all …]
ihevc_sao_edge_offset_class3.s
59 @r8=> ht
83 LDR r8,[sp,#0x40] @Loads ht
98 SUB r10,r8,#1 @ht-1
147 SUB r11,r8,#1 @ht - 1
190 MOV r12,r8 @Move ht
220 CMP r8,#4 @Compare ht with 4
228 LDRBEQ r8,[r5] @pu1_avail[0]
229 MOVNE r8,#-1
230 VMOV.8 d8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
234 LDRB r8,[r5,#1] @pu1_avail[1]
[all …]
ihevc_sao_edge_offset_class2_chroma.s
61 @r8=> ht
91 LDR r8,[sp,#0x44] @Loads ht
108 SUB r10,r8,#1 @ht-1
193 SUB r11,r8,#1 @ht - 1
266 MOV r12,r8 @Move ht
297 CMP r8,#4 @Compare ht with 4
304 LDRBEQ r8,[r5] @pu1_avail[0]
306 MOVNE r8,#-1
307 VMOV.8 D8[0],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
310 VMOV.8 D8[1],r8 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
[all …]
ihevc_intra_pred_chroma_mode_18_34.s
128 mov r8,r0
134 vld1.8 {d0,d1},[r8],r6
136 vld1.8 {d2,d3},[r8],r6
138 vld1.8 {d4,d5},[r8],r6
140 vld1.8 {d6,d7},[r8],r6
142 vld1.8 {d8,d9},[r8],r6
144 vld1.8 {d10,d11},[r8],r6
146 vld1.8 {d12,d13},[r8],r6
148 vld1.8 {d14,d15},[r8],r6
155 add r8,r0,#16
[all …]
ihevc_intra_pred_luma_mode2.s
110 mov r8,#-2
122 vld1.8 {d0},[r0],r8
125 vld1.8 {d1},[r10],r8
128 vld1.8 {d2},[r0],r8
129 vld1.8 {d3},[r10],r8
132 vld1.8 {d4},[r0],r8
133 vld1.8 {d5},[r10],r8
134 vld1.8 {d6},[r0],r8
137 vld1.8 {d7},[r10],r8
178 vld1.8 {d0},[r0],r8
[all …]
ihevc_sao_edge_offset_class2.s
59 @r8=> ht
83 LDR r8,[sp,#0x40] @Loads ht
100 SUB r10,r8,#1 @ht-1
146 SUB r11,r8,#1 @ht - 1
180 MOV r12,r8 @Move ht
210 CMP r8,#4 @Compare ht with 4
218 LDRBEQ r8,[r5] @pu1_avail[0]
219 MOVNE r8,#-1 @au1_mask = vsetq_lane_s8(-1, au1_mask, 0)
221 … VMOV.8 d8[0],r8 @au1_mask = vsetq_lane_s8((-1||pu1_avail[0]), au1_mask, 0)
224 LDRB r8,[r5,#1] @pu1_avail[1]
[all …]
/external/boringssl/src/crypto/bn/asm/
rsaz-x86_64.pl
163 movq %rax, %r8
203 addq %r8, %r8 #shlq \$1, %r8
205 adcq %r9, %r9 #shld \$1, %r8, %r9
209 addq %rdx, %r8
212 movq %r8, 8(%rsp)
216 movq 8($inp), %r8
218 mulq %r8
224 mulq %r8
232 mulq %r8
240 mulq %r8
[all …]
/external/llvm/test/MC/ARM/
thumb2-narrow-dp.ll
8 // - Rd, Rn and Rm are < r8
12 // - Rd, Rn and Rm are < r8
23 ANDS r8, r1, r8 // high registers so must use wide encoding
24 ANDS r8, r8, r1
25 ANDS r0, r8, r0
26 ANDS r1, r1, r8
37 // CHECK: ands.w r8, r1, r8 @ encoding: [0x11,0xea,0x08,0x08]
38 // CHECK: ands.w r8, r8, r1 @ encoding: [0x18,0xea,0x01,0x08]
39 // CHECK: ands.w r0, r8, r0 @ encoding: [0x18,0xea,0x00,0x00]
40 // CHECK: ands.w r1, r1, r8 @ encoding: [0x11,0xea,0x08,0x01]
[all …]
basic-thumb2-instructions.s
25 adc r8, r12, #0xaa00aa00
35 @ CHECK: adc r8, r12, #2852170240 @ encoding: [0x4c,0xf1,0xaa,0x28]
73 add r2, r8, #0xff00
97 @ CHECK: add.w r2, r8, #65280 @ encoding: [0x08,0xf5,0x7f,0x42]
120 add r1, r2, r8
124 add.w r4, r8, r1, ror #12
125 add r10, r8
126 add r10, r10, r8
128 @ CHECK: add.w r1, r2, r8 @ encoding: [0x02,0xeb,0x08,0x01]
132 @ CHECK: add.w r4, r8, r1, ror #12 @ encoding: [0x08,0xeb,0x31,0x34]
[all …]
basic-arm-instructions.s
21 adc r7, r8, #(0xff << 16)
22 adc r7, r8, #-2147483638
23 adc r7, r8, #42, #2
24 adc r7, r8, #40, #2
25 adc r7, r8, $40, $2
26 adc r7, r8, 40, 2
27 adc r7, r8, (2 * 20), (1 << 1)
37 adcs r7, r8, #40, #2
44 @ CHECK: adc r7, r8, #16711680 @ encoding: [0xff,0x78,0xa8,0xe2]
45 @ CHECK: adc r7, r8, #-2147483638 @ encoding: [0x2a,0x71,0xa8,0xe2]
[all …]
/external/libvpx/libvpx/vp8/common/arm/armv6/
loopfilter_v6.asm
92 uqsub8 r8, r10, r11 ; p2 - p1
96 orr r8, r8, r10 ; abs (p2-p1)
98 uqsub8 r8, r8, r2 ; compare to limit
100 orr lr, lr, r8
106 uqsub8 r8, r6, r3 ; compare to thresh -- save r8 for later
159 orr r10, r6, r8 ; calculate vp8_hevmask
167 ldr r8, [src], pstep ; p0
173 eor r8, r8, r12 ; p0 offset to convert to a signed value
178 str r8, [sp, #4] ; store ps0 temporarily
183 qsub8 r8, r9, r8 ; vp8_signed_char_clamp(vp8_filter + 3 * ( qs0 - ps0))
[all …]
dequant_idct_v6.asm
68 smulwb r8, r4, r6
71 pkhbt r8, r8, r10, lsl #16
81 usub16 r7, r8, r7
84 usub16 r8, r11, r14
87 uadd16 r6, r8, r7
88 usub16 r7, r8, r7
101 ldr r8, [r0], #4
105 smulwt lr, r3, r8
106 smulwt r10, r4, r8
107 pkhbt r11, r8, r6, lsl #16
[all …]
intra4x4_predict_v6.asm
52 ldr r8, [r0] ; Above
57 usad8 r12, r8, r9
82 ldr r8, [r0] ; Above
92 uxtb16 r10, r8 ; a[2|0]
93 uxtb16 r11, r8, ror #8 ; a[3|1]
141 ldr r8, [r0] ; a[3|2|1|0]
148 uxtb16 r4, r8 ; a[2|0]
149 uxtb16 r5, r8, ror #8 ; a[3|1]
178 ldrb r8, [sp, #48] ; top_left
183 add r8, r8, r4 ; tl + l[0]
[all …]
filter_v6.asm
54 ldrb r8, [r0, #-2] ; load source data
62 pkhbt lr, r8, r9, lsl #16 ; r9 | r8
63 pkhbt r8, r9, r10, lsl #16 ; r10 | r9
69 smuad r8, r8, r4
74 smlad r8, r11, r5, r8
83 smlad r11, r10, r6, r8
88 ldrneb r8, [r0, #-2] ; load data for next loop
144 ldrb r8, [r0, #-2] ; load source data
152 pkhbt lr, r8, r9, lsl #16 ; r9 | r8
153 pkhbt r8, r9, r10, lsl #16 ; r10 | r9
[all …]
/external/aac/libFDK/src/arm/
dct_arm.cpp
117 LDR r8, [r1], #4 // val_tw = *twiddle++; in dct_IV_func1()
121 SMULWT r9, r5, r8 // accuX = accu2*val_tw.l in dct_IV_func1()
122 SMULWB r5, r5, r8 // accu2 = accu2*val_tw.h in dct_IV_func1()
124 SMLAWT r5, r4, r8, r5 // accu2 = accu2*val_tw.h + accu1*val_tw.l in dct_IV_func1()
125 SMLAWB r4, r4, r8, r9 // accu1 = accu1*val_tw.h - accu2*val_tw.l in dct_IV_func1()
127 LDR r8, [r1], #4 // val_tw = *twiddle++; in dct_IV_func1()
131 SMULWB r9, r7, r8 // accuX = accu4*val_tw.h in dct_IV_func1()
132 SMULWT r7, r7, r8 // accu4 = accu4*val_tw.l in dct_IV_func1()
134 SMLAWB r7, r6, r8, r7 // accu4 = accu4*val_tw.l+accu3*val_tw.h in dct_IV_func1()
135 SMLAWT r6, r6, r8, r9 // accu3 = accu3*val_tw.l-accu4*val_tw.h in dct_IV_func1()
[all …]
/external/boringssl/win-x86_64/crypto/modes/
ghash-x86_64.asm
26 movzx r8,BYTE[15+rdi]
34 mov r8,QWORD[8+rax*1+rsi]
37 mov rdx,r8
42 shr r8,4
47 xor r8,QWORD[8+rbx*1+rsi]
52 mov rdx,r8
54 xor r8,r10
58 shr r8,4
62 xor r8,QWORD[8+rax*1+rsi]
67 mov rdx,r8
[all …]
/external/boringssl/mac-x86_64/crypto/modes/
ghash-x86_64.S
15 movzbq 15(%rdi),%r8
23 movq 8(%rsi,%rax,1),%r8
26 movq %r8,%rdx
31 shrq $4,%r8
36 xorq 8(%rsi,%rbx,1),%r8
41 movq %r8,%rdx
43 xorq %r10,%r8
47 shrq $4,%r8
51 xorq 8(%rsi,%rax,1),%r8
56 movq %r8,%rdx
[all …]
/external/boringssl/linux-x86_64/crypto/modes/
ghash-x86_64.S
16 movzbq 15(%rdi),%r8
24 movq 8(%rsi,%rax,1),%r8
27 movq %r8,%rdx
32 shrq $4,%r8
37 xorq 8(%rsi,%rbx,1),%r8
42 movq %r8,%rdx
44 xorq %r10,%r8
48 shrq $4,%r8
52 xorq 8(%rsi,%rax,1),%r8
57 movq %r8,%rdx
[all …]
/external/libvpx/libvpx/vp8/encoder/arm/armv6/
vp8_mse16x16_armv6.asm
44 usub8 r8, r5, r6 ; calculate difference
46 sel r7, r8, lr ; select bytes with positive difference
49 sel r8, r9, lr ; select bytes with negative difference
53 usad8 r6, r8, lr ; calculate sum of negative differences
54 orr r8, r8, r7 ; differences of all 4 pixels
59 uxtb16 r6, r8 ; byte (two pixels) to halfwords
60 uxtb16 r7, r8, ror #8 ; another two pixels to halfwords
67 usub8 r8, r5, r6 ; calculate difference
68 sel r7, r8, lr ; select bytes with positive difference
70 sel r8, r9, lr ; select bytes with negative difference
[all …]
/external/tremolo/Tremolo/
mdctARM.s
189 LDMFD r12,{r8,r9,r10} @ r8 = step
193 MOV r8, r8, LSL #1
210 STRH r6, [r0], r8
228 LDMFD r12,{r8,r9,r10} @ r8 = step
232 MOV r8, r8, LSL #1
249 STRH r6, [r0], r8
274 LDR r8, [r1], #8
278 STMIA r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
326 SMULL r8, r9, r7, r11 @ (r8, r9) = s2*T[1]
329 SMLAL r8, r9, r6, r10 @ (r8, r9) += s0*T[0]
[all …]
mdctLARM.s
187 LDMFD r12,{r8,r9,r10} @ r8 = step
191 MOV r8, r8, LSL #1
209 STRH r6, [r0], r8
227 LDMFD r12,{r8,r9,r10} @ r8 = step
231 MOV r8, r8, LSL #1
250 STRH r6, [r0], r8
275 LDR r8, [r1], #8
279 STMIA r2!,{r3,r4,r5,r6,r7,r8,r12,r14}
369 LDR r8, [r1],#16 @ r8 = ro0 = bX[0]
373 MOV r8, r8, ASR #8
[all …]
/external/libvpx/libvpx/vp9/common/arm/neon/
vp9_mb_lpf_neon.asm
28 push {r4-r8, lr}
38 sub r8, r0, r1, lsl #3 ; move src pointer down by 8 lines
40 vld1.u8 {d0}, [r8@64], r1 ; p7
41 vld1.u8 {d1}, [r8@64], r1 ; p6
42 vld1.u8 {d2}, [r8@64], r1 ; p5
43 vld1.u8 {d3}, [r8@64], r1 ; p4
44 vld1.u8 {d4}, [r8@64], r1 ; p3
45 vld1.u8 {d5}, [r8@64], r1 ; p2
46 vld1.u8 {d6}, [r8@64], r1 ; p1
47 vld1.u8 {d7}, [r8@64], r1 ; p0
[all …]
/external/jpeg/
armv6_idct.S
71 stmdb sp!, {r4, r5, r6, r7, r8, r9, r10, r11, r12, r14}
83 ldmdb r12!, {r8, r9, r10, r11}
96 mul r4, r8, r4
100 orreqs r8, r1, r2
101 orreqs r8, r3, r5
102 orreqs r8, r6, r7
115 ldmdb r12!, {r8, r9, r10, r11}
126 mul r7, r8, r7
145 add r8, r5, r7
151 smulwb r8, r8, r10
[all …]
/external/boringssl/linux-arm/crypto/aes/
aes-armv4.S
265 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,pc}
267 ldmia sp!,{r4,r5,r6,r7,r8,r9,r10,r11,r12,lr}
288 and r8,lr,r0,lsr#8
294 ldr r5,[r10,r8,lsl#2] @ Te2[s0>>8]
295 and r8,lr,r1
302 ldr r8,[r10,r8,lsl#2] @ Te3[s1>>0]
307 eor r5,r5,r8,ror#8
308 and r8,lr,r2,lsr#16 @ i1
313 ldr r8,[r10,r8,lsl#2] @ Te1[s2>>16]
320 eor r1,r1,r8,ror#8
[all …]
/external/libmpeg2/common/arm/
impeg2_format_conv.s
117 stmfd sp!, {r4-r8, lr}
131 ldr r8, [sp, #48] @// Load u2_dest_stride_y from stack
135 sub r8, r8, r5 @// Destination increment
164 add r3, r3, r8
179 ldr r8, [sp, #52] @// Load u2_dest_stride_uv from stack
183 sub r8, r8, r5 @// Destination increment
220 add r3, r3, r8
224 ldmfd sp!, {r4-r8, pc}
278 stmfd sp!, {r4-r8, lr}
293 ldr r8, [sp, #48] @// Load u2_dest_stride_y from stack
[all …]
