/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/ |
D | vp9_loopfilter_filters.c |
    23  uint8_t p1, uint8_t p0,  in filter_mask() argument
    29  mask |= (abs(p1 - p0) > limit) * -1;  in filter_mask()
    33  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;  in filter_mask()
    39  uint8_t p1, uint8_t p0,  in flat_mask4() argument
    43  mask |= (abs(p1 - p0) > thresh) * -1;  in flat_mask4()
    45  mask |= (abs(p2 - p0) > thresh) * -1;  in flat_mask4()
    47  mask |= (abs(p3 - p0) > thresh) * -1;  in flat_mask4()
    55  uint8_t p0, uint8_t q0,  in flat_mask5() argument
    58  int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);  in flat_mask5()
    59  mask |= (abs(p4 - p0) > thresh) * -1;  in flat_mask5()
    [all …]
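The lines above come from the scalar VP9 reference loop filter: each activity test ORs 0xff into the mask when a pixel difference exceeds its threshold, and the edge is filtered only when every test passes. Below is a minimal C sketch reconstructed from the lines listed here plus the symmetric q-side tests; the helper name is ours and the body is an approximation of the file, not a verbatim copy.

    #include <stdlib.h>   /* abs() */
    #include <stdint.h>

    /* Sketch of the VP9 scalar edge mask: returns ~0 (filter the edge) only if
     * no neighbouring difference exceeds `limit` and the combined edge
     * difference abs(p0-q0)*2 + abs(p1-q1)/2 stays within `blimit`. */
    static int8_t filter_mask_sketch(uint8_t limit, uint8_t blimit,
                                     uint8_t p3, uint8_t p2, uint8_t p1, uint8_t p0,
                                     uint8_t q0, uint8_t q1, uint8_t q2, uint8_t q3) {
      int8_t mask = 0;
      mask |= (abs(p3 - p2) > limit) * -1;
      mask |= (abs(p2 - p1) > limit) * -1;
      mask |= (abs(p1 - p0) > limit) * -1;                          /* line 29 above */
      mask |= (abs(q1 - q0) > limit) * -1;
      mask |= (abs(q2 - q1) > limit) * -1;
      mask |= (abs(q3 - q2) > limit) * -1;
      mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1;  /* line 33 above */
      return ~mask;   /* 0xff = quiet edge, apply the filter */
    }

The flat_mask4()/flat_mask5() lines follow the same pattern with `thresh`, comparing p2..p4 and q2..q4 against p0/q0 to decide whether the wider "flat" filter may be used on that edge.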
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/mips/dspr2/ |
D | loopfilter_filters_dspr2.c |
    48  uint32_t p0,  in vp8_filter_mask_vec_mips() argument
    120  [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0),  in vp8_filter_mask_vec_mips()
    150  : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3),  in vp8_filter_mask_vec_mips()
    355  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;  in vp8_loop_filter_horizontal_edge_mips() local
    396  p0 = *((uint32_t *)(s0));  in vp8_loop_filter_horizontal_edge_mips()
    400  vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,  in vp8_loop_filter_horizontal_edge_mips()
    441  p0 = *((uint32_t *)(s0));  in vp8_loop_filter_horizontal_edge_mips()
    445  vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,  in vp8_loop_filter_horizontal_edge_mips()
    486  p0 = *((uint32_t *)(s0));  in vp8_loop_filter_horizontal_edge_mips()
    490  vp8_filter_mask_vec_mips(limit, flimit, p1, p2, pm1, p0, p3, p4, p5, p6,  in vp8_loop_filter_horizontal_edge_mips()
    [all …]
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/mips/dspr2/ |
D | vp9_loopfilter_masks_dspr2.h |
    28  uint32_t p1, uint32_t p0,  in vp9_filter_hev_mask_dspr2() argument
    94  [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0),  in vp9_filter_hev_mask_dspr2()
    124  : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3),  in vp9_filter_hev_mask_dspr2()
    135  uint32_t p1, uint32_t p0,  in vp9_filter_hev_mask_flatmask4_dspr2() argument
    242  [p1] "r" (p1), [p0] "r" (p0), [q1] "r" (q1), [q0] "r" (q0),  in vp9_filter_hev_mask_flatmask4_dspr2()
    273  : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [r3] "r" (r3),  in vp9_filter_hev_mask_flatmask4_dspr2()
    284  uint32_t p0, uint32_t q0,  in vp9_flatmask5() argument
    361  [p1] "r" (p1), [p0] "r" (p0), [q0] "r" (q0), [q1] "r" (q1),  in vp9_flatmask5()
|
D | vp9_mbloop_loopfilter_dspr2.c |
    36  uint32_t p3, p2, p1, p0, q0, q1, q2, q3;  in vp9_lpf_horizontal_8_dspr2() local
    78  : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),  in vp9_lpf_horizontal_8_dspr2()
    85  p1, p0, p3, p2, q0, q1, q2, q3,  in vp9_lpf_horizontal_8_dspr2()
    89  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_horizontal_8_dspr2()
    126  : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),  in vp9_lpf_horizontal_8_dspr2()
    133  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_horizontal_8_dspr2()
    231  : [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0),  in vp9_lpf_horizontal_8_dspr2()
    334  uint32_t p3, p2, p1, p0, q3, q2, q1, q0;  in vp9_lpf_vertical_8_dspr2() local
    373  : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),  in vp9_lpf_vertical_8_dspr2()
    411  [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3),  in vp9_lpf_vertical_8_dspr2()
    [all …]
|
D | vp9_loopfilter_filters_dspr2.c |
    32  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;  in vp9_lpf_horizontal_4_dspr2() local
    86  : [pm1] "=&r" (pm1), [p0] "=&r" (p0), [p5] "=&r" (p5),  in vp9_lpf_horizontal_4_dspr2()
    92  pm1, p0, p3, p4, p5, p6,  in vp9_lpf_horizontal_4_dspr2()
    125  uint32_t pm1, p0, p1, p2, p3, p4, p5, p6;  in vp9_lpf_vertical_4_dspr2() local
    163  p0 = *((uint32_t *)(s3 - 4));  in vp9_lpf_vertical_4_dspr2()
    187  [p2] "+r" (p2), [p1] "+r" (p1), [p0] "+r" (p0), [pm1] "+r" (pm1),  in vp9_lpf_vertical_4_dspr2()
    221  p0, p3, p4, p5, p6, thresh_vec,  in vp9_lpf_vertical_4_dspr2()
|
D | vp9_loopfilter_filters_dspr2.h |
    376  const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;  in vp9_mbfilter_dspr2() local
    435  : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [q1] "r" (q1),  in vp9_mbfilter_dspr2()
    449  uint32_t p1, uint32_t p0,  in vp9_mbfilter1_dspr2() argument
    513  : [p0] "r" (p0), [q0] "r" (q0), [p1] "r" (p1), [q1] "r" (q1),  in vp9_mbfilter1_dspr2()
    535  const uint32_t p3 = *op3, p2 = *op2, p1 = *op1, p0 = *op0;  in vp9_wide_mbfilter_dspr2() local
    564  [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),  in vp9_wide_mbfilter_dspr2()
    650  [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),  in vp9_wide_mbfilter_dspr2()
|
D | vp9_loopfilter_macros_dspr2.h |
    362  : [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), \
    402  : [p3] "r" (p3), [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0), \
    436  : [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0), \
|
D | vp9_mblpf_vert_loopfilter_dspr2.c |
    34  uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;  in vp9_lpf_vertical_16_dspr2() local
    78  [p0] "=&r" (p0), [p7] "=&r" (p7), [p6] "=&r" (p6),  in vp9_lpf_vertical_16_dspr2()
    132  [p0] "+r" (p0), [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3),  in vp9_lpf_vertical_16_dspr2()
    252  p1, p0, p3, p2, q0, q1, q2, q3,  in vp9_lpf_vertical_16_dspr2()
    255  vp9_flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);  in vp9_lpf_vertical_16_dspr2()
    260  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_vertical_16_dspr2()
    294  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_vertical_16_dspr2()
    469  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_vertical_16_dspr2()
|
D | vp9_mblpf_horiz_loopfilter_dspr2.c |
    36  uint32_t p7, p6, p5, p4, p3, p2, p1, p0, q0, q1, q2, q3, q4, q5, q6, q7;  in vp9_lpf_horizontal_16_dspr2() local
    91  : [p3] "=&r" (p3), [p2] "=&r" (p2), [p1] "=&r" (p1), [p0] "=&r" (p0),  in vp9_lpf_horizontal_16_dspr2()
    114  p1, p0, p3, p2, q0, q1, q2, q3,  in vp9_lpf_horizontal_16_dspr2()
    117  vp9_flatmask5(p7, p6, p5, p4, p0, q0, q4, q5, q6, q7, &flat2);  in vp9_lpf_horizontal_16_dspr2()
    122  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_horizontal_16_dspr2()
    168  [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),  in vp9_lpf_horizontal_16_dspr2()
    211  : [p2] "r" (p2), [p1] "r" (p1), [p0] "r" (p0),  in vp9_lpf_horizontal_16_dspr2()
    218  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_horizontal_16_dspr2()
    402  vp9_filter1_dspr2(mask, hev, p1, p0, q0, q1,  in vp9_lpf_horizontal_16_dspr2()
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/x86/ |
D | loopfilter_mmx.asm |
    100  movq mm4, [rsi+rax]        ; p0
    101  movq mm5, mm4              ; p0
    102  psubusb mm4, mm3           ; p0-=p1
    103  psubusb mm3, mm5           ; p1-=p0
    104  por mm4, mm3               ; abs(p1 - p0)
    117  movq mm6, mm5              ; p0
    119  psubusb mm5, mm3           ; p0-=q0
    120  psubusb mm3, mm6           ; q0-=p0
    121  por mm5, mm3               ; abs(p0 - q0)
    122  paddusb mm5, mm5           ; abs(p0-q0)*2
    [all …]
|
D | loopfilter_sse2.asm |
    111  movdqa xmm4, [rsi+rax]     ; p0
    114  movlps xmm4, [rsi + rcx*2] ; p0
    119  movdqa xmm5, xmm4          ; p0
    120  psubusb xmm4, xmm6         ; p0-=p1
    122  psubusb xmm6, xmm5         ; p1-=p0
    124  por xmm6, xmm4             ; abs(p1 - p0)
    144  movdqa xmm6, xmm5          ; p0
    147  psubusb xmm5, xmm3         ; p0-=q0
    148  psubusb xmm3, xmm6         ; q0-=p0
    149  por xmm5, xmm3             ; abs(p0 - q0)
    [all …]
|
D | subpixel_mmx.asm |
    67  psrlq mm5, 16              ; mm5 = p0..p5;
    68  punpcklbw mm5, mm0         ; mm5 = p0..p3
    157  movq mm3, [rsi+rdx]        ; mm3 = p0..p8 = row -1
    161  movq mm4, [rsi + 4*rdx]    ; mm4 = p0..p3 = row 2
    165  movq mm4, [rsi + 2*rdx]    ; mm4 = p0..p3 = row 0
    169  movq mm4, [rsi]            ; mm4 = p0..p3 = row -2
    175  movq mm4, [rsi + 2*rdx]    ; mm4 = p0..p3 = row 1
    179  movq mm4, [rsi + 4*rdx]    ; mm4 = p0..p3 = row 3
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/x86/ |
D | vp9_loopfilter_mmx.asm |
    100  movq mm4, [rsi+rax]        ; p0
    101  movq mm5, mm4              ; p0
    102  psubusb mm4, mm3           ; p0-=p1
    103  psubusb mm3, mm5           ; p1-=p0
    104  por mm4, mm3               ; abs(p1 - p0)
    117  movq mm6, mm5              ; p0
    119  psubusb mm5, mm3           ; p0-=q0
    120  psubusb mm3, mm6           ; q0-=p0
    121  por mm5, mm3               ; abs(p0 - q0)
    122  paddusb mm5, mm5           ; abs(p0-q0)*2
    [all …]
|
D | vp9_loopfilter_intrin_sse2.c |
    388  __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;  in mb_lpf_horizontal_edge_w_sse2_16() local
    396  p0 = _mm_loadu_si128((__m128i *)(s - 1 * p));  in mb_lpf_horizontal_edge_w_sse2_16()
    407  _mm_store_si128((__m128i *)&ap[0 * 16], p0);  in mb_lpf_horizontal_edge_w_sse2_16()
    416  const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),  in mb_lpf_horizontal_edge_w_sse2_16()
    417  _mm_subs_epu8(p0, p1));  in mb_lpf_horizontal_edge_w_sse2_16()
    422  __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),  in mb_lpf_horizontal_edge_w_sse2_16()
    423  _mm_subs_epu8(q0, p0));  in mb_lpf_horizontal_edge_w_sse2_16()
    464  __m128i ps0 = _mm_xor_si128(p0, t80);  in mb_lpf_horizontal_edge_w_sse2_16()
    512  work = _mm_max_epu8(_mm_or_si128(_mm_subs_epu8(p2, p0),  in mb_lpf_horizontal_edge_w_sse2_16()
    513  _mm_subs_epu8(p0, p2)),  in mb_lpf_horizontal_edge_w_sse2_16()
    [all …]
|
D | vp9_loopfilter_intrin_avx2.c |
    402  __m128i p4, p3, p2, p1, p0, q0, q1, q2, q3, q4;  in mb_lpf_horizontal_edge_w_avx2_16() local
    416  p0 = _mm_loadu_si128((__m128i *) (s - 1 * p));  in mb_lpf_horizontal_edge_w_avx2_16()
    424  const __m128i abs_p1p0 = _mm_or_si128(_mm_subs_epu8(p1, p0),  in mb_lpf_horizontal_edge_w_avx2_16()
    425  _mm_subs_epu8(p0, p1));  in mb_lpf_horizontal_edge_w_avx2_16()
    430  __m128i abs_p0q0 = _mm_or_si128(_mm_subs_epu8(p0, q0),  in mb_lpf_horizontal_edge_w_avx2_16()
    431  _mm_subs_epu8(q0, p0));  in mb_lpf_horizontal_edge_w_avx2_16()
    470  __m128i ps0 = _mm_xor_si128(p0, t80);  in mb_lpf_horizontal_edge_w_avx2_16()
    523  _mm_or_si128(_mm_subs_epu8(p2, p0), _mm_subs_epu8(p0, p2)),  in mb_lpf_horizontal_edge_w_avx2_16()
    527  _mm_or_si128(_mm_subs_epu8(p3, p0), _mm_subs_epu8(p0, p3)),  in mb_lpf_horizontal_edge_w_avx2_16()
    531  _mm_or_si128(_mm_subs_epu8(p4, p0), _mm_subs_epu8(p0, p4)),  in mb_lpf_horizontal_edge_w_avx2_16()
    [all …]
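Both the SSE2 and AVX2 paths (and the MMX/SSE2 assembly above) build abs(a - b) for unsigned bytes out of two saturating subtractions OR'd together, since SSE2 has no packed "absolute difference and keep per lane" instruction. A small, self-contained intrinsics sketch of that idiom and of the blimit test it feeds; helper names are ours, not the library's:

    #include <emmintrin.h>  /* SSE2 */

    /* abs(a - b) per unsigned byte: (a -sat b) | (b -sat a).
     * One of the two saturating differences is always zero. */
    static __m128i abs_diff_u8(__m128i a, __m128i b) {
      return _mm_or_si128(_mm_subs_epu8(a, b), _mm_subs_epu8(b, a));
    }

    /* Per-lane test  abs(p0-q0)*2 + abs(p1-q1)/2 <= blimit,
     * returning 0xff in every lane that passes. */
    static __m128i blimit_mask(__m128i p1, __m128i p0, __m128i q0, __m128i q1,
                               __m128i blimit) {
      const __m128i fe = _mm_set1_epi8((char)0xfe);
      __m128i abs_p0q0 = abs_diff_u8(p0, q0);
      __m128i abs_p1q1 = abs_diff_u8(p1, q1);
      abs_p0q0 = _mm_adds_epu8(abs_p0q0, abs_p0q0);              /* *2, saturating  */
      abs_p1q1 = _mm_srli_epi16(_mm_and_si128(abs_p1q1, fe), 1); /* /2 per byte     */
      /* sum <= blimit  <=>  (sum -sat blimit) == 0 */
      return _mm_cmpeq_epi8(_mm_subs_epu8(_mm_adds_epu8(abs_p0q0, abs_p1q1), blimit),
                            _mm_setzero_si128());
    }

The AND with 0xfe before the 16-bit shift keeps a bit of the high byte from leaking into the low byte of each 16-bit lane; the `_mm_xor_si128(p0, t80)` lines are the same "flip the sign bit to work in a signed domain" trick the scalar code expresses as `^ 0x80`.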
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/armv6/ |
D | simpleloopfilter_v6.asm |
    43  pkhtb $b3, $a2, $a0, asr #16 ; 33 23 13 03 -- p0
    63  ldr r4, [src, -pstep]      ; p0
    181  uqsub8 r9, r4, r5          ; p0 - q0
    182  uqsub8 r10, r5, r4         ; q0 - p0
    184  orr r9, r9, r10            ; abs(p0 - q0)
    186  uqadd8 r9, r9, r9          ; abs(p0 - q0) * 2
    188  uqadd8 r7, r7, r9          ; abs(p0 - q0)*2 + abs(p1 - q1)/2
    200  eor r4, r4, r2             ; p0 offset to convert to a signed value
    204  qsub8 r6, r5, r4           ; q0 - p0
    206  qadd8 r3, r3, r6           ; vp8_filter += q0 - p0
    [all …]
|
D | loopfilter_v6.asm |
    45  pkhtb $b3, $a2, $a0, asr #16 ; 33 23 13 03 -- p0
    88  ldr r12, [src], pstep      ; p0
    99  uqsub8 r6, r11, r12        ; p1 - p0
    101  uqsub8 r7, r12, r11        ; p0 - p1
    104  orr r6, r6, r7             ; abs (p1-p0)
    111  uqsub8 r11, r12, r9        ; p0 - q0
    112  uqsub8 r12, r9, r12        ; q0 - p0
    115  orr r12, r11, r12          ; abs (p0-q0)
    117  uqadd8 r12, r12, r12       ; abs (p0-q0) * 2
    120  uqadd8 r12, r12, r6        ; abs (p0-q0)*2 + abs (p1-q1)/2
    [all …]
|
D | intra4x4_predict_v6.asm |
    300  add r4, r4, r5, lsl #1     ; [p2+2*p3 | p0+2*p1]
    301  add r4, r4, r6             ; [p2+2*p3+p4 | p0+2*p1+p2]
    302  uxtab16 r4, r4, r12        ; [p2+2*p3+p4+2 | p0+2*p1+p2+2]
    427  uadd16 r4, r6, r7          ; [p2+p3 | p0+p1]
    428  uhadd16 r4, r4, r2         ; [(p2+p3+1)>>1 | (p0+p1+1)>>1]
    431  add r5, r6, r7, lsl #1     ; [p2+2*p3 | p0+2*p1]
    432  add r5, r5, r8             ; [p2+2*p3+p4 | p0+2*p1+p2]
    433  uxtab16 r5, r5, r12        ; [p2+2*p3+p4+2 | p0+2*p1+p2+2]
    501  uadd16 r11, r4, r5         ; [p1+p2 | p0+p1]
    502  uhadd16 r11, r11, r2       ; [(p1+p2+1)>>1 | (p0+p1+1)>>1]
    [all …]
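The [p0+2*p1+p2+2] comments above are the rounded taps that VP8's 4x4 intra predictors apply to neighbouring pixels; the ARMv6 code computes two taps at once in packed 16-bit halves. A rough scalar equivalent of the two taps visible in those comments (helper names are ours):

    #include <stdint.h>

    /* Rounded 1-2-1 tap behind the add/uxtab16 lines: (a + 2*b + c + 2) >> 2. */
    static uint8_t avg3(uint8_t a, uint8_t b, uint8_t c) {
      return (uint8_t)((a + 2 * b + c + 2) >> 2);
    }

    /* Rounded 2-tap average behind the uadd16/uhadd16 lines: (a + b + 1) >> 1. */
    static uint8_t avg2(uint8_t a, uint8_t b) {
      return (uint8_t)((a + b + 1) >> 1);
    }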
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp9/common/arm/neon/ |
D | vp9_mb_lpf_neon.asm |
    47  vld1.u8 {d7}, [r8@64], r1  ; p0
    299  ; d7 p0
    314  vabd.u8 d21, d6, d7        ; abs(p1 - p0)
    321  vmax.u8 d20, d21, d22      ; max(abs(p1 - p0), abs(q1 - q0))
    325  vabd.u8 d24, d7, d8        ; abs(p0 - q0)
    330  vqadd.u8 d24, d24, d24     ; b = abs(p0 - q0) * 2
    336  vabd.u8 d25, d7, d5        ; abs(p0 - p2)
    338  vabd.u8 d27, d4, d7        ; abs(p3 - p0)
    342  vmax.u8 d25, d25, d26      ; max(abs(p0 - p2), abs(q0 - q2))
    343  vmax.u8 d26, d27, d28      ; max(abs(p3 - p0), abs(q3 - q0))
    [all …]
|
D | vp9_loopfilter_16_neon.asm |
    60  vld1.u8 {q6}, [r3@64], r1  ; p0
    93  ; q6 p0
    109  vabd.u8 q13, q5, q6        ; m3 = abs(p1 - p0)
    118  vabd.u8 q9, q6, q7         ; abs(p0 - q0)
    126  vcgt.u8 q13, q13, q2       ; (abs(p1 - p0) > thresh)*-1
    131  vqadd.u8 q9, q9, q9        ; b = abs(p0 - q0) * 2
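The NEON paths get each absolute difference in a single instruction (vabd.u8) and build the thresholds with saturating adds and unsigned compares, 16 pixels per q register. A small intrinsics sketch of the pattern visible above; function names are illustrative, not the library's:

    #include <arm_neon.h>

    /* 0xff in each lane where abs(p1 - p0) > thresh (vabd.u8 + vcgt.u8 above). */
    static uint8x16_t gt_thresh_mask(uint8x16_t p1, uint8x16_t p0, uint8x16_t thresh) {
      return vcgtq_u8(vabdq_u8(p1, p0), thresh);
    }

    /* b = abs(p0 - q0) * 2, with unsigned saturation (the vqadd.u8 line). */
    static uint8x16_t edge_diff_x2(uint8x16_t p0, uint8x16_t q0) {
      uint8x16_t d = vabdq_u8(p0, q0);
      return vqaddq_u8(d, d);
    }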
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ |
D | loopfilter_filters.c |
    28  uc p3, uc p2, uc p1, uc p0,  in vp8_filter_mask() argument
    34  mask |= (abs(p1 - p0) > limit);  in vp8_filter_mask()
    38  mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit);  in vp8_filter_mask()
    43  static signed char vp8_hevmask(uc thresh, uc p1, uc p0, uc q0, uc q1)  in vp8_hevmask() argument
    46  hev |= (abs(p1 - p0) > thresh) * -1;  in vp8_hevmask()
    282  static signed char vp8_simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1)  in vp8_simple_filter_mask() argument
    288  signed char mask = (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1;  in vp8_simple_filter_mask()
    296  signed char p0 = (signed char) * op0 ^ 0x80;  in vp8_simple_filter() local
    302  filter_value = vp8_signed_char_clamp(filter_value + 3 * (q0 - p0));  in vp8_simple_filter()
    313  u = vp8_signed_char_clamp(p0 + Filter2);  in vp8_simple_filter()
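These are the scalar VP8 routines that the NEON, ARMv6 and MMX versions elsewhere in this listing mirror. Below is a sketch of the simple-filter core pieced together from the lines listed here plus the usual signed-char clamp; treat the names and exact clamping as an approximation of the file, not a verbatim copy:

    #include <stdlib.h>   /* abs() */

    typedef unsigned char uc;

    /* Clamp an int to the signed 8-bit range, as vp8_signed_char_clamp() does. */
    static signed char clamp_s8(int t) {
      return (signed char)(t < -128 ? -128 : (t > 127 ? 127 : t));
    }

    /* Edge test from line 288: all-ones when the edge is quiet enough to filter. */
    static signed char simple_filter_mask(uc blimit, uc p1, uc p0, uc q0, uc q1) {
      return (signed char)((abs(p0 - q0) * 2 + abs(p1 - q1) / 2 <= blimit) * -1);
    }

    /* Simple filter core: flip into a signed domain (x ^ 0x80), then move q0 and
     * p0 toward each other by a clamped step derived from 3*(q0 - p0) + (p1 - q1),
     * rounding one side with +4 and the other with +3 before the >>3. */
    static void simple_filter(signed char mask, uc *op1, uc *op0, uc *oq0, uc *oq1) {
      const signed char p1 = (signed char)(*op1 ^ 0x80);
      const signed char p0 = (signed char)(*op0 ^ 0x80);   /* line 296 */
      const signed char q0 = (signed char)(*oq0 ^ 0x80);
      const signed char q1 = (signed char)(*oq1 ^ 0x80);
      signed char f = clamp_s8(p1 - q1);
      f = clamp_s8(f + 3 * (q0 - p0));                      /* line 302 */
      f &= mask;                                            /* no-op on busy edges */
      {
        const signed char Filter1 = (signed char)(clamp_s8(f + 4) >> 3);
        const signed char Filter2 = (signed char)(clamp_s8(f + 3) >> 3);
        *oq0 = (uc)(clamp_s8(q0 - Filter1) ^ 0x80);
        *op0 = (uc)(clamp_s8(p0 + Filter2) ^ 0x80);         /* line 313 */
      }
    }

The vp8_filter_mask() and vp8_hevmask() lines above are the VP8 counterparts of the VP9 masks at the top of this listing; hev ("high edge variance") selects between the 2-tap and 4-tap updates in the normal filter.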
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/arm/neon/ |
D | loopfiltersimplehorizontaledge_neon.asm |
    29  vld1.u8 {q5}, [r3@128], r1 ; p0
    33  vabd.u8 q15, q6, q7        ; abs(p0 - q0)
    36  vqadd.u8 q15, q15, q15     ; abs(p0 - q0) * 2
    40  vqadd.u8 q15, q15, q14     ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
    43  veor q6, q6, q0            ; ps0: p0 offset to convert to a signed value
    47  vcge.u8 q15, q1, q15       ; (abs(p0 - q0)*2 + abs(p1-q1)/2 > limit)*-1
|
D | loopfiltersimpleverticaledge_neon.asm |
    53  vabd.u8 q15, q5, q4        ; abs(p0 - q0)
    56  vqadd.u8 q15, q15, q15     ; abs(p0 - q0) * 2
    60  vqadd.u8 q15, q15, q14     ; abs(p0 - q0) * 2 + abs(p1 - q1) / 2
    63  veor q5, q5, q0            ; ps0: p0 offset to convert to a signed value
    67  vcge.u8 q15, q1, q15       ; abs(p0 - q0)*2 + abs(p1-q1)/2 > flimit*2 + limit)*-1
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/common/ppc/ |
D | recon_altivec.asm |
    17  lvx v1, 0, \Pred           ;# v1 = pred = p0..p15
    19  vmrghb v2, v0, v1          ;# v2 = 16-bit p0..p7
    57  lvx v1, 0, \Pred           ;# v1 = pred = p0..p15
    58  vmrghb v2, v0, v1          ;# v2 = 16-bit p0..p7
    144  lvx v1, 0, r10;            ;# v1 = pred = p0..p15
    150  vmrghb v2, v0, v1;         ;# v2 = 16-bit p0..p7
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/vp8/encoder/arm/armv6/ |
D | vp8_subtract_armv6.asm |
    48  uxtb16 r5, r1              ; [p2 | p0]
    94  uxtb16 r9, r7              ; [p2 | p0] (A)
    111  uxtb16 r9, r11             ; [p2 | p0] (B)
    139  uxtb16 r9, r7              ; [p2 | p0] (A)
    156  uxtb16 r9, r11             ; [p2 | p0] (B)
    196  uxtb16 r9, r7              ; [p2 | p0] (A)
    213  uxtb16 r9, r11             ; [p2 | p0] (B)
    230  uxtb16 r9, r11             ; [p2 | p0] (C)
    247  uxtb16 r9, r11             ; [p2 | p0] (D)
|