/external/llvm/test/MC/ARM/
  neon-bitwise-encoding.s
    110  veor     q4, q7, q3
    111  veor.8   q4, q7, q3
    112  veor.16  q4, q7, q3
    113  veor.32  q4, q7, q3
    114  veor.64  q4, q7, q3
    116  veor.i8  q4, q7, q3
    117  veor.i16 q4, q7, q3
    118  veor.i32 q4, q7, q3
    119  veor.i64 q4, q7, q3
    121  veor.s8  q4, q7, q3
    [all …]
/external/boringssl/linux-arm/crypto/aes/
  aesv8-armx.S
    36   vld1.8  {q3},[r0]!
    46   vtbl.8  d20,{q3},d4
    47   vtbl.8  d21,{q3},d5
    48   vext.8  q9,q0,q3,#12
    49   vst1.32 {q3},[r2]!
    53   veor    q3,q3,q9
    55   veor    q3,q3,q9
    58   veor    q3,q3,q9
    60   veor    q3,q3,q10
    65   vtbl.8  d20,{q3},d4
    [all …]
/external/freetype/src/base/
  ftbbox.c
    252  FT_Pos  q3,                                 in cubic_peak() argument
    270  FT_ABS( q3 ) |                              in cubic_peak()
    281  q3 <<= shift;                               in cubic_peak()
    288  q3 >>= -shift;                              in cubic_peak()
    294  while ( q2 > 0 || q3 > 0 )                  in cubic_peak()
    297  if ( q1 + q2 > q3 + q4 )  /* first half */  in cubic_peak()
    299  q4 = q4 + q3;                               in cubic_peak()
    300  q3 = q3 + q2;                               in cubic_peak()
    302  q4 = q4 + q3;                               in cubic_peak()
    303  q3 = q3 + q2;                               in cubic_peak()
    [all …]
/external/pdfium/third_party/freetype/src/base/
  ftbbox.c
    252  FT_Pos  q3,                                 in cubic_peak() argument
    268  FT_MSB( FT_ABS( q1 ) | FT_ABS( q2 ) | FT_ABS( q3 ) | FT_ABS( q4 ) );  in cubic_peak()
    278  q3 <<= shift;                               in cubic_peak()
    285  q3 >>= -shift;                              in cubic_peak()
    291  while ( q2 > 0 || q3 > 0 )                  in cubic_peak()
    294  if ( q1 + q2 > q3 + q4 )  /* first half */  in cubic_peak()
    296  q4 = q4 + q3;                               in cubic_peak()
    297  q3 = q3 + q2;                               in cubic_peak()
    299  q4 = q4 + q3;                               in cubic_peak()
    300  q3 = q3 + q2;                               in cubic_peak()
    [all …]
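The cubic_peak() hits in both ftbbox.c copies are FreeType's bisection search for the maximum of a scalar cubic: the q4 += q3; q3 += q2 folds are one half of a de Casteljau split (the halving happens separately via shifts). A minimal floating-point sketch of the same idea, with an illustrative name and loop bound rather than FreeType's fixed-point bookkeeping:

    /* One de Casteljau split per iteration on a scalar cubic with
       control values q1..q4, keeping whichever half can contain the
       peak.  FreeType's cubic_peak() performs the same folding in
       scaled integer arithmetic, replacing divisions with shifts. */
    static double cubic_peak_sketch(double q1, double q2, double q3, double q4)
    {
        double peak = 0.0;
        int    i;

        for (i = 0; i < 32; i++) {          /* 32 halvings: ample precision */
            if (q2 <= 0.0 && q3 <= 0.0)     /* interior no longer positive  */
                break;
            if (q1 + q2 > q3 + q4) {        /* peak is in the first half    */
                q4 = (q1 + 3 * q2 + 3 * q3 + q4) / 8;
                q3 = (q1 + 2 * q2 + q3) / 4;
                q2 = (q1 + q2) / 2;
            } else {                        /* peak is in the second half   */
                q1 = (q1 + 3 * q2 + 3 * q3 + q4) / 8;
                q2 = (q2 + 2 * q3 + q4) / 4;
                q3 = (q3 + q4) / 2;
            }
            if (q1 > peak) peak = q1;       /* endpoints lie on the curve   */
            if (q4 > peak) peak = q4;
        }
        return peak;
    }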
/external/apache-commons-math/src/main/java/org/apache/commons/math/geometry/
  Rotation.java
    113  private final double q3;                    field in Rotation
    133  public Rotation(double q0, double q1, double q2, double q3,  in Rotation() argument
    138  double inv = 1.0 / FastMath.sqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3);  in Rotation()
    142  q3 *= inv;                                  in Rotation()
    148  this.q3 = q3;                               in Rotation()
    186  q3 = coeff * axis.getZ();                   in Rotation()
    262  q3 = inv * (ort[0][1] - ort[1][0]);         in Rotation()
    271  q3 = inv * (ort[0][2] + ort[2][0]);         in Rotation()
    280  q3 = inv * (ort[2][1] + ort[1][2]);         in Rotation()
    284  q3 = 0.5 * FastMath.sqrt(s + 1.0);          in Rotation()
    [all …]
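The Rotation constructor hits show the quaternion being normalized before it is stored: every component is scaled by inv = 1/sqrt(q0*q0 + q1*q1 + q2*q2 + q3*q3). The same step as a standalone C sketch (struct and function names are made up for illustration):

    #include <math.h>

    struct quat { double q0, q1, q2, q3; };

    /* Scale all four components so the quaternion has unit norm,
       mirroring the `inv = 1.0 / sqrt(...)` step in Rotation's
       constructor.  Assumes a non-zero quaternion. */
    static void quat_normalize(struct quat *q)
    {
        double inv = 1.0 / sqrt(q->q0 * q->q0 + q->q1 * q->q1 +
                                q->q2 * q->q2 + q->q3 * q->q3);
        q->q0 *= inv;
        q->q1 *= inv;
        q->q2 *= inv;
        q->q3 *= inv;
    }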
/external/boringssl/linux-arm/crypto/modes/
  ghashv8-armx.S
    13   vext.8  q3,q9,q9,#8
    17   vshr.u64 q10,q3,#63
    20   vshl.i64 q3,q3,#1
    23   vorr    q3,q3,q10         @ H<<<=1
    24   veor    q12,q3,q8         @ twisted H
    67   vext.8  q3,q9,q9,#8
    69   .byte   0x86,0x0e,0xa8,0xf2    @ pmull q0,q12,q3   @ H.lo·Xi.lo
    70   veor    q9,q9,q3          @ Karatsuba pre-processing
    71   .byte   0x87,0x4e,0xa9,0xf2    @ pmull2 q2,q12,q3  @ H.hi·Xi.hi
    129  vext.8  q3,q8,q8,#8       @ rotate I[0]
    [all …]
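Lines 17-23 of ghashv8-armx.S are the NEON idiom for a 128-bit left shift by one (the `H<<<=1` comment): each 64-bit lane is shifted, and the bit shifted out of the low lane is OR'd into the high lane (an elided vext moves the carry across lanes). A scalar equivalent, assuming the value is kept as two 64-bit halves:

    #include <stdint.h>

    /* Shift the 128-bit value (hi:lo) left by one bit, carrying the
       top bit of `lo` into `hi` -- the scalar analogue of the
       vshr/vshl/vorr sequence used to compute the twisted H. */
    static void shl128_by1(uint64_t *hi, uint64_t *lo)
    {
        uint64_t carry = *lo >> 63;   /* bit crossing the lane boundary */
        *hi = (*hi << 1) | carry;
        *lo <<= 1;
    }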
/external/libvpx/libvpx/vp8/common/arm/neon/
  dequant_idct_neon.c
    24   int16x8_t q1, q2, q3, q4, q5, q6;           in vp8_dequant_idct_add_neon() local
    32   q3 = vld1q_s16(input);                      in vp8_dequant_idct_add_neon()
    53   q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3),  in vp8_dequant_idct_add_neon()
    63   q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);       in vp8_dequant_idct_add_neon()
    66   q3 = vshrq_n_s16(q3, 1);                    in vp8_dequant_idct_add_neon()
    69   q3 = vqaddq_s16(q3, q2);                    in vp8_dequant_idct_add_neon()
    72   d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4));  in vp8_dequant_idct_add_neon()
    73   d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4));  in vp8_dequant_idct_add_neon()
    90   q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);       in vp8_dequant_idct_add_neon()
    96   q3 = vshrq_n_s16(q3, 1);                    in vp8_dequant_idct_add_neon()
    [all …]
  sad_neon.c
    21   uint64x2_t q3;                              in vp8_sad8x8_neon() local
    40   q3 = vpaddlq_u32(q1);                       in vp8_sad8x8_neon()
    41   d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),   in vp8_sad8x8_neon()
    42   vreinterpret_u32_u64(vget_high_u64(q3)));   in vp8_sad8x8_neon()
    55   uint64x2_t q3;                              in vp8_sad8x16_neon() local
    74   q3 = vpaddlq_u32(q1);                       in vp8_sad8x16_neon()
    75   d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),   in vp8_sad8x16_neon()
    76   vreinterpret_u32_u64(vget_high_u64(q3)));   in vp8_sad8x16_neon()
    120  uint64x2_t q3;                              in vp8_sad16x16_neon() local
    142  q3 = vpaddlq_u32(q1);                       in vp8_sad16x16_neon()
    [all …]
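Each vp8_sad*_neon() function above finishes with the same horizontal reduction: vpaddlq_u32 pairwise-adds the 32-bit partial sums into 64-bit lanes, and vadd_u32 then folds the low and high halves. A self-contained sketch of that final fold (the helper name is illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Fold a vector of four 32-bit partial sums down to one scalar,
       the way vp8_sad8x8_neon() and friends finish: pairwise widen
       to 64-bit lanes, then add the low and high halves. */
    static uint32_t horizontal_sum_u32(uint32x4_t acc)
    {
        uint64x2_t pair = vpaddlq_u32(acc);
        uint32x2_t sum  = vadd_u32(vreinterpret_u32_u64(vget_low_u64(pair)),
                                   vreinterpret_u32_u64(vget_high_u64(pair)));
        return vget_lane_u32(sum, 0);
    }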
  buildintrapredictorsmby_neon.asm
    62   vpaddl.u16 q3, q2
    63   vpaddl.u32 q4, q3
    184  vdup.u8 q3, r6
    188  vst1.u8 {q3}, [r1]!
    197  vdup.u8 q3, r6
    201  vst1.u8 {q3}, [r1]!
    211  vdup.u8 q3, r6
    215  vst1.u8 {q3}, [r1]!
    224  vdup.u8 q3, r6
    228  vst1.u8 {q3}, [r1]!
    [all …]
  vp8_subpixelvariance16x16s_neon.asm
    57   vext.8    q3, q2, q3, #1
    63   vrhadd.u8 q1, q2, q3
    65   vrhadd.u8 q3, q6, q7
    74   vsubl.u8  q3, d7, d29
    101  vpadal.s16 q8, q3
    152  vld1.8    {q3}, [r2], r3
    170  vsubl.u8  q3, d13, d15
    200  vpadal.s16 q8, q3
    261  vext.8    q3, q2, q3, #1  ;construct src_ptr[1]
    266  vrhadd.u8 q1, q2, q3      ;(src_ptr[0]+src_ptr[1])/round/shift right 1
    [all …]
/external/libvpx/libvpx/vp9/common/
  vp9_loopfilter_filters.c
    25   uint8_t q2, uint8_t q3) {                   in filter_mask() argument
    32   mask |= (abs(q3 - q2) > limit) * -1;        in filter_mask()
    41   uint8_t q2, uint8_t q3) {                   in flat_mask4() argument
    48   mask |= (abs(q3 - q0) > thresh) * -1;       in flat_mask4()
    57   uint8_t q3, uint8_t q4) {                   in flat_mask5() argument
    58   int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);  in flat_mask5()
    114  const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p];  in vp9_lpf_horizontal_4_c() local
    116  p3, p2, p1, p0, q0, q1, q2, q3);            in vp9_lpf_horizontal_4_c()
    139  const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];  in vp9_lpf_vertical_4_c() local
    141  p3, p2, p1, p0, q0, q1, q2, q3);            in vp9_lpf_vertical_4_c()
    [all …]
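filter_mask() and flat_mask4() use a branchless idiom: a comparison evaluates to 0 or 1, and multiplying by -1 stretches that into an all-zeros or all-ones byte that can be OR-accumulated and later used as a select mask. A minimal demonstration with hypothetical names and thresholds:

    #include <stdint.h>
    #include <stdlib.h>

    /* Accumulate an all-ones mask (-1) whenever a sample pair differs
       by more than `limit` -- the same trick as vp9's filter_mask():
       (condition) * -1 maps {0,1} to {0x00, 0xff}. */
    static int8_t edge_mask(uint8_t limit, uint8_t p1, uint8_t p0,
                            uint8_t q0, uint8_t q1)
    {
        int8_t mask = 0;
        mask |= (abs(p1 - p0) > limit) * -1;
        mask |= (abs(q1 - q0) > limit) * -1;
        return ~mask;   /* as in vp9: ~mask is 0 when any threshold trips */
    }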
/external/libavc/common/arm/
  ih264_padding_neon.s
    185  vdup.u8 q3, r11
    188  vst1.8  {q3}, [r4], r1    @ 16 bytes store
    197  vdup.u8 q3, r11
    200  vst1.8  {q3}, [r4], r1    @ 16 bytes store
    217  vdup.u8 q3, r11
    220  vst1.8  {q3}, [r4]!       @ 16 bytes store
    223  vst1.8  {q3}, [r4], r6    @ 16 bytes store
    233  vdup.u8 q3, r11
    236  vst1.8  {q3}, [r4]!       @ 16 bytes store
    237  vst1.8  {q3}, [r4], r6    @ 16 bytes store
    [all …]
  ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
    153  vaddl.u8  q3, d0, d5
    159  vmla.u16  q3, q4, q11
    163  vmls.u16  q3, q4, q12
    170  vst1.32   {q3}, [r9], r6  @ store temp buffer 0
    249  vaddl.s16 q3, d7, d17
    255  vmlal.s16 q3, d31, d22
    256  vmlsl.s16 q3, d29, d24
    262  vqrshrun.s32 d19, q3, #10
    279  vaddl.s16 q3, d9, d21
    291  vmlal.s16 q3, d31, d22
    [all …]
  ih264_weighted_pred_a9q.s
    142  vmovl.u8    q3, d6        @converting rows 3,4 to 16-bit
    145  vmul.s16    q3, q3, d2[0] @weight mult. for rows 3,4
    149  vrshl.s16   q3, q3, q0    @rounds off the weighted samples from rows 3,4
    152  vaddw.s8    q3, q3, d3    @adding offset for rows 3,4
    155  vqmovun.s16 d6, q3        @saturating rows 3,4 to unsigned 8-bit
    173  vmovl.u8    q3, d6        @converting row 2 to 16-bit
    178  vmul.s16    q3, q3, d2[0] @weight mult. for row 2
    183  vrshl.s16   q3, q3, q0    @rounds off the weighted samples from row 2
    187  vaddw.s8    q3, q3, d3    @adding offset for row 2
    192  vqmovun.s16 d6, q3        @saturating row 2 to unsigned 8-bit
    [all …]
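The ih264_weighted_pred_a9q.s pipeline is: widen samples to 16-bit (vmovl), multiply by the weight (vmul), apply a rounding shift (vrshl, where a negative shift count shifts right), add the offset (vaddw), and saturate back to 8-bit (vqmovun). A scalar model of one sample, assuming `shift` holds the positive log2 weight denominator:

    #include <stdint.h>

    /* Scalar model of H.264 explicit weighted prediction for one
       sample: out = clip8(round((in * w) >> shift) + offset).
       Parameter names are illustrative; the NEON code processes
       whole rows with vmul/vrshl/vaddw/vqmovun. */
    static uint8_t weight_sample(uint8_t in, int16_t w, int shift, int8_t ofst)
    {
        int32_t v = (int32_t)in * w;
        if (shift > 0)
            v = (v + (1 << (shift - 1))) >> shift;  /* rounding right shift */
        v += ofst;
        return (uint8_t)(v < 0 ? 0 : (v > 255 ? 255 : v));  /* saturate */
    }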
/external/libhevc/common/arm/
  ihevc_intra_pred_luma_horz.s
    124  vdup.8  q3,d1[5]          @duplicate the iii value.
    133  vst1.8  {q3},[r2],r3
    134  vst1.8  {q3},[r9],r3
    140  vdup.8  q3,d1[1]
    149  vst1.8  {q3},[r2],r3
    150  vst1.8  {q3},[r9],r3
    156  vdup.8  q3,d0[5]
    165  vst1.8  {q3},[r2],r3
    166  vst1.8  {q3},[r9],r3
    172  vdup.8  q3,d0[1]
    [all …]
  ihevc_intra_pred_chroma_horz.s
    126  vdup.16 q3,d1[1]          @duplicate the iii value.
    135  vst1.16 {q3},[r2],r3
    136  vst1.16 {q3},[r9],r3
    142  vdup.16 q3,d0[1]
    151  vst1.16 {q3},[r2],r3
    152  vst1.16 {q3},[r9],r3
    158  vdup.16 q3,d11[1]
    167  vst1.16 {q3},[r2],r3
    168  vst1.16 {q3},[r9],r3
    174  vdup.16 q3,d10[1]
    [all …]
  ihevc_deblk_chroma_horz.s
    92   vsub.i16  q3,q0,q1
    96   vshl.i16  q3,q3,#2
    106  vadd.i16  q2,q3,q2
    108  vsub.i16  q3,q2,q8
    125  vrshr.s16 q3,q3,#3
    133  vmin.s16  q8,q3,q2
  ihevc_itrans_recon_4x4_ttype1.s
    143  vmull.s16 q3,d1,d4[2]     @74 * pi2_src[1]
    144  vmlal.s16 q3,d0,d4[0]     @74 * pi2_src[1] + 29 * pi2_src[0]
    145  vmlal.s16 q3,d3,d4[1]     @74 * pi2_src[1] + 29 * pi2_src[0] + 55 * pi2_src[3]
    146  …vmlal.s16 q3,d2,d4[3]    @pi2_out[0] = 29* pi2_src[0] + 74 * pi2_src[1] + 84* pi2_s…
    162  vqrshrn.s32 d14,q3,#shift_stage1_idct  @ (pi2_out[0] + rounding ) >> shift_stage1_idct
    182  vmull.s16 q3,d15,d4[2]    @74 * pi2_src[1]
    183  vmlal.s16 q3,d14,d4[0]    @74 * pi2_src[1] + 29 * pi2_src[0]
    184  vmlal.s16 q3,d17,d4[1]    @74 * pi2_src[1] + 29 * pi2_src[0] + 55 * pi2_src[3]
    185  …vmlal.s16 q3,d16,d4[3]   @pi2_out[0] = 29* pi2_src[0] + 74 * pi2_src[1] + 84* pi2_s…
    202  vqrshrn.s32 d0,q3,#shift_stage2_idct   @ (pi2_out[0] + rounding ) >> shift_stage1_idct
  ihevc_itrans_recon_8x8.s
    210  vmull.s16 q3,d3,d0[2]     @// y2 * cos2(part of d0)
    240  vmlal.s16 q3,d11,d1[2]    @// d0 = y2 * cos2 + y6 * sin2(part of a0 and a1)
    250  vadd.s32  q7,q5,q3        @// a0 = c0 + d0(part of r0,r7)
    251  vsub.s32  q5,q5,q3        @// a3 = c0 - d0(part of r3,r4)
    256  vsub.s32  q3,q7,q12       @// a0 - b0(part of r7)
    268  vqrshrn.s32 d15,q3,#shift_stage1_idct  @// r7 = (a0 - b0 + rnd) >> 7(shift_stage1_idct)
    313  vmull.s16 q3,d3,d0[2]     @// y2 * cos2(part of d0)
    318  vadd.s32  q7,q10,q3       @// a0 = c0 + d0(part of r0,r7)
    319  vsub.s32  q5,q10,q3       @// a3 = c0 - d0(part of r3,r4)
    324  vsub.s32  q3,q7,q12       @// a0 - b0(part of r7)
    [all …]
/external/libvpx/libvpx/vp9/common/mips/dspr2/
  vp9_loopfilter_masks_dspr2.h
    31   uint32_t q2, uint32_t q3,                   in vp9_filter_hev_mask_dspr2() argument
    95   [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh)   in vp9_filter_hev_mask_dspr2()
    138  uint32_t q2, uint32_t q3,                   in vp9_filter_hev_mask_flatmask4_dspr2() argument
    243  [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh),  in vp9_filter_hev_mask_flatmask4_dspr2()
    286  uint32_t q3, uint32_t q4,                   in vp9_flatmask5() argument
    362  [q2] "r" (q2), [q3] "r" (q3), [q4] "r" (q4),           in vp9_flatmask5()
/external/icu/icu4c/source/test/perf/howExpensiveIs/
  sieve.cpp
    104  double qs(double *times, int n, double *q1, double *q2, double *q3) {  in qs() argument
    107  *q3 = medianof(times,n,3);                  in qs()
    108  return *q3-*q1;                             in qs()
    112  double q1,q2,q3;                            in uprv_getMeanTime() local
    117  double iqr = qs(times,n,&q1,&q2,&q3);       in uprv_getMeanTime()
    119  double rangeMax = (q3+(1.5*iqr));           in uprv_getMeanTime()
    124  printf("iqr: %.9f, q1=%.9f, q2=%.9f, q3=%.9f, max=%.9f, n=%d\n", iqr,q1,q2,q3,(double)-1, n);  in uprv_getMeanTime()
    148  double iqr = qs(times,n,&q1,&q2,&q3);       in uprv_getMeanTime()
    150  rangeMax = (q3+(1.5*iqr));                  in uprv_getMeanTime()
    178  printf("min: %.9f, q1=%.9f, q2=%.9f, q3=%.9f, max=%.9f, n=%d\n", minTime,q1,q2,q3,maxTime, n);  in uprv_getMeanTime()
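In sieve.cpp, qs() returns the interquartile range q3 - q1 of the sorted timing samples, and uprv_getMeanTime() discards anything above rangeMax = q3 + 1.5*iqr (the classic Tukey upper fence) before averaging. A compact sketch of that filter (quartile indexing below is a simplification of the file's medianof()):

    /* Average only the samples at or below the Tukey upper fence
       q3 + 1.5*iqr, in the spirit of uprv_getMeanTime().  Assumes
       `times` is sorted ascending; quartiles by plain indexing. */
    static double trimmed_mean(const double *times, int n)
    {
        double q1 = times[n / 4];
        double q3 = times[(3 * n) / 4];
        double rangeMax = q3 + 1.5 * (q3 - q1);
        double sum = 0.0;
        int    kept = 0, i;

        for (i = 0; i < n; i++) {
            if (times[i] <= rangeMax) {   /* keep non-outliers only */
                sum += times[i];
                kept++;
            }
        }
        return kept ? sum / kept : 0.0;
    }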
/external/libvpx/libvpx/vp9/common/arm/neon/
  vp9_avg_neon.asm
    37   vld1.8    {q2-q3}, [r0], lr
    44   vrhadd.u8 q3, q3, q11
    46   vst1.8    {q2-q3}, [r2@128], r4
    53   vld1.8    {q2-q3}, [r0], r1
    63   vrhadd.u8 q3, q3, q11
    65   vst1.8    {q2-q3}, [r2@128], r3
    74   vld1.8    {q3}, [r6@128], r3
    80   vrhadd.u8 q1, q1, q3
  vp9_iht8x8_add_neon.asm
    132  vmull.s16 q3, d19, d0
    140  vmlsl.s16 q3, d31, d1
    148  vqrshrn.s32 d9, q3, #14   ; >> 14
    156  vmull.s16 q3, d19, d1
    164  vmlal.s16 q3, d31, d0
    172  vqrshrn.s32 d15, q3, #14  ; >> 14
    183  vmull.s16 q3, d17, d0
    191  vmlal.s16 q3, d25, d0
    202  vqrshrn.s32 d19, q3, #14  ; >> 14
    210  vmull.s16 q3, d21, d0
    [all …]
/external/boringssl/src/crypto/chacha/
  chacha_vec_arm.S
    155  vadd.i32  q3, q11, q0
    208  veor      q3, q3, q9
    214  vrev32.16 q3, q3
    216  vadd.i32  q8, q8, q3
    246  veor      q4, q9, q3
    252  vshl.i32  q3, q4, #8
    258  vsri.32   q3, q4, #24
    266  vadd.i32  q4, q8, q3
    306  vext.32   q3, q3, q3, #3
    313  veor      q3, q9, q3
    [all …]
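The chacha_vec_arm.S hits are fragments of the ChaCha quarter round: veor followed by vrev32.16 is XOR-then-rotate-by-16 (a 16-bit rotate is just a halfword swap), and the vshl #8 / vsri #24 pair builds the rotate-by-8. The scalar quarter round these vectors implement:

    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, int n)
    {
        return (x << n) | (x >> (32 - n));
    }

    /* One ChaCha quarter round on four state words.  On NEON the
       16-bit rotate is a vrev32.16 and the 8-bit rotate is the
       vshl/vsri pair seen above. */
    static void quarter_round(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t *d)
    {
        *a += *b; *d ^= *a; *d = rotl32(*d, 16);
        *c += *d; *b ^= *c; *b = rotl32(*b, 12);
        *a += *b; *d ^= *a; *d = rotl32(*d, 8);
        *c += *d; *b ^= *c; *b = rotl32(*b, 7);
    }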
/external/libvpx/libvpx/vp8/encoder/arm/neon/
  shortfdct_neon.asm
    56   vshl.s16 q3, q3, #3       ; (c1, d1) << 3
    122  vld1.16  {q3}, [r0@128], r2
    124  ; transpose q0=ip[0], q1=ip[1], q2=ip[2], q3=ip[3]
    126  vtrn.32  q1, q3           ; [A1|B1]
    128  vtrn.16  q2, q3           ; [A3|B3]
    130  vadd.s16 q11, q0, q3      ; a1 = ip[0] + ip[3]
    133  vsub.s16 q14, q0, q3      ; d1 = ip[0] - ip[3]
    165  ; transpose q0=ip[0], q1=ip[4], q2=ip[8], q3=ip[12]
    167  vtrn.32  q1, q3           ; q1=[A4 | B4]
    169  vtrn.16  q2, q3           ; q3=[A12|B12]
    [all …]