/external/webrtc/common_audio/signal_processing/ |
D | resample_by_2_internal.c
      36  int32_t tmp0, tmp1, diff;  in WebRtcSpl_DownBy2IntToShort() local
      50  tmp1 = state[0] + diff * kResampleAllpass[1][0];  in WebRtcSpl_DownBy2IntToShort()
      52  diff = tmp1 - state[2];  in WebRtcSpl_DownBy2IntToShort()
      58  state[1] = tmp1;  in WebRtcSpl_DownBy2IntToShort()
      80  tmp1 = state[4] + diff * kResampleAllpass[0][0];  in WebRtcSpl_DownBy2IntToShort()
      82  diff = tmp1 - state[6];  in WebRtcSpl_DownBy2IntToShort()
      88  state[5] = tmp1;  in WebRtcSpl_DownBy2IntToShort()
     108  tmp1 = (in[(i << 1) + 2] + in[(i << 1) + 3]) >> 15;  in WebRtcSpl_DownBy2IntToShort()
     114  if (tmp1 > (int32_t)0x00007FFF)  in WebRtcSpl_DownBy2IntToShort()
     115  tmp1 = 0x00007FFF;  in WebRtcSpl_DownBy2IntToShort()
      [all …]
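
The matches above are the down-by-2 resampler updating its all-pass filter state in 32-bit arithmetic and clamping the intermediate before narrowing it to a 16-bit sample. A minimal C sketch of that saturation step, assuming the usual int16 output range (the helper name is illustrative, not WebRTC's API):

    #include <stdint.h>

    /* Clamp a 32-bit intermediate to the int16_t range, as the resampler
     * does before writing its 16-bit output samples. */
    static int16_t saturate_to_int16(int32_t v) {
      if (v > 0x00007FFF)
        return 0x7FFF;
      if (v < -0x00008000)
        return (int16_t)-0x8000;
      return (int16_t)v;
    }
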
|
/external/arm-optimized-routines/string/arm/ |
D | strcmp.S
      61  #define tmp1 r4  macro
      70  #define syndrome_a tmp1
      88  clz tmp1, \synd
      89  lsl r1, \d2, tmp1
      95  lsl \d1, \d1, tmp1
     107  clz tmp1, \synd
     108  bic tmp1, tmp1, #7
     109  lsr r1, \d2, tmp1
     116  lsr \d1, \d1, tmp1
     149  orr tmp1, src1, src2
      [all …]
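
These strcmp matches use the "syndrome" technique: a word whose set bits mark the byte positions where the strings differ or contain a NUL, located with a count-leading-zeros (clz) and a shift. A hedged C sketch of the extraction step on 32-bit words, assuming the first string byte sits in the most significant byte of each register (the helper name and byte ordering are assumptions, not the assembly's exact layout):

    #include <stdint.h>

    /* Given two data words and a syndrome whose highest set bit falls inside
     * the first differing byte, shift away the equal leading bytes and compare
     * the differing bytes to get the strcmp result. */
    static int compare_from_syndrome(uint32_t d1, uint32_t d2, uint32_t synd) {
      unsigned shift = __builtin_clz(synd) & ~7u; /* round down to a byte boundary */
      d1 <<= shift;                               /* drop the bytes that matched */
      d2 <<= shift;
      return (int)(d1 >> 24) - (int)(d2 >> 24);   /* compare first differing byte */
    }
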
|
D | memcpy.S
      63  #define tmp1 r3  macro
     133  and tmp1, count, #0x38
     134  rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
     135  add pc, pc, tmp1
     152  ldrne tmp1, [src], #4
     153  strne tmp1, [dst], #4
     157  and tmp1, count, #0x3c
     158  add dst, dst, tmp1
     159  add src, src, tmp1
     160  rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
      [all …]
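
Here memcpy handles leftover bytes by turning the remaining count into an offset and branching into the middle of an unrolled copy sequence (the add pc, pc, tmp1 line), so only as many copies execute as are still needed. A rough C analogue of that dispatch, using switch fall-through (a sketch of the idea only; the real routine's offsets and alignment handling are more involved):

    #include <stddef.h>
    #include <stdint.h>

    /* Copy the remaining whole words by "jumping into" an unrolled sequence:
     * the switch falls through exactly (count & 0x1c) / 4 word copies. */
    static void copy_tail_words(uint32_t *dst, const uint32_t *src, size_t count) {
      switch ((count & 0x1c) >> 2) {
        case 7: *dst++ = *src++; /* fall through */
        case 6: *dst++ = *src++; /* fall through */
        case 5: *dst++ = *src++; /* fall through */
        case 4: *dst++ = *src++; /* fall through */
        case 3: *dst++ = *src++; /* fall through */
        case 2: *dst++ = *src++; /* fall through */
        case 1: *dst++ = *src++; /* fall through */
        default: break;
      }
    }
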
|
/external/llvm-project/libc/AOR_v20.02/string/arm/ |
D | strcmp.S
      62  #define tmp1 r4  macro
      71  #define syndrome_a tmp1
      89  clz tmp1, \synd
      90  lsl r1, \d2, tmp1
      96  lsl \d1, \d1, tmp1
     108  clz tmp1, \synd
     109  bic tmp1, tmp1, #7
     110  lsr r1, \d2, tmp1
     117  lsr \d1, \d1, tmp1
     151  orr tmp1, src1, src2
      [all …]
|
D | memcpy.S
      64  #define tmp1 r3  macro
     134  and tmp1, count, #0x38
     135  rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
     136  add pc, pc, tmp1
     153  ldrne tmp1, [src], #4
     154  strne tmp1, [dst], #4
     158  and tmp1, count, #0x3c
     159  add dst, dst, tmp1
     160  add src, src, tmp1
     161  rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
      [all …]
|
/external/libvpx/libvpx/vpx_dsp/ppc/ |
D | inv_txfm_vsx.c
     582  int16x8_t tmp0[8], tmp1[8];  in half_idct16x8_vsx() local
     591  src[15], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],  in half_idct16x8_vsx()
     592  tmp1[6], tmp1[7]);  in half_idct16x8_vsx()
     594  tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5], tmp1[6], tmp1[7],  in half_idct16x8_vsx()
     600  int16x8_t tmp0[8], tmp1[8], tmp2[8], tmp3[8];  in vpx_idct16_vsx() local
     609  src0[15], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],  in vpx_idct16_vsx()
     610  tmp1[6], tmp1[7]);  in vpx_idct16_vsx()
     619  tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5], tmp1[6], tmp1[7],  in vpx_idct16_vsx()
     663  int16x8_t tmp0[8], tmp1[8], tmp2[8], tmp3[8];  in vpx_idct16x16_256_add_vsx() local
     679  src0[15], tmp1[0], tmp1[1], tmp1[2], tmp1[3], tmp1[4], tmp1[5],  in vpx_idct16x16_256_add_vsx()
      [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | vst4.ll
       7  %tmp1 = load <8 x i8>, <8 x i8>* %B
       8  …d @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
      17  %tmp1 = load <8 x i8>, <8 x i8>* %B
      18  …d @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
      29  %tmp1 = load <4 x i16>, <4 x i16>* %B
      30  …m.arm.neon.vst4.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16>…
      39  %tmp1 = load <2 x i32>, <2 x i32>* %B
      40  …m.arm.neon.vst4.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32>…
      48  %tmp1 = load <2 x float>, <2 x float>* %B
      49  ….neon.vst4.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x flo…
      [all …]
|
D | vstlane.ll
       7  %tmp1 = load <8 x i8>, <8 x i8>* %B
       8  %tmp2 = extractelement <8 x i8> %tmp1, i32 3
      18  %tmp1 = load <8 x i8>, <8 x i8>* %B
      19  %tmp2 = extractelement <8 x i8> %tmp1, i32 3
      30  %tmp1 = load <4 x i16>, <4 x i16>* %B
      31  %tmp2 = extractelement <4 x i16> %tmp1, i32 2
      40  %tmp1 = load <2 x i32>, <2 x i32>* %B
      41  %tmp2 = extractelement <2 x i32> %tmp1, i32 1
      49  %tmp1 = load <2 x float>, <2 x float>* %B
      50  %tmp2 = extractelement <2 x float> %tmp1, i32 1
      [all …]
|
D | vshift.ll
       6  %tmp1 = load <8 x i8>, <8 x i8>* %A
       8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
      15  %tmp1 = load <4 x i16>, <4 x i16>* %A
      17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
      24  %tmp1 = load <2 x i32>, <2 x i32>* %A
      26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
      33  %tmp1 = load <1 x i64>, <1 x i64>* %A
      35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
      42  %tmp1 = load <8 x i8>, <8 x i8>* %A
      43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
      [all …]
|
D | vbits.ll
       6  %tmp1 = load <8 x i8>, <8 x i8>* %A
       8  %tmp3 = and <8 x i8> %tmp1, %tmp2
      15  %tmp1 = load <4 x i16>, <4 x i16>* %A
      17  %tmp3 = and <4 x i16> %tmp1, %tmp2
      24  %tmp1 = load <2 x i32>, <2 x i32>* %A
      26  %tmp3 = and <2 x i32> %tmp1, %tmp2
      33  %tmp1 = load <1 x i64>, <1 x i64>* %A
      35  %tmp3 = and <1 x i64> %tmp1, %tmp2
      42  %tmp1 = load <16 x i8>, <16 x i8>* %A
      44  %tmp3 = and <16 x i8> %tmp1, %tmp2
      [all …]
|
D | vst3.ll
       8  %tmp1 = load <8 x i8>, <8 x i8>* %B
       9  …call void @llvm.arm.neon.vst3.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, i3…
      17  %tmp1 = load <4 x i16>, <4 x i16>* %B
      18  …oid @llvm.arm.neon.vst3.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, i…
      26  %tmp1 = load <2 x i32>, <2 x i32>* %B
      27  …oid @llvm.arm.neon.vst3.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i…
      37  %tmp1 = load <2 x i32>, <2 x i32>* %B
      38  …oid @llvm.arm.neon.vst3.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, i…
      48  %tmp1 = load <2 x float>, <2 x float>* %B
      49  …@llvm.arm.neon.vst3.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1,…
      [all …]
|
/external/libiio/src/ |
D | sort.c
      40  const struct iio_channel *tmp1 = *(struct iio_channel **)p1;  in iio_channel_compare() local
      44  if (iio_channel_is_scan_element(tmp1) && !iio_channel_is_scan_element(tmp2))  in iio_channel_compare()
      46  if (!iio_channel_is_scan_element(tmp1) && iio_channel_is_scan_element(tmp2))  in iio_channel_compare()
      49  if (iio_channel_is_scan_element(tmp1) && iio_channel_is_scan_element(tmp2)){  in iio_channel_compare()
      50  if (iio_channel_get_index(tmp1) > iio_channel_get_index(tmp2))  in iio_channel_compare()
      55  if (strcmp(tmp1->id, tmp2->id) == 0)  in iio_channel_compare()
      56  return !iio_channel_is_output(tmp1);  in iio_channel_compare()
      59  return strcmp(tmp1->id, tmp2->id);  in iio_channel_compare()
      64  const struct iio_channel_attr *tmp1 = (struct iio_channel_attr *)p1;  in iio_channel_attr_compare() local
      67  return strcmp(tmp1->name, tmp2->name);  in iio_channel_attr_compare()
      [all …]
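
These are qsort-style comparators: channels are ordered so that scan elements come first, then by scan index, then by id, and attributes are ordered by name. A hedged sketch of how such a comparator plugs into qsort (the struct and field names are illustrative; the real code goes through the libiio accessor functions):

    #include <stdlib.h>
    #include <string.h>

    /* Illustrative channel record; libiio compares struct iio_channel objects
     * through accessors such as iio_channel_is_scan_element(). */
    struct chan {
      const char *id;
      int is_scan_element;
      long index;
    };

    /* Scan elements sort before non-scan elements, then by index, then by id. */
    static int chan_compare(const void *p1, const void *p2) {
      const struct chan *a = *(struct chan *const *)p1;
      const struct chan *b = *(struct chan *const *)p2;
      if (a->is_scan_element != b->is_scan_element)
        return b->is_scan_element - a->is_scan_element;
      if (a->is_scan_element && a->index != b->index)
        return a->index > b->index ? 1 : -1;
      return strcmp(a->id, b->id);
    }

    /* Usage: with struct chan **channels holding n entries,
     *   qsort(channels, n, sizeof(*channels), chan_compare);  */
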
|
/external/llvm-project/libc/AOR_v20.02/string/aarch64/ |
D | strlen.S
      30  #define tmp1 x4  macro
      78  and tmp1, srcin, MIN_PAGE_SIZE - 1
      80  cmp tmp1, MIN_PAGE_SIZE - 16
      91  sub tmp1, data1, zeroones
      95  bics has_nul1, tmp1, tmp2
     104  clz tmp1, has_nul1
     106  add len, len, tmp1, lsr 3
     119  sub tmp1, data1, zeroones
     121  orr tmp2, tmp1, tmp3
     125  sub tmp1, data1, zeroones
      [all …]
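
The strlen matches are the classic word-at-a-time NUL test: subtract a word of repeated 0x01 bytes, mask against the data, and keep only the repeated 0x80 bits, so a bit ends up set in the first zero byte (false positives can only appear in later bytes); a bit count then turns that into a byte offset (the lsr 3). A hedged C sketch of the same test on a 64-bit word, assuming little-endian byte order:

    #include <stdint.h>

    #define REP8_01 0x0101010101010101ULL
    #define REP8_80 0x8080808080808080ULL

    /* Non-zero iff 'v' contains a zero byte; a high bit is set in the first
     * such byte (spurious bits can only appear above it). */
    static uint64_t has_zero_byte(uint64_t v) {
      return (v - REP8_01) & ~v & REP8_80;
    }

    /* Byte index of the first NUL in a little-endian word: count trailing
     * zero bits and divide by 8 (the assembly's "lsr 3"). */
    static unsigned first_zero_byte(uint64_t v) {
      return (unsigned)(__builtin_ctzll(has_zero_byte(v)) >> 3);
    }
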
|
D | strlen-mte.S
      26  #define tmp1 x4  macro
      70  lsl tmp1, zeroones, offset  /* Shift (offset & 63). */
      81  sub tmp1, data1, tmp1
      85  bics has_nul1, tmp1, tmp2
      96  clz tmp1, has_nul1  /* Count bits before NUL. */
     100  add len, len, tmp1, lsr 3
     104  sub tmp1, data2, tmp1
     106  bics has_nul1, tmp1, tmp2
     110  lsl tmp1, has_nul1, offset  /* Ignore bytes before string. */
     111  clz tmp1, tmp1  /* Count bits before NUL. */
      [all …]
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | vst4.ll
       7  %tmp1 = load <8 x i8>, <8 x i8>* %B
       8  …d @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
      17  %tmp1 = load <8 x i8>, <8 x i8>* %B
      18  …d @llvm.arm.neon.vst4.p0i8.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %tmp1, <8 x i8> %…
      29  %tmp1 = load <4 x i16>, <4 x i16>* %B
      30  …m.arm.neon.vst4.p0i8.v4i16(i8* %tmp0, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16> %tmp1, <4 x i16>…
      39  %tmp1 = load <2 x i32>, <2 x i32>* %B
      40  …m.arm.neon.vst4.p0i8.v2i32(i8* %tmp0, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32> %tmp1, <2 x i32>…
      48  %tmp1 = load <2 x float>, <2 x float>* %B
      49  ….neon.vst4.p0i8.v2f32(i8* %tmp0, <2 x float> %tmp1, <2 x float> %tmp1, <2 x float> %tmp1, <2 x flo…
      [all …]
|
D | vshift.ll
       6  %tmp1 = load <8 x i8>, <8 x i8>* %A
       8  %tmp3 = shl <8 x i8> %tmp1, %tmp2
      15  %tmp1 = load <4 x i16>, <4 x i16>* %A
      17  %tmp3 = shl <4 x i16> %tmp1, %tmp2
      24  %tmp1 = load <2 x i32>, <2 x i32>* %A
      26  %tmp3 = shl <2 x i32> %tmp1, %tmp2
      33  %tmp1 = load <1 x i64>, <1 x i64>* %A
      35  %tmp3 = shl <1 x i64> %tmp1, %tmp2
      42  %tmp1 = load <8 x i8>, <8 x i8>* %A
      43  %tmp2 = shl <8 x i8> %tmp1, < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >
      [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | neon-bitwise-instructions.ll
       6  %tmp1 = and <8 x i8> %a, %b;
       7  ret <8 x i8> %tmp1
      13  %tmp1 = and <16 x i8> %a, %b;
      14  ret <16 x i8> %tmp1
      21  %tmp1 = or <8 x i8> %a, %b;
      22  ret <8 x i8> %tmp1
      28  %tmp1 = or <16 x i8> %a, %b;
      29  ret <16 x i8> %tmp1
      36  %tmp1 = xor <8 x i8> %a, %b;
      37  ret <8 x i8> %tmp1
      [all …]
|
/external/webrtc/modules/audio_coding/codecs/isac/fix/source/ |
D | filterbanks_neon.c
      47  int16x4_t tmp1, tmp2;  in WebRtcIsacfix_AllpassFilter2FixDec16Neon() local
      54  tmp1 = vshrn_n_s32(a, 16);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      57  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      64  tmp1 = vld1_lane_s16(data_ch1 + 1, tmp1, 1);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      65  tmp1 = vld1_lane_s16(data_ch2 + 1, tmp1, 3);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      66  datav = vrev32_s16(tmp1);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      71  tmp1 = vshrn_n_s32(a, 16);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      73  vst1_lane_s16(data_ch1 + n, tmp1, 1);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      74  vst1_lane_s16(data_ch2 + n, tmp1, 3);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      78  statev = vqdmlsl_s16(vshll_n_s16(datav, 16), tmp1, factorv);  in WebRtcIsacfix_AllpassFilter2FixDec16Neon()
      [all …]
|
/external/arm-optimized-routines/string/aarch64/ |
D | strchr-mte.S
      21  #define tmp1 x1  macro
      62  fmov tmp1, dend
      63  lsr tmp1, tmp1, tmp3
      64  cbz tmp1, L(loop)
      66  rbit tmp1, tmp1
      67  clz tmp1, tmp1
      70  tst tmp1, 2
      71  add result, srcin, tmp1, lsr 2
      81  fmov tmp1, dend
      82  cbz tmp1, L(loop)
      [all …]
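
The strchr matches move a vector comparison result into a general register (fmov from dend), then use rbit+clz (effectively a count of trailing zeros) to locate the first matching lane and lsr 2 to turn the bit index into a byte offset, since each input byte owns four bits of the mask. A hedged C sketch of that last step (names are illustrative; the real routine also distinguishes a NUL hit from a character hit):

    #include <stddef.h>
    #include <stdint.h>

    /* 'mask' holds 4 bits per input byte, non-zero where that byte matched.
     * Count trailing zero bits and divide by 4 to get the byte index. */
    static const char *first_match(const char *chunk, uint64_t mask) {
      if (mask == 0)
        return NULL;                      /* no match in this 16-byte chunk */
      unsigned bit = (unsigned)__builtin_ctzll(mask);
      return chunk + (bit >> 2);          /* the assembly's "lsr 2" */
    }
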
|
D | strchrnul-mte.S
      21  #define tmp1 x1  macro
      55  fmov tmp1, dend
      56  lsr tmp1, tmp1, tmp2  /* Mask padding bits. */
      57  cbz tmp1, L(loop)
      59  rbit tmp1, tmp1
      60  clz tmp1, tmp1
      61  add result, srcin, tmp1, lsr 2
      70  fmov tmp1, dend
      71  cbz tmp1, L(loop)
      75  fmov tmp1, dend
      [all …]
|
/external/pdfium/core/fxcodec/jbig2/ |
D | JBig2_Image.cpp
     319  uint32_t tmp1 = JBIG2_GETDWORD(lineSrc) << shift;  in ComposeToInternal() local
     324  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);  in ComposeToInternal()
     327  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);  in ComposeToInternal()
     330  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);  in ComposeToInternal()
     333  tmp = (tmp2 & ~maskM) | ((~(tmp1 ^ tmp2)) & maskM);  in ComposeToInternal()
     336  tmp = (tmp2 & ~maskM) | (tmp1 & maskM);  in ComposeToInternal()
     348  uint32_t tmp1 = JBIG2_GETDWORD(lineSrc) >> shift;  in ComposeToInternal() local
     353  tmp = (tmp2 & ~maskM) | ((tmp1 | tmp2) & maskM);  in ComposeToInternal()
     356  tmp = (tmp2 & ~maskM) | ((tmp1 & tmp2) & maskM);  in ComposeToInternal()
     359  tmp = (tmp2 & ~maskM) | ((tmp1 ^ tmp2) & maskM);  in ComposeToInternal()
      [all …]
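
Each matched line composes one 32-bit destination word from a shifted source word: the composition operator (OR, AND, XOR, XNOR, or plain replace) combines source and destination only inside the bits selected by the mask, while bits outside the mask keep their destination value. A small C sketch of that pattern (type and enum names are illustrative, not pdfium's):

    #include <stdint.h>

    typedef enum { OP_OR, OP_AND, OP_XOR, OP_XNOR, OP_REPLACE } compose_op;

    /* Combine src and dst only where 'mask' bits are set; everything else
     * keeps the destination value, as in the matched lines. */
    static uint32_t compose_word(uint32_t src, uint32_t dst, uint32_t mask,
                                 compose_op op) {
      uint32_t combined;
      switch (op) {
        case OP_OR:   combined = src | dst;    break;
        case OP_AND:  combined = src & dst;    break;
        case OP_XOR:  combined = src ^ dst;    break;
        case OP_XNOR: combined = ~(src ^ dst); break;
        default:      combined = src;          break;  /* replace */
      }
      return (dst & ~mask) | (combined & mask);
    }
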
|
/external/llvm-project/llvm/test/Analysis/ScalarEvolution/ |
D | lshr-shl-differentconstmask.ll
       9  ; CHECK-NEXT: %tmp1 = udiv i32 %val, 64
      11  ; CHECK-NEXT: %tmp2 = mul i32 %tmp1, 16
      15  %tmp1 = udiv i32 %val, 64
      16  %tmp2 = mul i32 %tmp1, 16
      23  ; CHECK-NEXT: %tmp1 = udiv i32 %val, 16
      25  ; CHECK-NEXT: %tmp2 = mul i32 %tmp1, 64
      29  %tmp1 = udiv i32 %val, 16
      30  %tmp2 = mul i32 %tmp1, 64
      39  ; CHECK-NEXT: %tmp1 = lshr i32 %val, 6
      41  ; CHECK-NEXT: %tmp2 = shl i32 %tmp1, 4
      [all …]
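
These ScalarEvolution tests exercise the equivalence between the divide/multiply form and the shift form: on an i32, udiv by 64 is a logical shift right by 6 and mul by 16 is a shift left by 4, so both sequences compute the same value. A small C check of that equivalence (purely illustrative; the LLVM test itself only checks the printed SCEV analysis, not runtime behaviour):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t via_div_mul(uint32_t val) { return (val / 64u) * 16u; }
    static uint32_t via_shifts(uint32_t val)  { return (val >> 6) << 4; }

    int main(void) {
      /* Spot-check the equivalence over a range of values. */
      for (uint32_t v = 0; v < 1000000u; v += 7u)
        assert(via_div_mul(v) == via_shifts(v));
      return 0;
    }
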
|
D | shl-lshr-differentconstmask.ll
       9  ; CHECK-NEXT: %tmp1 = mul i32 %val, 64
      11  ; CHECK-NEXT: %tmp2 = udiv i32 %tmp1, 16
      15  %tmp1 = mul i32 %val, 64
      16  %tmp2 = udiv i32 %tmp1, 16
      23  ; CHECK-NEXT: %tmp1 = mul i32 %val, 16
      25  ; CHECK-NEXT: %tmp2 = udiv i32 %tmp1, 64
      29  %tmp1 = mul i32 %val, 16
      30  %tmp2 = udiv i32 %tmp1, 64
      39  ; CHECK-NEXT: %tmp1 = shl i32 %val, 6
      41  ; CHECK-NEXT: %tmp2 = lshr i32 %tmp1, 4
      [all …]
|
/external/llvm/test/Instrumentation/EfficiencySanitizer/ |
D | working_set_strict.ll
      11  %tmp1 = load i8, i8* %a, align 1
      12  ret i8 %tmp1
      27  ; CHECK: %tmp1 = load i8, i8* %a, align 1
      28  ; CHECK-NEXT: ret i8 %tmp1
      33  %tmp1 = load i16, i16* %a, align 2
      34  ret i16 %tmp1
      48  ; CHECK: %tmp1 = load i16, i16* %a, align 2
      49  ; CHECK-NEXT: ret i16 %tmp1
      54  %tmp1 = load i32, i32* %a, align 4
      55  ret i32 %tmp1
      [all …]
|
/external/boringssl/src/crypto/fipsmodule/md5/asm/ |
D | md5-586.pl
      23  $tmp1="edi";
      52  &mov($tmp1,$C) if $pos < 0;
      58  &xor($tmp1,$d);  # F function - part 2
      60  &and($tmp1,$b);  # F function - part 3
      63  &xor($tmp1,$d);  # F function - part 4
      65  &add($a,$tmp1);
      66  &mov($tmp1,&Np($c)) if $pos < 1;  # next tmp1 for R0
      67  &mov($tmp1,&Np($c)) if $pos == 1;  # next tmp1 for R1
      84  &xor($tmp1,$b);  # G function - part 2
      85  &and($tmp1,$d);  # G function - part 3
      [all …]
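
The matched perl lines emit x86 assembly for MD5's F round function as an xor/and/xor chain over a single temporary — i.e. F(b,c,d) = (b & c) | (~b & d) rewritten as d ^ (b & (c ^ d)) — and start the G function the same way. A short C restatement of those two round functions for reference (a sketch of the identities the generator relies on, not the generated code):

    #include <stdint.h>

    /* MD5 F and G, written the way the generated assembly computes them:
     * one temporary and an xor/and/xor chain instead of the textbook form. */
    static uint32_t md5_f(uint32_t b, uint32_t c, uint32_t d) {
      return d ^ (b & (c ^ d));   /* == (b & c) | (~b & d) */
    }

    static uint32_t md5_g(uint32_t b, uint32_t c, uint32_t d) {
      return c ^ (d & (b ^ c));   /* == (b & d) | (c & ~d) */
    }
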
|