/external/speex/libspeex/ |
D | _kiss_fft_guts.h |
    62   # define smul(a,b) ( (SAMPPROD)(a)*(b) )  macro
    65   # define S_MUL(a,b) sround( smul(a,b) )
    68   do{ (m).r = sround( smul((a).r,(b).r) - smul((a).i,(b).i) ); \
    69   (m).i = sround( smul((a).r,(b).i) + smul((a).i,(b).r) ); }while(0)
    72   do{ (m).r = PSHR32( smul((a).r,(b).r) - smul((a).i,(b).i),17 ); \
    73   (m).i = PSHR32( smul((a).r,(b).i) + smul((a).i,(b).r),17 ); }while(0)
    76   (x) = sround( smul( x, SAMP_MAX/k ) )
    83   do{ (c).r = sround( smul( (c).r , s ) ) ;\
    84   (c).i = sround( smul( (c).i , s ) ) ; }while(0)
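A minimal sketch of the fixed-point helpers matched above, assuming the 16-bit FIXED_POINT build of speex's KISS FFT (Q15 samples, 32-bit products). Type names and constants mirror _kiss_fft_guts.h, but this block is illustrative, not a drop-in copy of the header.

    #include <stdint.h>

    /* Assumed configuration: Q15 samples, 32-bit wide products. */
    typedef int16_t kiss_fft_scalar;   /* one Q15 sample                      */
    typedef int32_t SAMPPROD;          /* wide type that holds a full product */

    #define FRACBITS 15
    #define SAMP_MAX 32767

    /* widen first so the 16x16 multiply cannot overflow */
    #define smul(a, b)   ((SAMPPROD)(a) * (b))
    /* round to nearest while shifting the product back down to Q15 */
    #define sround(x)    ((kiss_fft_scalar)(((x) + (1 << (FRACBITS - 1))) >> FRACBITS))
    #define S_MUL(a, b)  sround(smul(a, b))

    /* complex multiply m = a*b, built from the scalar helpers */
    typedef struct { kiss_fft_scalar r, i; } kiss_fft_cpx;
    #define C_MUL(m, a, b) \
        do { (m).r = sround(smul((a).r, (b).r) - smul((a).i, (b).i)); \
             (m).i = sround(smul((a).r, (b).i) + smul((a).i, (b).r)); } while (0)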
|
/external/llvm/test/CodeGen/Generic/ |
D | overflow.ll |
    150   ;; smul
    154   %smul = tail call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %a, i8 %b)
    155   %cmp = extractvalue { i8, i1 } %smul, 1
    156   %smul.result = extractvalue { i8, i1 } %smul, 0
    157   %X = select i1 %cmp, i8 %smul.result, i8 42
    161   declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
    165   %smul = tail call { i16, i1 } @llvm.smul.with.overflow.i16(i16 %a, i16 %b)
    166   %cmp = extractvalue { i16, i1 } %smul, 1
    167   %smul.result = extractvalue { i16, i1 } %smul, 0
    168   %X = select i1 %cmp, i16 %smul.result, i16 42
    [all …]
|
/external/llvm/test/CodeGen/X86/ |
D | smul-with-overflow.ll |
    8    %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    27   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    45   declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
    50   %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 2)
    62   %tmp1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %tmp0, i32 4)
    71   declare { i63, i1 } @llvm.smul.with.overflow.i63(i63, i63) nounwind readnone
    75   %res = call { i63, i1 } @llvm.smul.with.overflow.i63(i63 4, i63 4611686018427387903)
|
D | muloti.ll |
    16   %0 = tail call %1 @llvm.smul.with.overflow.i128(i128 %ins14, i128 %ins)
    79   declare %1 @llvm.smul.with.overflow.i128(i128, i128) nounwind readnone
|
D | xaluo.ll |
    301   %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 %v1, i8 %v2)
    313   %t = call {i16, i1} @llvm.smul.with.overflow.i16(i16 %v1, i16 %v2)
    325   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    337   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    490   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    501   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    674   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    691   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    749   declare {i8, i1} @llvm.smul.with.overflow.i8 (i8, i8 ) nounwind readnone
    750   declare {i16, i1} @llvm.smul.with.overflow.i16(i16, i16) nounwind readnone
    [all …]
|
/external/llvm/test/Analysis/ValueTracking/ |
D | pr23011.ll |
    3    declare { i8, i1 } @llvm.smul.with.overflow.i8(i8, i8) nounwind readnone
    9    %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
    10   ; CHECK: %t = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 %rem, i8 %rem)
|
/external/llvm/test/Transforms/GVN/ |
D | 2011-07-07-MatchIntrinsicExtract.ll |
    68   %smul = tail call %0 @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
    69   %smul.0 = extractvalue %0 %smul, 0
    84   declare %0 @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm/test/CodeGen/SPARC/ |
D | basictest.ll |
    57   ; CHECK: smul %o0, %o1, %o0
    64   ; CHECK: smul %o0, %o1, %o1
    74   ;FIXME: the smul in the output is totally redundant and should not there.
    75   ; CHECK: smul %o0, %o1, %o2
|
/external/llvm/test/CodeGen/PowerPC/ |
D | mul-with-overflow.ll |
    10   declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
    12   %res = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %x, i32 3)
|
/external/llvm/test/CodeGen/Mips/Fast-ISel/ |
D | mul1.ll |
    8    declare { i32, i1 } @llvm.smul.with.overflow.i32(i32, i32)
    13   %1 = call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %0, i32 %b)
|
/external/llvm/test/Transforms/ConstProp/ |
D | overflow-ops.ll |
    10    declare {i8, i1} @llvm.smul.with.overflow.i8(i8, i8)
    196   ;; smul
    202   %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 -20, i8 -10)
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-xaluo.ll |
    199   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    213   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    225   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 2)
    369   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    382   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    559   %t = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %v1, i32 %v2)
    578   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 %v2)
    595   %t = call {i64, i1} @llvm.smul.with.overflow.i64(i64 %v1, i64 2)
    667   declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
    668   declare {i64, i1} @llvm.smul.with.overflow.i64(i64, i64) nounwind readnone
|
/external/llvm/test/MC/Sparc/ |
D | sparc-alu-instructions.s |
    41   ! CHECK: smul %g1, %g2, %g3 ! encoding: [0x86,0x58,0x40,0x02]
    42   smul %g1, %g2, %g3
|
/external/llvm/test/Transforms/InstCombine/ |
D | intrinsics.ll |
    13    declare %ov.result.32 @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
    155   %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
    166   %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
    177   %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
    180   ; CHECK: %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
    401   %t = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
    413   %t = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
|
D | mul.ll |
    167   declare {i32, i1} @llvm.smul.with.overflow.i32(i32, i32)
    176   %E = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %C, i32 %D)
|
/external/llvm/test/MC/Disassembler/Sparc/ |
D | sparc.txt | 48 # CHECK: smul %g1, %g2, %g3
|
/external/llvm/lib/Target/Sparc/ |
D | SparcInstrInfo.td | 623 defm SMUL : F3_12 <"smul", 0b001011, mul, IntRegs, i32, simm13Op>;
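For reference, a small illustrative C model (not compiler code) of what SPARC V8 `smul rs1, rs2, rd` computes, i.e. the instruction the pattern above selects for a 32-bit signed multiply: the full 64-bit signed product is formed, the low word goes to rd and the high word to the %y register.

    #include <stdint.h>

    /* rd receives the low 32 bits of the signed product; %y receives the high 32. */
    typedef struct { uint32_t rd; uint32_t y; } smul_result;

    smul_result sparc_smul(int32_t rs1, int32_t rs2) {
        int64_t prod = (int64_t)rs1 * (int64_t)rs2;   /* widen, then multiply */
        smul_result r;
        r.rd = (uint32_t)prod;                        /* low 32 bits  -> rd   */
        r.y  = (uint32_t)((uint64_t)prod >> 32);      /* high 32 bits -> %y   */
        return r;
    }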
|
/external/llvm/docs/ |
D | LangRef.rst |
    10848   '``llvm.smul.with.overflow.*``' Intrinsics
    10854   This is an overloaded intrinsic. You can use ``llvm.smul.with.overflow``
    10859   declare {i16, i1} @llvm.smul.with.overflow.i16(i16 %a, i16 %b)
    10860   declare {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
    10861   declare {i64, i1} @llvm.smul.with.overflow.i64(i64 %a, i64 %b)
    10866   The '``llvm.smul.with.overflow``' family of intrinsic functions perform
    10882   The '``llvm.smul.with.overflow``' family of intrinsic functions perform
    10893   %res = call {i32, i1} @llvm.smul.with.overflow.i32(i32 %a, i32 %b)
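As a C-level illustration of the semantics documented here: clang and GCC expose __builtin_smul_overflow(), which clang typically lowers to the @llvm.smul.with.overflow.i32 intrinsic above. The builtin writes the truncated product through its out-parameter and reports overflow via its return value; this sketch is an example of that usage, not part of the LLVM sources.

    #include <stdio.h>
    #include <limits.h>

    int main(void) {
        int res;
        /* signed multiply with overflow check, mirroring {i32, i1} result pair */
        if (__builtin_smul_overflow(INT_MAX, 2, &res))
            printf("overflow, truncated result = %d\n", res);
        else
            printf("no overflow, result = %d\n", res);
        return 0;
    }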
|
/external/llvm/lib/Target/ARM/ |
D | ARMInstrInfo.td |
    4077   defm SMUL : AI_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
    5435   // smul* and smla*
|
D | ARMInstrThumb2.td | 2838 defm t2SMUL : T2I_smul<"smul", BinOpFrag<(mul node:$LHS, node:$RHS)>>;
|
/external/libxml2/result/ |
D | rdf2.rde | 829 /usr/share/ncurses4/terminfo/h/h19-smul
|
D | rdf2.rdr | 829 /usr/share/ncurses4/terminfo/h/h19-smul
|