/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | branch-relax-spill.ll |
      10 %sgpr0 = tail call i32 asm sideeffect "s_mov_b32 s0, 0", "={s0}"() #0
      11 %sgpr1 = tail call i32 asm sideeffect "s_mov_b32 s1, 0", "={s1}"() #0
      12 %sgpr2 = tail call i32 asm sideeffect "s_mov_b32 s2, 0", "={s2}"() #0
      13 %sgpr3 = tail call i32 asm sideeffect "s_mov_b32 s3, 0", "={s3}"() #0
      14 %sgpr4 = tail call i32 asm sideeffect "s_mov_b32 s4, 0", "={s4}"() #0
      15 %sgpr5 = tail call i32 asm sideeffect "s_mov_b32 s5, 0", "={s5}"() #0
      16 %sgpr6 = tail call i32 asm sideeffect "s_mov_b32 s6, 0", "={s6}"() #0
      17 %sgpr7 = tail call i32 asm sideeffect "s_mov_b32 s7, 0", "={s7}"() #0
      18 %sgpr8 = tail call i32 asm sideeffect "s_mov_b32 s8, 0", "={s8}"() #0
      19 %sgpr9 = tail call i32 asm sideeffect "s_mov_b32 s9, 0", "={s9}"() #0
      [all …]
|
/external/llvm/test/Transforms/BDCE/ |
D | basic.ll |
      9 %call = tail call signext i32 @foo(i32 signext 5) #0
      12 %call1 = tail call signext i32 @foo(i32 signext 3) #0
      15 %call4 = tail call signext i32 @foo(i32 signext 2) #0
      18 %call7 = tail call signext i32 @foo(i32 signext 1) #0
      21 %call10 = tail call signext i32 @foo(i32 signext 0) #0
      24 %call13 = tail call signext i32 @foo(i32 signext 4) #0
      31 ; CHECK-NOT: tail call signext i32 @foo(i32 signext 5)
      32 ; CHECK-NOT: tail call signext i32 @foo(i32 signext 3)
      33 ; CHECK: tail call signext i32 @foo(i32 signext 2)
      34 ; CHECK: tail call signext i32 @foo(i32 signext 1)
      [all …]
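The CHECK-NOT lines above assert that BDCE (bit-tracking dead code elimination) deletes calls whose results feed only bits that are never used. As a hedged illustration of the kind of pattern such a pass targets (this C snippet is not part of the test; the names are made up):

      /* Hypothetical example: a bit-tracking DCE can see that (b << 24) only
       * produces bits 24-31, none of which survive the final mask, so the shift
       * (and anything feeding only it) is effectively dead. */
      unsigned low_byte_of(unsigned a, unsigned b)
      {
          unsigned x = (a * 3u) | (b << 24); /* high-bit contribution is never demanded */
          return x & 0xffu;                  /* only bits 0-7 are kept */
      }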
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/BDCE/ |
D | basic.ll |
      10 %call = tail call signext i32 @foo(i32 signext 5) #0
      13 %call1 = tail call signext i32 @foo(i32 signext 3) #0
      16 %call4 = tail call signext i32 @foo(i32 signext 2) #0
      19 %call7 = tail call signext i32 @foo(i32 signext 1) #0
      22 %call10 = tail call signext i32 @foo(i32 signext 0) #0
      25 %call13 = tail call signext i32 @foo(i32 signext 4) #0
      32 ; CHECK-NOT: tail call signext i32 @foo(i32 signext 5)
      33 ; CHECK-NOT: tail call signext i32 @foo(i32 signext 3)
      34 ; CHECK: tail call signext i32 @foo(i32 signext 2)
      35 ; CHECK: tail call signext i32 @foo(i32 signext 1)
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoopUnroll/ |
D | ignore-annotation-intrinsic-cost.ll |
      27 %annot.0 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      28 %annot.1 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      29 %annot.2 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      30 %annot.3 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      31 %annot.4 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      32 %annot.5 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      33 %annot.6 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      34 %annot.7 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      35 %annot.8 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      36 %annot.9 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      [all …]
|
/external/llvm/test/Transforms/LoopUnroll/ |
D | ignore-annotation-intrinsic-cost.ll |
      27 %annot.0 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      28 %annot.1 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      29 %annot.2 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      30 %annot.3 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      31 %annot.4 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      32 %annot.5 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      33 %annot.6 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      34 %annot.7 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      35 %annot.8 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      36 %annot.9 = tail call i32 @llvm.annotation.i32(i32 %i.01, i8* null, i8* null, i32 0)
      [all …]
|
/external/u-boot/lib/ |
D | membuff.c |
      18 mb->tail = mb->start; in membuff_purge()
      39 if (mb->head >= mb->tail) { in membuff_putrawflex()
      53 if ((maxlen < 0 || len < maxlen) && mb->tail != mb->start) { in membuff_putrawflex()
      62 len = mb->tail - mb->head - 1; in membuff_putrawflex()
      109 if (mb->head > mb->tail) { in membuff_getraw()
      111 *data = mb->tail; in membuff_getraw()
      112 len = mb->head - mb->tail; in membuff_getraw()
      120 mb->tail += len; in membuff_getraw()
      128 else if (mb->head < mb->tail) { in membuff_getraw()
      130 *data = mb->tail; in membuff_getraw()
      [all …]
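These matches come from a circular byte buffer whose read and write positions are tracked with head and tail pointers. A minimal, hypothetical sketch of that idea (this is not the real u-boot membuff API; the names and the write-at-head/read-at-tail convention are made up for illustration):

      struct ringbuf {
          char *start;   /* first byte of the backing store */
          char *end;     /* one past the last byte */
          char *head;    /* next byte to write */
          char *tail;    /* next byte to read */
      };

      /* Copy up to len bytes in; returns how many bytes were stored. */
      static int ring_put(struct ringbuf *rb, const char *data, int len)
      {
          int done = 0;

          while (done < len) {
              char *next = rb->head + 1 == rb->end ? rb->start : rb->head + 1;

              if (next == rb->tail)      /* full: one slot always stays free */
                  break;
              *rb->head = data[done++];
              rb->head = next;
          }
          return done;
      }

      /* Copy up to len bytes out; returns how many bytes were read. */
      static int ring_get(struct ringbuf *rb, char *data, int len)
      {
          int done = 0;

          while (done < len && rb->tail != rb->head) {   /* empty when tail == head */
              data[done++] = *rb->tail;
              rb->tail = rb->tail + 1 == rb->end ? rb->start : rb->tail + 1;
          }
          return done;
      }

A buffer of this shape starts empty with head == tail == start and end == start + size; keeping one slot unused is what lets head == tail mean "empty" rather than "full".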
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Generic/ |
D | PBQP.ll |
      5 %call = tail call i32 (...) @baz()
      6 %call1 = tail call i32 (...) @baz()
      7 %call2 = tail call i32 (...) @baz()
      8 %call3 = tail call i32 (...) @baz()
      9 %call4 = tail call i32 (...) @baz()
      10 %call5 = tail call i32 (...) @baz()
      11 %call6 = tail call i32 (...) @baz()
      12 %call7 = tail call i32 (...) @baz()
      13 %call8 = tail call i32 (...) @baz()
      14 %call9 = tail call i32 (...) @baz()
      [all …]
|
/external/llvm/test/CodeGen/Generic/ |
D | PBQP.ll |
      5 %call = tail call i32 (...) @baz()
      6 %call1 = tail call i32 (...) @baz()
      7 %call2 = tail call i32 (...) @baz()
      8 %call3 = tail call i32 (...) @baz()
      9 %call4 = tail call i32 (...) @baz()
      10 %call5 = tail call i32 (...) @baz()
      11 %call6 = tail call i32 (...) @baz()
      12 %call7 = tail call i32 (...) @baz()
      13 %call8 = tail call i32 (...) @baz()
      14 %call9 = tail call i32 (...) @baz()
      [all …]
|
/external/jemalloc_new/include/jemalloc/internal/ |
D | hash.h |
      102 const uint8_t *tail = (const uint8_t *) (data + nblocks*4); in hash_x86_32() local
      107 case 3: k1 ^= tail[2] << 16; in hash_x86_32()
      108 case 2: k1 ^= tail[1] << 8; in hash_x86_32()
      109 case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); in hash_x86_32()
      173 const uint8_t *tail = (const uint8_t *) (data + nblocks*16); in hash_x86_128() local
      180 case 15: k4 ^= tail[14] << 16; in hash_x86_128()
      181 case 14: k4 ^= tail[13] << 8; in hash_x86_128()
      182 case 13: k4 ^= tail[12] << 0; in hash_x86_128()
      185 case 12: k3 ^= tail[11] << 24; in hash_x86_128()
      186 case 11: k3 ^= tail[10] << 16; in hash_x86_128()
      [all …]
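These hits are the MurmurHash3 "tail" step: after every full 4-byte (or 16-byte) block has been mixed, the 1-3 (or 1-15) leftover bytes are folded in through a fall-through switch. A hedged, self-contained C sketch of just the 32-bit variant's tail handling (constants and rotation follow the public MurmurHash3 reference; the function name is made up, and the block loop and finalization are omitted):

      #include <stddef.h>
      #include <stdint.h>

      static uint32_t rotl32(uint32_t x, int r) { return (x << r) | (x >> (32 - r)); }

      /* Mix the 1-3 leftover bytes ("tail") that remain after all full 4-byte
       * blocks of the input have been consumed; h1 is the running hash state. */
      static uint32_t mix_tail_x86_32(uint32_t h1, const uint8_t *data, size_t len)
      {
          const uint32_t c1 = 0xcc9e2d51, c2 = 0x1b873593;
          const size_t nblocks = len / 4;
          const uint8_t *tail = data + nblocks * 4;
          uint32_t k1 = 0;

          switch (len & 3) {
          case 3: k1 ^= (uint32_t)tail[2] << 16; /* fall through */
          case 2: k1 ^= (uint32_t)tail[1] << 8;  /* fall through */
          case 1: k1 ^= tail[0];
                  k1 *= c1; k1 = rotl32(k1, 15); k1 *= c2; h1 ^= k1;
          }
          return h1;   /* unchanged when len is a multiple of 4 */
      }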
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | reg-scavengebug.ll |
      31 %v7 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 32768)
      32 %v8 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 2147450879)
      39 %v12 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v6, <16 x i32> %v11, i32 2)
      41 %v14 = tail call <16 x i32> @llvm.hexagon.V6.vaddh(<16 x i32> %v12, <16 x i32> undef)
      48 %v18 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v17, <16 x i32> %v6, i32 4)
      51 %v21 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v18, <16 x i32> %v19)
      52 %v22 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 4)
      53 %v23 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 8)
      54 %v24 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %v21, <16 x i32> %v14, i32 12)
      55 %v25 = tail call <16 x i32> @llvm.hexagon.V6.vaddw(<16 x i32> %v14, <16 x i32> %v22)
      [all …]
|
D | eliminate-pred-spill.ll |
      50 %20 = tail call <1024 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %2, <32 x i32> %11)
      51 …%21 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> %11, <32 x i32> …
      52 …%22 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> %2, <32 x i32> %…
      53 …%23 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> undef, <32 x i32…
      54 …%24 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> %12, <32 x i32> …
      55 %25 = tail call <1024 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %7, <32 x i32> %15)
      56 …%26 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %15, <32 x i32> …
      57 …%27 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %7, <32 x i32> %…
      58 …%28 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %16, <32 x i32> …
      59 …%29 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %8, <32 x i32> %…
      [all …]
|
D | v6vect-dbl-spill.ll |
      9 %v0 = tail call <32 x i32> @llvm.hexagon.V6.lvsplatw.128B(i32 16843009)
      10 %v1 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> undef)
      11 %v2 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> zeroinitializer)
      19 %v7 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> undef, <32 x i32> undef)
      20 …%v8 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v7, <32 x i32> zeroinitializer)
      21 …%v9 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> %v8, <32 x i32> undef, <32 x …
      22 …%v10 = tail call <32 x i32> @llvm.hexagon.V6.valignbi.128B(<32 x i32> undef, <32 x i32> undef, i32…
      23 …%v11 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> zeroinitializer, <32 x i32…
      24 %v12 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v10, <32 x i32> undef)
      25 …%v13 = tail call <1024 x i1> @llvm.hexagon.V6.vgtub.128B(<32 x i32> %v11, <32 x i32> zeroinitializ…
      [all …]
|
D | v6vect-spill-kill.ll |
      10 %v0 = tail call <32 x i32> @llvm.hexagon.V6.vshuffh.128B(<32 x i32> undef)
      21 …%v5 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroinitia…
      22 …%v6 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v5, <32 x i32> zeroinitial…
      23 …%v7 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> zeroinitializer, <32 x i32> z…
      24 …%v8 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v7, <32 x …
      25 …%v9 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> zeroinitializer, <32 x i32> %…
      26 …%v10 = tail call <32 x i32> @llvm.hexagon.V6.vaddbnq.128B(<1024 x i1> undef, <32 x i32> %v9, <32 x…
      27 …%v11 = tail call <32 x i32> @llvm.hexagon.V6.vlalignbi.128B(<32 x i32> undef, <32 x i32> zeroiniti…
      28 …%v12 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> %v11, <32 x i32> zeroiniti…
      29 …%v13 = tail call <32 x i32> @llvm.hexagon.V6.vabsdiffub.128B(<32 x i32> zeroinitializer, <32 x i32…
      [all …]
|
D | reg-scavengebug-4.ll |
      11 %v0 = tail call <16 x i32> @llvm.hexagon.V6.vshuffb(<16 x i32> zeroinitializer)
      21 %v4 = tail call <32 x i32> @llvm.hexagon.V6.vmpyh(<16 x i32> undef, i32 undef)
      30 %v10 = tail call <16 x i32> @llvm.hexagon.V6.vlalignb(<16 x i32> %v8, <16 x i32> undef, i32 4)
      31 %v11 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> undef, <16 x i32> %v8, i32 4)
      32 %v12 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v10, <16 x i32> undef)
      33 %v13 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v11, <16 x i32> undef)
      34 …%v14 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb(<16 x i32> %v12, <16 x i32> zeroinitializer, …
      35 …%v15 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v14, <16 x i32> %v12, <16 x…
      36 …%v16 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v15, <16 x i32> %v12, <16 x…
      37 …%v17 = tail call <16 x i32> @llvm.hexagon.V6.vlutvvb.oracc(<16 x i32> %v16, <16 x i32> %v12, <16 x…
      [all …]
|
D | expand-vstorerw-undef2.ll |
      40 %v3 = tail call i8* @halide_malloc()
      42 %v5 = tail call i8* @halide_malloc()
      44 %v7 = tail call i8* @halide_malloc()
      46 %v9 = tail call i8* @halide_malloc()
      57 %v14 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> zeroinitializer) #2
      58 …%v15 = tail call <32 x i32> @llvm.hexagon.V6.vasrwh.128B(<32 x i32> undef, <32 x i32> %v14, i32 1)…
      65 %v18 = tail call <32 x i32> @llvm.hexagon.V6.vavghrnd.128B(<32 x i32> %v15, <32 x i32> undef) #2
      70 …%v21 = tail call <32 x i32> @llvm.hexagon.V6.vaddhsat.128B(<32 x i32> zeroinitializer, <32 x i32> …
      71 …%v22 = tail call <64 x i32> @llvm.hexagon.V6.vshuffvdd.128B(<32 x i32> %v21, <32 x i32> undef, i32…
      72 %v23 = tail call <32 x i32> @llvm.hexagon.V6.lo.128B(<64 x i32> %v22)
      [all …]
|
D | v6-spill1.ll |
      7 %v0 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
      8 %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v0)
      9 %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 16843009)
      10 %v3 = tail call <16 x i32> @llvm.hexagon.V6.vd0()
      18 %v8 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v3, <16 x i32> %v3)
      46 %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v25, <16 x i32> %v14)
      47 %v31 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> %v1)
      48 …%v32 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v31, <16 x i32> %v3, <16 x i32> %v25)
      49 …%v33 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v16, <16 x i32> %v32, i32 168…
      50 …%v34 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v31, <16 x i32> %v17, <16 x i32> …
      [all …]
|
D | v60-vecpred-spill.ll |
      11 %v0 = tail call i32 @llvm.hexagon.S2.vsplatrb(i32 %a3)
      12 %v1 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 %v0)
      13 %v2 = tail call <16 x i32> @llvm.hexagon.V6.lvsplatw(i32 16843009)
      14 %v3 = tail call <16 x i32> @llvm.hexagon.V6.vsubw(<16 x i32> undef, <16 x i32> undef)
      22 %v8 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %v3, <16 x i32> %v3)
      50 %v30 = tail call <16 x i32> @llvm.hexagon.V6.vabsdiffub(<16 x i32> %v25, <16 x i32> %v14)
      51 %v31 = tail call <512 x i1> @llvm.hexagon.V6.vgtub(<16 x i32> %v30, <16 x i32> %v1)
      52 …%v32 = tail call <16 x i32> @llvm.hexagon.V6.vmux(<512 x i1> %v31, <16 x i32> %v3, <16 x i32> %v25)
      53 …%v33 = tail call <32 x i32> @llvm.hexagon.V6.vmpybus.acc(<32 x i32> %v16, <16 x i32> %v32, i32 168…
      54 …%v34 = tail call <16 x i32> @llvm.hexagon.V6.vaddbnq(<512 x i1> %v31, <16 x i32> %v17, <16 x i32> …
      [all …]
|
D | frame-offset-overflow.ll |
      55 %11 = tail call <32 x i32> @llvm.hexagon.V6.vaddhw(<16 x i32> %6, <16 x i32> %10)
      56 …%12 = tail call <32 x i32> @llvm.hexagon.V6.vmpyhsat.acc(<32 x i32> %11, <16 x i32> %8, i32 393222)
      57 %13 = tail call <32 x i32> @llvm.hexagon.V6.vcombine(<16 x i32> %9, <16 x i32> %7)
      58 …%14 = tail call <32 x i32> @llvm.hexagon.V6.vmpahb.acc(<32 x i32> %12, <32 x i32> %13, i32 6737203…
      59 %15 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %dVsumv1.096)
      60 %16 = tail call <16 x i32> @llvm.hexagon.V6.lo(<32 x i32> %14)
      61 %17 = tail call <16 x i32> @llvm.hexagon.V6.valignbi(<16 x i32> %16, <16 x i32> %15, i32 4)
      62 %18 = tail call <16 x i32> @llvm.hexagon.V6.hi(<32 x i32> %14)
      63 %19 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %16, <16 x i32> %15, i32 8)
      64 %20 = tail call <16 x i32> @llvm.hexagon.V6.valignb(<16 x i32> %18, <16 x i32> undef, i32 8)
      [all …]
|
/external/llvm/test/CodeGen/Hexagon/ |
D | eliminate-pred-spill.ll |
      51 %20 = tail call <1024 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %2, <32 x i32> %11)
      52 …%21 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> %11, <32 x i32> …
      53 …%22 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> %2, <32 x i32> %…
      54 …%23 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> undef, <32 x i32…
      55 …%24 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %20, <32 x i32> %12, <32 x i32> …
      56 %25 = tail call <1024 x i1> @llvm.hexagon.V6.vgtb.128B(<32 x i32> %7, <32 x i32> %15)
      57 …%26 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %15, <32 x i32> …
      58 …%27 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %7, <32 x i32> %…
      59 …%28 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %16, <32 x i32> …
      60 …%29 = tail call <32 x i32> @llvm.hexagon.V6.vmux.128B(<1024 x i1> %25, <32 x i32> %8, <32 x i32> %…
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/RISCV/ |
D | tail-calls.ll |
      4 ; Perform tail call optimization for global address.
      8 ; CHECK: tail callee_tail
      10 %r = tail call i32 @callee_tail(i32 %i)
      14 ; Perform tail call optimization for external symbol.
      21 ; CHECK: tail memcpy
      22 …tail call void @llvm.memcpy.p0i8.p0i8.i32(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @dest, i…
      26 ; Perform indirect tail call optimization (for function pointer call).
      33 ; CHECK-NOT: tail callee_indirect1
      34 ; CHECK-NOT: tail callee_indirect2
      46 tail call void %callee()
      [all …]
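The comments in this test describe tail call optimization: when a call is the last action of a function and its result is returned unchanged, the call-plus-return can be lowered to a plain jump that reuses the caller's stack frame (the RISC-V "tail" pseudo-instruction the CHECK lines look for). A hedged C illustration of a call in tail position (the function names are made up, not from the test):

      int callee(int i);            /* assumed external function */

      int caller(int i)
      {
          return callee(i + 1);     /* last action, result returned unchanged:
                                       eligible to become a jump instead of a call */
      }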
|
/external/jemalloc/include/jemalloc/internal/ |
D | hash.h |
      135 const uint8_t *tail = (const uint8_t *) (data + nblocks*4); in hash_x86_32() local
      140 case 3: k1 ^= tail[2] << 16; in hash_x86_32()
      141 case 2: k1 ^= tail[1] << 8; in hash_x86_32()
      142 case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); in hash_x86_32()
      207 const uint8_t *tail = (const uint8_t *) (data + nblocks*16); in hash_x86_128() local
      214 case 15: k4 ^= tail[14] << 16; in hash_x86_128()
      215 case 14: k4 ^= tail[13] << 8; in hash_x86_128()
      216 case 13: k4 ^= tail[12] << 0; in hash_x86_128()
      219 case 12: k3 ^= tail[11] << 24; in hash_x86_128()
      220 case 11: k3 ^= tail[10] << 16; in hash_x86_128()
      [all …]
|
/external/llvm/test/Analysis/BasicAA/ |
D | cs-cs.ll |
      41 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
      42 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
      48 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8*…
      49 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8*…
      50 ; CHECK: Both ModRef: Ptr: i8* %P <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8*…
      51 ; CHECK: Both ModRef: Ptr: i8* %Q <-> tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8*…
      52 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, …
      53 ; CHECK: Both ModRef: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, …
      57 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
      58 tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %P, i8* %Q, i64 12, i32 1, i1 false)
      [all …]
|
/external/llvm/test/Transforms/ObjCARC/ |
D | tail-call-invariant-enforcement.ll |
      11 ; Never tail call objc_autorelease.
      20 %tmp1 = tail call i8* @objc_autorelease(i8* %x)
      25 ; Always tail call autoreleaseReturnValue.
      28 ; CHECK: %tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
      29 ; CHECK: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
      34 %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
      38 ; Always tail call objc_retain.
      41 ; CHECK: %tmp0 = tail call i8* @objc_retain(i8* %x) [[NUW]]
      42 ; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %x) [[NUW]]
      47 %tmp1 = tail call i8* @objc_retain(i8* %x)
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/ObjCARC/ |
D | tail-call-invariant-enforcement.ll |
      11 ; Never tail call objc_autorelease.
      20 %tmp1 = tail call i8* @objc_autorelease(i8* %x)
      25 ; Always tail call autoreleaseReturnValue.
      28 ; CHECK: %tmp0 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
      29 ; CHECK: %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x) [[NUW]]
      34 %tmp1 = tail call i8* @objc_autoreleaseReturnValue(i8* %x)
      38 ; Always tail call objc_retain.
      41 ; CHECK: %tmp0 = tail call i8* @objc_retain(i8* %x) [[NUW]]
      42 ; CHECK: %tmp1 = tail call i8* @objc_retain(i8* %x) [[NUW]]
      47 %tmp1 = tail call i8* @objc_retain(i8* %x)
      [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | thumb-big-stack.ll |
      145 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      147 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      149 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      151 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      153 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      155 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      157 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      159 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      161 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      163 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
      [all …]
|