/external/llvm/test/CodeGen/Mips/
D | ra-allocatable.ll
      101  %0 = load i32, i32* @a0, align 4
      102  %1 = load i32*, i32** @b0, align 4
      104  %2 = load i32, i32* @a1, align 4
      105  %3 = load i32*, i32** @b1, align 4
      107  %4 = load i32, i32* @a2, align 4
      108  %5 = load i32*, i32** @b2, align 4
      110  %6 = load i32, i32* @a3, align 4
      111  %7 = load i32*, i32** @b3, align 4
      113  %8 = load i32, i32* @a4, align 4
      114  %9 = load i32*, i32** @b4, align 4
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Mips/
D | ra-allocatable.ll
      101  %0 = load i32, i32* @a0, align 4
      102  %1 = load i32*, i32** @b0, align 4
      104  %2 = load i32, i32* @a1, align 4
      105  %3 = load i32*, i32** @b1, align 4
      107  %4 = load i32, i32* @a2, align 4
      108  %5 = load i32*, i32** @b2, align 4
      110  %6 = load i32, i32* @a3, align 4
      111  %7 = load i32*, i32** @b3, align 4
      113  %8 = load i32, i32* @a4, align 4
      114  %9 = load i32*, i32** @b4, align 4
      [all …]
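Both copies of ra-allocatable.ll load long runs of scalar globals (@a0, @a1, ...) and pointer globals (@b0, @b1, ...) and keep every value live at once, so register pressure grows high enough that $ra itself gets allocated as a general-purpose register, which is what the file name suggests the test checks. A minimal sketch of the first two pairs; the stores that keep the values live are assumed, since they fall in the elided lines:

    @a0 = common global i32 0, align 4
    @b0 = common global i32* null, align 4
    @a1 = common global i32 0, align 4
    @b1 = common global i32* null, align 4

    define void @pressure_sketch() nounwind {
    entry:
      %0 = load i32, i32* @a0, align 4
      %1 = load i32*, i32** @b0, align 4
      %2 = load i32, i32* @a1, align 4
      %3 = load i32*, i32** @b1, align 4
      ; Assumed uses: store each scalar through the matching pointer so all
      ; loaded values stay live and compete for registers at the same time.
      store i32 %0, i32* %1, align 4
      store i32 %2, i32* %3, align 4
      ret void
    }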
/external/gemmlowp/meta/generators/
D | transform_kernels_common.py
      63  load = [registers.QuadRegister() for unused_i in range(register_count)]
      64  emitter.EmitVLoadAE(8, elements, load, input_address, None)
      67  for register in load:
      70  for register in load:
      74  emitter.EmitVStoreAE(8, elements, load, output_address, None)
      76  registers.FreeRegisters(load)
      109  load = [registers.QuadRegister() for unused_i in range(register_count)]
      110  emitter.EmitVLoadAE(8, elements, load, input_address, None)
      113  if len(load) is 1:
      114  emitter.EmitVMovl('u8', load[0], load[0])
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/
D | hrc-stack-coloring.ll
      66  %v34 = load %s.11*, %s.11** %v1, align 4
      69  %v37 = load i64, i64* %v36, align 8
      71  %v39 = load double, double* %v32, align 8
      74  %v41 = load %s.11*, %s.11** %v1, align 4
      77  %v44 = load i64, i64* %v43, align 8
      79  %v46 = load double, double* %v32, align 8
      82  %v48 = load %s.11*, %s.11** %v1, align 4
      85  %v51 = load i64, i64* %v50, align 8
      87  %v53 = load double, double* %v32, align 8
      90  %v55 = load %s.11*, %s.11** %v1, align 4
      [all …]
/external/llvm/test/CodeGen/AMDGPU/
D | load-local-i1.ll
      14  %load = load i1, i1 addrspace(3)* %in
      15  store i1 %load, i1 addrspace(3)* %out
      21  %load = load <2 x i1>, <2 x i1> addrspace(3)* %in
      22  store <2 x i1> %load, <2 x i1> addrspace(3)* %out
      28  %load = load <3 x i1>, <3 x i1> addrspace(3)* %in
      29  store <3 x i1> %load, <3 x i1> addrspace(3)* %out
      35  %load = load <4 x i1>, <4 x i1> addrspace(3)* %in
      36  store <4 x i1> %load, <4 x i1> addrspace(3)* %out
      42  %load = load <8 x i1>, <8 x i1> addrspace(3)* %in
      43  store <8 x i1> %load, <8 x i1> addrspace(3)* %out
      [all …]
D | load-global-i1.ll
      13  %load = load i1, i1 addrspace(1)* %in
      14  store i1 %load, i1 addrspace(1)* %out
      20  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
      21  store <2 x i1> %load, <2 x i1> addrspace(1)* %out
      27  %load = load <3 x i1>, <3 x i1> addrspace(1)* %in
      28  store <3 x i1> %load, <3 x i1> addrspace(1)* %out
      34  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
      35  store <4 x i1> %load, <4 x i1> addrspace(1)* %out
      41  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
      42  store <8 x i1> %load, <8 x i1> addrspace(1)* %out
      [all …]
D | load-constant-i1.ll
      13  %load = load i1, i1 addrspace(2)* %in
      14  store i1 %load, i1 addrspace(1)* %out
      20  %load = load <2 x i1>, <2 x i1> addrspace(2)* %in
      21  store <2 x i1> %load, <2 x i1> addrspace(1)* %out
      27  %load = load <3 x i1>, <3 x i1> addrspace(2)* %in
      28  store <3 x i1> %load, <3 x i1> addrspace(1)* %out
      34  %load = load <4 x i1>, <4 x i1> addrspace(2)* %in
      35  store <4 x i1> %load, <4 x i1> addrspace(1)* %out
      41  %load = load <8 x i1>, <8 x i1> addrspace(2)* %in
      42  store <8 x i1> %load, <8 x i1> addrspace(1)* %out
      [all …]
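All of these i1 tests share one shape: load an i1 (or a short i1 vector) from one address space and store it to another, so the backend has to legalize a 1-bit memory access. A minimal sketch of the scalar and vector cases for the local-memory (addrspace 3) variant; the function names are illustrative, not taken from the tests:

    define void @sketch_local_i1(i1 addrspace(3)* %out, i1 addrspace(3)* %in) {
      ; Scalar case: a single bit is read from and written back to local memory.
      %load = load i1, i1 addrspace(3)* %in
      store i1 %load, i1 addrspace(3)* %out
      ret void
    }

    define void @sketch_local_v4i1(<4 x i1> addrspace(3)* %out, <4 x i1> addrspace(3)* %in) {
      ; Vector case: the same round trip with a <4 x i1> value.
      %load = load <4 x i1>, <4 x i1> addrspace(3)* %in
      store <4 x i1> %load, <4 x i1> addrspace(3)* %out
      ret void
    }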
D | load-constant-i8.ll
      14  %ld = load i8, i8 addrspace(2)* %in
      26  %ld = load <2 x i8>, <2 x i8> addrspace(2)* %in
      37  %ld = load <3 x i8>, <3 x i8> addrspace(2)* %in
      48  %ld = load <4 x i8>, <4 x i8> addrspace(2)* %in
      59  %ld = load <8 x i8>, <8 x i8> addrspace(2)* %in
      70  %ld = load <16 x i8>, <16 x i8> addrspace(2)* %in
      81  %a = load i8, i8 addrspace(2)* %in
      95  %ld = load i8, i8 addrspace(2)* %in
      103  %load = load <1 x i8>, <1 x i8> addrspace(2)* %in
      104  %ext = zext <1 x i8> %load to <1 x i32>
      [all …]
D | load-local-i8.ll
      14  %ld = load i8, i8 addrspace(3)* %in
      27  %ld = load <2 x i8>, <2 x i8> addrspace(3)* %in
      38  %ld = load <3 x i8>, <3 x i8> addrspace(3)* %in
      49  %ld = load <4 x i8>, <4 x i8> addrspace(3)* %in
      61  %ld = load <8 x i8>, <8 x i8> addrspace(3)* %in
      76  %ld = load <16 x i8>, <16 x i8> addrspace(3)* %in
      88  %a = load i8, i8 addrspace(3)* %in
      102  %ld = load i8, i8 addrspace(3)* %in
      110  %load = load <1 x i8>, <1 x i8> addrspace(3)* %in
      111  %ext = zext <1 x i8> %load to <1 x i32>
      [all …]
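The i8 tests (and the i16 extload tests further down) add an extension step: the narrow value is loaded and then zero- or sign-extended to i32 before it is stored, which is the pattern that lets the backend select extending-load instructions. A minimal sketch of both flavours for a <2 x i8> constant-memory load; the store through a global output pointer is assumed, since those lines are elided above:

    define void @sketch_zextload_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) {
      %load = load <2 x i8>, <2 x i8> addrspace(2)* %in
      %ext = zext <2 x i8> %load to <2 x i32>        ; zero-extending load pattern
      store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
      ret void
    }

    define void @sketch_sextload_v2i8(<2 x i32> addrspace(1)* %out, <2 x i8> addrspace(2)* %in) {
      %load = load <2 x i8>, <2 x i8> addrspace(2)* %in
      %ext = sext <2 x i8> %load to <2 x i32>        ; sign-extending load pattern
      store <2 x i32> %ext, <2 x i32> addrspace(1)* %out
      ret void
    }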
/external/swiftshader/third_party/subzero/tests_lit/llvm2ice_tests/
D | load_cast.ll
      1  ; Tests desired and undesired folding of load instructions into cast
      14  %load = load i8, i8* %addr, align 1
      15  %result = zext i8 %load to i32
      25  %load = load i8, i8* %addr, align 1
      26  %tmp1 = zext i8 %load to i32
      27  %tmp2 = zext i8 %load to i32
      31  ; Test that load folding does not happen.
      39  %load = load i8, i8* %addr, align 1
      40  %result = sext i8 %load to i32
      50  %load = load i8, i8* %addr, align 1
      [all …]
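The folding rule this Subzero test exercises: a load whose only user is a cast can be folded into that cast, but when the loaded value has more than one user (two zexts here) the load must stay a separate instruction, since folding would duplicate the memory access. A minimal sketch of the two shapes; the inttoptr setup and the function names are illustrative, not copied from the test:

    define i32 @fold_ok(i32 %addr_arg) {
      %addr = inttoptr i32 %addr_arg to i8*
      %load = load i8, i8* %addr, align 1
      %result = zext i8 %load to i32      ; single use: the load may fold into the zext
      ret i32 %result
    }

    define i32 @no_fold(i32 %addr_arg) {
      %addr = inttoptr i32 %addr_arg to i8*
      %load = load i8, i8* %addr, align 1
      %tmp1 = zext i8 %load to i32        ; two uses: folding is not allowed here
      %tmp2 = zext i8 %load to i32
      %result = add i32 %tmp1, %tmp2
      ret i32 %result
    }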
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/
D | load-constant-i1.ll
      13  %load = load i1, i1 addrspace(4)* %in
      14  store i1 %load, i1 addrspace(1)* %out
      20  %load = load <2 x i1>, <2 x i1> addrspace(4)* %in
      21  store <2 x i1> %load, <2 x i1> addrspace(1)* %out
      27  %load = load <3 x i1>, <3 x i1> addrspace(4)* %in
      28  store <3 x i1> %load, <3 x i1> addrspace(1)* %out
      34  %load = load <4 x i1>, <4 x i1> addrspace(4)* %in
      35  store <4 x i1> %load, <4 x i1> addrspace(1)* %out
      41  %load = load <8 x i1>, <8 x i1> addrspace(4)* %in
      42  store <8 x i1> %load, <8 x i1> addrspace(1)* %out
      [all …]
D | load-global-i1.ll
      13  %load = load i1, i1 addrspace(1)* %in
      14  store i1 %load, i1 addrspace(1)* %out
      20  %load = load <2 x i1>, <2 x i1> addrspace(1)* %in
      21  store <2 x i1> %load, <2 x i1> addrspace(1)* %out
      27  %load = load <3 x i1>, <3 x i1> addrspace(1)* %in
      28  store <3 x i1> %load, <3 x i1> addrspace(1)* %out
      34  %load = load <4 x i1>, <4 x i1> addrspace(1)* %in
      35  store <4 x i1> %load, <4 x i1> addrspace(1)* %out
      41  %load = load <8 x i1>, <8 x i1> addrspace(1)* %in
      42  store <8 x i1> %load, <8 x i1> addrspace(1)* %out
      [all …]
D | load-local-i1.ll
      18  %load = load i1, i1 addrspace(3)* %in
      19  store i1 %load, i1 addrspace(3)* %out
      27  %load = load <2 x i1>, <2 x i1> addrspace(3)* %in
      28  store <2 x i1> %load, <2 x i1> addrspace(3)* %out
      36  %load = load <3 x i1>, <3 x i1> addrspace(3)* %in
      37  store <3 x i1> %load, <3 x i1> addrspace(3)* %out
      45  %load = load <4 x i1>, <4 x i1> addrspace(3)* %in
      46  store <4 x i1> %load, <4 x i1> addrspace(3)* %out
      54  %load = load <8 x i1>, <8 x i1> addrspace(3)* %in
      55  store <8 x i1> %load, <8 x i1> addrspace(3)* %out
      [all …]
D | global-extload-i16.ll
      11  %a = load i16, i16 addrspace(1)* %in
      22  %a = load i16, i16 addrspace(1)* %in
      32  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
      33  %ext = zext <1 x i16> %load to <1 x i32>
      42  %load = load <1 x i16>, <1 x i16> addrspace(1)* %in
      43  %ext = sext <1 x i16> %load to <1 x i32>
      51  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
      52  %ext = zext <2 x i16> %load to <2 x i32>
      60  %load = load <2 x i16>, <2 x i16> addrspace(1)* %in
      61  %ext = sext <2 x i16> %load to <2 x i32>
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/Mem2Reg/
D | preserve-nonnull-load-metadata.ll
      9  ; CHECK: %arg.load = load float*, float** %arg, align 8
      10  ; CHECK: [[ASSUME:%(.*)]] = icmp ne float* %arg.load, null
      12  ; CHECK: ret float* %arg.load
      15  %arg.load = load float*, float** %arg, align 8
      16  store float* %arg.load, float** %buf, align 8
      17  %buf.load = load float*, float **%buf, !nonnull !0
      18  ret float* %buf.load
      25  ; CHECK: %arg.load = load float*, float** %arg, align 8
      26  ; CHECK: [[ASSUME:%(.*)]] = icmp ne float* %arg.load, null
      28  ; CHECK: ret float* %arg.load
      [all …]
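What this test checks: when mem2reg promotes the %buf slot away, the !nonnull guarantee carried by the load of %buf must not be silently dropped, so the pass re-expresses it on the forwarded value (the CHECK lines capture an icmp against null, presumably feeding an llvm.assume). A minimal sketch of the input pattern, assuming %buf is an alloca declared in the elided lines:

    define float* @sketch(float** %arg) {
    entry:
      %buf = alloca float*                                ; assumed: the slot mem2reg will remove
      %arg.load = load float*, float** %arg, align 8
      store float* %arg.load, float** %buf, align 8
      %buf.load = load float*, float** %buf, !nonnull !0  ; !nonnull must survive promotion
      ret float* %buf.load
    }

    !0 = !{}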
/external/linux-kselftest/tools/testing/selftests/powerpc/ptrace/
D | ptrace-vsx.h
      17  int validate_vsx(unsigned long *vsx, unsigned long *load)   in validate_vsx() argument
      22  if (vsx[i] != load[2 * i + 1]) {   in validate_vsx()
      24  i, vsx[i], 2 * i + 1, load[2 * i + 1]);   in validate_vsx()
      35  int validate_vmx(unsigned long vmx[][2], unsigned long *load)   in validate_vmx() argument
      41  if ((vmx[i][0] != load[64 + 2 * i]) ||   in validate_vmx()
      42  (vmx[i][1] != load[65 + 2 * i])) {   in validate_vmx()
      45  load[64 + 2 * i]);   in validate_vmx()
      48  load[65 + 2 * i]);   in validate_vmx()
      55  if ((vmx[i][0] != load[65 + 2 * i]) ||   in validate_vmx()
      56  (vmx[i][1] != load[64 + 2 * i])) {   in validate_vmx()
      [all …]
/external/llvm/test/Analysis/CostModel/X86/
D | load_store.ll
      36  ;CHECK: cost of 1 {{.*}} load
      37  load i8, i8* undef, align 4
      38  ;CHECK: cost of 1 {{.*}} load
      39  load i16, i16* undef, align 4
      40  ;CHECK: cost of 1 {{.*}} load
      41  load i32, i32* undef, align 4
      42  ;CHECK: cost of 1 {{.*}} load
      43  load i64, i64* undef, align 4
      44  ;CHECK: cost of 2 {{.*}} load
      45  load i128, i128* undef, align 4
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/
D | load_store.ll
      36  ;CHECK: cost of 1 {{.*}} load
      37  load i8, i8* undef, align 4
      38  ;CHECK: cost of 1 {{.*}} load
      39  load i16, i16* undef, align 4
      40  ;CHECK: cost of 1 {{.*}} load
      41  load i32, i32* undef, align 4
      42  ;CHECK: cost of 1 {{.*}} load
      43  load i64, i64* undef, align 4
      44  ;CHECK: cost of 2 {{.*}} load
      45  load i128, i128* undef, align 4
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/SystemZ/
D | memop-folding-int-arith.ll
      4  ; cost. In the case that both operands are loaded, one load should get a cost
      8  %li32 = load i32, i32* undef
      11  %li32_0 = load i32, i32* undef
      12  %li32_1 = load i32, i32* undef
      15  %li64 = load i64, i64* undef
      18  %li64_0 = load i64, i64* undef
      19  %li64_1 = load i64, i64* undef
      24  ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32 = load i32, i32* undef
      26  ; CHECK: Cost Model: Found an estimated cost of 0 for instruction: %li32_0 = load i32, i32* undef
      27  ; CHECK: Cost Model: Found an estimated cost of 1 for instruction: %li32_1 = load i32, i32* undef
      [all …]
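The point of this SystemZ cost-model test: a load that can be folded into the memory operand of an integer instruction is costed at 0, and when both operands of an operation are loads only one of them can be folded, so the other keeps a cost of 1. A minimal sketch of the two cases; the arithmetic users are assumed, since those lines are elided above:

    define void @sketch() {
      ; One loaded operand: the load folds into the add, so it is expected to cost 0.
      %li32 = load i32, i32* undef
      %sum1 = add i32 %li32, 7

      ; Two loaded operands: only one load can be folded; the other should cost 1.
      %li32_0 = load i32, i32* undef
      %li32_1 = load i32, i32* undef
      %sum2 = add i32 %li32_0, %li32_1
      ret void
    }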
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/LoadStoreVectorizer/X86/
D | subchain-interleaved.ll
      1  ; RUN: opt -mtriple=x86_64-unknown-linux-gnu -load-store-vectorizer -S -o - %s | FileCheck %s
      5  ; Vectorized subsets of the load/store chains in the presence of
      9  ; CHECK: load <2 x i32>
      10  ; CHECK: load i32
      12  ; CHECK: load i32
      18  %l1 = load i32, i32* %next.gep1, align 4
      19  %l2 = load i32, i32* %next.gep, align 4
      22  %l3 = load i32, i32* %next.gep1, align 4
      23  %l4 = load i32, i32* %next.gep2, align 4
      29  ; CHECK: load <3 x i32>
      [all …]
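What this LoadStoreVectorizer test is after: when loads and stores of adjacent addresses are interleaved, only the contiguous sub-chains that are safe to merge get vectorized, hence the CHECK for a <2 x i32> load followed by leftover scalar loads. A minimal self-contained sketch of such an interleaved chain; the %next.gep names mimic the test, but the surrounding function and the intervening store are assumed:

    define void @interleaved_sketch(i32* %ptr) {
      %next.gep = getelementptr i32, i32* %ptr, i64 0
      %next.gep1 = getelementptr i32, i32* %ptr, i64 1
      %next.gep2 = getelementptr i32, i32* %ptr, i64 2

      ; First sub-chain: two adjacent loads, candidates for a single <2 x i32> load.
      %l1 = load i32, i32* %next.gep1, align 4
      %l2 = load i32, i32* %next.gep, align 4

      ; An intervening store that the vectorizer has to respect when forming chains.
      store i32 0, i32* %next.gep, align 4

      ; Remaining loads form a separate sub-chain and may stay scalar.
      %l3 = load i32, i32* %next.gep1, align 4
      %l4 = load i32, i32* %next.gep2, align 4
      ret void
    }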
/external/llvm/test/Transforms/EarlyCSE/
D | invariant-loads.ll
      7  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      13  %val0 = load i32, i32* %ptr, !invariant.load !{}
      15  %val1 = load i32, i32* %ptr, !invariant.load !{}
      17  %val2 = load i32, i32* %ptr, !invariant.load !{}
      24  ; invariant load has executed, the location loaded from is known to be
      28  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      32  %val0 = load i32, i32* %ptr, !invariant.load !{}
      34  %val1 = load i32, i32* %ptr
      40  ; Negative test -- we can't forward a non-invariant load into an
      41  ; invariant load.
      [all …]
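!invariant.load is a promise that the loaded location never changes once the load has executed, so EarlyCSE may reuse the first load's value for later invariant loads of the same pointer even across calls that could otherwise write memory. A minimal sketch of that pattern; the @clobber and @use declarations are assumed stand-ins for the calls elided above:

    declare void @clobber()
    declare void @use(i32)

    define void @sketch(i32* %ptr) {
      %val0 = load i32, i32* %ptr, !invariant.load !{}
      call void @use(i32 %val0)
      call void @clobber()
      ; The second invariant load of the same pointer can be CSE'd to %val0,
      ; even though a call that may write memory sits in between.
      %val1 = load i32, i32* %ptr, !invariant.load !{}
      call void @use(i32 %val1)
      ret void
    }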
/external/llvm/test/Transforms/Inline/
D | inline-cold.ll
      20  %a1 = load volatile i32, i32* @a
      22  %a2 = load volatile i32, i32* @a
      24  %a3 = load volatile i32, i32* @a
      26  %a4 = load volatile i32, i32* @a
      28  %a5 = load volatile i32, i32* @a
      30  %a6 = load volatile i32, i32* @a
      32  %a7 = load volatile i32, i32* @a
      34  %a8 = load volatile i32, i32* @a
      36  %a9 = load volatile i32, i32* @a
      38  %a10 = load volatile i32, i32* @a
      [all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/
D | load-store.ll
      6  %a.load = load i8, i8* %a
      7  %a.add = add i8 %a.load, 1
      12  %b.load = load i16, i16* %b
      13  %b.add = add i16 %b.load, 1
      18  %c.load = load i32, i32* %c
      19  %c.add = add i32 %c.load, 1
      24  %d.load = load i64, i64* %d
      25  %d.add = add i64 %d.load, 1
      35  %a.load = load volatile i8, i8* %a
      36  %a.add = add i8 %a.load, 1
      [all …]
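Each function in this NVPTX test does the same read-increment-write for a different integer width, first with plain loads/stores and then with volatile ones, so the expected ld/st forms can be checked per type. A minimal sketch of one such function; the write-back store is assumed, since the matched lines stop at the add:

    define void @sketch_i32(i32* %c) {
      %c.load = load i32, i32* %c
      %c.add = add i32 %c.load, 1
      store i32 %c.add, i32* %c      ; assumed: the incremented value is written back
      ret void
    }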
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/
D | invariant-loads.ll
      8  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      14  %val0 = load i32, i32* %ptr, !invariant.load !{}
      16  %val1 = load i32, i32* %ptr, !invariant.load !{}
      18  %val2 = load i32, i32* %ptr, !invariant.load !{}
      27  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
      31  %val0 = load i32, i32* %ptr, !invariant.load !{}
      33  %val1 = load i32, i32* %ptr
      39  ; We can forward a non-invariant load into an invariant load.
      42  ; CHECK: %val0 = load i32, i32* %ptr
      46  %val0 = load i32, i32* %ptr
      [all …]
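Note the difference from the older copy of invariant-loads.ll above: there the comment calls forwarding a non-invariant load into an invariant load a negative test, while this llvm-7.0 copy states that the forwarding is allowed, since the invariant load reads the same location and only adds a stronger guarantee. A minimal sketch of the forwarding case, with an assumed @use declaration:

    declare void @use(i32)

    define void @forward_sketch(i32* %ptr) {
      %val0 = load i32, i32* %ptr
      ; The later invariant load reads the same location, so %val0 can be reused for it.
      %val1 = load i32, i32* %ptr, !invariant.load !{}
      call void @use(i32 %val0)
      call void @use(i32 %val1)
      ret void
    }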
/external/llvm/test/Transforms/GVN/
D | volatile.ll
      5  ; Check that we can bypass a volatile load when searching
      6  ; for dependencies of a non-volatile load
      9  ; CHECK: %0 = load volatile i32, i32* %q
      12  %x = load i32, i32* %p
      13  load volatile i32, i32* %q
      14  %y = load i32, i32* %p
      20  ; volatile, this would be (in effect) removing the volatile load
      23  ; CHECK: %x = load i32, i32* %p
      24  ; CHECK-NEXT: %y = load volatile i32, i32* %p
      27  %x = load i32, i32* %p
      [all …]
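The first case in volatile.ll: a volatile load of an unrelated pointer does not stop GVN from proving that two non-volatile loads of %p return the same value, so %y can be replaced by %x while the volatile load itself is kept. The second case warns about the converse: replacing a volatile load with an earlier non-volatile one would effectively delete the volatile access. A minimal sketch of the first shape; the return type and the use of the loaded values are assumed:

    define i32 @bypass_sketch(i32* %p, i32* %q) {
      %x = load i32, i32* %p
      ; The volatile access must stay, but it does not clobber %p.
      %vol = load volatile i32, i32* %q
      %y = load i32, i32* %p        ; GVN can replace %y with %x
      %sum = add i32 %x, %y
      ret i32 %sum
    }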