/external/llvm/test/Transforms/EarlyCSE/

invariant-loads.ll
    7  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
    8  ; CHECK: call void @clobber_and_use(i32 %val0)
    9  ; CHECK: call void @clobber_and_use(i32 %val0)
   10  ; CHECK: call void @clobber_and_use(i32 %val0)
   13  %val0 = load i32, i32* %ptr, !invariant.load !{}
   14  call void @clobber_and_use(i32 %val0)
   28  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
   29  ; CHECK: call void @clobber_and_use(i32 %val0)
   30  ; CHECK: call void @clobber_and_use(i32 %val0)
   32  %val0 = load i32, i32* %ptr, !invariant.load !{}
  [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/

invariant-loads.ll
    8  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
    9  ; CHECK: call void @clobber_and_use(i32 %val0)
   10  ; CHECK: call void @clobber_and_use(i32 %val0)
   11  ; CHECK: call void @clobber_and_use(i32 %val0)
   14  %val0 = load i32, i32* %ptr, !invariant.load !{}
   15  call void @clobber_and_use(i32 %val0)
   27  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
   28  ; CHECK: call void @clobber_and_use(i32 %val0)
   29  ; CHECK: call void @clobber_and_use(i32 %val0)
   31  %val0 = load i32, i32* %ptr, !invariant.load !{}
  [all …]

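The two listings above are the same EarlyCSE test, vendored twice (upstream LLVM and the llvm-7.0 copy under swiftshader). What the CHECK lines assert is that a load tagged !invariant.load is forwarded to every later use, even across calls that would normally clobber memory. Below is a minimal standalone sketch of that pattern, written for illustration (it is not the test file; the helper name @clobber_and_use comes from the listing, everything else is assumed):

    ; Run through: opt -early-cse -S
    declare void @clobber_and_use(i32)

    define void @f(i32* %ptr) {
      ; !invariant.load promises *%ptr never changes once dereferenceable,
      ; so EarlyCSE may keep using %val0 across the clobbering call ...
      %val0 = load i32, i32* %ptr, !invariant.load !0
      call void @clobber_and_use(i32 %val0)
      ; ... and fold this second load away, replacing %val1 with %val0.
      %val1 = load i32, i32* %ptr, !invariant.load !0
      call void @clobber_and_use(i32 %val1)
      ret void
    }

    !0 = !{}

After -early-cse the second load disappears, which is why the CHECK lines expect the same %val0 in every call.
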
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/NVPTX/

mulwide.ll
    9  %val0 = sext i16 %a to i32
   11  %val2 = mul i32 %val0, %val1
   20  %val0 = zext i16 %a to i32
   22  %val2 = mul i32 %val0, %val1
   31  %val0 = sext i8 %a to i32
   33  %val2 = mul i32 %val0, %val1
   42  %val0 = zext i8 %a to i32
   44  %val2 = mul i32 %val0, %val1
   53  %val0 = sext i32 %a to i64
   55  %val2 = mul i64 %val0, %val1
  [all …]

bfe.ll
    9  %val0 = ashr i32 %a, 4
   10  %val1 = and i32 %val0, 15
   19  %val0 = ashr i32 %a, 3
   20  %val1 = and i32 %val0, 7
   29  %val0 = ashr i32 %a, 5
   30  %val1 = and i32 %val0, 7

/external/llvm/test/CodeGen/NVPTX/

mulwide.ll
    9  %val0 = sext i16 %a to i32
   11  %val2 = mul i32 %val0, %val1
   20  %val0 = zext i16 %a to i32
   22  %val2 = mul i32 %val0, %val1
   31  %val0 = sext i8 %a to i32
   33  %val2 = mul i32 %val0, %val1
   42  %val0 = zext i8 %a to i32
   44  %val2 = mul i32 %val0, %val1
   53  %val0 = sext i32 %a to i64
   55  %val2 = mul i64 %val0, %val1
  [all …]

bfe.ll
    9  %val0 = ashr i32 %a, 4
   10  %val1 = and i32 %val0, 15
   19  %val0 = ashr i32 %a, 3
   20  %val1 = and i32 %val0, 7
   29  %val0 = ashr i32 %a, 5
   30  %val1 = and i32 %val0, 7

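Both NVPTX directories carry identical copies of these two tests. A condensed sketch of the two IR shapes they pin down (function names assumed, not taken from the tests): a narrow value extended and then multiplied in the wider type is selectable as a single widening multiply (PTX mul.wide), and an ashr feeding a low-bit mask is selectable as a single bit-field extract (bfe):

    ; Try: llc -march=nvptx64 on this file
    define i32 @mulwide_s16(i16 %a, i16 %b) {
      %val0 = sext i16 %a to i32        ; widen both operands ...
      %val1 = sext i16 %b to i32
      %val2 = mul i32 %val0, %val1      ; ... then multiply in the wide type
      ret i32 %val2
    }

    define i32 @bfe_bits4to7(i32 %a) {
      %val0 = ashr i32 %a, 4            ; shift the field down ...
      %val1 = and i32 %val0, 15         ; ... and mask to its 4-bit width
      ret i32 %val1
    }
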
/external/libaom/libaom/aom_dsp/mips/

intrapred_msa.c
  158  uint32_t val0, val1;  in intra_predict_dc_4x4_msa() local
  164  val0 = LW(src_top);  in intra_predict_dc_4x4_msa()
  166  INSERT_W2_SB(val0, val1, src);  in intra_predict_dc_4x4_msa()
  172  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_4x4_msa()
  174  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_4x4_msa()
  179  uint32_t val0;  in intra_predict_dc_tl_4x4_msa() local
  184  val0 = LW(src);  in intra_predict_dc_tl_4x4_msa()
  185  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);  in intra_predict_dc_tl_4x4_msa()
  190  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_tl_4x4_msa()
  192  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_tl_4x4_msa()
  [all …]

/external/deqp/data/gles31/shaders/es31/

linkage_tessellation_uniform_types.test
    9  uniform float val0 = -1.25;
   27  tc_out[gl_InvocationID] = val0;
   61  uniform vec2 val0 = [ vec2(-1.25, 1.25) ];
   79  tc_out[gl_InvocationID] = val0;
  113  uniform vec3 val0 = [ vec3(-1.25, 1.25, -9.5) ];
  131  tc_out[gl_InvocationID] = val0;
  165  uniform vec4 val0 = [ vec4(-1.25, 1.25, -9.5, -12.2) ];
  183  tc_out[gl_InvocationID] = val0;
  217  uniform mat2 val0 = [ mat2(-1.25, 1.25, -9.5, -12.2) ];
  235  tc_out[gl_InvocationID] = val0;
  [all …]

/external/deqp/data/gles31/shaders/es32/

linkage_tessellation_uniform_types.test
   26  uniform float val0 = -1.25;
   44  tc_out[gl_InvocationID] = val0;
   77  uniform vec2 val0 = [ vec2(-1.25, 1.25) ];
   95  tc_out[gl_InvocationID] = val0;
  128  uniform vec3 val0 = [ vec3(-1.25, 1.25, -9.5) ];
  146  tc_out[gl_InvocationID] = val0;
  179  uniform vec4 val0 = [ vec4(-1.25, 1.25, -9.5, -12.2) ];
  197  tc_out[gl_InvocationID] = val0;
  230  uniform mat2 val0 = [ mat2(-1.25, 1.25, -9.5, -12.2) ];
  248  tc_out[gl_InvocationID] = val0;
  [all …]

/external/libvpx/libvpx/vpx_dsp/mips/

intrapred_msa.c
  156  uint32_t val0, val1;  in intra_predict_dc_4x4_msa() local
  162  val0 = LW(src_top);  in intra_predict_dc_4x4_msa()
  164  INSERT_W2_SB(val0, val1, src);  in intra_predict_dc_4x4_msa()
  170  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_4x4_msa()
  172  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_4x4_msa()
  177  uint32_t val0;  in intra_predict_dc_tl_4x4_msa() local
  182  val0 = LW(src);  in intra_predict_dc_tl_4x4_msa()
  183  data = (v16i8)__msa_insert_w((v4i32)data, 0, val0);  in intra_predict_dc_tl_4x4_msa()
  188  val0 = __msa_copy_u_w((v4i32)store, 0);  in intra_predict_dc_tl_4x4_msa()
  190  SW4(val0, val0, val0, val0, dst, dst_stride);  in intra_predict_dc_tl_4x4_msa()
  [all …]

/external/llvm/test/CodeGen/AMDGPU/

ds_read2_superreg.ll
   18  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0, align 4
   20  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
   32  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0
   34  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
   49  %val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0, align 4
   50  %elt0 = extractelement <4 x float> %val0, i32 0
   51  %elt1 = extractelement <4 x float> %val0, i32 1
   52  %elt2 = extractelement <4 x float> %val0, i32 2
   53  %elt3 = extractelement <4 x float> %val0, i32 3
   74  %val0 = load <3 x float>, <3 x float> addrspace(3)* %arrayidx0, align 4
  [all …]

cgp-bitfield-extract.ll
   15  ; OPT-NEXT: %val0 = and i32 %0, 255
   45  %val0 = and i32 %shr, 255
   55  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
   66  ; OPT-NEXT: %val0 = and i32 %0, 255
   85  %val0 = and i32 %shr, 255
   95  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
  107  ; OPT-NEXT: %val0 = and i16 %0, 255
  137  %val0 = and i16 %shr, 255
  147  %phi = phi i16 [ %val0, %bb0 ], [ %val1, %bb1 ]
  162  ; OPT-NEXT: %val0 = and i64 %0, 255
  [all …]

ds_read2st64.ll
   16  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   20  %sum = fadd float %val0, %val1
   36  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   40  %sum = fadd float %val0, %val1
   56  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   60  %sum = fadd float %val0, %val1
   76  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   80  %sum = fadd float %val0, %val1
   92  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   96  %sum = fadd float %val0, %val1
  [all …]

annotate-kernel-features.ll
   31  %val0 = call i32 @llvm.r600.read.tgid.y()
   32  store volatile i32 %val0, i32 addrspace(1)* %ptr
   40  %val0 = call i32 @llvm.r600.read.tgid.x()
   42  store volatile i32 %val0, i32 addrspace(1)* %ptr
   56  %val0 = call i32 @llvm.r600.read.tgid.x()
   58  store volatile i32 %val0, i32 addrspace(1)* %ptr
   65  %val0 = call i32 @llvm.r600.read.tgid.y()
   67  store volatile i32 %val0, i32 addrspace(1)* %ptr
   74  %val0 = call i32 @llvm.r600.read.tgid.x()
   77  store volatile i32 %val0, i32 addrspace(1)* %ptr
  [all …]

ds_read2.ll
   18  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   22  %sum = fadd float %val0, %val1
   37  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   41  %sum = fadd float %val0, %val1
   55  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   59  %sum = fadd float %val0, %val1
   73  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   78  %sum.0 = fadd float %val0, %val1
  105  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
  110  %sum.0 = fadd float %val0, %val1
  [all …]

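Every ds_read2* row in this directory reduces to the same skeleton: two loads from LDS (addrspace(3)) at related offsets whose results feed one fadd. A self-contained version of that skeleton, with the LDS buffer and all names assumed rather than taken from any one test:

    @lds = internal addrspace(3) global [512 x float] undef, align 4

    define amdgpu_kernel void @read2(float addrspace(1)* %out, i32 %i) {
      %j = add i32 %i, 8                 ; second element a fixed distance away
      %arrayidx0 = getelementptr [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %i
      %arrayidx1 = getelementptr [512 x float], [512 x float] addrspace(3)* @lds, i32 0, i32 %j
      %val0 = load float, float addrspace(3)* %arrayidx0, align 4
      %val1 = load float, float addrspace(3)* %arrayidx1, align 4
      %sum = fadd float %val0, %val1     ; both loads feed one use
      store float %sum, float addrspace(1)* %out, align 4
      ret void
    }

When the two offsets fit the encoding, the backend can merge the pair into a single ds_read2_b32 (or, for offsets that are multiples of 64 dwords, ds_read2st64_b32), which is what these tests check.
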
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

ds_read2_superreg.ll
   18  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0, align 4
   20  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
   32  %val0 = load <2 x float>, <2 x float> addrspace(3)* %arrayidx0
   34  store <2 x float> %val0, <2 x float> addrspace(1)* %out.gep
   49  %val0 = load <4 x float>, <4 x float> addrspace(3)* %arrayidx0, align 4
   50  %elt0 = extractelement <4 x float> %val0, i32 0
   51  %elt1 = extractelement <4 x float> %val0, i32 1
   52  %elt2 = extractelement <4 x float> %val0, i32 2
   53  %elt3 = extractelement <4 x float> %val0, i32 3
   74  %val0 = load <3 x float>, <3 x float> addrspace(3)* %arrayidx0, align 4
  [all …]

cgp-bitfield-extract.ll
   15  ; OPT-NEXT: %val0 = and i32 %0, 255
   45  %val0 = and i32 %shr, 255
   55  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
   66  ; OPT-NEXT: %val0 = and i32 %0, 255
   85  %val0 = and i32 %shr, 255
   95  %phi = phi i32 [ %val0, %bb0 ], [ %val1, %bb1 ]
  107  ; OPT-NEXT: %val0 = and i16 %0, 255
  143  %val0 = and i16 %shr, 255
  153  %phi = phi i16 [ %val0, %bb0 ], [ %val1, %bb1 ]
  168  ; OPT-NEXT: %val0 = and i64 %0, 255
  [all …]

annotate-kernel-features.ll
   31  %val0 = call i32 @llvm.r600.read.tgid.y()
   32  store volatile i32 %val0, i32 addrspace(1)* %ptr
   40  %val0 = call i32 @llvm.r600.read.tgid.x()
   42  store volatile i32 %val0, i32 addrspace(1)* %ptr
   56  %val0 = call i32 @llvm.r600.read.tgid.x()
   58  store volatile i32 %val0, i32 addrspace(1)* %ptr
   65  %val0 = call i32 @llvm.r600.read.tgid.y()
   67  store volatile i32 %val0, i32 addrspace(1)* %ptr
   74  %val0 = call i32 @llvm.r600.read.tgid.x()
   77  store volatile i32 %val0, i32 addrspace(1)* %ptr
  [all …]

ds_read2st64.ll
   20  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   24  %sum = fadd float %val0, %val1
   43  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   47  %sum = fadd float %val0, %val1
   66  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   70  %sum = fadd float %val0, %val1
   89  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
   93  %sum = fadd float %val0, %val1
  108  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
  112  %sum = fadd float %val0, %val1
  [all …]

sminmax.v2i16.ll
  162  …(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> %val0, <2 x i16> %val1) #…
  163  %cond0 = icmp sgt <2 x i16> %val0, %val1
  164  %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
  165  %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
  176  %val0 = load volatile <2 x i16>, <2 x i16> addrspace(1)* %ptr0
  179  %cond0 = icmp sgt <2 x i16> %val0, %val1
  180  %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1
  181  %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0
  193  …(<4 x i16> addrspace(1)* %out0, <4 x i16> addrspace(1)* %out1, <4 x i16> %val0, <4 x i16> %val1) #…
  194  %cond0 = icmp sgt <4 x i16> %val0, %val1
  [all …]

annotate-kernel-features-hsa.ll
   31  %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   32  store volatile i32 %val0, i32 addrspace(1)* %ptr
   40  %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
   42  store volatile i32 %val0, i32 addrspace(1)* %ptr
   56  %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
   58  store volatile i32 %val0, i32 addrspace(1)* %ptr
   65  %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   67  store volatile i32 %val0, i32 addrspace(1)* %ptr
   74  %val0 = call i32 @llvm.amdgcn.workgroup.id.x()
   77  store volatile i32 %val0, i32 addrspace(1)* %ptr
  [all …]

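The sminmax.v2i16.ll rows (162-165 and 193-194 above) show the canonical min-plus-max idiom: one signed compare feeding two selects with swapped operands. Reassembled into a self-contained function from those lines (the function name and the stores are assumptions; the compare and selects are verbatim from the listing):

    define void @smax_smin_v2i16(<2 x i16> addrspace(1)* %out0, <2 x i16> addrspace(1)* %out1, <2 x i16> %val0, <2 x i16> %val1) {
      %cond0 = icmp sgt <2 x i16> %val0, %val1
      %sel0 = select <2 x i1> %cond0, <2 x i16> %val0, <2 x i16> %val1  ; max
      %sel1 = select <2 x i1> %cond0, <2 x i16> %val1, <2 x i16> %val0  ; min
      store <2 x i16> %sel0, <2 x i16> addrspace(1)* %out0
      store <2 x i16> %sel1, <2 x i16> addrspace(1)* %out1
      ret void
    }

On subtargets with packed 16-bit ALU operations, this pair can lower to v_pk_max_i16 and v_pk_min_i16.
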
/external/tensorflow/tensorflow/lite/toco/graph_transformations/

resolve_constant_binary.cc
  132  const auto val0 = input0_data[Offset(input0_shape, input0_indices)];  in EvaluateBinaryOperatorOnConstantInputs() local
  137  outval = val0 + val1;  in EvaluateBinaryOperatorOnConstantInputs()
  139  outval = val0 * val1;  in EvaluateBinaryOperatorOnConstantInputs()
  141  outval = val0 - val1;  in EvaluateBinaryOperatorOnConstantInputs()
  143  outval = val0 / val1;  in EvaluateBinaryOperatorOnConstantInputs()
  145  outval = floor(val0 / val1);  in EvaluateBinaryOperatorOnConstantInputs()
  147  outval = val0 - (floor(val0 / val1) * val1);  in EvaluateBinaryOperatorOnConstantInputs()
  149  outval = std::min(val0, val1);  in EvaluateBinaryOperatorOnConstantInputs()
  151  outval = std::max(val0, val1);  in EvaluateBinaryOperatorOnConstantInputs()
  153  outval = val0 < val1;  in EvaluateBinaryOperatorOnConstantInputs()
  [all …]

/external/llvm/test/CodeGen/AArch64/

logical-imm.ll
    9  %val0 = and i32 %in32, 2863311530
   10  store volatile i32 %val0, i32* @var32
   31  %val0 = or i32 %in32, 2863311530
   32  store volatile i32 %val0, i32* @var32
   53  %val0 = xor i32 %in32, 2863311530
   54  store volatile i32 %val0, i32* @var32
   74  %val0 = add i32 %in32, 2863311530
   75  store i32 %val0, i32* @var32

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/

logical-imm.ll
    9  %val0 = and i32 %in32, 2863311530
   10  store volatile i32 %val0, i32* @var32
   31  %val0 = or i32 %in32, 2863311530
   32  store volatile i32 %val0, i32* @var32
   53  %val0 = xor i32 %in32, 2863311530
   54  store volatile i32 %val0, i32* @var32
   74  %val0 = add i32 %in32, 2863311530
   75  store i32 %val0, i32* @var32

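logical-imm.ll is likewise present identically in both trees. The constant is the interesting part: 2863311530 is 0xAAAAAAAA, an alternating-bit pattern that fits AArch64's logical-immediate encoding, so the and/or/xor cases can each fold the constant into one instruction; the add at line 74 is the deliberate contrast, since add immediates are limited to 12 bits and this constant has to be materialized separately. A trimmed sketch of the and case (an illustration, not the test file):

    @var32 = global i32 0

    define void @and_imm(i32 %in32) {
      ; 0xAAAAAAAA = repeating "10" bit pattern, encodable as a logical immediate
      %val0 = and i32 %in32, 2863311530
      store volatile i32 %val0, i32* @var32
      ret void
    }
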
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/

merge-consecutive-loads-256.ll
   22  %val0 = load <2 x double>, <2 x double>* %ptr0
   24  …%res = shufflevector <2 x double> %val0, <2 x double> %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   40  %val0 = load <2 x double>, <2 x double>* %ptr0
   41  …%res = shufflevector <2 x double> %val0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i3…
   60  %val0 = load double, double* %ptr0
   64  %res0 = insertelement <4 x double> undef, double %val0, i32 0
   83  %val0 = load double, double* %ptr0
   84  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  102  %val0 = load double, double* %ptr0
  104  %res0 = insertelement <4 x double> undef, double %val0, i32 0
  [all …]

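merge-consecutive-loads-256.ll checks that two adjacent 128-bit loads joined by a shufflevector collapse into a single 256-bit load. A minimal reconstruction of the first case (lines 22-24), with a single hypothetical base pointer %ptr standing in for the test's own pointer setup:

    define <4 x double> @merge_v4f64(<2 x double>* %ptr) {
      %ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 0
      %ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 1  ; 16 bytes later
      %val0 = load <2 x double>, <2 x double>* %ptr0
      %val1 = load <2 x double>, <2 x double>* %ptr1
      ; consecutive halves glued back together: a candidate for one ymm load
      %res = shufflevector <2 x double> %val0, <2 x double> %val1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
      ret <4 x double> %res
    }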