/external/llvm/test/CodeGen/X86/ |
D | pr18846.ll |
    58  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
    70  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
    71  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
    72  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
    80  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
    81  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
    86  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
    87  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
    88  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4397) #1
    89  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
    [all …]
|
D | sse-intrinsics-x86-upgrade.ll |
    22  call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
    25  declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
|
D | avx-intrinsics-x86-upgrade.ll |
    394  call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
    397  declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
    411  call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
    414  declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
    423  call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
    426  declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
    444  call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
    447  declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
    461  call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
    464  declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
    [all …]
|
D | pmovext.ll |
    16  tail call void @llvm.x86.sse2.storeu.dq(i8* %5, <16 x i8> %6) nounwind
    21  declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
|
D | vec-loadsingles-alignment.ll |
    30  tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
    34  declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
|
D | sse2-intrinsics-x86-upgrade.ll |
    101  call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
    104  declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
    118  call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
    121  declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
|
D | avx512bwvl-intrinsics-upgrade.ll |
    124  declare void @llvm.x86.avx512.mask.storeu.b.128(i8*, <16 x i8>, i16)
    133  call void @llvm.x86.avx512.mask.storeu.b.128(i8* %ptr1, <16 x i8> %x1, i16 %x2)
    134  call void @llvm.x86.avx512.mask.storeu.b.128(i8* %ptr2, <16 x i8> %x1, i16 -1)
    138  declare void @llvm.x86.avx512.mask.storeu.b.256(i8*, <32 x i8>, i32)
    147  call void @llvm.x86.avx512.mask.storeu.b.256(i8* %ptr1, <32 x i8> %x1, i32 %x2)
    148  call void @llvm.x86.avx512.mask.storeu.b.256(i8* %ptr2, <32 x i8> %x1, i32 -1)
    152  declare void @llvm.x86.avx512.mask.storeu.w.128(i8*, <8 x i16>, i8)
    161  call void @llvm.x86.avx512.mask.storeu.w.128(i8* %ptr1, <8 x i16> %x1, i8 %x2)
    162  call void @llvm.x86.avx512.mask.storeu.w.128(i8* %ptr2, <8 x i16> %x1, i8 -1)
    166  declare void @llvm.x86.avx512.mask.storeu.w.256(i8*, <16 x i16>, i16)
    [all …]
|
D | avx512-intrinsics-upgrade.ll |
    193  call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
    194  call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
    198  declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )
    207  call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
    208  call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
    212  declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)
    249  call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
    250  call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
    254  declare void @llvm.x86.avx512.mask.storeu.q.512(i8*, <8 x i64>, i8)
    263  call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
    [all …]
|
D | avx512bw-intrinsics-upgrade.ll |
    5  declare void @llvm.x86.avx512.mask.storeu.b.512(i8*, <64 x i8>, i64)
    23  call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr1, <64 x i8> %x1, i64 %x2)
    24  call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr2, <64 x i8> %x1, i64 -1)
    28  declare void @llvm.x86.avx512.mask.storeu.w.512(i8*, <32 x i16>, i32)
    46  call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr1, <32 x i16> %x1, i32 %x2)
    47  call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr2, <32 x i16> %x1, i32 -1)
|
D | avx512vl-intrinsics-upgrade.ll |
    448  declare void @llvm.x86.avx512.mask.storeu.pd.128(i8*, <2 x double>, i8)
    457  call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr1, <2 x double> %x1, i8 %x2)
    458  call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr2, <2 x double> %x1, i8 -1)
    462  declare void @llvm.x86.avx512.mask.storeu.pd.256(i8*, <4 x double>, i8)
    471  call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr1, <4 x double> %x1, i8 %x2)
    472  call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr2, <4 x double> %x1, i8 -1)
    504  declare void @llvm.x86.avx512.mask.storeu.ps.128(i8*, <4 x float>, i8)
    513  call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr1, <4 x float> %x1, i8 %x2)
    514  call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr2, <4 x float> %x1, i8 -1)
    518  declare void @llvm.x86.avx512.mask.storeu.ps.256(i8*, <8 x float>, i8)
    [all …]
|
D | avx2-intrinsics-x86-upgrade.ll |
    378  call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
    381  declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
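Note on the *-intrinsics-x86-upgrade.ll hits above: llvm.x86.sse.storeu.ps, llvm.x86.sse2.storeu.dq/pd and llvm.x86.avx.storeu.*.256 are legacy unaligned-store intrinsics that these tests keep only to exercise AutoUpgrade; current IR writes the same operation as an ordinary store with align 1. A minimal sketch in LLVM IR (typed-pointer syntax to match the quoted tests; the function names and %p/%x values are illustrative, not taken from any of the files):

    ; Legacy form, as quoted in the upgrade tests.
    declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
    define void @storeu_ps_legacy(i8* %p, <4 x float> %x) {
      call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
      ret void
    }

    ; Equivalent modern form: an unaligned (align 1) vector store.
    define void @storeu_ps_native(i8* %p, <4 x float> %x) {
      %cast = bitcast i8* %p to <4 x float>*
      store <4 x float> %x, <4 x float>* %cast, align 1
      ret void
    }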
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | pr18846.ll |
    58  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
    70  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
    71  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
    72  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
    80  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
    81  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
    86  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
    87  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
    88  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4397) #1
    89  call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
    [all …]
|
D | pmovext.ll |
    16  tail call void @llvm.x86.sse2.storeu.dq(i8* %5, <16 x i8> %6) nounwind
    21  declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
|
D | vec-loadsingles-alignment.ll |
    30  tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
    34  declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
|
D | avx-intrinsics-x86-upgrade.ll |
    636  call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
    639  declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
    686  call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
    689  declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
    714  call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
    717  declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
    763  call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
    766  declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
    805  call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
    808  declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
    [all …]
|
D | sse-intrinsics-x86-upgrade.ll |
    85  call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
    88  declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
|
D | sse2-intrinsics-x86-upgrade.ll |
    320  call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
    323  declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
    391  call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
    394  declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
|
D | avx2-intrinsics-x86-upgrade.ll |
    407  call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
    410  declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
|
D | avx512vl-intrinsics-upgrade.ll |
    1151  declare void @llvm.x86.avx512.mask.storeu.pd.128(i8*, <2 x double>, i8)
    1170  call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr1, <2 x double> %x1, i8 %x2)
    1171  call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr2, <2 x double> %x1, i8 -1)
    1175  declare void @llvm.x86.avx512.mask.storeu.pd.256(i8*, <4 x double>, i8)
    1196  call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr1, <4 x double> %x1, i8 %x2)
    1197  call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr2, <4 x double> %x1, i8 -1)
    1251  declare void @llvm.x86.avx512.mask.storeu.ps.128(i8*, <4 x float>, i8)
    1270  call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr1, <4 x float> %x1, i8 %x2)
    1271  call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr2, <4 x float> %x1, i8 -1)
    1275  declare void @llvm.x86.avx512.mask.storeu.ps.256(i8*, <8 x float>, i8)
    [all …]
|
D | avx512bw-intrinsics-upgrade.ll |
    106  declare void @llvm.x86.avx512.mask.storeu.b.512(i8*, <64 x i8>, i64)
    126  call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr1, <64 x i8> %x1, i64 %x2)
    127  call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr2, <64 x i8> %x1, i64 -1)
    131  declare void @llvm.x86.avx512.mask.storeu.w.512(i8*, <32 x i16>, i32)
    151  call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr1, <32 x i16> %x1, i32 %x2)
    152  call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr2, <32 x i16> %x1, i32 -1)
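The avx512*-intrinsics-upgrade.ll hits above quote the masked variants (llvm.x86.avx512.mask.storeu.*), whose last operand is a lane mask; an all-ones mask such as i8 -1 writes every lane. These upgrade tests suggest the calls are rewritten to the generic llvm.masked.store intrinsic with align 1; a hedged sketch in LLVM IR (typed-pointer syntax, illustrative names, not copied from the tests):

    declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)

    ; Rough equivalent of llvm.x86.avx512.mask.storeu.ps.128(i8* %p, <4 x float> %x, i8 %mask):
    ; reinterpret the i8 mask as per-lane i1 bits and keep the low 4 bits as the store mask.
    define void @mask_storeu_ps_128(i8* %p, <4 x float> %x, i8 %mask) {
      %ptr  = bitcast i8* %p to <4 x float>*
      %bits = bitcast i8 %mask to <8 x i1>
      %m    = shufflevector <8 x i1> %bits, <8 x i1> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
      call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %x, <4 x float>* %ptr, i32 1, <4 x i1> %m)
      ret void
    }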
|
/external/llvm-project/llvm/test/Instrumentation/MemorySanitizer/ |
D | msan_x86intrinsics.ll |
    16  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
    20  declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
|
/external/llvm/test/Instrumentation/MemorySanitizer/ |
D | msan_basic.ll |
    624  call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
    628  declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
|
/external/llvm/lib/Target/SystemZ/ |
D | SystemZOperators.td | 583 class storeu<SDPatternOperator operator, SDPatternOperator store = store>
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/SystemZ/ |
D | SystemZOperators.td | 757 class storeu<SDPatternOperator operator, SDPatternOperator store = store>
|
/external/deqp/external/openglcts/modules/gl/ |
D | gl4cDirectStateAccessTexturesTests.cpp |
    13740  glw::GLuint storeu[4] = {}; in iterate() local
    13822  gl.getTextureParameterIuiv(texture_2D, pname_invalid, storeu); in iterate()
    13840  gl.getTextureParameterIuiv(texture_invalid, GL_TEXTURE_TARGET, storeu); in iterate()
    13860  gl.getTextureParameterIuiv(texture_buffer, GL_TEXTURE_TARGET, storeu); in iterate()
|