
Searched refs:storeu (Results 1 – 25 of 29) sorted by relevance

/external/llvm/test/CodeGen/X86/
pr18846.ll
58 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
70 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
71 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
72 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
80 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
81 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
86 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
87 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
88 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4397) #1
89 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
[all …]
sse-intrinsics-x86-upgrade.ll
22 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
25 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
avx-intrinsics-x86-upgrade.ll
394 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
397 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
411 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
414 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
423 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
426 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
444 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
447 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
461 call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
464 declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
[all …]
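For context, a minimal sketch of how the legacy unaligned-store intrinsics listed above are called from IR. The declaration is copied from the tests; the module layout, function name, and typed-pointer syntax are illustrative only. The *-upgrade.ll tests exercise LLVM's auto-upgrade path, which rewrites such calls into a plain store with align 1.

; Store a <4 x float> through the legacy SSE unaligned-store intrinsic.
declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind

define void @store_unaligned_ps(i8* %dst, <4 x float> %val) {
  call void @llvm.x86.sse.storeu.ps(i8* %dst, <4 x float> %val)
  ; After auto-upgrade this becomes roughly:
  ;   %p = bitcast i8* %dst to <4 x float>*
  ;   store <4 x float> %val, <4 x float>* %p, align 1
  ret void
}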
pmovext.ll
16 tail call void @llvm.x86.sse2.storeu.dq(i8* %5, <16 x i8> %6) nounwind
21 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
vec-loadsingles-alignment.ll
30 tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
34 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
sse2-intrinsics-x86-upgrade.ll
101 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
104 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
118 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
121 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
avx512bwvl-intrinsics-upgrade.ll
124 declare void @llvm.x86.avx512.mask.storeu.b.128(i8*, <16 x i8>, i16)
133 call void @llvm.x86.avx512.mask.storeu.b.128(i8* %ptr1, <16 x i8> %x1, i16 %x2)
134 call void @llvm.x86.avx512.mask.storeu.b.128(i8* %ptr2, <16 x i8> %x1, i16 -1)
138 declare void @llvm.x86.avx512.mask.storeu.b.256(i8*, <32 x i8>, i32)
147 call void @llvm.x86.avx512.mask.storeu.b.256(i8* %ptr1, <32 x i8> %x1, i32 %x2)
148 call void @llvm.x86.avx512.mask.storeu.b.256(i8* %ptr2, <32 x i8> %x1, i32 -1)
152 declare void @llvm.x86.avx512.mask.storeu.w.128(i8*, <8 x i16>, i8)
161 call void @llvm.x86.avx512.mask.storeu.w.128(i8* %ptr1, <8 x i16> %x1, i8 %x2)
162 call void @llvm.x86.avx512.mask.storeu.w.128(i8* %ptr2, <8 x i16> %x1, i8 -1)
166 declare void @llvm.x86.avx512.mask.storeu.w.256(i8*, <16 x i16>, i16)
[all …]
avx512-intrinsics-upgrade.ll
193 call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
194 call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
198 declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )
207 call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
208 call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
212 declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)
249 call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
250 call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
254 declare void @llvm.x86.avx512.mask.storeu.q.512(i8*, <8 x i64>, i8)
263 call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
[all …]
avx512bw-intrinsics-upgrade.ll
5 declare void @llvm.x86.avx512.mask.storeu.b.512(i8*, <64 x i8>, i64)
23 call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr1, <64 x i8> %x1, i64 %x2)
24 call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr2, <64 x i8> %x1, i64 -1)
28 declare void @llvm.x86.avx512.mask.storeu.w.512(i8*, <32 x i16>, i32)
46 call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr1, <32 x i16> %x1, i32 %x2)
47 call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr2, <32 x i16> %x1, i32 -1)
avx512vl-intrinsics-upgrade.ll
448 declare void @llvm.x86.avx512.mask.storeu.pd.128(i8*, <2 x double>, i8)
457 call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr1, <2 x double> %x1, i8 %x2)
458 call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr2, <2 x double> %x1, i8 -1)
462 declare void @llvm.x86.avx512.mask.storeu.pd.256(i8*, <4 x double>, i8)
471 call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr1, <4 x double> %x1, i8 %x2)
472 call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr2, <4 x double> %x1, i8 -1)
504 declare void @llvm.x86.avx512.mask.storeu.ps.128(i8*, <4 x float>, i8)
513 call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr1, <4 x float> %x1, i8 %x2)
514 call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr2, <4 x float> %x1, i8 -1)
518 declare void @llvm.x86.avx512.mask.storeu.ps.256(i8*, <8 x float>, i8)
[all …]
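The AVX-512 variants above add a write mask: the intrinsic takes the destination pointer, the vector data, and a bitmask selecting which elements are stored, with -1 (all ones) meaning an unconditional store, as the paired calls in these tests show. A minimal sketch using one declaration copied from the results (hypothetical module and function name):

declare void @llvm.x86.avx512.mask.storeu.ps.128(i8*, <4 x float>, i8)

define void @masked_storeu_ps(i8* %dst, <4 x float> %val, i8 %mask) {
  ; Store only the elements whose mask bit is set.
  call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %dst, <4 x float> %val, i8 %mask)
  ; A mask of -1 writes all four elements unconditionally.
  call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %dst, <4 x float> %val, i8 -1)
  ret void
}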
avx2-intrinsics-x86-upgrade.ll
378 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
381 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
pr18846.ll
58 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
70 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
71 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
72 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
80 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
81 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
86 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
87 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
88 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4397) #1
89 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
[all …]
pmovext.ll
16 tail call void @llvm.x86.sse2.storeu.dq(i8* %5, <16 x i8> %6) nounwind
21 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
vec-loadsingles-alignment.ll
30 tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
34 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
avx-intrinsics-x86-upgrade.ll
636 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
639 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
688 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
691 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
716 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
719 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
765 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
768 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
807 call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
810 declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
[all …]
sse-intrinsics-x86-upgrade.ll
85 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
88 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
sse2-intrinsics-x86-upgrade.ll
320 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
323 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
393 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
396 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
avx2-intrinsics-x86-upgrade.ll
573 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
576 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
avx512bw-intrinsics-upgrade.ll
108 declare void @llvm.x86.avx512.mask.storeu.b.512(i8*, <64 x i8>, i64)
128 call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr1, <64 x i8> %x1, i64 %x2)
129 call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr2, <64 x i8> %x1, i64 -1)
133 declare void @llvm.x86.avx512.mask.storeu.w.512(i8*, <32 x i16>, i32)
153 call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr1, <32 x i16> %x1, i32 %x2)
154 call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr2, <32 x i16> %x1, i32 -1)
/external/swiftshader/third_party/llvm-7.0/llvm/test/Instrumentation/MemorySanitizer/
msan_x86intrinsics.ll
11 call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
15 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
avx-intrinsics-x86.ll
763 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a1)
766 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
772 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a1)
775 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
1641 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
1644 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
2249 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a1)
2252 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
2257 call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a1)
2260 declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
[all …]
/external/swiftshader/third_party/LLVM/include/llvm/
Intrinsics.gen
182 x86_avx_storeu_dq_256, // llvm.x86.avx.storeu.dq.256
183 x86_avx_storeu_pd_256, // llvm.x86.avx.storeu.pd.256
184 x86_avx_storeu_ps_256, // llvm.x86.avx.storeu.ps.256
381 x86_sse2_storeu_dq, // llvm.x86.sse2.storeu.dq
382 x86_sse2_storeu_pd, // llvm.x86.sse2.storeu.pd
503 x86_sse_storeu_ps, // llvm.x86.sse.storeu.ps
709 "llvm.x86.avx.storeu.dq.256",
710 "llvm.x86.avx.storeu.pd.256",
711 "llvm.x86.avx.storeu.ps.256",
908 "llvm.x86.sse2.storeu.dq",
[all …]
/external/llvm/test/Instrumentation/MemorySanitizer/
msan_basic.ll
624 call void @llvm.x86.sse.storeu.ps(i8* %p, <4 x float> %x)
628 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
/external/llvm/lib/Target/SystemZ/
SystemZOperators.td
583 class storeu<SDPatternOperator operator, SDPatternOperator store = store>
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/SystemZ/
SystemZOperators.td
690 class storeu<SDPatternOperator operator, SDPatternOperator store = store>
