
Searched refs:vf1 (Results 1 – 25 of 264) sorted by relevance


/external/llvm-project/llvm/test/CodeGen/PowerPC/
fp-strict-minmax.ll
12 define <4 x float> @fmaxnum_v4f32(<4 x float> %vf0, <4 x float> %vf1) #0 {
18 <4 x float> %vf0, <4 x float> %vf1,
23 define <2 x double> @fmaxnum_v2f64(<2 x double> %vf0, <2 x double> %vf1) #0 {
29 <2 x double> %vf0, <2 x double> %vf1,
35 define <4 x float> @fminnum_v4f32(<4 x float> %vf0, <4 x float> %vf1) #0 {
41 <4 x float> %vf0, <4 x float> %vf1,
46 define <2 x double> @fminnum_v2f64(<2 x double> %vf0, <2 x double> %vf1) #0 {
52 <2 x double> %vf0, <2 x double> %vf1,
fp-strict-round.ll
74 define <4 x float> @ceil_v4f32(<4 x float> %vf1) {
85 <4 x float> %vf1,
90 define <2 x double> @ceil_v2f64(<2 x double> %vf1) {
101 <2 x double> %vf1,
138 define <4 x float> @floor_v4f32(<4 x float> %vf1) {
149 <4 x float> %vf1,
154 define <2 x double> @floor_v2f64(<2 x double> %vf1) {
165 <2 x double> %vf1,
205 define <4 x float> @nearbyint_v4f32(<4 x float> %vf1, <4 x float> %vf2) {
308 <4 x float> %vf1,
[all …]
fp-strict.ll
70 define <4 x float> @fadd_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
102 <4 x float> %vf1, <4 x float> %vf2,
108 define <2 x double> @fadd_v2f64(<2 x double> %vf1, <2 x double> %vf2) #0 {
120 <2 x double> %vf1, <2 x double> %vf2,
162 define <4 x float> @fsub_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
194 <4 x float> %vf1, <4 x float> %vf2,
200 define <2 x double> @fsub_v2f64(<2 x double> %vf1, <2 x double> %vf2) #0 {
212 <2 x double> %vf1, <2 x double> %vf2,
254 define <4 x float> @fmul_v4f32(<4 x float> %vf1, <4 x float> %vf2) #0 {
286 <4 x float> %vf1, <4 x float> %vf2,
[all …]
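Note: the fp-strict tests above check lowering of vector FP operations under LLVM's constrained (strict) FP model. A minimal C sketch, assuming Clang with strict FP semantics enabled (e.g. -ffp-exception-behavior=strict or the pragma below), of the kind of source that can lower to such constrained intrinsics; the function and type names here are illustrative, not part of the tests:

#include <math.h>

#pragma STDC FENV_ACCESS ON

typedef float v4f32 __attribute__((vector_size(16)));

v4f32 fadd_v4f32(v4f32 a, v4f32 b) {
  return a + b;  /* strict FP: the add may not be reordered or folded */
}

v4f32 fmaxnum_v4f32(v4f32 a, v4f32 b) {
  v4f32 r;
  for (int i = 0; i < 4; ++i)
    r[i] = fmaxf(a[i], b[i]);  /* element-wise maxnum semantics */
  return r;
}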
/external/XNNPACK/src/f32-sigmoid/gen/
scalar-lut2048-p1-div-x2.c
77 float vf1 = vy1 / vd1; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2() local
83 vf1 = 0.0f; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2()
90 vf1 = vone - vf1; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2()
94 y[1] = vf1; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x2()
scalar-p5-div-x2.c
84 float vf1 = ve1 / vd1; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2() local
90 vf1 = 0.0f; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2()
97 vf1 = vone - vf1; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2()
101 y[1] = vf1; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x2()
scalar-lut64-p2-div-x2.c
80 float vf1 = vy1 / vd1; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2() local
86 vf1 = 0.0f; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2()
93 vf1 = vone - vf1; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2()
97 y[1] = vf1; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x2()
avx2-rr1-p5-div-x16.c
82 __m256 vf1 = _mm256_div_ps(ve1, vd1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16() local
85 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16()
88 vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16()
91 _mm256_storeu_ps(y + 8, vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x16()
avx2-rr1-p5-nr1fma-x16.c
89 __m256 vf1 = _mm256_mul_ps(ve1, vr1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x16() local
92 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x16()
95 vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x16()
98 _mm256_storeu_ps(y + 8, vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x16()
avx2-rr1-p5-div-x24.c
95 __m256 vf1 = _mm256_div_ps(ve1, vd1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24() local
99 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24()
103 vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24()
107 _mm256_storeu_ps(y + 8, vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x24()
avx2-rr1-p5-nr2fma-x16.c
91 __m256 vf1 = _mm256_mul_ps(ve1, vr1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x16() local
94 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x16()
97 vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x16()
100 _mm256_storeu_ps(y + 8, vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x16()
avx2-rr1-p5-nr1fma-x24.c
104 __m256 vf1 = _mm256_mul_ps(ve1, vr1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x24() local
108 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x24()
112 vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x24()
116 _mm256_storeu_ps(y + 8, vf1); in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x24()
avx-rr2-p5-div-x16.c
90 __m256 vf1 = _mm256_div_ps(ve1, vd1); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x16() local
93 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vz1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x16()
96 vf1 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf1), vf1, vx1); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x16()
99 _mm256_storeu_ps(y + 8, vf1); in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x16()
scalar-lut2048-p1-div-x4.c
101 float vf1 = vy1 / vd1; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x4() local
109 vf1 = 0.0f; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x4()
122 vf1 = vone - vf1; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x4()
132 y[1] = vf1; in xnn_f32_sigmoid_ukernel__scalar_lut2048_p1_div_x4()
scalar-p5-div-x4.c
112 float vf1 = ve1 / vd1; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4() local
120 vf1 = 0.0f; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4()
133 vf1 = vone - vf1; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4()
143 y[1] = vf1; in xnn_f32_sigmoid_ukernel__scalar_p5_div_x4()
scalar-lut64-p2-div-x4.c
106 float vf1 = vy1 / vd1; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x4() local
114 vf1 = 0.0f; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x4()
127 vf1 = vone - vf1; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x4()
137 y[1] = vf1; in xnn_f32_sigmoid_ukernel__scalar_lut64_p2_div_x4()
avx512f-rr1-p5-scalef-div-x32.c
76 __m512 vf1 = _mm512_div_ps(ve1, vd1); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32() local
79 vf1 = _mm512_mask_sub_ps(vf1, _mm512_testn_epi32_mask(_mm512_castps_si512(vx1), vsign_mask), vone,… in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32()
82 _mm512_storeu_ps(y + 16, vf1); in xnn_f32_sigmoid_ukernel__avx512f_rr1_p5_scalef_div_x32()
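Note: all of the f32-sigmoid hits above are the same few steps of one kernel, unrolled by different widths (x2/x4/x16/...). A scalar reference sketch, assuming expf stands in for the kernels' range-reduced polynomial or LUT evaluation (the p5/lut64/lut2048 variants) and with an approximate underflow cutoff; names are illustrative, not XNNPACK API:

#include <math.h>

static float sigmoid_ref(float x) {
  const float denorm_cutoff = 87.33642f;  /* approx. |x| beyond which e^-|x| underflows */
  float z = fabsf(x);
  float e = expf(-z);     /* ve: e^-|x| */
  float d = e + 1.0f;     /* vd: 1 + e^-|x| */
  float f = e / d;        /* vf = ve / vd = sigmoid(-|x|) */
  if (z > denorm_cutoff)
    f = 0.0f;             /* vf = 0.0f: flush results that would be denormal */
  if (x > 0.0f)
    f = 1.0f - f;         /* vf = vone - vf: sigmoid(x) = 1 - sigmoid(-x) */
  return f;
}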
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/
avx2-p5-x16.c
97 __m256 vf1 = _mm256_fmadd_ps(vt1, vp1, vs1); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16() local
102 vf1 = _mm256_andnot_ps(_mm256_cmp_ps(vx1, vdenorm_cutoff, _CMP_LT_OS), vf1); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16()
106 vf1 = _mm256_mul_ps(vf1, vscale); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16()
110 _mm256_storeu_ps(output + 8, vf1); in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x16()
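Note: the vscaleexpminusmax hits compute out[i] = scale * exp(x[i] - max). A hedged scalar sketch, again with expf standing in for the kernel's FMA polynomial (vf = fma(vt, vp, vs)) and an approximate flush cutoff; the function name is illustrative:

#include <math.h>
#include <stddef.h>

static void vscaleexpminusmax_ref(const float* x, float* out, size_t n,
                                  float max, float scale) {
  for (size_t i = 0; i < n; ++i) {
    float t = x[i] - max;
    float f = expf(t);        /* vf = exp(vx - vmax) */
    if (t < -87.33642f)       /* flush would-be denormals to zero */
      f = 0.0f;
    out[i] = f * scale;       /* vf = _mm256_mul_ps(vf, vscale) */
  }
}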
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
scalar-p5-x2.c
98 float vf1 = vt1 * vp1 + vs1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2() local
106 vf1 = 0.0f; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2()
111 output[1] = vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2()
116 vacc0 += vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2()
scalar-p5-x2-acc2.c
99 float vf1 = vt1 * vp1 + vs1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2() local
107 vf1 = 0.0f; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2()
112 output[1] = vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2()
117 vacc1 += vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_p5_x2_acc2()
scalar-lut64-p2-x2-acc2.c
109 float vf1 = vp1 * vs1 + vs1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2() local
117 vf1 = 0.0f; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2()
122 output[1] = vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2()
127 vacc1 += vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2()
scalar-lut64-p2-x2.c
108 float vf1 = vp1 * vs1 + vs1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2() local
116 vf1 = 0.0f; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2()
121 output[1] = vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2()
126 vacc0 += vf1; in xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2()
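Note: the raddstoreexpminusmax hits are the store-and-reduce variant of the same exp pattern: the softmax pass that stores f[i] = exp(x[i] - max) and accumulates the running sum. A hedged reference sketch; the real kernels reconstruct exp from a polynomial (vf = vt*vp + vs, or vf = vp*vs + vs in the lut64 variants) and may split the sum across accumulators (the -acc2 variants):

#include <math.h>
#include <stddef.h>

static float raddstoreexpminusmax_ref(const float* x, float* out,
                                      size_t n, float max) {
  float acc = 0.0f;                /* vacc: running sum of exponentials */
  for (size_t i = 0; i < n; ++i) {
    float t = x[i] - max;
    float f = expf(t);             /* vf = exp(vx - vmax) */
    if (t < -87.33642f)            /* vf = 0.0f: flush would-be denormals */
      f = 0.0f;
    out[i] = f;                    /* output[i] = vf */
    acc += f;                      /* vacc += vf */
  }
  return acc;
}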
/external/llvm-project/llvm/test/Transforms/WholeProgramDevirt/
export-single-impl.ll
29 ; SUMMARY-NEXT: SingleImplName: vf1
63 ; CHECK: @vt1 = constant void (i8*)* @vf1
64 @vt1 = constant void (i8*)* @vf1, !type !0
76 ; CHECK: declare void @vf1(i8*)
77 declare void @vf1(i8*)
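Note: this test models a vtable (@vt1) whose only possible target is @vf1, which whole-program devirtualization can turn into a direct call. A rough C analogue, using a function-pointer "vtable" slot; the helper names here are illustrative, not from the test:

void vf1(char* p) { (void)p; }

void (* const vt1)(char*) = vf1;  /* single implementation: vf1 */

void call_through_vt1(char* obj) {
  vt1(obj);  /* with one known target, this folds to a direct vf1(obj) */
}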
/external/clang/test/CodeGen/
Nontemporal.cpp
14 float __attribute__((vector_size(16))) vf1, vf2; variable
32 __builtin_nontemporal_store(vf1, &vf2); // CHECK: store <4 x float>{{.*}}align 16, !nontemporal in test_all_sizes()
46 vf2 = __builtin_nontemporal_load(&vf1); // CHECK: load <4 x float>{{.*}}align 16, !nontemporal in test_all_sizes()
/external/llvm-project/clang/test/CodeGen/
Nontemporal.cpp
14 float __attribute__((vector_size(16))) vf1, vf2; variable
32 __builtin_nontemporal_store(vf1, &vf2); // CHECK: store <4 x float>{{.*}}align 16, !nontemporal in test_all_sizes()
46 vf2 = __builtin_nontemporal_load(&vf1); // CHECK: load <4 x float>{{.*}}align 16, !nontemporal in test_all_sizes()
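Note: both Nontemporal.cpp copies exercise the Clang builtins __builtin_nontemporal_load and __builtin_nontemporal_store, which emit memory accesses tagged !nontemporal, a hint to bypass the cache. A minimal sketch (the function name is illustrative):

typedef float v4f32 __attribute__((vector_size(16)));

void copy_nontemporal(const v4f32* src, v4f32* dst) {
  v4f32 v = __builtin_nontemporal_load(src);  /* load <4 x float>, !nontemporal */
  __builtin_nontemporal_store(v, dst);        /* store <4 x float>, !nontemporal */
}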
/external/deqp-deps/glslang/Test/baseResults/
hlsl.intrinsics.negative.comp.out
15 0:57 Function Definition: ComputeShaderFunction1(vf1;vf1;vf1;vi1; ( temp 1-component vector of flo…
107 0:57 Function Definition: ComputeShaderFunction1(vf1;vf1;vf1;vi1; ( temp 1-component vector of flo…
198 Name 21 "ComputeShaderFunction1(vf1;vf1;vf1;vi1;"
312 21(ComputeShaderFunction1(vf1;vf1;vf1;vi1;): 6(float) Function None 10
