/external/llvm-project/llvm/test/MC/RISCV/rvv/
  ext.s
    59: vzext.vf8 v8, v4, v0.t
    60: # CHECK-INST: vzext.vf8 v8, v4, v0.t
    65: vzext.vf8 v8, v4
    66: # CHECK-INST: vzext.vf8 v8, v4
    71: vsext.vf8 v8, v4, v0.t
    72: # CHECK-INST: vsext.vf8 v8, v4, v0.t
    77: vsext.vf8 v8, v4
    78: # CHECK-INST: vsext.vf8 v8, v4

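The hits above are MC-layer encode/decode tests for the RVV eighth-width extension instructions: vzext.vf8 zero-extends and vsext.vf8 sign-extends each source element from SEW/8 to SEW bits (8 to 64 with SEW=64), and the v0.t forms are masked by register v0. A minimal scalar model of what the unmasked forms compute (hypothetical helper names, not from the test):

    #include <stdint.h>
    #include <stddef.h>

    /* Scalar model of vzext.vf8 with SEW=64: each 8-bit element widens
     * to a 64-bit element, filling the upper bits with zeros. */
    void vzext_vf8_model(uint64_t *vd, const uint8_t *vs2, size_t vl) {
        for (size_t i = 0; i < vl; i++)
            vd[i] = (uint64_t)vs2[i];
    }

    /* Scalar model of vsext.vf8: same widening, but replicating the
     * source element's sign bit. */
    void vsext_vf8_model(int64_t *vd, const int8_t *vs2, size_t vl) {
        for (size_t i = 0; i < vl; i++)
            vd[i] = (int64_t)vs2[i];
    }
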
/external/XNNPACK/src/f32-sigmoid/gen/
  avx2-rr1-p5-div-x72.c  (in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x72())
    180: __m256 vf8 = _mm256_div_ps(ve8, vd8);  (local)
    190: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    200: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    210: _mm256_storeu_ps(y + 64, vf8);

  avx2-rr1-p5-div-x80.c  (in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_div_x80())
    193: __m256 vf8 = _mm256_div_ps(ve8, vd8);  (local)
    204: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    215: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    226: _mm256_storeu_ps(y + 64, vf8);

  avx2-rr1-p5-nr1fma-x72.c  (in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x72())
    201: __m256 vf8 = _mm256_mul_ps(ve8, vr8);  (local)
    211: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    221: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    231: _mm256_storeu_ps(y + 64, vf8);

  avx2-rr1-p5-nr1fma-x80.c  (in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr1fma_x80())
    216: __m256 vf8 = _mm256_mul_ps(ve8, vr8);  (local)
    227: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    238: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    249: _mm256_storeu_ps(y + 64, vf8);

  avx2-rr1-p5-nr2fma-x72.c  (in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x72())
    210: __m256 vf8 = _mm256_mul_ps(ve8, vr8);  (local)
    220: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    230: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    240: _mm256_storeu_ps(y + 64, vf8);

  avx2-rr1-p5-nr2fma-x80.c  (in xnn_f32_sigmoid_ukernel__avx2_rr1_p5_nr2fma_x80())
    226: __m256 vf8 = _mm256_mul_ps(ve8, vr8);  (local)
    237: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    248: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    259: _mm256_storeu_ps(y + 64, vf8);

  avx-rr2-p5-div-x72.c  (in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x72())
    209: __m256 vf8 = _mm256_div_ps(ve8, vd8);  (local)
    219: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    229: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    239: _mm256_storeu_ps(y + 64, vf8);

  avx-rr2-p5-div-x80.c  (in xnn_f32_sigmoid_ukernel__avx_rr2_p5_div_x80())
    225: __m256 vf8 = _mm256_div_ps(ve8, vd8);  (local)
    236: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    247: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    258: _mm256_storeu_ps(y + 64, vf8);

  avx-rr2-p5-nr2-x72.c  (in xnn_f32_sigmoid_ukernel__avx_rr2_p5_nr2_x72())
    239: __m256 vf8 = _mm256_mul_ps(ve8, vr8);  (local)
    249: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    259: vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
    269: _mm256_storeu_ps(y + 64, vf8);

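Every sigmoid hit above is the same generated epilogue, applied here to the ninth vector of an unrolled batch: reconstruct f = e^z / (e^z + 1) with z = -|x|, zero the result where z fell below the denormal cutoff, select f or 1 - f by the sign of x, and store. The nr1fma/nr2fma variants differ only in replacing the division with a multiply by a Newton-Raphson reciprocal vr8 of the denominator. A condensed sketch of the shared sequence, as a hypothetical helper reusing the kernels' variable names:

    #include <immintrin.h>

    /* Shared sigmoid epilogue for one __m256 vector (9th of the batch).
     * Assumes the kernel already computed vz8 = -|vx8|, ve8 ~= exp(vz8),
     * and vd8 = ve8 + 1. */
    static inline void sigmoid_tail(float *y, __m256 vx8, __m256 vz8,
                                    __m256 ve8, __m256 vd8, __m256 vone,
                                    __m256 vdenorm_cutoff) {
        __m256 vf8 = _mm256_div_ps(ve8, vd8);   /* f = e^z / (e^z + 1) */
        /* Where z < denorm cutoff, exp(z) is subnormal: flush f to 0. */
        vf8 = _mm256_andnot_ps(
            _mm256_cmp_ps(vz8, vdenorm_cutoff, _CMP_LT_OS), vf8);
        /* Sign bit of vx8 selects: f for x < 0, 1 - f for x >= 0. */
        vf8 = _mm256_blendv_ps(_mm256_sub_ps(vone, vf8), vf8, vx8);
        _mm256_storeu_ps(y + 64, vf8);          /* 9th vector: offset 64 */
    }
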
/external/XNNPACK/src/f32-vscaleexpminusmax/gen/
  avx2-p5-x72.c  (in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x72())
    188: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    200: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    211: vf8 = _mm256_mul_ps(vf8, vscale);
    222: _mm256_storeu_ps(output + 64, vf8);

  avx2-p5-x80.c  (in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x80())
    200: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    213: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    225: vf8 = _mm256_mul_ps(vf8, vscale);
    237: _mm256_storeu_ps(output + 64, vf8);

  avx2-p5-x88.c  (in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x88())
    212: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    226: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    239: vf8 = _mm256_mul_ps(vf8, vscale);
    252: _mm256_storeu_ps(output + 64, vf8);

  avx2-p5-x96.c  (in xnn_f32_vscaleexpminusmax_ukernel__avx2_p5_x96())
    224: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    239: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    253: vf8 = _mm256_mul_ps(vf8, vscale);
    267: _mm256_storeu_ps(output + 64, vf8);

  avx512f-p5-scalef-x144.c  (in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x144())
    160: __m512 vf8 = _mm512_scalef_ps(vp8, vn8);  (local)
    171: vf8 = _mm512_mul_ps(vf8, vscale);
    183: _mm512_storeu_ps(output + 128, vf8);

  avx512f-p5-scalef-x160.c  (in xnn_f32_vscaleexpminusmax_ukernel__avx512f_p5_scalef_x160())
    170: __m512 vf8 = _mm512_scalef_ps(vp8, vn8);  (local)
    182: vf8 = _mm512_mul_ps(vf8, vscale);
    195: _mm512_storeu_ps(output + 128, vf8);

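Both endings of the vscaleexpminusmax kernels appear above. The AVX2 path finishes the degree-5 polynomial with an FMA, zeroes lanes whose input x lies below the denormal cutoff, and applies the caller's scale; the AVX512F path instead reconstructs with _mm512_scalef_ps, which computes p * 2^n in one instruction and handles underflow in hardware, so no explicit cutoff check appears in those files. Sketches of both endings (hypothetical helpers, kernel variable names):

    #include <immintrin.h>

    /* AVX2 ending: f = t*p + s, flushed to 0 below the cutoff, then scaled. */
    static inline __m256 scaleexp_tail_avx2(__m256 vx8, __m256 vt8,
                                            __m256 vp8, __m256 vs8,
                                            __m256 vscale,
                                            __m256 vdenorm_cutoff) {
        __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
        vf8 = _mm256_andnot_ps(
            _mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
        return _mm256_mul_ps(vf8, vscale);
    }

    /* AVX512F ending: scalef folds the 2^n reconstruction into one step. */
    static inline __m512 scaleexp_tail_avx512(__m512 vp8, __m512 vn8,
                                              __m512 vscale) {
        __m512 vf8 = _mm512_scalef_ps(vp8, vn8);   /* f = p * 2^n */
        return _mm512_mul_ps(vf8, vscale);
    }
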
/external/llvm-project/llvm/test/Transforms/WholeProgramDevirt/
  constant-arg.ll
    10: …x i8] c"\00\00\00\00\00\00\00\02", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf8 to i8*)], [0 x i8] …
    15: @vt8 = constant [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf8 to i8*)], !type !0
    35: define i1 @vf8(i8* %this, i32 %arg) readnone {

/external/llvm/test/Transforms/WholeProgramDevirt/
  constant-arg.ll
    10: …x i8] c"\00\00\00\00\00\00\00\02", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf8 to i8*)], [0 x i8] …
    15: @vt8 = constant [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf8 to i8*)], !type !0
    35: define i1 @vf8(i8* %this, i32 %arg) readnone {

/external/XNNPACK/src/f32-vscaleextexp/gen/
  avx512f-p5-scalef-x144.c  (in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x144())
    153: __m512 vf8 = _mm512_mul_ps(vp8, vscalev);  (local)
    174: vf8 = _mm512_scalef_ps(vf8, ve8);
    186: _mm512_storeu_ps(y + 128, vf8);

  avx512f-p5-scalef-x160.c  (in xnn_f32_vscaleextexp_ukernel__avx512f_p5_scalef_x160())
    162: __m512 vf8 = _mm512_mul_ps(vp8, vscalev);  (local)
    185: vf8 = _mm512_scalef_ps(vf8, ve8);
    198: _mm512_storeu_ps(y + 128, vf8);

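The vscaleextexp kernels keep an extended exponent separate until the last moment: vf8 starts as the polynomial times the significand part of the scale (vscalev), and the accumulated exponent ve8 is applied only at the end via scalef, so intermediate values that would over- or underflow a plain expf stay representable. Sketch (hypothetical wrapper, kernel variable names):

    #include <immintrin.h>

    /* vscaleextexp ending for the 9th __m512 vector of the batch. */
    static inline void scaleextexp_tail(float *y, __m512 vp8, __m512 ve8,
                                        __m512 vscalev) {
        __m512 vf8 = _mm512_mul_ps(vp8, vscalev);  /* significand part  */
        vf8 = _mm512_scalef_ps(vf8, ve8);          /* f = f * 2^e, last */
        _mm512_storeu_ps(y + 128, vf8);            /* offset 128 = 8*16 */
    }
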
/external/XNNPACK/src/f32-raddstoreexpminusmax/gen/
  avx2-p5-x72-acc3.c  (in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x72_acc3())
    189: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    201: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    212: _mm256_storeu_ps(output + 64, vf8);
    224: vacc2 = _mm256_add_ps(vacc2, vf8);

  avx2-p5-x72.c  (in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x72())
    187: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    199: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    210: _mm256_storeu_ps(output + 64, vf8);
    222: vacc0 = _mm256_add_ps(vacc0, vf8);

  avx2-p5-x80-acc5.c  (in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x80_acc5())
    203: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    216: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    228: _mm256_storeu_ps(output + 64, vf8);
    241: vacc3 = _mm256_add_ps(vacc3, vf8);

  avx2-p5-x80.c  (in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x80())
    199: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    212: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    224: _mm256_storeu_ps(output + 64, vf8);
    237: vacc0 = _mm256_add_ps(vacc0, vf8);

  avx2-p5-x80-acc2.c  (in xnn_f32_raddstoreexpminusmax_ukernel__avx2_p5_x80_acc2())
    200: __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);  (local)
    213: vf8 = _mm256_andnot_ps(_mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
    225: _mm256_storeu_ps(output + 64, vf8);
    238: vacc0 = _mm256_add_ps(vacc0, vf8);

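All five raddstoreexpminusmax hits follow one shape: vf8 = exp(x - max) is reconstructed with an FMA, flushed to zero below the denormal cutoff, stored to the output, and then added into a running accumulator. The -accK variants spread the sum across K accumulators (vacc0..vaccK-1) to shorten the floating-point add dependency chain, combining them once at the end. One-vector sketch (hypothetical helper, kernel variable names):

    #include <immintrin.h>

    /* Store exp(x - max) for one vector and fold it into an accumulator. */
    static inline __m256 raddstore_tail(float *output, __m256 vx8,
                                        __m256 vt8, __m256 vp8, __m256 vs8,
                                        __m256 vacc, __m256 vdenorm_cutoff) {
        __m256 vf8 = _mm256_fmadd_ps(vt8, vp8, vs8);
        vf8 = _mm256_andnot_ps(
            _mm256_cmp_ps(vx8, vdenorm_cutoff, _CMP_LT_OS), vf8);
        _mm256_storeu_ps(output + 64, vf8);        /* 9th vector of batch */
        return _mm256_add_ps(vacc, vf8);           /* caller updates vacc */
    }
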