
Searched refs: math_max_f32 (results 1 – 25 of 211), sorted by relevance
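For context: math_max_f32 is XNNPACK's portable scalar helper for the single-precision maximum, and it is what the scalar microkernels below use for ReLU and min/max clamping. A minimal sketch of the idea (an illustrative assumption, not the verbatim definition from src/xnnpack/math.h, which selects compiler- and ISA-specific implementations):

    // Hypothetical sketch of a scalar f32 max helper.
    static inline float math_max_f32(float a, float b) {
      // A plain comparison returns b when a is NaN (the test is false);
      // the real helper's NaN handling may differ per target.
      return a > b ? a : b;
    }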


/external/XNNPACK/src/f32-spmm/gen/
8x4-minmax-scalar.c
155 vout0x0 = math_max_f32(vout0x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
156 vout1x0 = math_max_f32(vout1x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
157 vout2x0 = math_max_f32(vout2x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
158 vout3x0 = math_max_f32(vout3x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
159 vout4x0 = math_max_f32(vout4x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
160 vout5x0 = math_max_f32(vout5x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
161 vout6x0 = math_max_f32(vout6x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
162 vout7x0 = math_max_f32(vout7x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
163 vout0x1 = math_max_f32(vout0x1, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
164 vout1x1 = math_max_f32(vout1x1, vmin); in xnn_f32_spmm_minmax_ukernel_8x4__scalar()
[all …]
8x2-minmax-scalar.c
105 vout0x0 = math_max_f32(vout0x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
106 vout1x0 = math_max_f32(vout1x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
107 vout2x0 = math_max_f32(vout2x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
108 vout3x0 = math_max_f32(vout3x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
109 vout4x0 = math_max_f32(vout4x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
110 vout5x0 = math_max_f32(vout5x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
111 vout6x0 = math_max_f32(vout6x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
112 vout7x0 = math_max_f32(vout7x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
113 vout0x1 = math_max_f32(vout0x1, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
114 vout1x1 = math_max_f32(vout1x1, vmin); in xnn_f32_spmm_minmax_ukernel_8x2__scalar()
[all …]
8x1-minmax-scalar.c
80 vout0x0 = math_max_f32(vout0x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
81 vout1x0 = math_max_f32(vout1x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
82 vout2x0 = math_max_f32(vout2x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
83 vout3x0 = math_max_f32(vout3x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
84 vout4x0 = math_max_f32(vout4x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
85 vout5x0 = math_max_f32(vout5x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
86 vout6x0 = math_max_f32(vout6x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
87 vout7x0 = math_max_f32(vout7x0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
149 vout0 = math_max_f32(vout0, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
150 vout1 = math_max_f32(vout1, vmin); in xnn_f32_spmm_minmax_ukernel_8x1__scalar()
[all …]
/external/XNNPACK/src/f32-maxpool/
9p8x-minmax-scalar-c1.c
87 const float vmax01 = math_max_f32(vi0, vi1); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
88 const float vmax23 = math_max_f32(vi2, vi3); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
89 const float vmax45 = math_max_f32(vi4, vi5); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
90 const float vmax67 = math_max_f32(vi6, vi7); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
91 const float vmax018 = math_max_f32(vmax01, vi8); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
93 const float vmax2345 = math_max_f32(vmax23, vmax45); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
94 const float vmax01678 = math_max_f32(vmax018, vmax67); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
95 float vout = math_max_f32(vmax2345, vmax01678); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
96 vout = math_max_f32(vout, voutput_min); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
155 const float vmax01 = math_max_f32(vi0, vi1); in xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1()
[all …]
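The maxpool matches above show the kernel's 9-input maximum computed as a balanced tree of pairwise maxes, followed by a clamp against the pool's output_min. The same reduction, lifted into a standalone sketch (the function name and array form are illustrative; the variable names mirror the kernel's):

    // Illustrative 9-input max tree plus lower clamp, following the
    // xnn_f32_maxpool_minmax_ukernel_9p8x__scalar_c1 excerpt above.
    float maxpool9_scalar(const float vi[9], float voutput_min) {
      const float vmax01 = math_max_f32(vi[0], vi[1]);
      const float vmax23 = math_max_f32(vi[2], vi[3]);
      const float vmax45 = math_max_f32(vi[4], vi[5]);
      const float vmax67 = math_max_f32(vi[6], vi[7]);
      const float vmax018 = math_max_f32(vmax01, vi[8]);
      const float vmax2345 = math_max_f32(vmax23, vmax45);
      const float vmax01678 = math_max_f32(vmax018, vmax67);
      const float vout = math_max_f32(vmax2345, vmax01678);
      return math_max_f32(vout, voutput_min);
    }

The tree shape keeps the dependency depth at about four max operations instead of eight for a serial chain, which helps instruction-level parallelism even in scalar code.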
/external/XNNPACK/src/f32-gemm/gen/
4x4-relu-scalar.c
110 vacc00 = math_max_f32(vacc00, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
111 vacc01 = math_max_f32(vacc01, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
112 vacc02 = math_max_f32(vacc02, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
113 vacc03 = math_max_f32(vacc03, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
114 vacc10 = math_max_f32(vacc10, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
115 vacc11 = math_max_f32(vacc11, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
116 vacc12 = math_max_f32(vacc12, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
117 vacc13 = math_max_f32(vacc13, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
118 vacc20 = math_max_f32(vacc20, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
119 vacc21 = math_max_f32(vacc21, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
[all …]
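The relu and minmax GEMM variants differ only in the clamp epilogue: relu clamps each accumulator below at a constant 0.0f, while minmax (next result) uses a runtime vmin and, in the full kernels, a matching upper clamp against vmax that these max-only excerpts don't show. Schematically (math_min_f32 is assumed here as the symmetric counterpart; it does not appear in these search results):

    // Assumed symmetric min helper, mirroring math_max_f32.
    static inline float math_min_f32(float a, float b) {
      return a < b ? a : b;
    }

    // ReLU epilogue: clamp the accumulator below at zero.
    static inline float apply_relu(float vacc) {
      return math_max_f32(vacc, 0.0f);
    }

    // MinMax epilogue: clamp the accumulator into [vmin, vmax].
    static inline float apply_minmax(float vacc, float vmin, float vmax) {
      return math_min_f32(math_max_f32(vacc, vmin), vmax);
    }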
4x4-minmax-scalar.c
112 vacc00 = math_max_f32(vacc00, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
113 vacc01 = math_max_f32(vacc01, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
114 vacc02 = math_max_f32(vacc02, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
115 vacc03 = math_max_f32(vacc03, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
116 vacc10 = math_max_f32(vacc10, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
117 vacc11 = math_max_f32(vacc11, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
118 vacc12 = math_max_f32(vacc12, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
119 vacc13 = math_max_f32(vacc13, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
120 vacc20 = math_max_f32(vacc20, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
121 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
[all …]
/external/XNNPACK/src/f32-igemm/gen/
4x4-relu-scalar.c
134 vacc00 = math_max_f32(vacc00, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
135 vacc01 = math_max_f32(vacc01, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
136 vacc02 = math_max_f32(vacc02, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
137 vacc03 = math_max_f32(vacc03, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
138 vacc10 = math_max_f32(vacc10, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
139 vacc11 = math_max_f32(vacc11, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
140 vacc12 = math_max_f32(vacc12, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
141 vacc13 = math_max_f32(vacc13, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
142 vacc20 = math_max_f32(vacc20, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
143 vacc21 = math_max_f32(vacc21, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
[all …]
4x4-minmax-scalar.c
136 vacc00 = math_max_f32(vacc00, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
137 vacc01 = math_max_f32(vacc01, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
138 vacc02 = math_max_f32(vacc02, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
139 vacc03 = math_max_f32(vacc03, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
140 vacc10 = math_max_f32(vacc10, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
141 vacc11 = math_max_f32(vacc11, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
142 vacc12 = math_max_f32(vacc12, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
143 vacc13 = math_max_f32(vacc13, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
144 vacc20 = math_max_f32(vacc20, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
145 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
[all …]
/external/XNNPACK/src/f32-vbinary/gen/
vmaxc-scalar-x8.c
43 float vy0 = math_max_f32(va0, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
44 float vy1 = math_max_f32(va1, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
45 float vy2 = math_max_f32(va2, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
46 float vy3 = math_max_f32(va3, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
47 float vy4 = math_max_f32(va4, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
48 float vy5 = math_max_f32(va5, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
49 float vy6 = math_max_f32(va6, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
50 float vy7 = math_max_f32(va7, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
67 float vy = math_max_f32(va, vb); in xnn_f32_vmaxc_ukernel__scalar_x8()
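All of the x8 vbinary kernels share one loop structure: a main loop that handles eight elements per iteration with fully unrolled lanes, then a scalar remainder loop (the line-67 match above). A sketch of that shape for the max-with-constant case, with an assumed element-count signature (the real kernels take the size in bytes plus a params struct):

    #include <stddef.h>

    // Illustrative x8-unrolled max-with-constant loop with scalar
    // remainder, following xnn_f32_vmaxc_ukernel__scalar_x8 above.
    void vmaxc_scalar_x8(size_t n, const float* a, float vb, float* y) {
      for (; n >= 8; n -= 8) {
        // The kernel spells out vy0..vy7 explicitly; a fixed-trip loop
        // expresses the same eight independent lanes.
        for (size_t i = 0; i < 8; i++) {
          y[i] = math_max_f32(a[i], vb);
        }
        a += 8;
        y += 8;
      }
      for (; n != 0; n--) {  // remainder: one element at a time
        *y++ = math_max_f32(*a++, vb);
      }
    }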
vsubc-relu-scalar-x8.c
53 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
54 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
55 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
56 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
57 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
58 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
59 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
60 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
76 vy = math_max_f32(vy, 0.0f); in xnn_f32_vsubc_relu_ukernel__scalar_x8()
vaddc-relu-scalar-x8.c
53 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
54 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
55 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
56 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
57 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
58 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
59 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
60 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
76 vy = math_max_f32(vy, 0.0f); in xnn_f32_vaddc_relu_ukernel__scalar_x8()
vmax-scalar-x8.c
52 float vy0 = math_max_f32(va0, vb0); in xnn_f32_vmax_ukernel__scalar_x8()
53 float vy1 = math_max_f32(va1, vb1); in xnn_f32_vmax_ukernel__scalar_x8()
54 float vy2 = math_max_f32(va2, vb2); in xnn_f32_vmax_ukernel__scalar_x8()
55 float vy3 = math_max_f32(va3, vb3); in xnn_f32_vmax_ukernel__scalar_x8()
56 float vy4 = math_max_f32(va4, vb4); in xnn_f32_vmax_ukernel__scalar_x8()
57 float vy5 = math_max_f32(va5, vb5); in xnn_f32_vmax_ukernel__scalar_x8()
58 float vy6 = math_max_f32(va6, vb6); in xnn_f32_vmax_ukernel__scalar_x8()
59 float vy7 = math_max_f32(va7, vb7); in xnn_f32_vmax_ukernel__scalar_x8()
77 float vy = math_max_f32(va, vb); in xnn_f32_vmax_ukernel__scalar_x8()
vdivc-relu-scalar-x8.c
53 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
54 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
55 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
56 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
57 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
58 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
59 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
60 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
76 vy = math_max_f32(vy, 0.0f); in xnn_f32_vdivc_relu_ukernel__scalar_x8()
vrdivc-relu-scalar-x8.c
53 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
54 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
55 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
56 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
57 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
58 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
59 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
60 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
76 vy = math_max_f32(vy, 0.0f); in xnn_f32_vrdivc_relu_ukernel__scalar_x8()
vrsubc-relu-scalar-x8.c
53 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
54 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
55 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
56 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
57 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
58 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
59 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
60 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
76 vy = math_max_f32(vy, 0.0f); in xnn_f32_vrsubc_relu_ukernel__scalar_x8()
vmulc-relu-scalar-x8.c
53 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
54 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
55 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
56 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
57 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
58 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
59 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
60 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
76 vy = math_max_f32(vy, 0.0f); in xnn_f32_vmulc_relu_ukernel__scalar_x8()
vmul-relu-scalar-x8.c
62 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
63 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
64 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
65 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
66 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
67 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
68 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
69 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
86 vy = math_max_f32(vy, 0.0f); in xnn_f32_vmul_relu_ukernel__scalar_x8()
vadd-relu-scalar-x8.c
62 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
63 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
64 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
65 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
66 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
67 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
68 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
69 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
86 vy = math_max_f32(vy, 0.0f); in xnn_f32_vadd_relu_ukernel__scalar_x8()
vdiv-relu-scalar-x8.c
62 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
63 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
64 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
65 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
66 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
67 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
68 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
69 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
86 vy = math_max_f32(vy, 0.0f); in xnn_f32_vdiv_relu_ukernel__scalar_x8()
vsub-relu-scalar-x8.c
62 vy0 = math_max_f32(vy0, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
63 vy1 = math_max_f32(vy1, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
64 vy2 = math_max_f32(vy2, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
65 vy3 = math_max_f32(vy3, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
66 vy4 = math_max_f32(vy4, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
67 vy5 = math_max_f32(vy5, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
68 vy6 = math_max_f32(vy6, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
69 vy7 = math_max_f32(vy7, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
86 vy = math_max_f32(vy, 0.0f); in xnn_f32_vsub_relu_ukernel__scalar_x8()
vmulc-minmax-scalar-x8.c
55 vy0 = math_max_f32(vy0, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
56 vy1 = math_max_f32(vy1, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
57 vy2 = math_max_f32(vy2, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
58 vy3 = math_max_f32(vy3, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
59 vy4 = math_max_f32(vy4, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
60 vy5 = math_max_f32(vy5, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
61 vy6 = math_max_f32(vy6, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
62 vy7 = math_max_f32(vy7, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
87 vy = math_max_f32(vy, vy_min); in xnn_f32_vmulc_minmax_ukernel__scalar_x8()
vrsubc-minmax-scalar-x8.c
55 vy0 = math_max_f32(vy0, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
56 vy1 = math_max_f32(vy1, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
57 vy2 = math_max_f32(vy2, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
58 vy3 = math_max_f32(vy3, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
59 vy4 = math_max_f32(vy4, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
60 vy5 = math_max_f32(vy5, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
61 vy6 = math_max_f32(vy6, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
62 vy7 = math_max_f32(vy7, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
87 vy = math_max_f32(vy, vy_min); in xnn_f32_vrsubc_minmax_ukernel__scalar_x8()
/external/XNNPACK/src/f32-rmax/
scalar.c
31 vmax0 = math_max_f32(vx0, vmax0); in xnn_f32_rmax_ukernel__scalar()
32 vmax1 = math_max_f32(vx1, vmax1); in xnn_f32_rmax_ukernel__scalar()
33 vmax2 = math_max_f32(vx2, vmax2); in xnn_f32_rmax_ukernel__scalar()
34 vmax3 = math_max_f32(vx3, vmax3); in xnn_f32_rmax_ukernel__scalar()
36 const float vmax01 = math_max_f32(vmax0, vmax1); in xnn_f32_rmax_ukernel__scalar()
37 const float vmax23 = math_max_f32(vmax2, vmax3); in xnn_f32_rmax_ukernel__scalar()
38 float vmax = math_max_f32(vmax01, vmax23); in xnn_f32_rmax_ukernel__scalar()
42 vmax = math_max_f32(vx, vmax); in xnn_f32_rmax_ukernel__scalar()
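The rmax kernel reduces an array to its global maximum using four independent running maxima (breaking the serial dependency chain), a pairwise combine, and a scalar tail for the remaining elements, per the matches above. As a standalone sketch (the signature and initialization are assumptions; the excerpt shows only the loop bodies):

    #include <stddef.h>

    // Illustrative 4-way-unrolled running-max reduction, following the
    // xnn_f32_rmax_ukernel__scalar excerpt above. Assumes n >= 1.
    float rmax_scalar(size_t n, const float* x) {
      float vmax0 = x[0], vmax1 = x[0], vmax2 = x[0], vmax3 = x[0];
      for (; n >= 4; n -= 4) {
        vmax0 = math_max_f32(x[0], vmax0);
        vmax1 = math_max_f32(x[1], vmax1);
        vmax2 = math_max_f32(x[2], vmax2);
        vmax3 = math_max_f32(x[3], vmax3);
        x += 4;
      }
      const float vmax01 = math_max_f32(vmax0, vmax1);
      const float vmax23 = math_max_f32(vmax2, vmax3);
      float vmax = math_max_f32(vmax01, vmax23);
      for (; n != 0; n--) {  // tail: remaining 0..3 elements
        vmax = math_max_f32(*x++, vmax);
      }
      return vmax;
    }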
/external/XNNPACK/src/f32-ppmm/gen/
4x4-minmax-scalar.c
119 vacc0x0 = math_max_f32(vacc0x0, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
120 vacc1x0 = math_max_f32(vacc1x0, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
121 vacc2x0 = math_max_f32(vacc2x0, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
122 vacc3x0 = math_max_f32(vacc3x0, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
123 vacc0x1 = math_max_f32(vacc0x1, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
124 vacc1x1 = math_max_f32(vacc1x1, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
125 vacc2x1 = math_max_f32(vacc2x1, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
126 vacc3x1 = math_max_f32(vacc3x1, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
127 vacc0x2 = math_max_f32(vacc0x2, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
128 vacc1x2 = math_max_f32(vacc1x2, vmin); in xnn_f32_ppmm_minmax_ukernel_4x4__scalar()
[all …]
/external/XNNPACK/src/f32-gemm/gen-inc/
4x4inc-minmax-scalar.c
114 vacc00 = math_max_f32(vacc00, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
115 vacc01 = math_max_f32(vacc01, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
116 vacc02 = math_max_f32(vacc02, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
117 vacc03 = math_max_f32(vacc03, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
118 vacc10 = math_max_f32(vacc10, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
119 vacc11 = math_max_f32(vacc11, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
120 vacc12 = math_max_f32(vacc12, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
121 vacc13 = math_max_f32(vacc13, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
122 vacc20 = math_max_f32(vacc20, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
123 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
[all …]
