
Searched refs:vacc21 (Results 1 – 25 of 26) sorted by relevance
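Every hit follows the same pattern in these generated scalar and wasm microkernels: vacc21 is the accumulator for output row 2, column 1 of the 4xN tile. It is initialized from the column-1 bias (copied from vacc01), updated once per reduction step with va2 * vb1, clamped in the -relu-/-minmax- variants, and finally stored to c2[1]. The sketch below shows that flow for one row; the packed bias-then-weights layout and the function name are assumptions for illustration, not copies of the generated sources.

#include <stddef.h>

static float math_max_f32(float a, float b) { return a > b ? a : b; }
static float math_min_f32(float a, float b) { return a < b ? a : b; }

/* Row 2 of a 4x2 tile only; rows 0, 1 and 3 follow the same pattern. */
void gemm_4x2_row2_sketch(size_t kc, const float* a2, const float* w,
                          float* c2, float vmin, float vmax) {
  float vacc01 = w[1];            /* bias for column 1 */
  float vacc21 = vacc01;          /* row 2 starts from the same bias */
  w += 2;
  for (size_t k = 0; k < kc; k++) {
    const float va2 = a2[k];      /* one element of input row 2 */
    const float vb1 = w[1];       /* column-1 weight for this k step */
    vacc21 += va2 * vb1;          /* the multiply-accumulate matched above */
    w += 2;
  }
  vacc21 = math_max_f32(vacc21, vmin);   /* -minmax-: clamp to [vmin, vmax]; */
  vacc21 = math_min_f32(vacc21, vmax);   /* -relu- uses max(vacc21, 0.0f) only */
  c2[1] = vacc21;                 /* store into output row 2, column 1 */
}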


/external/XNNPACK/src/f32-gemm/gen/
4x2-minmax-scalar.c:67 float vacc21 = vacc01; in xnn_f32_gemm_minmax_ukernel_4x2__scalar() local
87 vacc21 += va2 * vb1; in xnn_f32_gemm_minmax_ukernel_4x2__scalar()
99 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_gemm_minmax_ukernel_4x2__scalar()
108 vacc21 = math_min_f32(vacc21, vmax); in xnn_f32_gemm_minmax_ukernel_4x2__scalar()
117 c2[1] = vacc21; in xnn_f32_gemm_minmax_ukernel_4x2__scalar()
4x2-minmax-wasm.c:67 float vacc21 = vacc01; in xnn_f32_gemm_minmax_ukernel_4x2__wasm() local
87 vacc21 += va2 * vb1; in xnn_f32_gemm_minmax_ukernel_4x2__wasm()
99 vacc21 = __builtin_wasm_max_f32(vacc21, vmin); in xnn_f32_gemm_minmax_ukernel_4x2__wasm()
108 vacc21 = __builtin_wasm_min_f32(vacc21, vmax); in xnn_f32_gemm_minmax_ukernel_4x2__wasm()
117 c2[1] = vacc21; in xnn_f32_gemm_minmax_ukernel_4x2__wasm()
4x4-minmax-scalar.c:71 float vacc21 = vacc01; in xnn_f32_gemm_minmax_ukernel_4x4__scalar() local
101 vacc21 += va2 * vb1; in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
121 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
138 vacc21 = math_min_f32(vacc21, vmax); in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
153 c2[1] = vacc21; in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
181 c2[1] = vacc21; in xnn_f32_gemm_minmax_ukernel_4x4__scalar()
4x4-minmax-wasm.c:71 float vacc21 = vacc01; in xnn_f32_gemm_minmax_ukernel_4x4__wasm() local
101 vacc21 += va2 * vb1; in xnn_f32_gemm_minmax_ukernel_4x4__wasm()
121 vacc21 = __builtin_wasm_max_f32(vacc21, vmin); in xnn_f32_gemm_minmax_ukernel_4x4__wasm()
138 vacc21 = __builtin_wasm_min_f32(vacc21, vmax); in xnn_f32_gemm_minmax_ukernel_4x4__wasm()
153 c2[1] = vacc21; in xnn_f32_gemm_minmax_ukernel_4x4__wasm()
181 c2[1] = vacc21; in xnn_f32_gemm_minmax_ukernel_4x4__wasm()
4x2-relu-scalar.c:65 float vacc21 = vacc01; in xnn_f32_gemm_relu_ukernel_4x2__scalar() local
85 vacc21 += va2 * vb1; in xnn_f32_gemm_relu_ukernel_4x2__scalar()
97 vacc21 = math_max_f32(vacc21, 0.0f); in xnn_f32_gemm_relu_ukernel_4x2__scalar()
106 c2[1] = vacc21; in xnn_f32_gemm_relu_ukernel_4x2__scalar()
4x2-relu-wasm.c:65 float vacc21 = vacc01; in xnn_f32_gemm_relu_ukernel_4x2__wasm() local
85 vacc21 += va2 * vb1; in xnn_f32_gemm_relu_ukernel_4x2__wasm()
97 vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f); in xnn_f32_gemm_relu_ukernel_4x2__wasm()
106 c2[1] = vacc21; in xnn_f32_gemm_relu_ukernel_4x2__wasm()
4x4-relu-wasm.c:69 float vacc21 = vacc01; in xnn_f32_gemm_relu_ukernel_4x4__wasm() local
99 vacc21 += va2 * vb1; in xnn_f32_gemm_relu_ukernel_4x4__wasm()
119 vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__wasm()
134 c2[1] = vacc21; in xnn_f32_gemm_relu_ukernel_4x4__wasm()
162 c2[1] = vacc21; in xnn_f32_gemm_relu_ukernel_4x4__wasm()
4x4-relu-scalar.c:69 float vacc21 = vacc01; in xnn_f32_gemm_relu_ukernel_4x4__scalar() local
99 vacc21 += va2 * vb1; in xnn_f32_gemm_relu_ukernel_4x4__scalar()
119 vacc21 = math_max_f32(vacc21, 0.0f); in xnn_f32_gemm_relu_ukernel_4x4__scalar()
134 c2[1] = vacc21; in xnn_f32_gemm_relu_ukernel_4x4__scalar()
162 c2[1] = vacc21; in xnn_f32_gemm_relu_ukernel_4x4__scalar()
4x4-wasm.c:69 float vacc21 = vacc01; in xnn_f32_gemm_ukernel_4x4__wasm() local
99 vacc21 += va2 * vb1; in xnn_f32_gemm_ukernel_4x4__wasm()
118 c2[1] = vacc21; in xnn_f32_gemm_ukernel_4x4__wasm()
146 c2[1] = vacc21; in xnn_f32_gemm_ukernel_4x4__wasm()
4x4-scalar.c:69 float vacc21 = vacc01; in xnn_f32_gemm_ukernel_4x4__scalar() local
99 vacc21 += va2 * vb1; in xnn_f32_gemm_ukernel_4x4__scalar()
118 c2[1] = vacc21; in xnn_f32_gemm_ukernel_4x4__scalar()
146 c2[1] = vacc21; in xnn_f32_gemm_ukernel_4x4__scalar()
4x2-wasm.c:65 float vacc21 = vacc01; in xnn_f32_gemm_ukernel_4x2__wasm() local
85 vacc21 += va2 * vb1; in xnn_f32_gemm_ukernel_4x2__wasm()
98 c2[1] = vacc21; in xnn_f32_gemm_ukernel_4x2__wasm()
4x2-scalar.c:65 float vacc21 = vacc01; in xnn_f32_gemm_ukernel_4x2__scalar() local
85 vacc21 += va2 * vb1; in xnn_f32_gemm_ukernel_4x2__scalar()
98 c2[1] = vacc21; in xnn_f32_gemm_ukernel_4x2__scalar()
/external/XNNPACK/src/f32-igemm/gen/
4x2-minmax-wasm.c:64 float vacc21 = vacc01; in xnn_f32_igemm_minmax_ukernel_4x2__wasm() local
109 vacc21 += va2 * vb1; in xnn_f32_igemm_minmax_ukernel_4x2__wasm()
123 vacc21 = __builtin_wasm_max_f32(vacc21, vmin); in xnn_f32_igemm_minmax_ukernel_4x2__wasm()
132 vacc21 = __builtin_wasm_min_f32(vacc21, vmax); in xnn_f32_igemm_minmax_ukernel_4x2__wasm()
141 c2[1] = vacc21; in xnn_f32_igemm_minmax_ukernel_4x2__wasm()
4x2-minmax-scalar.c:64 float vacc21 = vacc01; in xnn_f32_igemm_minmax_ukernel_4x2__scalar() local
109 vacc21 += va2 * vb1; in xnn_f32_igemm_minmax_ukernel_4x2__scalar()
123 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_igemm_minmax_ukernel_4x2__scalar()
132 vacc21 = math_min_f32(vacc21, vmax); in xnn_f32_igemm_minmax_ukernel_4x2__scalar()
141 c2[1] = vacc21; in xnn_f32_igemm_minmax_ukernel_4x2__scalar()
4x4-minmax-scalar.c:68 float vacc21 = vacc01; in xnn_f32_igemm_minmax_ukernel_4x4__scalar() local
123 vacc21 += va2 * vb1; in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
145 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
162 vacc21 = math_min_f32(vacc21, vmax); in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
177 c2[1] = vacc21; in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
201 c2[1] = vacc21; in xnn_f32_igemm_minmax_ukernel_4x4__scalar()
4x4-minmax-wasm.c:68 float vacc21 = vacc01; in xnn_f32_igemm_minmax_ukernel_4x4__wasm() local
123 vacc21 += va2 * vb1; in xnn_f32_igemm_minmax_ukernel_4x4__wasm()
145 vacc21 = __builtin_wasm_max_f32(vacc21, vmin); in xnn_f32_igemm_minmax_ukernel_4x4__wasm()
162 vacc21 = __builtin_wasm_min_f32(vacc21, vmax); in xnn_f32_igemm_minmax_ukernel_4x4__wasm()
177 c2[1] = vacc21; in xnn_f32_igemm_minmax_ukernel_4x4__wasm()
201 c2[1] = vacc21; in xnn_f32_igemm_minmax_ukernel_4x4__wasm()
4x2-relu-scalar.c:62 float vacc21 = vacc01; in xnn_f32_igemm_relu_ukernel_4x2__scalar() local
107 vacc21 += va2 * vb1; in xnn_f32_igemm_relu_ukernel_4x2__scalar()
121 vacc21 = math_max_f32(vacc21, 0.0f); in xnn_f32_igemm_relu_ukernel_4x2__scalar()
130 c2[1] = vacc21; in xnn_f32_igemm_relu_ukernel_4x2__scalar()
4x2-relu-wasm.c:62 float vacc21 = vacc01; in xnn_f32_igemm_relu_ukernel_4x2__wasm() local
107 vacc21 += va2 * vb1; in xnn_f32_igemm_relu_ukernel_4x2__wasm()
121 vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f); in xnn_f32_igemm_relu_ukernel_4x2__wasm()
130 c2[1] = vacc21; in xnn_f32_igemm_relu_ukernel_4x2__wasm()
4x4-relu-scalar.c:66 float vacc21 = vacc01; in xnn_f32_igemm_relu_ukernel_4x4__scalar() local
121 vacc21 += va2 * vb1; in xnn_f32_igemm_relu_ukernel_4x4__scalar()
143 vacc21 = math_max_f32(vacc21, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__scalar()
158 c2[1] = vacc21; in xnn_f32_igemm_relu_ukernel_4x4__scalar()
182 c2[1] = vacc21; in xnn_f32_igemm_relu_ukernel_4x4__scalar()
4x4-relu-wasm.c:66 float vacc21 = vacc01; in xnn_f32_igemm_relu_ukernel_4x4__wasm() local
121 vacc21 += va2 * vb1; in xnn_f32_igemm_relu_ukernel_4x4__wasm()
143 vacc21 = __builtin_wasm_max_f32(vacc21, 0.0f); in xnn_f32_igemm_relu_ukernel_4x4__wasm()
158 c2[1] = vacc21; in xnn_f32_igemm_relu_ukernel_4x4__wasm()
182 c2[1] = vacc21; in xnn_f32_igemm_relu_ukernel_4x4__wasm()
4x4-wasm.c:66 float vacc21 = vacc01; in xnn_f32_igemm_ukernel_4x4__wasm() local
121 vacc21 += va2 * vb1; in xnn_f32_igemm_ukernel_4x4__wasm()
142 c2[1] = vacc21; in xnn_f32_igemm_ukernel_4x4__wasm()
166 c2[1] = vacc21; in xnn_f32_igemm_ukernel_4x4__wasm()
4x4-scalar.c:66 float vacc21 = vacc01; in xnn_f32_igemm_ukernel_4x4__scalar() local
121 vacc21 += va2 * vb1; in xnn_f32_igemm_ukernel_4x4__scalar()
142 c2[1] = vacc21; in xnn_f32_igemm_ukernel_4x4__scalar()
166 c2[1] = vacc21; in xnn_f32_igemm_ukernel_4x4__scalar()
4x2-scalar.c:62 float vacc21 = vacc01; in xnn_f32_igemm_ukernel_4x2__scalar() local
107 vacc21 += va2 * vb1; in xnn_f32_igemm_ukernel_4x2__scalar()
122 c2[1] = vacc21; in xnn_f32_igemm_ukernel_4x2__scalar()
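The f32-igemm hits above match the same accumulator statements; the higher source line numbers come from the extra setup the indirect-GEMM kernels perform before the reduction. The sketch below is an assumed illustration of that indirection (it is not the real ukernel signature): each of the ks indirection entries supplies fresh row pointers, and the accumulator persists across entries.

#include <stddef.h>

void igemm_row2_sketch(size_t kc, size_t ks, const float** a,
                       const float* w, float* c2) {
  float vacc21 = w[1];            /* bias for column 1, as in the gemm sketch */
  w += 2;
  do {
    const float* a2 = a[2];       /* row-2 pointer from the indirection buffer */
    a += 4;                       /* skip past the 4 row pointers (MR = 4) */
    for (size_t k = 0; k < kc; k++) {
      vacc21 += a2[k] * w[1];     /* same multiply-accumulate as the gemm hits */
      w += 2;
    }
  } while (--ks != 0);
  c2[1] = vacc21;                 /* store, as in "c2[1] = vacc21;" above */
}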
/external/XNNPACK/src/f32-gemm/gen-inc/
4x4inc-minmax-scalar.c:72 float vacc21 = acc[9]; in xnn_f32_gemminc_minmax_ukernel_4x4__scalar() local
103 vacc21 += va2 * vb1; in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
123 vacc21 = math_max_f32(vacc21, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
140 vacc21 = math_min_f32(vacc21, vmax); in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
155 c2[1] = vacc21; in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
183 c2[1] = vacc21; in xnn_f32_gemminc_minmax_ukernel_4x4__scalar()
4x4inc-minmax-wasm.c:72 float vacc21 = acc[9]; in xnn_f32_gemminc_minmax_ukernel_4x4__wasm() local
103 vacc21 += va2 * vb1; in xnn_f32_gemminc_minmax_ukernel_4x4__wasm()
123 vacc21 = __builtin_wasm_max_f32(vacc21, vmin); in xnn_f32_gemminc_minmax_ukernel_4x4__wasm()
140 vacc21 = __builtin_wasm_min_f32(vacc21, vmax); in xnn_f32_gemminc_minmax_ukernel_4x4__wasm()
155 c2[1] = vacc21; in xnn_f32_gemminc_minmax_ukernel_4x4__wasm()
183 c2[1] = vacc21; in xnn_f32_gemminc_minmax_ukernel_4x4__wasm()
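The gen-inc hits differ only in how the accumulator starts: the gemminc kernels resume from a caller-supplied acc buffer instead of the packed bias, and acc[9] is row 2, column 1 of the row-major 4x4 tile (2 * 4 + 1 = 9). The snippet below is an assumed illustration of that resume path, not the generated code.

#include <stddef.h>

/* Resume row 2, column 1 from the partial-sum buffer, matching
 * "float vacc21 = acc[9];" in the hits above. */
void gemminc_row2_sketch(size_t kc, const float* a2, const float* w,
                         const float* acc, float* c2) {
  float vacc21 = acc[9];             /* partial sum, not the bias */
  for (size_t k = 0; k < kc; k++) {
    vacc21 += a2[k] * w[4 * k + 1];  /* 4 columns per k step (assumed layout) */
  }
  c2[1] = vacc21;
}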
