/external/XNNPACK/src/f32-gemm/gen/

1x4-minmax-scalar.c  (xnn_f32_gemm_minmax_ukernel_1x4__scalar)
    46  float vacc03 = w[3];  (local)
    62  vacc03 += va0 * vb3;
    70  vacc03 = math_max_f32(vacc03, vmin);
    75  vacc03 = math_min_f32(vacc03, vmax);
    81  c0[3] = vacc03;

1x4-minmax-wasm.c  (xnn_f32_gemm_minmax_ukernel_1x4__wasm)
    46  float vacc03 = w[3];  (local)
    62  vacc03 += va0 * vb3;
    70  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
    75  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
    81  c0[3] = vacc03;

2x4-minmax-scalar.c  (xnn_f32_gemm_minmax_ukernel_2x4__scalar)
    52  float vacc03 = w[3];  (local)
    57  float vacc13 = vacc03;
    73  vacc03 += va0 * vb3;
    85  vacc03 = math_max_f32(vacc03, vmin);
    94  vacc03 = math_min_f32(vacc03, vmax);
   109  c0[3] = vacc03;

2x4-minmax-wasm.c  (xnn_f32_gemm_minmax_ukernel_2x4__wasm)
    52  float vacc03 = w[3];  (local)
    57  float vacc13 = vacc03;
    73  vacc03 += va0 * vb3;
    85  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
    94  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
   109  c0[3] = vacc03;

4x4-minmax-scalar.c  (xnn_f32_gemm_minmax_ukernel_4x4__scalar)
    64  float vacc03 = w[3];  (local)
    69  float vacc13 = vacc03;
    73  float vacc23 = vacc03;
    77  float vacc33 = vacc03;
    95  vacc03 += va0 * vb3;
   115  vacc03 = math_max_f32(vacc03, vmin);
   132  vacc03 = math_min_f32(vacc03, vmax);
   165  c0[3] = vacc03;

4x4-minmax-wasm.c  (xnn_f32_gemm_minmax_ukernel_4x4__wasm)
    64  float vacc03 = w[3];  (local)
    69  float vacc13 = vacc03;
    73  float vacc23 = vacc03;
    77  float vacc33 = vacc03;
    95  vacc03 += va0 * vb3;
   115  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
   132  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
   165  c0[3] = vacc03;

4x4-relu-wasm.c  (xnn_f32_gemm_relu_ukernel_4x4__wasm)
    62  float vacc03 = w[3];  (local)
    67  float vacc13 = vacc03;
    71  float vacc23 = vacc03;
    75  float vacc33 = vacc03;
    93  vacc03 += va0 * vb3;
   113  vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
   146  c0[3] = vacc03;

4x4-relu-scalar.c  (xnn_f32_gemm_relu_ukernel_4x4__scalar)
    62  float vacc03 = w[3];  (local)
    67  float vacc13 = vacc03;
    71  float vacc23 = vacc03;
    75  float vacc33 = vacc03;
    93  vacc03 += va0 * vb3;
   113  vacc03 = math_max_f32(vacc03, 0.0f);
   146  c0[3] = vacc03;

2x4-relu-scalar.c  (xnn_f32_gemm_relu_ukernel_2x4__scalar)
    50  float vacc03 = w[3];  (local)
    55  float vacc13 = vacc03;
    71  vacc03 += va0 * vb3;
    83  vacc03 = math_max_f32(vacc03, 0.0f);
    98  c0[3] = vacc03;

2x4-relu-wasm.c  (xnn_f32_gemm_relu_ukernel_2x4__wasm)
    50  float vacc03 = w[3];  (local)
    55  float vacc13 = vacc03;
    71  vacc03 += va0 * vb3;
    83  vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
    98  c0[3] = vacc03;

1x4-relu-wasm.c  (xnn_f32_gemm_relu_ukernel_1x4__wasm)
    44  float vacc03 = w[3];  (local)
    60  vacc03 += va0 * vb3;
    68  vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
    74  c0[3] = vacc03;

1x4-relu-scalar.c  (xnn_f32_gemm_relu_ukernel_1x4__scalar)
    44  float vacc03 = w[3];  (local)
    60  vacc03 += va0 * vb3;
    68  vacc03 = math_max_f32(vacc03, 0.0f);
    74  c0[3] = vacc03;
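
Every hit above traces the same accumulator lifecycle: vacc03 is seeded from the packed bias (w[3]), updated once per K iteration (va0 * vb3), clamped, and stored to c0[3]. The relu variants clamp with a single max against 0.0f, and the wasm variants swap math_max_f32/math_min_f32 for __builtin_wasm_max_f32/__builtin_wasm_min_f32. Below is a minimal standalone sketch of that 1x4 min/max pattern; it is not the XNNPACK source (the real kernels take strides, a params struct, and handle mr/nr remainders), and the simplified signature and local math helpers are assumptions for illustration.

#include <stddef.h>

/* Stand-ins for XNNPACK's math_max_f32/math_min_f32 helpers (assumption:
 * a plain compare-and-select is enough for this sketch). */
static inline float math_max_f32(float a, float b) { return a > b ? a : b; }
static inline float math_min_f32(float a, float b) { return a < b ? a : b; }

/* Simplified 1x4 min/max GEMM micro-kernel pattern: one row of A (mr = 1),
 * four output columns (nr = 4), weights packed as 4 bias values followed by
 * kc groups of 4. */
static void gemm_1x4_minmax_scalar_sketch(
    size_t kc,              /* K extent, in elements        */
    const float* a0,        /* row of A, kc elements        */
    const float* w,         /* packed bias + weights        */
    float* c0,              /* output row, 4 elements       */
    float vmin, float vmax) /* output clamping bounds       */
{
  /* Seed accumulators from the packed bias (cf. "float vacc03 = w[3];"). */
  float vacc00 = w[0];
  float vacc01 = w[1];
  float vacc02 = w[2];
  float vacc03 = w[3];
  w += 4;

  /* K loop: one A element times a 4-wide slice of B per iteration
   * (cf. "vacc03 += va0 * vb3;"). */
  for (size_t k = 0; k < kc; k++) {
    const float va0 = a0[k];
    vacc00 += va0 * w[0];
    vacc01 += va0 * w[1];
    vacc02 += va0 * w[2];
    vacc03 += va0 * w[3];
    w += 4;
  }

  /* Clamp (cf. the math_max_f32/math_min_f32 hits; relu variants instead do
   * a single max against 0.0f, wasm variants use __builtin_wasm_{max,min}_f32). */
  vacc00 = math_min_f32(math_max_f32(vacc00, vmin), vmax);
  vacc01 = math_min_f32(math_max_f32(vacc01, vmin), vmax);
  vacc02 = math_min_f32(math_max_f32(vacc02, vmin), vmax);
  vacc03 = math_min_f32(math_max_f32(vacc03, vmin), vmax);

  /* Store (cf. "c0[3] = vacc03;"). */
  c0[0] = vacc00;
  c0[1] = vacc01;
  c0[2] = vacc02;
  c0[3] = vacc03;
}
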
/external/XNNPACK/src/f32-gemm/gen-inc/

1x4inc-minmax-scalar.c  (xnn_f32_gemminc_minmax_ukernel_1x4__scalar)
    48  float vacc03 = acc[3];  (local)
    64  vacc03 += va0 * vb3;
    72  vacc03 = math_max_f32(vacc03, vmin);
    77  vacc03 = math_min_f32(vacc03, vmax);
    83  c0[3] = vacc03;

1x4inc-minmax-wasm.c  (xnn_f32_gemminc_minmax_ukernel_1x4__wasm)
    48  float vacc03 = acc[3];  (local)
    64  vacc03 += va0 * vb3;
    72  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
    77  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
    83  c0[3] = vacc03;

2x4inc-minmax-scalar.c  (xnn_f32_gemminc_minmax_ukernel_2x4__scalar)
    54  float vacc03 = acc[3];  (local)
    75  vacc03 += va0 * vb3;
    87  vacc03 = math_max_f32(vacc03, vmin);
    96  vacc03 = math_min_f32(vacc03, vmax);
   111  c0[3] = vacc03;

2x4inc-minmax-wasm.c  (xnn_f32_gemminc_minmax_ukernel_2x4__wasm)
    54  float vacc03 = acc[3];  (local)
    75  vacc03 += va0 * vb3;
    87  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
    96  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
   111  c0[3] = vacc03;
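
The gen-inc (gemminc) hits differ from the gen listing in only one place visible here: the accumulator is seeded from a caller-supplied partial-sum buffer (acc[3]) instead of the packed bias (w[3]), so a previous pass over part of K can be resumed. A hedged sketch of that seeding difference, with an illustrative helper name and signature that are not part of XNNPACK's API:

#include <stddef.h>

/* Illustrative helper (not an XNNPACK function): shows the only seeding
 * difference visible in the gen-inc listing, "vacc03 = acc[3]" instead of
 * "vacc03 = w[3]". */
static void seed_accumulators_1x4(
    const float* w,    /* packed weights, bias first (gemm case)      */
    const float* acc,  /* partial sums from an earlier pass (gemminc) */
    int resume,        /* nonzero: gemminc behaviour                  */
    float vacc[4])
{
  const float* src = resume ? acc : w;
  for (int i = 0; i < 4; i++) {
    vacc[i] = src[i];  /* vacc03 = acc[3] (gemminc) vs. vacc03 = w[3] (gemm) */
  }
}
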
/external/XNNPACK/src/f32-igemm/gen/

1x4-minmax-wasm.c  (xnn_f32_igemm_minmax_ukernel_1x4__wasm)
    50  float vacc03 = w[3];  (local)
    75  vacc03 += va0 * vb3;
    85  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
    90  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
    96  c0[3] = vacc03;

1x4-minmax-scalar.c  (xnn_f32_igemm_minmax_ukernel_1x4__scalar)
    50  float vacc03 = w[3];  (local)
    75  vacc03 += va0 * vb3;
    85  vacc03 = math_max_f32(vacc03, vmin);
    90  vacc03 = math_min_f32(vacc03, vmax);
    96  c0[3] = vacc03;

2x4-minmax-wasm.c  (xnn_f32_igemm_minmax_ukernel_2x4__wasm)
    54  float vacc03 = w[3];  (local)
    58  float vacc13 = vacc03;
    89  vacc03 += va0 * vb3;
   103  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
   112  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
   127  c0[3] = vacc03;

2x4-minmax-scalar.c  (xnn_f32_igemm_minmax_ukernel_2x4__scalar)
    54  float vacc03 = w[3];  (local)
    58  float vacc13 = vacc03;
    89  vacc03 += va0 * vb3;
   103  vacc03 = math_max_f32(vacc03, vmin);
   112  vacc03 = math_min_f32(vacc03, vmax);
   127  c0[3] = vacc03;

4x4-minmax-scalar.c  (xnn_f32_igemm_minmax_ukernel_4x4__scalar)
    62  float vacc03 = w[3];  (local)
    66  float vacc13 = vacc03;
    70  float vacc23 = vacc03;
    74  float vacc33 = vacc03;
   117  vacc03 += va0 * vb3;
   139  vacc03 = math_max_f32(vacc03, vmin);
   156  vacc03 = math_min_f32(vacc03, vmax);
   189  c0[3] = vacc03;

4x4-minmax-wasm.c  (xnn_f32_igemm_minmax_ukernel_4x4__wasm)
    62  float vacc03 = w[3];  (local)
    66  float vacc13 = vacc03;
    70  float vacc23 = vacc03;
    74  float vacc33 = vacc03;
   117  vacc03 += va0 * vb3;
   139  vacc03 = __builtin_wasm_max_f32(vacc03, vmin);
   156  vacc03 = __builtin_wasm_min_f32(vacc03, vmax);
   189  c0[3] = vacc03;

4x4-relu-scalar.c  (xnn_f32_igemm_relu_ukernel_4x4__scalar)
    60  float vacc03 = w[3];  (local)
    64  float vacc13 = vacc03;
    68  float vacc23 = vacc03;
    72  float vacc33 = vacc03;
   115  vacc03 += va0 * vb3;
   137  vacc03 = math_max_f32(vacc03, 0.0f);
   170  c0[3] = vacc03;

4x4-relu-wasm.c  (xnn_f32_igemm_relu_ukernel_4x4__wasm)
    60  float vacc03 = w[3];  (local)
    64  float vacc13 = vacc03;
    68  float vacc23 = vacc03;
    72  float vacc33 = vacc03;
   115  vacc03 += va0 * vb3;
   137  vacc03 = __builtin_wasm_max_f32(vacc03, 0.0f);
   170  c0[3] = vacc03;

2x4-relu-scalar.c  (xnn_f32_igemm_relu_ukernel_2x4__scalar)
    52  float vacc03 = w[3];  (local)
    56  float vacc13 = vacc03;
    87  vacc03 += va0 * vb3;
   101  vacc03 = math_max_f32(vacc03, 0.0f);
   116  c0[3] = vacc03;
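
The f32-igemm hits show the same vacc03 lifecycle as f32-gemm, seeded from w[3]; what distinguishes the igemm kernels is how the A operand is supplied: input rows are read through an indirection buffer of pointers (the convolution path) rather than from a single contiguous row. A hedged standalone sketch of that indirect variant follows; the signature is simplified and the names are illustrative, not the XNNPACK entry points.

#include <stddef.h>

/* Simplified indirect-GEMM (igemm) pattern: A is read through ks row
 * pointers rather than one contiguous row; the vacc03 lifecycle is
 * unchanged from the gemm sketch above. */
static void igemm_1x4_minmax_scalar_sketch(
    size_t ks,                  /* number of indirection entries   */
    size_t kc,                  /* elements contributed per entry  */
    const float* const* a,      /* ks pointers to input rows       */
    const float* w,             /* packed bias + weights           */
    float* c0,                  /* output row, 4 elements          */
    float vmin, float vmax)     /* output clamping bounds          */
{
  /* Seed from the packed bias (cf. "float vacc03 = w[3];"). */
  float vacc00 = w[0], vacc01 = w[1], vacc02 = w[2], vacc03 = w[3];
  w += 4;

  for (size_t s = 0; s < ks; s++) {
    const float* a0 = a[s];          /* follow one indirection pointer */
    for (size_t k = 0; k < kc; k++) {
      const float va0 = a0[k];
      vacc00 += va0 * w[0];
      vacc01 += va0 * w[1];
      vacc02 += va0 * w[2];
      vacc03 += va0 * w[3];          /* cf. "vacc03 += va0 * vb3;" */
      w += 4;
    }
  }

  /* Clamp and store, as in the min/max hits above. */
  vacc00 = vacc00 < vmin ? vmin : (vacc00 > vmax ? vmax : vacc00);
  vacc01 = vacc01 < vmin ? vmin : (vacc01 > vmax ? vmax : vacc01);
  vacc02 = vacc02 < vmin ? vmin : (vacc02 > vmax ? vmax : vacc02);
  vacc03 = vacc03 < vmin ? vmin : (vacc03 > vmax ? vmax : vacc03);
  c0[0] = vacc00;
  c0[1] = vacc01;
  c0[2] = vacc02;
  c0[3] = vacc03;                    /* cf. "c0[3] = vacc03;" */
}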