
Search results for refs:cm_stride (results 1 – 25 of 901), sorted by relevance


/external/XNNPACK/test/
gemm-microkernel-tester.h:134 inline GemmMicrokernelTester& cm_stride(size_t cm_stride) { in cm_stride() function
135 this->cm_stride_ = cm_stride; in cm_stride()
139 inline size_t cm_stride() const { in cm_stride() function
227 …std::vector<uint8_t> c((mr() - 1) * cm_stride() + ((n() - 1) / nr()) * cn_stride() + (n() - 1) % n…
285 c.data(), cm_stride() * sizeof(uint8_t), cn_stride() * sizeof(uint8_t),
296 … ASSERT_LE(uint32_t(c[i * cm_stride() + (j / nr()) * cn_stride() + j % nr()]), uint32_t(qmax()));
297 … ASSERT_GE(uint32_t(c[i * cm_stride() + (j / nr()) * cn_stride() + j % nr()]), uint32_t(qmin()));
298 …ASSERT_EQ(uint32_t(c[i * cm_stride() + (j / nr()) * cn_stride() + j % nr()]), uint32_t(c_ref[i * n…
301 …<< "), optimized = " << (uint32_t) c[i * cm_stride() + (j / nr()) * cn_stride() + j % nr()] << ", …
321 …std::vector<uint8_t> c((mr() - 1) * cm_stride() + ((n() - 1) / nr()) * cn_stride() + (n() - 1) % n…
[all …]
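Note: in the tester, cm_stride is an element stride between output rows, and element (i, j) of C is addressed as c[i * cm_stride() + (j / nr()) * cn_stride() + j % nr()]. A minimal sketch of that addressing and of the buffer sizing, assuming the truncated expression at line 227 ends in % nr() + 1:

#include <stddef.h>
#include <stdio.h>

/* Index of element (i, j) in the tiled output layout used by the tester:
 * rows are cm_stride elements apart; columns are grouped into tiles of nr
 * elements, and tiles are cn_stride elements apart. */
static size_t c_index(size_t i, size_t j,
                      size_t cm_stride, size_t cn_stride, size_t nr) {
  return i * cm_stride + (j / nr) * cn_stride + j % nr;
}

/* Minimum element count of the C buffer, mirroring the tester's sizing
 * (assumed trailing term: % nr() + 1). */
static size_t c_buffer_size(size_t mr, size_t n,
                            size_t cm_stride, size_t cn_stride, size_t nr) {
  return (mr - 1) * cm_stride + ((n - 1) / nr) * cn_stride + (n - 1) % nr + 1;
}

int main(void) {
  /* Example: 4-row tile, 10 columns, nr = 4, cn_stride = 4, cm_stride = 12. */
  printf("size = %zu, c[2][5] at index %zu\n",
         c_buffer_size(4, 10, 12, 4, 4),
         c_index(2, 5, 12, 4, 4));
  return 0;
}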
/external/XNNPACK/src/
operator-run.c:34 const size_t cm_stride = context->cm_stride; in xnn_compute_grouped_gemm() local
43 …(void*) ((uintptr_t) context->c + mr_block_start * cm_stride + (nr_block_start << context->log2_cs… in xnn_compute_grouped_gemm()
44 cm_stride, in xnn_compute_grouped_gemm()
57 const size_t cm_stride = context->cm_stride; in xnn_compute_gemm() local
66 …(void*) ((uintptr_t) context->c + mr_block_start * cm_stride + (nr_block_start << context->log2_cs… in xnn_compute_gemm()
67 cm_stride, in xnn_compute_gemm()
100 const size_t cm_stride = context->cm_stride; in xnn_compute_grouped_batch_igemm() local
109 …ontext->gc_stride + batch_index * context->bc_stride + mr_block_start * cm_stride + (nr_block_star… in xnn_compute_grouped_batch_igemm()
110 cm_stride, in xnn_compute_grouped_batch_igemm()
126 const size_t cm_stride = context->cm_stride; in xnn_compute_grouped_igemm() local
[all …]
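Note: in operator-run.c, cm_stride is a byte stride. The compute callbacks locate each output tile by offsetting the base pointer by mr_block_start * cm_stride bytes for the row block plus nr_block_start << log2_csize bytes for the column block, as in this sketch (the context struct is illustrative, not XNNPACK's actual definition):

#include <stdint.h>
#include <stddef.h>

/* Illustrative stand-in for the fields used by the pointer arithmetic above. */
struct gemm_tile_context {
  void* c;              /* base of the output matrix C */
  size_t cm_stride;     /* bytes between consecutive rows of C */
  uint32_t log2_csize;  /* log2(sizeof(element)), e.g. 2 for float */
};

/* Start of the output tile at (mr_block_start, nr_block_start). */
static void* tile_output_ptr(const struct gemm_tile_context* ctx,
                             size_t mr_block_start, size_t nr_block_start) {
  return (void*) ((uintptr_t) ctx->c
                  + mr_block_start * ctx->cm_stride
                  + (nr_block_start << ctx->log2_csize));
}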
/external/XNNPACK/src/f32-gemm/gen-inc/
8x16inc-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
50 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
56 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
62 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
68 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
74 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
80 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x16__avx512f_broadcast()
7x16inc-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast()
50 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast()
56 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast()
62 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast()
68 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast()
74 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x16__avx512f_broadcast()
6x16inc-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast()
50 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast()
56 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast()
62 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast()
68 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_6x16__avx512f_broadcast()
8x8inc-minmax-fma3-broadcast.c:25 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast() argument
43 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
55 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
61 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
67 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
73 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
79 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_8x8__fma3_broadcast()
5x16inc-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_5x16__avx512f_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_5x16__avx512f_broadcast()
50 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_5x16__avx512f_broadcast()
56 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_5x16__avx512f_broadcast()
62 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_5x16__avx512f_broadcast()
7x8inc-minmax-fma3-broadcast.c:25 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast() argument
43 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast()
55 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast()
61 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast()
67 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast()
73 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__fma3_broadcast()
7x8inc-minmax-avx-broadcast.c:25 size_t cm_stride, in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast() argument
43 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast()
55 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast()
61 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast()
67 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast()
73 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemminc_minmax_ukernel_7x8__avx_broadcast()
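Note: every gemminc kernel in this group derives one output-row pointer per tile row by adding cm_stride to the previous row's pointer. The generated sources also clamp each pointer when mr is smaller than the tile height, a detail the truncated snippets omit; the clamping condition below is an assumption based on that pattern:

#include <stdint.h>
#include <stddef.h>

/* Row-pointer cascade for an 8-row tile: each pointer is cm_stride bytes
 * past the previous one, and rows beyond mr alias a lower row so that
 * their stores stay inside the output buffer. */
static void setup_row_pointers(float* c0, size_t cm_stride, size_t mr,
                               float* c[8]) {
  c[0] = c0;
  for (size_t i = 1; i < 8; i++) {
    c[i] = (float*) ((uintptr_t) c[i - 1] + cm_stride);
    if (mr <= i) {
      c[i] = c[i - 1];  /* out-of-range row aliases the previous row */
    }
  }
}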
/external/XNNPACK/src/f32-gemm/gen/
8x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast() argument
42 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
54 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
60 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
66 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
72 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
78 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x16__avx512f_broadcast()
7x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast() argument
42 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast()
54 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast()
60 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast()
66 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast()
72 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x16__avx512f_broadcast()
6x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast() argument
42 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast()
54 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast()
60 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast()
66 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemm_minmax_ukernel_6x16__avx512f_broadcast()
8x8-minmax-fma3-broadcast.c:25 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast() argument
41 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
47 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
59 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
65 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
71 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
77 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_gemm_minmax_ukernel_8x8__fma3_broadcast()
5x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast() argument
42 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast()
54 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast()
60 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_5x16__avx512f_broadcast()
7x8-minmax-avx-broadcast.c:25 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast() argument
41 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast()
47 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast()
59 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast()
65 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast()
71 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__avx_broadcast()
7x8-minmax-fma3-broadcast.c:25 size_t cm_stride, in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast() argument
41 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast()
47 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast()
59 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast()
65 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast()
71 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_gemm_minmax_ukernel_7x8__fma3_broadcast()
4x2-scalar.c:24 size_t cm_stride, in xnn_f32_gemm_ukernel_4x2__scalar() argument
40 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_gemm_ukernel_4x2__scalar()
46 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_gemm_ukernel_4x2__scalar()
52 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_gemm_ukernel_4x2__scalar()
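Note: the scalar 4x2 kernel makes the pointer convention easiest to read. A simplified scalar analog, not the generated kernel itself (kc is counted in elements here, whereas the generated kernels count bytes):

#include <stdint.h>
#include <stddef.h>

/* Simplified 4x2 GEMM: A is row-major with a_stride bytes between rows,
 * C uses cm_stride bytes between rows, and w holds 2 biases followed by
 * 2 packed weights per k step. Illustrative of the convention only. */
static void gemm_4x2_scalar_sketch(
    size_t mr, size_t nc, size_t kc,
    const float* a, size_t a_stride,   /* a_stride and cm_stride in bytes */
    const float* w,
    float* c, size_t cm_stride)
{
  for (size_t i = 0; i < mr; i++) {
    const float* ai = (const float*) ((uintptr_t) a + i * a_stride);
    float* ci = (float*) ((uintptr_t) c + i * cm_stride);
    const float* wp = w;
    float acc0 = wp[0];  /* accumulators start from the packed biases */
    float acc1 = wp[1];
    wp += 2;
    for (size_t k = 0; k < kc; k++) {
      acc0 += ai[k] * wp[0];
      acc1 += ai[k] * wp[1];
      wp += 2;
    }
    if (nc > 0) { ci[0] = acc0; }
    if (nc > 1) { ci[1] = acc1; }
  }
}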
/external/XNNPACK/src/f32-igemm/gen/
8x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast() argument
45 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
57 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
61 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
65 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
69 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x16__avx512f_broadcast()
7x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast() argument
45 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast()
57 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast()
61 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast()
65 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x16__avx512f_broadcast()
6x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast() argument
45 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast()
57 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast()
61 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_igemm_minmax_ukernel_6x16__avx512f_broadcast()
8x8-minmax-fma3-broadcast.c:25 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
52 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
56 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
60 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
64 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
68 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_igemm_minmax_ukernel_8x8__fma3_broadcast()
7x8-minmax-avx-broadcast.c:25 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast()
52 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast()
56 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast()
60 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast()
64 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__avx_broadcast()
7x8-minmax-fma3-broadcast.c:25 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast() argument
44 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast()
48 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast()
52 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast()
56 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast()
60 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast()
64 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_igemm_minmax_ukernel_7x8__fma3_broadcast()
5x16-minmax-avx512f-broadcast.c:26 size_t cm_stride, in xnn_f32_igemm_minmax_ukernel_5x16__avx512f_broadcast() argument
45 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_igemm_minmax_ukernel_5x16__avx512f_broadcast()
49 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_igemm_minmax_ukernel_5x16__avx512f_broadcast()
53 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_igemm_minmax_ukernel_5x16__avx512f_broadcast()
57 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_igemm_minmax_ukernel_5x16__avx512f_broadcast()
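Note: the igemm kernels set up their output-row pointers with the same cm_stride cascade as the gemm kernels; the difference, not visible in these truncated hits, is on the input side, where rows of A are fetched through an indirection buffer of pointers instead of a fixed a_stride. A rough single-row illustration under that assumption (the weight layout is simplified, not XNNPACK's packed format):

#include <stddef.h>

/* Accumulate one output value for row 0 across ks indirection slices:
 * each slice contributes a pointer to kc input elements. */
static float igemm_dot_row0(const float** a, size_t ks, size_t kc,
                            const float* w)
{
  float acc = 0.0f;
  for (size_t s = 0; s < ks; s++) {
    const float* a0 = a[s];          /* indirection: one pointer per slice */
    for (size_t k = 0; k < kc; k++) {
      acc += a0[k] * w[s * kc + k];  /* simplified weight layout */
    }
  }
  return acc;
}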
/external/XNNPACK/src/f32-ppmm/gen/
8x8-minmax-neon.c:25 size_t cm_stride, in xnn_f32_ppmm_minmax_ukernel_8x8__neon() argument
36 float* c1 = (float*) ((uintptr_t) c0 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
40 float* c2 = (float*) ((uintptr_t) c1 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
44 float* c3 = (float*) ((uintptr_t) c2 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
48 float* c4 = (float*) ((uintptr_t) c3 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
52 float* c5 = (float*) ((uintptr_t) c4 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
56 float* c6 = (float*) ((uintptr_t) c5 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
60 float* c7 = (float*) ((uintptr_t) c6 + cm_stride); in xnn_f32_ppmm_minmax_ukernel_8x8__neon()
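Note: the ppmm (pre-packed GEMM) kernel keeps the identical cm_stride output setup; the input difference is that A has been pre-packed so each k step supplies the values for all mr rows contiguously. A simplified accumulation for a single output channel under that assumed layout:

#include <stddef.h>

/* Pre-packed A: the mr values for step k are stored contiguously at
 * a_packed[k * mr .. k * mr + mr - 1], so all rows advance together. */
static void ppmm_accumulate_sketch(size_t mr, size_t kc,
                                   const float* a_packed, const float* w,
                                   float* acc /* mr accumulators */)
{
  for (size_t k = 0; k < kc; k++) {
    const float wk = w[k];
    for (size_t i = 0; i < mr; i++) {
      acc[i] += a_packed[k * mr + i] * wk;  /* rows contiguous per k step */
    }
  }
}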
