Searched refs:scalar_size (Results 1 – 7 of 7) sorted by relevance
140  const int64 scalar_size = filter_inner_dim_size - vectorized_size;
142  const int64 pad_size = scalar_size > 0 ? kPacketSize - scalar_size : 0;
155  for (int64 j = 0; j < scalar_size; ++j) {
161  padded_filter[output_base + vectorized_size + scalar_size + j] =

225  const int64 scalar_size = args.out_depth % kPacketSize;  in CopyOutputBackpropRegion() local
226  const int64 pad_size = scalar_size > 0 ? kPacketSize - scalar_size : 0;  in CopyOutputBackpropRegion()
244  for (int64 d = 0; d < scalar_size; ++d) {  in CopyOutputBackpropRegion()
249  buffer[buf_base + vectorized_size + scalar_size + d] =  in CopyOutputBackpropRegion()
838  const int64 scalar_size = out_bprop_limit - out_bprop_index;  in ComputeBackpropFilter() local
839  for (int64 j = 0; j < scalar_size; ++j) {  in ComputeBackpropFilter()
944  const int64 scalar_size = out_depth - vectorized_size;  in operator ()() local
970  for (int64 j = 0; j < scalar_size; ++j) {  in operator ()()

147  const int64 scalar_size = args.in_depth % kPacketSize;  in operator ()() local
159  for (int64 d = 0; d < scalar_size; ++d) {  in operator ()()
222  const int64 scalar_size = in_depth % kPacketSize;  in operator ()() local
249  for (int64 d = 0; d < scalar_size; ++d) {  in operator ()()

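In the three results above (apparently depthwise-convolution kernels), scalar_size is the tail of the depth dimension left over after the packet-vectorized portion (depth % kPacketSize, or depth - vectorized_size), and pad_size rounds that tail up to a whole packet so vector loads never run past the buffer. A minimal sketch of that split; CopyAndPad, src, padded and the packet width are hypothetical stand-ins, not names from the hits:

#include <algorithm>
#include <vector>

// Hypothetical packet width; the real kPacketSize would come from the SIMD packet type.
constexpr long kPacketSize = 4;

// Copy `in_depth` values of `src` into `padded`, zero-filling the tail so the
// output length is a whole number of packets (the pattern in the hits above).
void CopyAndPad(const float* src, long in_depth, std::vector<float>* padded) {
  const long vectorized_size = (in_depth / kPacketSize) * kPacketSize;
  const long scalar_size = in_depth % kPacketSize;                 // scalar remainder
  const long pad_size = scalar_size > 0 ? kPacketSize - scalar_size : 0;
  padded->assign(vectorized_size + scalar_size + pad_size, 0.0f);  // pad region stays zero
  // The vectorized portion would use packet loads/stores; a plain copy here.
  std::copy(src, src + vectorized_size, padded->begin());
  for (long j = 0; j < scalar_size; ++j) {                         // scalar tail
    (*padded)[vectorized_size + j] = src[vectorized_size + j];
  }
}

int main() {
  const float src[6] = {1, 2, 3, 4, 5, 6};
  std::vector<float> padded;
  CopyAndPad(src, 6, &padded);  // with kPacketSize = 4: 4 vectorized + 2 scalar + 2 pad
  return padded.size() == 8 ? 0 : 1;
}
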
148  const size_t scalar_size = rhs.m_value.GetByteSize();  in AppendDataToHostBuffer() local
149  if (scalar_size > 0) {  in AppendDataToHostBuffer()
150  const size_t new_size = curr_size + scalar_size;  in AppendDataToHostBuffer()
153  scalar_size, endian::InlHostByteOrder(),  in AppendDataToHostBuffer()
155  return scalar_size;  in AppendDataToHostBuffer()

393  inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {  in PaddingBytes() argument
394  return ((~buf_size) + 1) & (scalar_size - 1);  in PaddingBytes()

689  fn padding_bytes(buf_size: usize, scalar_size: usize) -> usize {  in padding_bytes()
691  (!buf_size).wrapping_add(1) & (scalar_size.wrapping_sub(1))  in padding_bytes()

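The C++ and Rust results above compute the same thing: for a power-of-two scalar_size, ((~buf_size) + 1) is -buf_size in two's complement, and masking with scalar_size - 1 reduces it modulo scalar_size, which gives the number of padding bytes needed to round buf_size up to the next multiple of scalar_size. A small self-check of that identity, assuming scalar_size is a power of two (the helper name is copied from the hit; main and the test values are mine):

#include <cassert>
#include <cstddef>

// Same trick as the two hits above: (~buf_size + 1) == -buf_size in two's
// complement, and & (scalar_size - 1) reduces it modulo a power-of-two scalar_size.
inline size_t PaddingBytes(size_t buf_size, size_t scalar_size) {
  return ((~buf_size) + 1) & (scalar_size - 1);
}

int main() {
  // Rounding 13 bytes up to an 8-byte boundary needs 3 bytes of padding.
  assert(PaddingBytes(13, 8) == 3);
  // Already-aligned sizes need no padding.
  assert(PaddingBytes(16, 8) == 0);
  // Matches the straightforward (align - size % align) % align formula.
  for (size_t size = 0; size < 100; ++size)
    assert(PaddingBytes(size, 16) == (16 - size % 16) % 16);
  return 0;
}
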
1746  unsigned scalar_size = type_scalar_size_bytes(deref->type);  in nir_lower_explicit_io_instr() local
1748  assert(vec_stride == 0 || vec_stride >= scalar_size);  in nir_lower_explicit_io_instr()
1753  align_mul = scalar_size;  in nir_lower_explicit_io_instr()
1760  if (vec_stride > scalar_size) {  in nir_lower_explicit_io_instr()
1788  if (vec_stride > scalar_size) {  in nir_lower_explicit_io_instr()

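The last result splits a vector access whose components are laid out with an explicit stride: the access alignment (align_mul) falls back to scalar_size, and when vec_stride > scalar_size each component is loaded or stored separately at its own byte offset. A generic sketch of that branch, deliberately not using the NIR API (LoadVector and its parameters are hypothetical):

#include <cassert>
#include <cstring>

// Generic form of the branch in the hit above: if the per-component stride
// exceeds the scalar size, the components are not contiguous, so copy each one
// at offset i * vec_stride; otherwise one contiguous copy covers the vector.
void LoadVector(const unsigned char* base, unsigned num_components,
                unsigned scalar_size, unsigned vec_stride, unsigned char* out) {
  assert(vec_stride == 0 || vec_stride >= scalar_size);  // same invariant as the hit
  if (vec_stride > scalar_size) {
    for (unsigned i = 0; i < num_components; ++i)        // strided, per-component access
      std::memcpy(out + i * scalar_size, base + i * vec_stride, scalar_size);
  } else {
    std::memcpy(out, base, num_components * scalar_size); // tightly packed vector
  }
}

int main() {
  unsigned char buf[16] = {1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0};
  unsigned char out[4];
  LoadVector(buf, 4, 1, 4, out);  // 4 one-byte components at stride 4 -> out = {1, 2, 3, 4}
  return out[0] == 1 && out[3] == 4 ? 0 : 1;
}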