Searched refs: num_cols (Results 1 – 25 of 138) sorted by relevance

/external/tensorflow/tensorflow/python/data/experimental/benchmarks/
csv_dataset_benchmark.py
59 def _run_benchmark(self, dataset, num_cols, prefix): argument
64 name='%s_with_cols_%d' % (prefix, num_cols),
71 num_cols = self._num_cols[i]
72 kwargs = {'record_defaults': [[0.0]] * num_cols}
76 dataset=dataset, num_cols=num_cols, prefix='csv_float_map_decode_csv')
82 num_cols = self._num_cols[i]
83 kwargs = {'record_defaults': [['']] * num_cols}
88 num_cols=num_cols,
95 num_cols = self._num_cols[i]
96 kwargs = {'record_defaults': [[0.0]] * num_cols}
[all …]
/external/tensorflow/tensorflow/core/kernels/linalg/
matrix_diag_op_gpu.cu.cc
32 const int num_cols, in ComputeContentOffset() argument
40 const int diag_len = min(num_rows + y_offset, num_cols - x_offset); in ComputeContentOffset()
46 const int num_threads, const int num_rows, const int num_cols, in MatrixDiagKernel() argument
52 const int batch_and_row_index = index / num_cols; in MatrixDiagKernel()
53 const int col = index - batch_and_row_index * num_cols; in MatrixDiagKernel()
59 ComputeContentOffset(diag_index, max_diag_len, num_rows, num_cols, in MatrixDiagKernel()
84 const int num_cols = output.dimension(2); in Compute() local
87 num_cols == 0) { in Compute()
91 GetGpuLaunchConfig(batch_size * num_rows * num_cols, device); in Compute()
94 device.stream(), config.virtual_thread_count, num_rows, num_cols, in Compute()
[all …]
matrix_diag_op.cc
102 const Eigen::Index num_cols = input_shape.dim_size(rank - 1); in Compute() local
105 (-num_rows < lower_diag_index && lower_diag_index < num_cols) || in Compute()
109 ". It must be between ", -num_rows, " and ", num_cols)); in Compute()
111 (-num_rows < upper_diag_index && upper_diag_index < num_cols) || in Compute()
115 " It must be between ", -num_rows, " and ", num_cols)); in Compute()
130 num_cols - std::max(lower_diag_index, 0)); in Compute()
170 int32 num_cols = -1; in Compute() local
196 num_cols = context->input(3).flat<int32>()(0); in Compute()
225 OP_REQUIRES(context, num_cols == -1 || num_cols >= min_num_cols, in Compute()
230 if (num_rows == -1 && num_cols == -1) { in Compute()
[all …]
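
An editorial aside (not TensorFlow source): the matrix_diag_op.cc hits above all guard one invariant, that a diagonal index k of a num_rows x num_cols matrix is only valid when -num_rows < k < num_cols. A minimal standalone sketch of that check:

#include <cstdio>

// Diagonal 0 is the main diagonal; k > 0 lies above it, k < 0 below.
bool DiagIndexInRange(int k, int num_rows, int num_cols) {
  return -num_rows < k && k < num_cols;
}

int main() {
  std::printf("%d\n", DiagIndexInRange(3, 3, 4));   // 1: a 3 x 4 matrix has diagonals -2..3
  std::printf("%d\n", DiagIndexInRange(-3, 3, 4));  // 0: out of range
}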
lu_op.cc
85 const int64 num_cols = input.dim_size(input_rank - 1); in Compute() local
87 input_matrix_shape.AppendShape({num_rows, num_cols}); in Compute()
115 auto shard = [this, &input, &num_rows, &num_cols, &outputs, in Compute()
118 ComputeTensorSlice(context, i, input, num_rows, num_cols, outputs, in Compute()
129 const Tensor& input, int64 num_rows, int64 num_cols, in ComputeTensorSlice() argument
135 input.flat<Scalar>().data() + matrix_index * num_rows * num_cols, in ComputeTensorSlice()
136 num_rows, num_cols); in ComputeTensorSlice()
140 outputs[0]->flat<Scalar>().data() + matrix_index * num_rows * num_cols, in ComputeTensorSlice()
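
The lu_op.cc hits address matrix i of a batched input at offset matrix_index * num_rows * num_cols. A hedged sketch of the same zero-copy slicing with Eigen::Map (the function name and float element type are illustrative, not taken from lu_op.cc):

#include <Eigen/Dense>
#include <vector>

using RowMajorMatrix =
    Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;

// Views matrix i of a flat [batch, num_rows, num_cols] buffer without copying.
Eigen::Map<RowMajorMatrix> MatrixSlice(std::vector<float>& buf, long i,
                                       long num_rows, long num_cols) {
  return Eigen::Map<RowMajorMatrix>(buf.data() + i * num_rows * num_cols,
                                    num_rows, num_cols);
}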
matrix_set_diag_op.cc
104 const Eigen::Index num_cols = input_shape.dim_size(input_rank - 1); in Compute() local
107 (-num_rows < lower_diag_index && lower_diag_index < num_cols) || in Compute()
111 " It must be between ", -num_rows, " and ", num_cols)); in Compute()
113 (-num_rows < upper_diag_index && upper_diag_index < num_cols) || in Compute()
117 " It must be between ", -num_rows, " and ", num_cols)); in Compute()
139 num_cols - std::max(lower_diag_index, 0)); in Compute()
222 const Eigen::Index num_cols = output.dimension(2); in Compute() local
229 diag_index, max_diag_len, num_rows, num_cols, in Compute()
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
matrix_diag_ops.cc
30 static inline int ComputeDiagLen(int diag_index, int num_rows, int num_cols) { in ComputeDiagLen() argument
32 num_cols - std::max(0, diag_index)); in ComputeDiagLen()
96 const int64 num_cols) { in ValidateDiagIndexWithOutputMatrixSize() argument
99 (-num_rows < lower_diag_index && lower_diag_index < num_cols) || in ValidateDiagIndexWithOutputMatrixSize()
103 " It must be between ", -num_rows, " and ", num_cols)); in ValidateDiagIndexWithOutputMatrixSize()
105 (-num_rows < upper_diag_index && upper_diag_index < num_cols) || in ValidateDiagIndexWithOutputMatrixSize()
109 " It must be between ", -num_rows, " and ", num_cols)); in ValidateDiagIndexWithOutputMatrixSize()
121 const int64 num_rows, const int64 num_cols, in SetMatrixDiag() argument
190 if (num_cols - num_rows <= diag_index && diag_index <= 0) { in SetMatrixDiag()
192 } else if (0 <= diag_index && diag_index <= num_cols - num_rows) { in SetMatrixDiag()
[all …]
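
ComputeDiagLen above is the closed form for the length of diagonal k in a num_rows x num_cols matrix; spelled out as a compilable sketch:

#include <algorithm>
#include <cassert>

int DiagLen(int k, int num_rows, int num_cols) {
  return std::min(num_rows + std::min(0, k), num_cols - std::max(0, k));
}

int main() {
  assert(DiagLen(0, 3, 4) == 3);   // main diagonal of a 3 x 4 matrix
  assert(DiagLen(1, 3, 4) == 3);   // superdiagonal: (0,1), (1,2), (2,3)
  assert(DiagLen(3, 3, 4) == 1);   // top-right corner only
}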
/external/tensorflow/tensorflow/python/kernel_tests/
checkpoint_ops_test.py
145 num_cols=self.old_num_cols)
160 num_cols=len(col_remapping))
175 num_cols=len(col_remapping))
190 num_cols=2)
210 num_cols=self.old_num_cols)
219 num_cols = 4
220 initializing_values = [42] * num_rows * num_cols
225 col_remapping=[-1] * num_cols,
228 num_cols=num_cols)
231 np.reshape(initializing_values, (num_rows, num_cols)),
[all …]
bincount_op_test.py
213 def _test_bincount_col_count(self, num_rows, num_cols, size, dtype): argument
215 inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
226 def _test_bincount_col_binary(self, num_rows, num_cols, size, dtype): argument
228 inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
242 def _test_bincount_col_count_with_weights(self, num_rows, num_cols, size, argument
245 inp = np.random.randint(0, size, (num_rows, num_cols), dtype=dtype)
246 np_weight = np.random.random((num_rows, num_cols))
276 num_cols = 27
278 self._test_bincount_col_count(num_rows, num_cols, size, dtype)
288 num_cols = 27
[all …]
fractional_avg_pool_op_test.py
152 num_cols = 6
153 tensor_shape = (1, num_rows, num_cols, 1)
180 for j in range(num_cols):
201 num_cols = 30
204 tensor_shape = (num_batches, num_rows, num_cols, num_channels)
241 for num_cols in [10, 20, 50]:
242 tensor_shape = (num_batches, num_rows, num_cols, num_channels)
257 num_cols = 50
258 tensor_shape = (num_batches, num_rows, num_cols, num_channels)
278 num_cols = 50
[all …]
fractional_max_pool_op_test.py
152 num_cols = 6
153 tensor_shape = (1, num_rows, num_cols, 1)
182 for j in range(num_cols):
203 num_cols = 30
206 tensor_shape = (num_batches, num_rows, num_cols, num_channels)
219 num_cols = 30
222 tensor_shape = (num_batches, num_rows, num_cols, num_channels)
238 for num_cols in [10, 20, 50]:
239 tensor_shape = (num_batches, num_rows, num_cols, num_channels)
254 num_cols = 50
[all …]
/external/libjpeg-turbo/
jdcol565.c
29 JDIMENSION num_cols = cinfo->output_width; in LOCAL() local
58 num_cols--; in LOCAL()
60 for (col = 0; col < (num_cols >> 1); col++) { in LOCAL()
82 if (num_cols & 1) { in LOCAL()
108 JDIMENSION num_cols = cinfo->output_width; in LOCAL() local
139 num_cols--; in LOCAL()
141 for (col = 0; col < (num_cols >> 1); col++) { in LOCAL()
167 if (num_cols & 1) { in LOCAL()
192 JDIMENSION num_cols = cinfo->output_width; in LOCAL() local
211 num_cols--; in LOCAL()
[all …]
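
The jdcol565.c loops share one shape: RGB565 output packs two 16-bit pixels per 32-bit store, so each row is consumed in (num_cols >> 1) pairs and the (num_cols & 1) branch handles an odd trailing pixel. A sketch of that shape (little-endian store order assumed; the real file also branches on output alignment and endianness):

#include <stdint.h>

void pack_pairs(const uint16_t *src, uint32_t *dst, unsigned num_cols) {
  unsigned col;
  for (col = 0; col < (num_cols >> 1); col++)   /* two pixels per store */
    dst[col] = (uint32_t)src[2 * col] | ((uint32_t)src[2 * col + 1] << 16);
  if (num_cols & 1)                             /* odd trailing pixel */
    ((uint16_t *)dst)[num_cols - 1] = src[num_cols - 1];
}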
jdcolext.c
40 JDIMENSION num_cols = cinfo->output_width; in LOCAL() local
55 for (col = 0; col < num_cols; col++) { in LOCAL()
90 JDIMENSION num_cols = cinfo->output_width; in LOCAL() local
95 for (col = 0; col < num_cols; col++) { in LOCAL()
121 JDIMENSION num_cols = cinfo->output_width; in LOCAL() local
129 for (col = 0; col < num_cols; col++) { in LOCAL()
jccolext.c
42 JDIMENSION num_cols = cinfo->image_width; in LOCAL() local
50 for (col = 0; col < num_cols; col++) { in LOCAL()
96 JDIMENSION num_cols = cinfo->image_width; in LOCAL() local
102 for (col = 0; col < num_cols; col++) { in LOCAL()
129 JDIMENSION num_cols = cinfo->image_width; in LOCAL() local
137 for (col = 0; col < num_cols; col++) { in LOCAL()
/external/perfetto/src/trace_processor/sqlite/
sqlite_vtable_benchmark.cc
51 explicit BenchmarkCursor(size_t num_cols, size_t batch_size) in BenchmarkCursor() argument
52 : num_cols_(num_cols), batch_size_(batch_size), rnd_engine_(kRandomSeed) { in BenchmarkCursor()
53 column_buffer_.resize(num_cols); in BenchmarkCursor()
103 size_t num_cols; in BM_SqliteStepAndResult() member
121 size_t num_cols = static_cast<size_t>(_state.range(1)); in BM_SqliteStepAndResult() local
123 for (size_t col = 0; col < num_cols; col++) in BM_SqliteStepAndResult()
129 vtab->num_cols = num_cols; in BM_SqliteStepAndResult()
130 vtab->batch_size = num_cols; in BM_SqliteStepAndResult()
147 *c = new BenchmarkCursor(vtab->num_cols, vtab->batch_size); in BM_SqliteStepAndResult()
184 size_t num_cols = static_cast<size_t>(state.range(1)); in BM_SqliteStepAndResult() local
[all …]
/external/tensorflow/tensorflow/core/kernels/
reduction_gpu_kernels.cu.h
202 T in, OUT_T out, int num_rows, int num_cols, Op op,
212 if (num_cols == 1) {
221 if (row < num_rows && col < num_cols) {
222 sum = in[row * num_cols + col];
224 for (; col < num_cols; col += TF_RED_WARPSIZE) {
225 sum = op(sum, in[row * num_cols + col]);
234 WarpReduce(temp_storage).Reduce(sum, op, min(num_cols, TF_RED_WARPSIZE));
270 T in, OUT_T out, int num_rows, int num_cols, Op op,
273 int rows_per_warp = TF_RED_WARPSIZE / num_cols;
276 const int lane_row = lane / num_cols;
[all …]
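
The reduction kernel above folds each row of a row-major [num_rows, num_cols] buffer: a lane starts at its column, strides by TF_RED_WARPSIZE, and a WarpReduce combines the partials. Its sequential CPU analogue (editorial sketch only):

#include <functional>
#include <vector>

template <typename T, typename Op>
std::vector<T> ReduceRows(const std::vector<T>& in, int num_rows, int num_cols,
                          Op op) {
  std::vector<T> out(num_rows);
  for (int row = 0; row < num_rows; ++row) {
    T sum = in[row * num_cols];                    // column 0 seeds the fold
    for (int col = 1; col < num_cols; ++col)
      sum = op(sum, in[row * num_cols + col]);
    out[row] = sum;
  }
  return out;
}
// e.g. ReduceRows(data, 4, 16, std::plus<float>());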
softmax_op_gpu.cu.cc
74 const int num_rows, const int num_cols, in GenerateNormalizedProb() argument
84 row = tid / num_cols; in GenerateNormalizedProb()
85 col = tid % num_cols; in GenerateNormalizedProb()
86 if (row < num_rows && col < num_cols) { in GenerateNormalizedProb()
95 row = tid / num_cols; in GenerateNormalizedProb()
96 col = tid % num_cols; in GenerateNormalizedProb()
97 if (row < num_rows && col < num_cols) { in GenerateNormalizedProb()
113 const int num_cols, const bool in_log_space) { in GenerateNormalizedProb() argument
123 if (tid * kUnroll + kUnroll - 1 < num_rows * num_cols) { in GenerateNormalizedProb()
132 row[i] = idx[i] / num_cols; in GenerateNormalizedProb()
[all …]
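
This kernel, like MatrixDiagKernel earlier, recovers (row, col) from a flat thread id over a [num_rows, num_cols] grid; MatrixDiagKernel uses the subtract form to save the second division. As a sketch:

struct RowCol { int row, col; };

RowCol Decompose(int tid, int num_cols) {
  int row = tid / num_cols;
  return {row, tid - row * num_cols};  // equals tid % num_cols
}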
topk_op_gpu.h
430 SegmentOffsetCreator(int num_cols) : num_cols_(num_cols) {}
440 ColumnIndexCreator(int num_cols) : num_cols_(num_cols) {}
452 int num_cols, int k,
463 DT_INT32, TensorShape({num_rows, num_cols}), &input_indices));
466 input_indices_t.generate(ColumnIndexCreator(num_cols));
471 segment_offsets_t(counting_iter, SegmentOffsetCreator(num_cols));
477 if (k == num_cols) {
484 DT_INT32, TensorShape({num_rows, num_cols}), &temp_indices));
486 TensorShape({num_rows, num_cols}),
499 /* num_items */ num_cols * num_rows,
[all …]
bincount_op_gpu.cu.cc
135 const int num_rows, const int num_cols, in BincountColReduceKernel() argument
137 const int nthreads = num_rows * num_cols; in BincountColReduceKernel()
141 int row = index / num_cols; in BincountColReduceKernel()
157 const int num_cols, in BincountColReduceSharedKernel() argument
167 const int nthreads = num_rows * num_cols; in BincountColReduceSharedKernel()
171 int row = index / num_cols; in BincountColReduceSharedKernel()
203 const int num_cols = in.dimension(1); in Compute() local
206 GpuLaunchConfig config = GetGpuLaunchConfig(num_rows * num_cols, d); in Compute()
216 num_cols, num_bins); in Compute()
221 weights.size(), out.data(), num_rows, num_cols, num_bins); in Compute()
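
BincountColReduceKernel computes one histogram per row: a flat index maps to row = index / num_cols, and the input value picks the bin inside that row's slice of a [num_rows, num_bins] output. A CPU sketch without the GPU atomics:

#include <vector>

std::vector<int> BincountRows(const std::vector<int>& in, int num_rows,
                              int num_cols, int num_bins) {
  std::vector<int> out(num_rows * num_bins, 0);
  for (int index = 0; index < num_rows * num_cols; ++index) {
    int row = index / num_cols;
    int bin = in[index];
    if (bin < num_bins) ++out[row * num_bins + bin];  // out-of-range values dropped
  }
  return out;
}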
topk_op.cc
74 const int64 num_cols = input.dimension(1); in Compute() local
91 context, sorted_, k, input, num_rows, num_cols, values, indices); in Compute()
107 const int64 num_cols, typename TTypes<T, 2>::Tensor values, in Compute()
127 for (int c = 0; c < num_cols; ++c) { in Compute()
159 if (k == num_cols) { in Compute()
184 filter.reserve(num_cols); in Compute()
185 for (int32 c = 0; c < num_cols; ++c) { in Compute()
216 static_cast<double>(num_cols * in Compute()
218 const double sort_cost = (k == num_cols) ? base_cost : 4 * base_cost; in Compute()
255 const int64 num_cols, typename TTypes<T, 2>::Tensor values, \
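
topk_op.cc shards rows across threads and, as the k == num_cols branch above shows, falls back to a plain sort when every column is requested. One row's worth of work, sketched with std::partial_sort (shape-equivalent only; the real kernel maintains its own filter):

#include <algorithm>
#include <functional>
#include <vector>

// Returns the k largest values of one row, in descending order.
std::vector<float> TopKRow(const float* row, int num_cols, int k) {
  std::vector<float> v(row, row + num_cols);
  std::partial_sort(v.begin(), v.begin() + k, v.end(), std::greater<float>());
  v.resize(k);
  return v;
}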
/external/libhevc/decoder/
ihevcd_fmt_conv.c
393 WORD32 num_rows, num_cols, src_strd, dst_strd; in ihevcd_fmt_conv_420sp_to_420sp() local
401 num_cols = wd; in ihevcd_fmt_conv_420sp_to_420sp()
408 memcpy(pu1_dst, pu1_src, num_cols); in ihevcd_fmt_conv_420sp_to_420sp()
418 num_cols = wd; in ihevcd_fmt_conv_420sp_to_420sp()
425 memcpy(pu1_dst, pu1_src, num_cols); in ihevcd_fmt_conv_420sp_to_420sp()
492 WORD32 num_rows, num_cols, src_strd, dst_strd; in ihevcd_fmt_conv_420sp_to_420sp_swap_uv() local
500 num_cols = wd; in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()
507 memcpy(pu1_dst, pu1_src, num_cols); in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()
517 num_cols = wd; in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()
525 for(j = 0; j < num_cols; j += 2) in ihevcd_fmt_conv_420sp_to_420sp_swap_uv()
[all …]
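
The decoder/encoder groups from here down repeat two loop shapes: a strided per-row memcpy for straight plane copies, and a j += 2 inner loop that swaps the interleaved U/V bytes of an NV12/NV21 chroma plane while copying. Both sketched with illustrative names:

#include <cstdint>
#include <cstring>

void CopyPlane(const uint8_t* src, uint8_t* dst, int wd, int ht,
               int src_strd, int dst_strd) {
  const int num_cols = wd;
  for (int i = 0; i < ht; ++i) {
    std::memcpy(dst, src, num_cols);        // one row at a time
    src += src_strd;
    dst += dst_strd;
  }
}

void CopyPlaneSwapUV(const uint8_t* src, uint8_t* dst, int wd, int ht,
                     int src_strd, int dst_strd) {
  const int num_cols = wd;
  for (int i = 0; i < ht; ++i) {
    for (int j = 0; j < num_cols; j += 2) {  // UVUV... becomes VUVU...
      dst[j] = src[j + 1];
      dst[j + 1] = src[j];
    }
    src += src_strd;
    dst += dst_strd;
  }
}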
/external/libhevc/decoder/x86/
ihevcd_fmt_conv_ssse3_intr.c
63 WORD32 num_rows, num_cols, src_strd, dst_strd, cols, rows; in ihevcd_fmt_conv_420sp_to_420p_ssse3() local
76 num_cols = wd; in ihevcd_fmt_conv_420sp_to_420p_ssse3()
82 memcpy(pu1_dst, pu1_src, num_cols); in ihevcd_fmt_conv_420sp_to_420p_ssse3()
119 num_cols = wd >> 1; in ihevcd_fmt_conv_420sp_to_420p_ssse3()
126 if(num_cols > 15) in ihevcd_fmt_conv_420sp_to_420p_ssse3()
128 cols = num_cols >> 4; in ihevcd_fmt_conv_420sp_to_420p_ssse3()
246 num_cols &= 0x0F; in ihevcd_fmt_conv_420sp_to_420p_ssse3()
248 if(num_cols) in ihevcd_fmt_conv_420sp_to_420p_ssse3()
256 for(j = 0; j < num_cols; j++) in ihevcd_fmt_conv_420sp_to_420p_ssse3()
/external/libavc/decoder/
ih264d_format_conv.c
385 WORD32 num_rows, num_cols, src_strd, dst_strd; in ih264d_fmt_conv_420sp_to_420sp() local
393 num_cols = wd; in ih264d_fmt_conv_420sp_to_420sp()
400 memcpy(pu1_dst, pu1_src, num_cols); in ih264d_fmt_conv_420sp_to_420sp()
410 num_cols = wd; in ih264d_fmt_conv_420sp_to_420sp()
417 memcpy(pu1_dst, pu1_src, num_cols); in ih264d_fmt_conv_420sp_to_420sp()
482 WORD32 num_rows, num_cols, src_strd, dst_strd; in ih264d_fmt_conv_420sp_to_420sp_swap_uv() local
490 num_cols = wd; in ih264d_fmt_conv_420sp_to_420sp_swap_uv()
497 memcpy(pu1_dst, pu1_src, num_cols); in ih264d_fmt_conv_420sp_to_420sp_swap_uv()
507 num_cols = wd; in ih264d_fmt_conv_420sp_to_420sp_swap_uv()
515 for(j = 0; j < num_cols; j += 2) in ih264d_fmt_conv_420sp_to_420sp_swap_uv()
[all …]
/external/libavc/encoder/
ih264e_fmt_conv.c
371 WORD32 num_rows, num_cols, src_strd, dst_strd; in ih264e_fmt_conv_420sp_to_420sp() local
379 num_cols = wd; in ih264e_fmt_conv_420sp_to_420sp()
386 memcpy(pu1_dst, pu1_src, num_cols); in ih264e_fmt_conv_420sp_to_420sp()
396 num_cols = wd; in ih264e_fmt_conv_420sp_to_420sp()
403 memcpy(pu1_dst, pu1_src, num_cols); in ih264e_fmt_conv_420sp_to_420sp()
423 WORD32 num_rows, num_cols, src_strd, dst_strd; in ih264e_fmt_conv_420sp_to_420sp_swap_uv() local
431 num_cols = wd; in ih264e_fmt_conv_420sp_to_420sp_swap_uv()
438 memcpy(pu1_dst, pu1_src, num_cols); in ih264e_fmt_conv_420sp_to_420sp_swap_uv()
448 num_cols = wd; in ih264e_fmt_conv_420sp_to_420sp_swap_uv()
456 for (j = 0; j < num_cols; j += 2) in ih264e_fmt_conv_420sp_to_420sp_swap_uv()
[all …]
/external/libaom/libaom/av1/encoder/
tune_vmaf.c
86 const int num_cols = (source->y_width + block_w - 1) / block_w; in gaussian_blur() local
97 for (col = 0; col < num_cols; ++col) { in gaussian_blur()
297 const int num_cols = (source->y_width + block_w - 1) / block_w; in av1_vmaf_blk_preprocessing() local
300 aom_malloc(sizeof(*best_unsharp_amounts) * num_cols * num_rows); in av1_vmaf_blk_preprocessing()
302 sizeof(*best_unsharp_amounts) * num_cols * num_rows); in av1_vmaf_blk_preprocessing()
315 for (int col = 0; col < num_cols; ++col) { in av1_vmaf_blk_preprocessing()
320 const int index = col + row * num_cols; in av1_vmaf_blk_preprocessing()
382 for (int col = 0; col < num_cols; ++col) { in av1_vmaf_blk_preprocessing()
387 const int index = col + row * num_cols; in av1_vmaf_blk_preprocessing()
419 int block_w, block_h, num_rows, num_cols, row, col, bit_depth; member
[all …]
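
tune_vmaf.c tiles the frame into block_w x block_h blocks: the per-axis counts come from the ceiling division (x + b - 1) / b seen above, and block (row, col) lives at flat index col + row * num_cols. The arithmetic as a sketch:

int CeilDiv(int a, int b) { return (a + b - 1) / b; }

int BlockIndex(int row, int col, int frame_width, int block_w) {
  const int num_cols = CeilDiv(frame_width, block_w);  // blocks per row
  return col + row * num_cols;
}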
/external/eigen/unsupported/test/
cxx11_tensor_complex_cuda.cu
80 const int num_cols = internal::random<int>(1024, 5*1024); in test_cuda_sum_reductions() local
82 Tensor<std::complex<float>, 2> in(num_rows, num_cols); in test_cuda_sum_reductions()
94 TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols); in test_cuda_sum_reductions()
117 const int num_cols = internal::random<int>(1024, 5*1024); in test_cuda_product_reductions() local
119 Tensor<std::complex<float>, 2> in(num_rows, num_cols); in test_cuda_product_reductions()
131 TensorMap<Tensor<std::complex<float>, 2> > in_gpu(gpu_in_ptr, num_rows, num_cols); in test_cuda_product_reductions()
