/external/tensorflow/tensorflow/lite/kernels/ |
D | cpu_backend_gemm_custom_gemv.h |
     80  const MatrixParams<LhsScalar>& lhs_params,  in IsSupportedGivenSufficientlyManyRows()
     89  const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in Run()
    102  const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in CustomGemvTask() argument
    107  : lhs_params_(lhs_params),  in CustomGemvTask()
    148  const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in CustomGemv() argument
    156  if (lhs_params.rows < Impl::kKernelRows) {  in CustomGemv()
    159  if (!Impl::IsSupportedGivenSufficientlyManyRows(lhs_params, rhs_params,  in CustomGemv()
    163  TFLITE_DCHECK_GE(lhs_params.rows, Impl::kKernelRows);  in CustomGemv()
    166  lhs_params.cols);  in CustomGemv()
    168  Impl::Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,  in CustomGemv()
    [all …]
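The hits above trace CustomGemv's dispatch contract: the specialized GEMV path returns false when the shape is too small (line 156) or unsupported (line 159), and the caller falls back to a general GEMM. A minimal self-contained sketch of that return-false-to-fall-back pattern; the struct names and the rhs.cols == 1 check are illustrative stand-ins, not the real TFLite types:

    // Simplified stand-in for TFLite's MatrixParams (illustration only).
    struct MatParams { int rows = 0; int cols = 0; };

    struct FastPathImpl {
      static constexpr int kKernelRows = 4;
      static bool IsSupported(const MatParams& lhs, const MatParams& rhs) {
        // e.g. require a compatible matrix*vector shape for a GEMV kernel.
        return lhs.cols == rhs.rows && rhs.cols == 1;
      }
      static void Run(const MatParams&, const float*, const MatParams&,
                      const float*, float*) { /* fast kernel here */ }
    };

    // Mirrors the CustomGemv contract: return false to let the caller fall
    // back to the general GEMM path.
    template <typename Impl>
    bool CustomGemv(const MatParams& lhs, const float* lhs_data,
                    const MatParams& rhs, const float* rhs_data, float* dst) {
      if (lhs.rows < Impl::kKernelRows) return false;
      if (!Impl::IsSupported(lhs, rhs)) return false;
      Impl::Run(lhs, lhs_data, rhs, rhs_data, dst);
      return true;
    }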
|
D | cpu_backend_gemm_gemmlowp.h |
     81  const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
     89  gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
    109  ColVectorMap bias_vector(params.bias, lhs_params.rows);
    116  &gemmlowp_dst, -lhs_params.zero_point, -rhs_params.zero_point,
    123  &gemmlowp_dst, -lhs_params.zero_point, -rhs_params.zero_point,
    138  const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
    149  gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
    157  ColVectorMap bias_vector(params.bias, lhs_params.rows);
    179  -lhs_params.zero_point, -rhs_params.zero_point, output_pipeline);
    183  Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
|
D | cpu_backend_gemm.h |
    116  void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in Gemm() argument
    122  ValidateParams(lhs_params, rhs_params, dst_params, params);  in Gemm()
    132  if (lhs_params.order != Order::kRowMajor ||  in Gemm()
    144  quantization_flavor>::Run(lhs_params, lhs_data,  in Gemm()
    156  if (detail::CustomGemv(lhs_params, lhs_data, rhs_params, rhs_data,  in Gemm()
    163  quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data,  in Gemm()
    171  void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in Gemm() argument
    177  ValidateParams(lhs_params, rhs_params, dst_params, params);  in Gemm()
    183  quantization_flavor>::Run(lhs_params, lhs_data,  in Gemm()
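cpu_backend_gemm.h is the entry point the op kernels later in this listing call: ValidateParams checks the three MatrixParams for consistency, then the call is routed to a custom GEMV, the x86/Eigen/gemmlowp implementations, or ruy, with the check at line 132 forcing the ruy path for any layout other than row-major LHS with column-major RHS and destination. A hedged sketch of a plain float call under that standard layout (the wrapper function and buffer shapes are made up for illustration):

    #include "tensorflow/lite/kernels/cpu_backend_context.h"
    #include "tensorflow/lite/kernels/cpu_backend_gemm.h"

    namespace cbg = tflite::cpu_backend_gemm;

    void RunFloatGemm(const float* weights, const float* input, float* output,
                      int out_rows, int depth, int batch,
                      tflite::CpuBackendContext* context) {
      cbg::MatrixParams<float> lhs_params;  // weights: one row per output channel
      lhs_params.order = cbg::Order::kRowMajor;
      lhs_params.rows = out_rows;
      lhs_params.cols = depth;

      cbg::MatrixParams<float> rhs_params;  // activations: one column per batch item
      rhs_params.order = cbg::Order::kColMajor;
      rhs_params.rows = depth;
      rhs_params.cols = batch;

      cbg::MatrixParams<float> dst_params;
      dst_params.order = cbg::Order::kColMajor;
      dst_params.rows = out_rows;
      dst_params.cols = batch;

      cbg::GemmParams<float, float> gemm_params;  // defaults: no bias, no clamp
      cbg::Gemm(lhs_params, weights, rhs_params, input, dst_params, output,
                gemm_params, context);
    }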
|
D | cpu_backend_gemm_test.cc |
    253  const MatrixParams<LhsScalar>& lhs_params,  in PerformGemmThenCompareResultsThenAgainWithClamping() argument
    261  const int accumulation_depth = lhs_params.cols;  in PerformGemmThenCompareResultsThenAgainWithClamping()
    262  Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,  in PerformGemmThenCompareResultsThenAgainWithClamping()
    275  Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,  in PerformGemmThenCompareResultsThenAgainWithClamping()
    284  Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,  in PerformGemmThenCompareResultsThenAgainWithClamping()
    311  int bisect_min, int bisect_max, const MatrixParams<LhsScalar>& lhs_params,  in BisectReasonableMultiplierExponent() argument
    331  Gemm(lhs_params, lhs_data.data(), rhs_params, rhs_data.data(), dst_params,  in BisectReasonableMultiplierExponent()
    336  bisect_mid + 1, bisect_max, lhs_params, lhs_data, rhs_params, rhs_data,  in BisectReasonableMultiplierExponent()
    340  bisect_min, bisect_mid, lhs_params, lhs_data, rhs_params, rhs_data,  in BisectReasonableMultiplierExponent()
    348  const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in ReferenceGemm() argument
    [all …]
|
D | cpu_backend_gemm_x86.h |
     39  const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,  in Run()
     50  quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data,  in Run()
     57  quantization_flavor>::Run(lhs_params, lhs_data,  in Run()
     68  static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
     74  GemmImplUsingEigen::Run(lhs_params, lhs_data, rhs_params, rhs_data,
|
D | cpu_backend_gemm_eigen.cc |
     38  const MatrixParams<float>& lhs_params, const float* lhs_data,  in Run() argument
     54  EigenMatrixMapRowMajorConst eigen_lhs(lhs_data, lhs_params.rows,  in Run()
     55  lhs_params.cols);  in Run()
     63  } else if (lhs_params.rows == 1) {  in Run()
|
D | cpu_backend_gemm_eigen.h |
     29  static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
|
D | cpu_backend_gemm_ruy.h |
    126  const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
    134  MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs, context->use_caching());
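The ruy backend's MakeRuyMatrix (line 134) translates a MatrixParams into a ruy::Matrix before calling ruy::Mul. A standalone sketch of the same translation against ruy's public API; this assumes the post-refactor ruy interface vendored with recent TFLite, so exact headers and signatures may differ by version:

    #include "ruy/context.h"
    #include "ruy/matrix.h"
    #include "ruy/mul_params.h"
    #include "ruy/ruy.h"

    void RuyFloatMul(const float* lhs_data, const float* rhs_data,
                     float* dst_data, int rows, int depth, int cols,
                     ruy::Context* context) {
      ruy::Matrix<float> lhs;
      ruy::MakeSimpleLayout(rows, depth, ruy::Order::kRowMajor,
                            lhs.mutable_layout());
      lhs.set_data(lhs_data);

      ruy::Matrix<float> rhs;
      ruy::MakeSimpleLayout(depth, cols, ruy::Order::kColMajor,
                            rhs.mutable_layout());
      rhs.set_data(rhs_data);

      ruy::Matrix<float> dst;
      ruy::MakeSimpleLayout(rows, cols, ruy::Order::kColMajor,
                            dst.mutable_layout());
      dst.set_data(dst_data);

      ruy::MulParams<float, float> mul_params;  // float accumulation, no clamp
      ruy::Mul(lhs, rhs, mul_params, context, &dst);
    }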
|
D | cpu_backend_gemm_params.h |
    232  const MatrixParams<LhsScalar>& lhs_params,
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/integer_ops/ |
D | fully_connected.h |
     64  cpu_backend_gemm::MatrixParams<int8> lhs_params;  in FullyConnected() local
     65  lhs_params.rows = filter_rows;  in FullyConnected()
     66  lhs_params.cols = filter_cols;  in FullyConnected()
     67  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in FullyConnected()
     68  lhs_params.zero_point = -filter_offset;  in FullyConnected()
     85  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, input_data,  in FullyConnected()
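Note the sign at line 68: cpu_backend_gemm treats zero_point as the value subtracted from each quantized entry, so the kernel stores -filter_offset, and the gemmlowp adapter above converts back by passing -lhs_params.zero_point as gemmlowp's additive offset (lines 116/123 in cpu_backend_gemm_gemmlowp.h). A hedged reconstruction of the surrounding per-layer quantized setup; the GemmParams field names are the real ones from cpu_backend_gemm_params.h, but the wrapper function and its arguments are illustrative:

    #include <cstdint>

    #include "tensorflow/lite/kernels/cpu_backend_context.h"
    #include "tensorflow/lite/kernels/cpu_backend_gemm.h"

    namespace cbg = tflite::cpu_backend_gemm;

    // Hypothetical wrapper around the setup shown in the hits above.
    void QuantizedFullyConnected(
        const int8_t* filter_data, const int8_t* input_data,
        const int32_t* bias_data, int8_t* output_data, int filter_rows,
        int filter_cols, int batches, int32_t filter_offset,
        int32_t input_offset, int8_t output_offset, int32_t output_multiplier,
        int output_shift, int8_t output_activation_min,
        int8_t output_activation_max, tflite::CpuBackendContext* context) {
      cbg::MatrixParams<int8_t> lhs_params;
      lhs_params.rows = filter_rows;  // output channels
      lhs_params.cols = filter_cols;  // accumulation depth
      lhs_params.order = cbg::Order::kRowMajor;
      lhs_params.zero_point = -filter_offset;  // note the negation

      cbg::MatrixParams<int8_t> rhs_params;
      rhs_params.rows = filter_cols;
      rhs_params.cols = batches;
      rhs_params.order = cbg::Order::kColMajor;
      rhs_params.zero_point = -input_offset;

      cbg::MatrixParams<int8_t> dst_params;
      dst_params.rows = filter_rows;
      dst_params.cols = batches;
      dst_params.order = cbg::Order::kColMajor;
      dst_params.zero_point = output_offset;

      // int32 accumulators, requantized by a fixed-point multiplier/exponent.
      cbg::GemmParams<int32_t, int8_t> gemm_params;
      gemm_params.bias = bias_data;
      gemm_params.multiplier_fixedpoint = output_multiplier;
      gemm_params.multiplier_exponent = output_shift;
      gemm_params.clamp_min = output_activation_min;
      gemm_params.clamp_max = output_activation_max;

      cbg::Gemm(lhs_params, filter_data, rhs_params, input_data, dst_params,
                output_data, gemm_params, context);
    }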
|
D | transpose_conv.h |
     64  cpu_backend_gemm::MatrixParams<int8_t> lhs_params;  in TransposeConvV2() local
     65  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in TransposeConvV2()
     66  lhs_params.rows = hwoi_ordered_filter_total_size;  in TransposeConvV2()
     67  lhs_params.cols = input_depth;  in TransposeConvV2()
     69  lhs_params.zero_point = 0;  in TransposeConvV2()
     86  cpu_backend_gemm::Gemm(lhs_params, hwoi_ordered_filter_data, rhs_params,  in TransposeConvV2()
|
D | conv.h |
     97  cpu_backend_gemm::MatrixParams<int8> lhs_params;  in ConvPerChannel() local
     98  lhs_params.rows = filter_rows;  in ConvPerChannel()
     99  lhs_params.cols = filter_cols;  in ConvPerChannel()
    100  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in ConvPerChannel()
    101  lhs_params.zero_point = 0;  // filter is symmetric-quantized  in ConvPerChannel()
    121  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, gemm_input_data,  in ConvPerChannel()
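ConvPerChannel differs from the per-layer case in two visible ways: the filter is symmetric-quantized, so lhs_params.zero_point is 0 (line 101), and requantization uses one multiplier/exponent pair per output channel. That is expressed through the per-channel GemmParams fields; a hedged fragment showing only the fields that differ from the per-layer setup (pointer contents illustrative, one entry per filter row):

    #include <cstdint>

    #include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

    using tflite::cpu_backend_gemm::GemmParams;
    using tflite::cpu_backend_gemm::QuantizationFlavor;

    // One fixed-point multiplier and exponent per output channel, instead of
    // the single multiplier_fixedpoint / multiplier_exponent pair.
    GemmParams<int32_t, int8_t,
               QuantizationFlavor::kIntegerWithPerRowMultiplier>
    MakePerChannelParams(const int32_t* per_channel_multiplier,
                         const int* per_channel_shift, const int32_t* bias) {
      GemmParams<int32_t, int8_t,
                 QuantizationFlavor::kIntegerWithPerRowMultiplier>
          gemm_params;
      gemm_params.multiplier_fixedpoint_perchannel = per_channel_multiplier;
      gemm_params.multiplier_exponent_perchannel = per_channel_shift;
      gemm_params.bias = bias;
      return gemm_params;
    }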
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | batch_matmul.h |
     80  MatrixParams<float> lhs_params;  in BatchMatMul() local
     81  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in BatchMatMul()
     82  lhs_params.rows = lhs_rows;  in BatchMatMul()
     83  lhs_params.cols = accum_depth;  in BatchMatMul()
    108  cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2,  in BatchMatMul()
    190  MatrixParams<int8_t> lhs_params;  in BatchMatMul() local
    191  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in BatchMatMul()
    192  lhs_params.rows = lhs_rows;  in BatchMatMul()
    193  lhs_params.cols = accum_depth;  in BatchMatMul()
    227  cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2,  in BatchMatMul()
    [all …]
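BatchMatMul builds one set of MatrixParams and reuses it for every batch element, advancing only the lhs/rhs/dst base pointers between Gemm calls (lines 108 and 227), since all slices share a shape. A simplified float sketch of that loop; the dense, broadcast-free pointer arithmetic is an assumption that the real kernel generalizes:

    #include "tensorflow/lite/kernels/cpu_backend_context.h"
    #include "tensorflow/lite/kernels/cpu_backend_gemm.h"

    void BatchMatMulFloat(const float* lhs, const float* rhs, float* dst,
                          int num_batch, int lhs_rows, int accum_depth,
                          int rhs_cols, tflite::CpuBackendContext* context) {
      using tflite::cpu_backend_gemm::GemmParams;
      using tflite::cpu_backend_gemm::MatrixParams;
      using tflite::cpu_backend_gemm::Order;

      MatrixParams<float> lhs_params;
      lhs_params.order = Order::kRowMajor;
      lhs_params.rows = lhs_rows;
      lhs_params.cols = accum_depth;

      MatrixParams<float> rhs_params;
      rhs_params.order = Order::kColMajor;
      rhs_params.rows = accum_depth;
      rhs_params.cols = rhs_cols;

      MatrixParams<float> dst_params;
      dst_params.order = Order::kColMajor;
      dst_params.rows = lhs_rows;
      dst_params.cols = rhs_cols;

      GemmParams<float, float> gemm_params;
      for (int b = 0; b < num_batch; ++b) {
        // Same params every iteration; only the base pointers move.
        tflite::cpu_backend_gemm::Gemm(
            lhs_params, lhs + b * lhs_rows * accum_depth,
            rhs_params, rhs + b * accum_depth * rhs_cols,
            dst_params, dst + b * lhs_rows * rhs_cols, gemm_params, context);
      }
    }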
|
D | sse_tensor_utils.cc |
    184  MatrixParams<int8_t> lhs_params;  in SseCpuBackendGemm() local
    185  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in SseCpuBackendGemm()
    186  lhs_params.rows = n_output;  in SseCpuBackendGemm()
    187  lhs_params.cols = n_input;  in SseCpuBackendGemm()
    188  lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup;  in SseCpuBackendGemm()
    204  cpu_backend_gemm::Gemm(lhs_params, input_to_gate_weights, rhs_params, input,  in SseCpuBackendGemm()
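Both this SSE path and the NEON path below additionally set lhs_params.cache_policy (line 188): a hint that the weights are constant across calls, allowing the backend (ruy) to keep a prepacked copy of the LHS instead of repacking it on every invocation, trading memory for speed. A small hedged sketch showing just that field, assuming the surrounding setup from the hits:

    #include <cstdint>

    #include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"

    using tflite::cpu_backend_gemm::CachePolicy;
    using tflite::cpu_backend_gemm::MatrixParams;
    using tflite::cpu_backend_gemm::Order;

    // Policies: kNeverCache (default), kCacheIfLargeSpeedup, kAlwaysCache.
    MatrixParams<int8_t> MakeConstantWeightsParams(int n_output, int n_input) {
      MatrixParams<int8_t> lhs_params;
      lhs_params.order = Order::kRowMajor;
      lhs_params.rows = n_output;
      lhs_params.cols = n_input;
      // The weights don't change between calls, so prepacking may be cached.
      lhs_params.cache_policy = CachePolicy::kCacheIfLargeSpeedup;
      return lhs_params;
    }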
|
D | optimized_ops.h |
    348  cpu_backend_gemm::MatrixParams<float> lhs_params;  in FullyConnected() local
    349  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in FullyConnected()
    350  lhs_params.cols = weights_shape.Dims(dims_count - 1);  in FullyConnected()
    351  lhs_params.rows = FlatSizeSkipDim(weights_shape, dims_count - 1);  in FullyConnected()
    352  lhs_params.cache_policy =  in FullyConnected()
    363  cpu_backend_gemm::Gemm(lhs_params, weights_data, rhs_params, input_data,  in FullyConnected()
    401  cpu_backend_gemm::MatrixParams<uint8> lhs_params;  in FullyConnected() local
    402  lhs_params.rows = filter_rows;  in FullyConnected()
    403  lhs_params.cols = filter_cols;  in FullyConnected()
    404  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in FullyConnected()
    [all …]
|
D | neon_tensor_utils.cc |
    987  MatrixParams<int8_t> lhs_params;  in NeonCpuBackendGemm() local
    988  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;  in NeonCpuBackendGemm()
    989  lhs_params.rows = n_output;  in NeonCpuBackendGemm()
    990  lhs_params.cols = n_input;  in NeonCpuBackendGemm()
    991  lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup;  in NeonCpuBackendGemm()
   1007  cpu_backend_gemm::Gemm(lhs_params, input_to_gate_weights, rhs_params, input,  in NeonCpuBackendGemm()
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | comparators.cc |
     63  std::vector<XlaOp> lhs_params;  in CreateScalarComparisonComputation() local
     77  lhs_params.emplace_back(lhs_param);  in CreateScalarComparisonComputation()
     87  auto shape_or = b->GetShape(lhs_params[0]);  in CreateScalarComparisonComputation()
    101  generators[i].value()(lhs_params[i], rhs_params[i], {}),  in CreateScalarComparisonComputation()
    105  And(param_equal, EqTotalOrder(lhs_params[i], rhs_params[i]));  in CreateScalarComparisonComputation()
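This last hit is unrelated to TFLite's GEMM: here lhs_params is a std::vector<XlaOp> of scalar parameters, and CreateScalarComparisonComputation builds the comparator computation XLA uses for Sort by chaining (lhs, rhs) pairs lexicographically with And/Eq (lines 101/105). A hedged single-pair sketch with the XLA client builder; parameter names mirror XLA's p.N.lhs / p.N.rhs convention, error handling is elided, and the function name is made up:

    #include "tensorflow/compiler/xla/client/xla_builder.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    xla::XlaComputation MakeLessThanComparator() {
      xla::XlaBuilder b("less_than_comparator");
      // One (lhs, rhs) scalar pair; the real helper loops over N pairs and
      // chains the comparisons lexicographically.
      xla::XlaOp lhs = xla::Parameter(
          &b, 0, xla::ShapeUtil::MakeShape(xla::F32, {}), "p.0.lhs");
      xla::XlaOp rhs = xla::Parameter(
          &b, 1, xla::ShapeUtil::MakeShape(xla::F32, {}), "p.0.rhs");
      xla::Lt(lhs, rhs);  // last op built becomes the computation's root
      return b.Build().ValueOrDie();
    }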
|