
Searched for refs:input_shapes (results 1 – 25 of 80), sorted by relevance


/external/tensorflow/tensorflow/python/framework/
function_def_to_graph.py
31 def function_def_to_graph(fdef, input_shapes=None): argument
52 fdef, input_shapes)
92 def function_def_to_graph_def(fdef, input_shapes=None): argument
128 if input_shapes and len(input_shapes) != len(fdef.signature.input_arg):
131 format(len(input_shapes), len(fdef.signature.input_arg)))
139 if input_shapes and input_shapes[i] is not None:
140 node_def.attr["shape"].shape.CopyFrom(input_shapes[i].as_proto())
cpp_shape_inference.cc
75 std::vector<TensorShapeProto> input_shapes; in RunCppShapeInferenceImpl() local
79 input_shapes.resize(input_serialized_shapes.size()); in RunCppShapeInferenceImpl()
89 input_shapes[i].Swap(tmp.mutable_shape()); in RunCppShapeInferenceImpl()
128 graph_def_version, &node, op_reg_data->op_def, input_shapes, in RunCppShapeInferenceImpl()
function_def_to_graph_test.py
78 fdef, input_shapes=[tensor_shape.vector(5),
86 fdef, input_shapes=[None, tensor_shape.matrix(5, 7)])
96 fdef, input_shapes=[tensor_shape.matrix(5, 7)])
180 input_shapes=[tensor_shape.scalar(),
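The function_def_to_graph hits above are an internal TensorFlow helper that converts a FunctionDef into a graph, optionally pinning the shapes of its inputs. A minimal sketch of how it could be invoked, assuming fdef is a FunctionDef you already hold with two input arguments (this is an internal API, so details vary by TF version):

    from tensorflow.python.framework import function_def_to_graph
    from tensorflow.python.framework import tensor_shape

    # fdef: an assumed FunctionDef with two input args. input_shapes must have
    # one entry per input arg (see the length check at function_def_to_graph.py:128);
    # a None entry leaves that argument's shape unknown.
    func_graph = function_def_to_graph.function_def_to_graph(
        fdef,
        input_shapes=[tensor_shape.vector(5), None])

Each provided shape is copied into the corresponding input node's "shape" attr, as the snippet at line 140 shows.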
/external/tensorflow/tensorflow/core/common_runtime/
constant_folding.cc
62 std::vector<PartialTensorShape>* input_shapes) { in ReadPartialShapesFromShapeMap() argument
64 input_shapes->resize(n->num_inputs()); in ReadPartialShapesFromShapeMap()
76 DCHECK_LT(in->dst_input(), input_shapes->size()); in ReadPartialShapesFromShapeMap()
77 (*input_shapes)[in->dst_input()] = known_shape[in->src_output()]; in ReadPartialShapesFromShapeMap()
85 const Node* n, const std::vector<PartialTensorShape>& input_shapes, in MaybeReplaceShapeOrShapeNOp() argument
89 for (const auto& shape : input_shapes) { in MaybeReplaceShapeOrShapeNOp()
126 const std::vector<PartialTensorShape>& input_shapes, in MaybeReplaceRankOp() argument
129 CHECK_EQ(input_shapes.size(), 1); in MaybeReplaceRankOp()
130 if (input_shapes[0].unknown_rank()) { in MaybeReplaceRankOp()
134 t.scalar<int32>()() = input_shapes[0].dims(); in MaybeReplaceRankOp()
[all …]
/external/tensorflow/tensorflow/contrib/tensorrt/shape_fn/
trt_shfn.cc
37 std::vector<tensorflow::TensorShape> input_shapes; in TRTEngineOpShapeInference() local
38 TF_RETURN_IF_ERROR(c->GetAttr("input_shapes", &input_shapes)); in TRTEngineOpShapeInference()
39 if (input_shapes.size() != c->num_inputs()) { in TRTEngineOpShapeInference()
43 c->num_inputs(), " vs ", input_shapes.size()); in TRTEngineOpShapeInference()
49 c->MakeShapeFromTensorShape(input_shapes.at(i), &handle)); in TRTEngineOpShapeInference()
/external/tensorflow/tensorflow/lite/testing/model_coverage/
model_coverage_lib.py
256 input_shapes=None, argument
281 filename, input_arrays, output_arrays, input_shapes)
321 input_shapes=None, argument
341 filename, input_arrays, output_arrays, input_shapes)
349 input_shapes=None, argument
373 input_shapes=input_shapes,
417 input_shapes=None, argument
436 filename, input_arrays=input_arrays, input_shapes=input_shapes)
/external/tensorflow/tensorflow/lite/python/
lite.py
477 input_shapes=None): argument
545 _set_tensor_shapes(input_tensors, input_shapes)
549 if not input_shapes:
551 if set(input_arrays) != set(input_shapes.keys()):
556 (name, input_shapes[name]) for name in input_arrays
569 input_shapes=None, argument
598 result = _freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
607 input_shapes=None, argument
640 _set_tensor_shapes(input_tensors, input_shapes)
837 input_shapes=None): argument
[all …]
tflite_convert.py
79 input_shapes = None
80 if flags.input_shapes:
83 for shape in flags.input_shapes.split(":")
85 input_shapes = dict(zip(input_arrays, input_shapes_list))
90 "input_shapes": input_shapes,
236 if flags.input_shapes:
239 if flags.input_shapes.count(":") != flags.input_arrays.count(","):
convert_saved_model_test.py
163 input_shapes=None, argument
174 input_shapes=input_shapes,
250 input_shapes={"Placeholder": [1, 16, 16, 3]})
276 input_shapes={"inputA": [1, 16, 16, 3]})
convert.py
237 input_shapes=None, argument
359 if input_shapes is None:
362 shape = input_shapes[idx]
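Taken together, the lite.py, convert.py, and tflite_convert.py hits are the TF 1.x TFLite converter surface, where input_shapes is an optional dict mapping each input array name to its shape; lite.py:551 checks that its keys match input_arrays exactly. A rough sketch of the frozen-graph entry point, with illustrative file and tensor names ("frozen.pb", "Placeholder", "Softmax" are assumptions, not taken from the sources above):

    import tensorflow as tf  # TF 1.x API; under TF 2.x this lives at tf.compat.v1.lite

    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        graph_def_file="frozen.pb",                    # hypothetical frozen GraphDef
        input_arrays=["Placeholder"],                  # names of the graph's input tensors
        output_arrays=["Softmax"],                     # hypothetical output tensor name
        input_shapes={"Placeholder": [1, 16, 16, 3]})  # keys must equal input_arrays
    tflite_model = converter.convert()
    with open("model.tflite", "wb") as f:
        f.write(tflite_model)

The same keyword appears on from_saved_model and from_keras_model_file, as the golden argspecs further down confirm.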
/external/tensorflow/tensorflow/lite/toco/
model_cmdline_flags.cc
70 Flag("input_shapes", parsed_flags.input_shapes.bind(), in ParseModelFlagsFromCommandLineFlags()
71 parsed_flags.input_shapes.default_value(), in ParseModelFlagsFromCommandLineFlags()
234 parsed_model_flags.input_shapes.specified(); in ReadModelFlagsFromCommandLineFlags()
317 if (parsed_model_flags.input_shapes.specified()) { in ReadModelFlagsFromCommandLineFlags()
319 std::vector<string> input_shapes = in ReadModelFlagsFromCommandLineFlags() local
320 absl::StrSplit(parsed_model_flags.input_shapes.value(), ':'); in ReadModelFlagsFromCommandLineFlags()
321 QCHECK(input_shapes.size() == model_flags->input_arrays_size()); in ReadModelFlagsFromCommandLineFlags()
322 for (size_t i = 0; i < input_shapes.size(); ++i) { in ReadModelFlagsFromCommandLineFlags()
326 if (input_shapes[i].empty()) { in ReadModelFlagsFromCommandLineFlags()
329 for (const auto& dim_str : absl::StrSplit(input_shapes[i], ',')) { in ReadModelFlagsFromCommandLineFlags()
[all …]
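Both parsers above decode the --input_shapes flag the same way: shapes for the individual input arrays are separated by ':' and dimensions within a shape by ',', and the number of shapes must match the number of input arrays (tflite_convert.py:239, model_cmdline_flags.cc:321). A small illustrative Python sketch of that decoding (the array names and dimensions are made up):

    flag_value = "1,16,16,3:1,10"            # two shapes, colon-separated
    input_arrays = "imgA,labels".split(",")  # two input arrays, comma-separated

    input_shapes_list = [
        [int(dim) for dim in shape.split(",")]  # comma-separated dims
        for shape in flag_value.split(":")      # one shape string per input array
    ]
    assert len(input_shapes_list) == len(input_arrays)  # both tools enforce this
    input_shapes = dict(zip(input_arrays, input_shapes_list))
    # -> {"imgA": [1, 16, 16, 3], "labels": [1, 10]}

An empty entry between colons leaves that array's shape unspecified (model_cmdline_flags.cc:326).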
/external/tensorflow/tensorflow/core/kernels/
mkl_concat_op.cc
63 const TensorShapeList& input_shapes) { in Compute() argument
79 const int input_dims = input_shapes[0].dims(); in Compute()
80 const TensorShape& input_shape = input_shapes[0]; in Compute()
105 const bool in_is_scalar = IsLegacyScalar(input_shapes[i]); in Compute()
108 (input_shapes[i].dims() == input_dims) || in Compute()
113 "] = ", input_shapes[i].DebugString())); in Compute()
120 input_shapes[i].dims() > 0 ? input_shapes[i].dim_size(axis) : 1; in Compute()
497 memory::format FindMklCommonFormat(const MklDnnShapeList& input_shapes, in FindMklCommonFormat() argument
503 if (input_shapes.size() == 0) return memory::format::any; in FindMklCommonFormat()
506 for (int k = 0; k < input_shapes.size(); k++) { in FindMklCommonFormat()
[all …]
/external/tensorflow/tensorflow/tools/api/golden/v1/
tensorflow.lite.-toco-converter.pbtxt
10 …=[\'cls\', \'graph_def_file\', \'input_arrays\', \'output_arrays\', \'input_shapes\'], varargs=Non…
14 …argspec: "args=[\'cls\', \'model_file\', \'input_arrays\', \'input_shapes\', \'output_arrays\'], v…
18 …argspec: "args=[\'cls\', \'saved_model_dir\', \'input_arrays\', \'input_shapes\', \'output_arrays\…
tensorflow.lite.-t-f-lite-converter.pbtxt
15 …=[\'cls\', \'graph_def_file\', \'input_arrays\', \'output_arrays\', \'input_shapes\'], varargs=Non…
19 …argspec: "args=[\'cls\', \'model_file\', \'input_arrays\', \'input_shapes\', \'output_arrays\'], v…
23 …argspec: "args=[\'cls\', \'saved_model_dir\', \'input_arrays\', \'input_shapes\', \'output_arrays\…
/external/tensorflow/tensorflow/lite/kernels/
add_n_test.cc
33 std::vector<std::vector<int>> input_shapes; in BaseAddNOpModel() local
37 input_shapes.push_back(GetShape(inputs_[i])); in BaseAddNOpModel()
43 BuildInterpreter(input_shapes); in BaseAddNOpModel()
test_util.cc
96 void SingleOpModel::BuildInterpreter(std::vector<std::vector<int>> input_shapes, in BuildInterpreter() argument
127 for (size_t i = 0; i < input_shapes.size(); ++i) { in BuildInterpreter()
130 const auto& shape = input_shapes[i]; in BuildInterpreter()
/external/tensorflow/tensorflow/compiler/tf2tensorrt/kernels/
trt_engine_op.cc
92 EngineContext* GetEngine(const std::vector<TensorShape>& input_shapes,
372 std::vector<TensorShape> input_shapes; in ComputeAsync() local
373 input_shapes.reserve(ctx->num_inputs()); in ComputeAsync()
375 input_shapes.push_back(ctx->input(i).shape()); in ComputeAsync()
377 EngineContext* engine_context = GetEngine(input_shapes, ctx); in ComputeAsync()
380 << TensorShapeUtils::ShapeListString(input_shapes) in ComputeAsync()
520 const std::vector<TensorShape>& input_shapes, OpKernelContext* ctx) { in GetEngine() argument
524 const int batch_size = input_shapes[0].dim_size(0); in GetEngine()
567 std::vector<TensorShape> engine_input_shapes(input_shapes); in GetEngine()
596 GetCompatibleCachedEngine(input_shapes, &engine_input_shapes); in GetEngine()
[all …]
/external/tensorflow/tensorflow/core/grappler/optimizers/
scoped_allocator_optimizer.cc
283 std::vector<TensorShape>* input_shapes, in AnalyzeInputs() argument
287 CheckTypesAndGetShapes(*graph_properties_, ops, dtype, input_shapes)); in AnalyzeInputs()
296 CHECK(!input_shapes->empty()); in AnalyzeInputs()
304 0 /*scope_id*/, *input_shapes, *dtype, &sa_fields); in AnalyzeInputs()
317 const std::vector<TensorShape>& input_shapes, in ConstructScopedAllocatorNode() argument
325 sa_builder.Attr("shapes", input_shapes); in ConstructScopedAllocatorNode()
438 const std::vector<TensorShape>& input_shapes, in BuildSplitNode() argument
450 sas_builder.Attr("shapes", input_shapes); in BuildSplitNode()
589 std::vector<TensorShape> input_shapes; in Rewrite() local
595 &device_name, &dtype, &input_shapes, in Rewrite()
[all …]
/external/tensorflow/tensorflow/python/data/experimental/ops/
batching.py
365 input_shapes = dataset_ops.get_legacy_output_shapes(input_dataset)
366 flat_shapes = nest.flatten(input_shapes)
380 nest.map_structure(lambda s: s[1:], input_shapes),
527 input_shapes = dataset_ops.get_legacy_output_shapes(dataset)
531 output_types, nest.flatten(input_shapes))
536 flat_original_shapes = nest.flatten(input_shapes)
544 "incompatible output shapes %r" % (input_shapes,
752 input_shapes = dataset_ops.get_legacy_output_shapes(self._input_dataset)
754 output_shapes = nest.map_structure(recalculate_output_shapes, input_shapes)
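The batching.py hits recompute a dataset's output shapes from the input dataset's shapes, for example by stripping the leading batch dimension with nest.map_structure(lambda s: s[1:], input_shapes) (line 380). A toy sketch of that shape arithmetic using the public tf.nest and tf.TensorShape (the internal code above uses the equivalent tensorflow.python.util.nest; the structure and sizes here are invented):

    import tensorflow as tf

    batched_shapes = {"image": tf.TensorShape([32, 28, 28, 1]),
                      "label": tf.TensorShape([32])}
    # Drop the leading (batch) dimension of every component shape:
    element_shapes = tf.nest.map_structure(lambda s: s[1:], batched_shapes)
    # -> {"image": TensorShape([28, 28, 1]), "label": TensorShape([])}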
/external/tensorflow/tensorflow/lite/tools/accuracy/
utils_test.cc
44 ASSERT_EQ(4, model_info.input_shapes.size()); in TEST()
48 const TensorShape& shape = model_info.input_shapes[i]; in TEST()
/external/tensorflow/tensorflow/core/framework/
shape_inference.cc
34 const std::vector<TensorShapeProto>& input_shapes, in InferenceContext() argument
54 inputs_.reserve(input_shapes.size()); in InferenceContext()
55 for (const TensorShapeProto& p : input_shapes) { in InferenceContext()
65 input_shapes.size()); in InferenceContext()
89 const std::vector<PartialTensorShape>& input_shapes, in InferenceContext() argument
109 inputs_.reserve(input_shapes.size()); in InferenceContext()
110 for (const PartialTensorShape& p : input_shapes) { in InferenceContext()
119 input_shapes.size()); in InferenceContext()
142 const std::vector<ShapeHandle>& input_shapes, in InferenceContext() argument
151 inputs_ = input_shapes; in InferenceContext()
[all …]
/external/tensorflow/tensorflow/core/profiler/internal/
tfprof_show_multi.cc
171 std::map<int, std::vector<int64>> input_shapes; in FormatInputShapes() local
172 for (const auto& inp : gnode.input_shapes()) { in FormatInputShapes()
173 input_shapes[inp.first] = ShapeProtoToVec(inp.second); in FormatInputShapes()
177 for (const auto& s : input_shapes) { in FormatInputShapes()
/external/tensorflow/tensorflow/core/grappler/optimizers/data/
map_vectorization_test.cc
450 std::vector<PartialTensorShape> input_shapes({{}}); in TEST() local
452 auto input_node = AddArbitraryInputNode(&graph, &input_shapes, &input_types); in TEST()
470 std::vector<PartialTensorShape> input_shapes({{-1, 2}}); in TEST() local
472 auto input_node = AddArbitraryInputNode(&graph, &input_shapes, &input_types); in TEST()
490 std::vector<PartialTensorShape> input_shapes({{1}}); in TEST() local
491 auto input_node = AddArbitraryInputNode(&graph, &input_shapes, nullptr); in TEST()
/external/tensorflow/tensorflow/core/kernels/data/
batch_dataset_op.cc
65 const auto& input_shapes = input_->output_shapes(); in Dataset() local
66 output_shapes_.reserve(input_shapes.size()); in Dataset()
67 for (const auto& input_shape : input_shapes) { in Dataset()
/external/tensorflow/tensorflow/lite/experimental/microfrontend/
audio_microfrontend_test.cc
42 const std::vector<std::vector<int>>& input_shapes) in MicroFrontendOpModel() argument
84 BuildInterpreter(input_shapes); in MicroFrontendOpModel()
