Home
last modified time | relevance | path

Searched refs: allow_fp32_relax_to_fp16 (Results 1 – 12 of 12) sorted by relevance

/external/tensorflow/tensorflow/lite/delegates/nnapi/
nnapi_delegate_nnapi_failure_handling_test.cc:60 bool allow_fp32_relax_to_fp16 = false) in AddSubOpsAcceleratedModel() argument
67 allow_fp32_relax_to_fp16); in AddSubOpsAcceleratedModel()
89 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
102 /*num_threads=*/-1, allow_fp32_relax_to_fp16, in Init()
nnapi_delegate_errno_test.cc:55 bool allow_fp32_relax_to_fp16 = false) in FloatAddOpModel() argument
57 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in FloatAddOpModel()
74 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
81 allow_fp32_relax_to_fp16, /*apply_delegate=*/false); in Init()
nnapi_delegate_device_selection_test.cc:47 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
57 allow_fp32_relax_to_fp16, /*apply_delegate=*/false); in Init()
379 bool allow_fp32_relax_to_fp16 = false) in AddSubOpsAcceleratedModel() argument
383 allow_fp32_relax_to_fp16); in AddSubOpsAcceleratedModel()
403 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
416 /*num_threads=*/-1, allow_fp32_relax_to_fp16, in Init()
567 bool allow_fp32_relax_to_fp16 = false) in HardSwishAddOpsAcceleratedModel() argument
570 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in HardSwishAddOpsAcceleratedModel()
587 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
599 allow_fp32_relax_to_fp16, /*apply_delegate=*/false); in Init()
nnapi_delegate_test.cc:135 bool allow_fp32_relax_to_fp16 = false) { in BuildInterpreterWithNNAPI() argument
140 BuildInterpreter(input_shapes, /*num_threads=*/-1, allow_fp32_relax_to_fp16, in BuildInterpreterWithNNAPI()
157 bool allow_fp32_relax_to_fp16 = false) { in FloatAddOpModel() argument
158 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in FloatAddOpModel()
165 bool allow_fp32_relax_to_fp16 = false) in FloatAddOpModel() argument
167 Init(input1, input2, output, activation_type, allow_fp32_relax_to_fp16); in FloatAddOpModel()
184 bool allow_fp32_relax_to_fp16 = false) { in Init() argument
191 allow_fp32_relax_to_fp16); in Init()
nnapi_delegate.cc:4752 context->allow_fp32_relax_to_fp16 | delegate_options.allow_fp16; in BuildGraph()
/external/tensorflow/tensorflow/lite/kernels/
test_util.cc:173 bool allow_fp32_relax_to_fp16, in BuildInterpreter() argument
218 interpreter_->SetAllowFp16PrecisionForFp32(allow_fp32_relax_to_fp16); in BuildInterpreter()
test_util.h:493 int num_threads, bool allow_fp32_relax_to_fp16,
/external/tensorflow/tensorflow/lite/c/
common.h:691 bool allow_fp32_relax_to_fp16; member
/external/tensorflow/tensorflow/lite/
interpreter.cc:372 subgraph->context()->allow_fp32_relax_to_fp16 = allow; in SetAllowFp16PrecisionForFp32()
interpreter.h:461 return context_->allow_fp32_relax_to_fp16; in GetAllowFp16PrecisionForFp32()
/external/tensorflow/tensorflow/lite/core/
subgraph.h:259 return context_.allow_fp32_relax_to_fp16; in GetAllowFp16PrecisionForFp32()
subgraph.cc:226 context_.allow_fp32_relax_to_fp16 = false; in Subgraph()