Searched refs:axis_tensor (Results 1 – 9 of 9) sorted by relevance
/external/tensorflow/tensorflow/core/kernels/ |
D | unique_op.cc |
    100  const Tensor& axis_tensor = context->input(1);  in Compute() local
    101  OP_REQUIRES(context, TensorShapeUtils::IsVector(axis_tensor.shape()),  in Compute()
    104      context, axis_tensor.NumElements() <= 1,  in Compute()
    107  if (axis_tensor.NumElements() == 0) {  in Compute()
    112      (axis_tensor.dtype() == DT_INT32 ||  in Compute()
    113       axis_tensor.dtype() == DT_INT64),  in Compute()
    116      DataTypeString(axis_tensor.dtype())));  in Compute()
    117  if (axis_tensor.dtype() == DT_INT32) {  in Compute()
    118    axis = internal::SubtleMustCopy(axis_tensor.scalar<int32>()());  in Compute()
    120    axis = internal::SubtleMustCopy(axis_tensor.scalar<int64>()());  in Compute()
|
D | gather_op.cc |
     67  const Tensor& axis_tensor = c->input(2);  in Compute() local
     68  OP_REQUIRES(c, TensorShapeUtils::IsScalar(axis_tensor.shape()),  in Compute()
     71  if (axis_tensor.dtype() == DT_INT32) {  in Compute()
     72    axis = axis_tensor.scalar<int32>()();  in Compute()
     73  } else if (axis_tensor.dtype() == DT_INT64) {  in Compute()
     74    axis = axis_tensor.scalar<int64>()();  in Compute()
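Both core-kernel hits above (unique_op.cc and gather_op.cc) follow the same pattern: fetch the axis input, validate its shape, then branch on DT_INT32 vs DT_INT64 before reading the scalar value. A minimal sketch of that pattern, assuming it is called from an OpKernel's Compute(); the kAxisInput index and the ReadAxis helper are placeholders, not names taken from the files above:

    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    // Placeholder input index for the axis tensor (hypothetical).
    constexpr int kAxisInput = 1;

    // Sketch: read a scalar axis input that may be int32 or int64.
    void ReadAxis(tensorflow::OpKernelContext* context, tensorflow::int64* axis) {
      const tensorflow::Tensor& axis_tensor = context->input(kAxisInput);
      // Reject anything that is not a scalar (unique_op additionally accepts
      // an empty axis vector and validates the dtype with its own message).
      OP_REQUIRES(context,
                  tensorflow::TensorShapeUtils::IsScalar(axis_tensor.shape()),
                  tensorflow::errors::InvalidArgument("axis must be a scalar"));
      // The axis may arrive as either 32- or 64-bit; widen to int64.
      if (axis_tensor.dtype() == tensorflow::DT_INT32) {
        *axis = axis_tensor.scalar<tensorflow::int32>()();
      } else {
        *axis = axis_tensor.scalar<tensorflow::int64>()();
      }
    }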
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | reverse.cc |
     77  const TfLiteTensor* axis_tensor;  in Eval() local
     79      GetInputSafe(context, node, kAxisTensor, &axis_tensor));  in Eval()
     80  int axis = GetTensorData<int32_t>(axis_tensor)[0];  in Eval()
|
D | cumsum.cc |
     57  const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);  in Eval() local
     63  int axis = *GetTensorData<int>(axis_tensor);  in Eval()
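The two TFLite kernel hits (reverse.cc and cumsum.cc) read the axis the same way: look up the axis input tensor, then dereference its 32-bit data. A minimal sketch of that pattern, assuming a TFLite kernel's Eval() and the helpers from kernel_util.h; kAxisTensor's value and the ReadAxis helper are placeholders:

    #include "tensorflow/lite/c/common.h"
    #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
    #include "tensorflow/lite/kernels/kernel_util.h"

    namespace {

    // Placeholder input index for the axis tensor (hypothetical).
    constexpr int kAxisTensor = 1;

    // Sketch: fetch the axis input and read its first int32 element,
    // as reverse.cc and cumsum.cc do above.
    TfLiteStatus ReadAxis(TfLiteContext* context, TfLiteNode* node, int* axis) {
      const TfLiteTensor* axis_tensor;
      TF_LITE_ENSURE_OK(
          context, tflite::GetInputSafe(context, node, kAxisTensor, &axis_tensor));
      *axis = tflite::GetTensorData<int32_t>(axis_tensor)[0];
      return kTfLiteOk;
    }

    }  // namespace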
|
/external/tensorflow/tensorflow/lite/delegates/nnapi/ |
D | nnapi_delegate.cc |
   1711  const auto& axis_tensor = context->tensors[node->inputs->data[1]];  in Validate() local
   1712  if (axis_tensor.type == kTfLiteInt64) {  in Validate()
   1714      axis_tensor.allocation_type == kTfLiteMmapRo &&  in Validate()
   1715      *axis_tensor.data.i64 <= std::numeric_limits<int32_t>::max() &&  in Validate()
   1716      *axis_tensor.data.i64 >= std::numeric_limits<int32_t>::min(),  in Validate()
   1723    Expect(axis_tensor.type == kTfLiteInt32,  in Validate()
   4496  const TfLiteTensor& axis_tensor = context->tensors[axis_id];  in AddOpsAndTensors() local
   4497  switch (axis_tensor.type) {  in AddOpsAndTensors()
   4499    if (axis_tensor.allocation_type == kTfLiteMmapRo) {  in AddOpsAndTensors()
   4501        static_cast<int32_t>(*axis_tensor.data.i32)));  in AddOpsAndTensors()
   [all …]
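The Validate() hit shows the NNAPI delegate accepting an int64 axis only when it is a memory-mapped constant whose value fits in int32, since NNAPI itself takes a 32-bit axis. The range check can be expressed as a small standalone helper; a hedged sketch, not the delegate's actual code:

    #include <cstdint>
    #include <limits>

    // Sketch: an int64 axis is only usable by NNAPI if its value
    // survives narrowing to int32.
    bool AxisFitsInInt32(int64_t axis) {
      return axis <= std::numeric_limits<int32_t>::max() &&
             axis >= std::numeric_limits<int32_t>::min();
    }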
|
/external/tensorflow/tensorflow/core/ops/ |
D | array_ops.cc |
   1003  const Tensor* axis_tensor = c->input_tensor(1);  in __anondb9326b21402() local
   1004  if (axis_tensor != nullptr && c->RankKnown(input)) {  in __anondb9326b21402()
   1007  if (axis_tensor->dtype() == DT_INT32) {  in __anondb9326b21402()
   1008    axis_value = AsInt64<int32>(axis_tensor, axis_tensor->NumElements());  in __anondb9326b21402()
   1010    axis_value = AsInt64<int64>(axis_tensor, axis_tensor->NumElements());  in __anondb9326b21402()
|
/external/tensorflow/tensorflow/core/grappler/optimizers/ |
D | arithmetic_optimizer.cc |
   3589  Tensor axis_tensor;  in IsAxis0() local
   3590  if (!GetTensorFromConstNode(node.input(axis_input), &axis_tensor))  in IsAxis0()
   3592  if (axis_tensor.NumElements() != 1) return false;  in IsAxis0()
   3593  if (axis_tensor.dtype() == DT_INT32) {  in IsAxis0()
   3594    return axis_tensor.flat<int32>()(0) == 0;  in IsAxis0()
   3595  } else if (axis_tensor.dtype() == DT_INT64) {  in IsAxis0()
   3596    return axis_tensor.flat<int64>()(0) == 0;  in IsAxis0()
|
D | constant_folding.cc |
   3773  Tensor axis_tensor;  in GetConcatAxis() local
   3774  if (!GetTensorFromConstNode(node.input(axis_idx), &axis_tensor)) {  in GetConcatAxis()
   3777  *axis = axis_tensor.dtype() == DT_INT64  in GetConcatAxis()
   3778             ? static_cast<int>(axis_tensor.scalar<int64>()())  in GetConcatAxis()
   3779             : axis_tensor.scalar<int32>()();  in GetConcatAxis()
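Both grappler hits (arithmetic_optimizer.cc and constant_folding.cc) pull the axis out of a constant node and then read its single element, accepting either int32 or int64. A minimal sketch of the dtype-agnostic read, assuming the constant has already been materialized into a tensorflow::Tensor (the fetch itself, e.g. via GetTensorFromConstNode, is left out); ReadConstAxis is a placeholder name:

    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/types.h"

    // Sketch: read a single-element axis tensor as int64, whatever its
    // integer dtype. Returns false if the tensor is not usable as an axis.
    bool ReadConstAxis(const tensorflow::Tensor& axis_tensor,
                       tensorflow::int64* axis) {
      if (axis_tensor.NumElements() != 1) return false;
      if (axis_tensor.dtype() == tensorflow::DT_INT32) {
        *axis = axis_tensor.flat<tensorflow::int32>()(0);
        return true;
      }
      if (axis_tensor.dtype() == tensorflow::DT_INT64) {
        *axis = axis_tensor.flat<tensorflow::int64>()(0);
        return true;
      }
      return false;
    }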
|
/external/tensorflow/tensorflow/lite/delegates/gpu/common/ |
D | model_builder.cc |
   1840  const TfLiteTensor* axis_tensor = reader->GetInputTensor(2);  in Parse() local
   1843      ExtractAxisFromIndex(*input, axis_tensor->data.i32[0], &attr.axis));  in Parse()
|