Searched refs: EIGEN_ALWAYS_INLINE (results 1 – 25 of 66), sorted by relevance
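
EIGEN_ALWAYS_INLINE is Eigen's force-inline marker: unlike plain inline, which is only a hint, it asks the compiler to inline the function unconditionally, which is why it shows up on the tiny accessors and per-element helpers listed below. As a rough sketch of its intent (the authoritative definition lives in Eigen/src/Core/util/Macros.h and varies by Eigen version and compiler, so treat this as an approximation, not the verbatim macro):

  // Approximation of what EIGEN_ALWAYS_INLINE expands to; not the exact Eigen definition.
  #if defined(__GNUC__) || defined(__clang__)
  #  define EIGEN_ALWAYS_INLINE __attribute__((always_inline)) inline
  #elif defined(_MSC_VER)
  #  define EIGEN_ALWAYS_INLINE __forceinline
  #else
  #  define EIGEN_ALWAYS_INLINE inline
  #endif

In most of the hits it is paired with EIGEN_DEVICE_FUNC, which expands to __host__ __device__ when compiling with CUDA (and to nothing otherwise), so the same forced-inline helper can be used in both host and device code.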


/external/eigen/Eigen/src/Core/
MathFunctions.h:832 EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
840 EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
848 EIGEN_ALWAYS_INLINE T mini(const T& x, const T& y)
854 EIGEN_ALWAYS_INLINE float mini(const float& x, const float& y)
860 EIGEN_ALWAYS_INLINE T maxi(const T& x, const T& y)
866 EIGEN_ALWAYS_INLINE float maxi(const float& x, const float& y)
958 template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
961 template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
992 template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
995 template<> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
[all …]
/external/eigen/unsupported/Eigen/CXX11/src/Tensor/
TensorDimensionList.h:26 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
142 EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
148 EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
155 EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
161 EIGEN_DEVICE_FUNC static EIGEN_ALWAYS_INLINE bool run() {
168 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
174 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
181 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
187 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex) {
194 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex, const DenseIndex){
[all …]
TensorUInt128.h:20 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator uint64_t() const { return n; } in uint64_t()
22 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val() { } in static_val()
25 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE static_val(const T& v) { in static_val()
38 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
45 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
55 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
61 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
64 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE operator LOW() const { in LOW()
67 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE LOW lower() const { in lower()
70 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HIGH upper() const { in upper()
[all …]
TensorContractionMapper.h:31 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_tensor(tensor) { } in CoeffLoader()
33 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index) { in offsetBuffer()
37 …EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) … in coeff()
55 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffLoader(const Tensor& tensor) : m_data(tensor.data()) {}
57 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
61 …EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename Tensor::Scalar coeff(typename Tensor::Index index) …
95 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void offsetBuffer(typename Tensor::Index offset) {
198 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index firstAligned(Index size) const {
204 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index stride() const {
362 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
[all …]
TensorIntDiv.h:35 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
51 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
88 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) { in muluh()
97 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) { in muluh()
110 …static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T… in computeMultiplier()
118 …static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T…
197 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
TensorMeta.h:17 template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
22 template<typename T1, typename T2> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
29 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
35 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
155 EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair() : first(0), second(0) {}
156 …EIGEN_CONSTEXPR EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE IndexPair(Idx f, Idx s) : first(f), second(s…
TensorContractionBlocking.h:42 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index kc() const { return kc_; } in kc()
43 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index mc() const { return mc_; } in mc()
44 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Index nc() const { return nc_; } in nc()
TensorIndexList.h:605 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(const DenseIndex) {
612 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
619 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run() {
626 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
633 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
640 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
647 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
654 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
661 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool run(DenseIndex, DenseIndex) {
/external/eigen/Eigen/src/Core/util/
BlasUtil.h:139 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasVectorMapper(Scalar *data) : m_data(data) {}
141 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar operator()(Index i) const {
145 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet load(Index i) const {
164 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE BlasLinearMapper(Scalar *data) : m_data(data) {}
166 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void prefetch(int i) const {
170 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Index i) const {
174 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Packet loadPacket(Index i) const {
178 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE HalfPacket loadHalfPacket(Index i) const {
182 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void storePacket(Index i, const Packet &p) const {
200 …EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE blas_data_mapper(Scalar* data, Index stride) : m_data(data),…
[all …]
/external/tensorflow/tensorflow/core/kernels/
eigen_spatial_convolutions-inl.h:215 EIGEN_ALWAYS_INLINE bool nonStandardPatches() const { in nonStandardPatches()
231 EIGEN_ALWAYS_INLINE Scalar operator()(Index row) const { in operator()
249 EIGEN_ALWAYS_INLINE Packet loadPacket(Index row) const { in loadPacket()
258 EIGEN_ALWAYS_INLINE Packet loadPacket(Index row, Index patchIndex) const { in loadPacket()
265 EIGEN_ALWAYS_INLINE const TensorEvaluator<ArgType, Device>& impl() const { in impl()
270 EIGEN_ALWAYS_INLINE Index patchDepth() const { return m_rowInputStride; } in patchDepth()
272 EIGEN_ALWAYS_INLINE Index patchRows() const { return m_colStride; } in patchRows()
274 EIGEN_ALWAYS_INLINE Index patchCols() const { return m_patch_cols; } in patchCols()
346 EIGEN_ALWAYS_INLINE Packet loadPacket(Index patchId, Index rowIndex, in loadPacket()
369 EIGEN_ALWAYS_INLINE Packet loadPartialPacketStandard( in loadPartialPacketStandard()
[all …]
eigen_cuboid_convolution.h:277 EIGEN_ALWAYS_INLINE bool nonStandardPatches() const { in nonStandardPatches()
294 EIGEN_ALWAYS_INLINE Scalar operator()(Index row) const { in operator()
311 EIGEN_ALWAYS_INLINE Packet loadPacket(Index row) const { in loadPacket()
320 EIGEN_ALWAYS_INLINE Packet loadPacket(Index row, Index patchIndex) const { in loadPacket()
327 EIGEN_ALWAYS_INLINE const TensorEvaluator<ArgType, Device>& impl() const { in impl()
332 EIGEN_ALWAYS_INLINE Index patchDepth() const { return m_planeInputStride; } in patchDepth()
334 EIGEN_ALWAYS_INLINE Index patchPlanes() const { return m_rowStride; } in patchPlanes()
336 EIGEN_ALWAYS_INLINE Index patchRows() const { return m_patch_rows; } in patchRows()
338 EIGEN_ALWAYS_INLINE Index patchCols() const { return m_patch_cols; } in patchCols()
437 EIGEN_ALWAYS_INLINE Packet loadPacket(Index patchId, Index planeIndex, in loadPacket()
[all …]
one_hot_op.h:39 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
45 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
64 EIGEN_ALWAYS_INLINE static void Compute( in Compute()
76 EIGEN_ALWAYS_INLINE static void Compute(
reverse_sequence_op.h:31 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
39 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
63 EIGEN_ALWAYS_INLINE static void Compute( in Compute()
eigen_pooling.h:41 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
51 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
160 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
170 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
286 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE AvgPoolMeanReducer() : scalarCount_(0) { in AvgPoolMeanReducer()
401 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
411 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
519 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
529 EIGEN_ALWAYS_INLINE static const TensorReshapingOp<
where_op.h:41 EIGEN_ALWAYS_INLINE static Status Compute(
55 EIGEN_ALWAYS_INLINE static Status Compute(
argmax_op.h:31 EIGEN_ALWAYS_INLINE static void Reduce##Dims( \
51 EIGEN_ALWAYS_INLINE static void Reduce##Dims( \
sparse_xent_op.h:59 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SparseXentLossGenerator( in SparseXentLossGenerator()
69 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
99 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE SparseXentGradGenerator( in SparseXentGradGenerator()
109 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
gather_nd_op_cpu_impl.h:44 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE GatherNdSliceGenerator( in GatherNdSliceGenerator()
54 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE bool GenerateIndices( in GenerateIndices()
66 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int32
diag_op_gpu.cu.cc:46 EIGEN_ALWAYS_INLINE Status operator()(OpKernelContext* context, in operator ()()
92 EIGEN_ALWAYS_INLINE Status operator()(OpKernelContext* context, in operator ()()
eigen_spatial_convolutions.h:36 EIGEN_ALWAYS_INLINE static Index finalize(Scalar* block,
55 EIGEN_ALWAYS_INLINE static Index finalize(Scalar* block,
172 EIGEN_ALWAYS_INLINE void packStandardPatches(Scalar* block,
352 EIGEN_ALWAYS_INLINE void packNonStandardPatches(Scalar* block,
fused_eigen_output_kernels.h:208 EIGEN_ALWAYS_INLINE void operator()( in operator()
235 EIGEN_ALWAYS_INLINE void operator()(
268 EIGEN_ALWAYS_INLINE void operator()(
311 EIGEN_ALWAYS_INLINE void operator()(
/external/tensorflow/tensorflow/core/kernels/image/
image_ops.h:47 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
80 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
107 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
115 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE float operator()(const float out_coord,
130 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
139 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
178 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
185 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T
215 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T read_with_fill_value(
245 EIGEN_ALWAYS_INLINE
/external/tensorflow/tensorflow/core/framework/
bounds_check.h:30 EIGEN_ALWAYS_INLINE EIGEN_DEVICE_FUNC bool FastBoundsCheck(const Ta index, in FastBoundsCheck()
45 EIGEN_ALWAYS_INLINE EIGEN_DEVICE_FUNC const T SubtleMustCopy(const T &x) { in SubtleMustCopy()
/external/tensorflow/tensorflow/core/kernels/linalg/
matrix_diag_op.h:49 EIGEN_ALWAYS_INLINE static void Compute(
60 EIGEN_ALWAYS_INLINE static void Compute(
/external/tensorflow/tensorflow/core/util/
gpu_kernel_helper.h:177 __device__ EIGEN_ALWAYS_INLINE Eigen::half GpuShuffleUpSync(
184 __device__ EIGEN_ALWAYS_INLINE Eigen::half GpuShuffleDownSync(
191 __device__ EIGEN_ALWAYS_INLINE Eigen::half GpuShuffleXorSync(
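
Taken together, the matches share one pattern: very small, very frequently called member functions (coefficient accessors, packet loads, strides, bounds checks) whose call overhead would be visible in inner loops, often compiled for both CPU and GPU. A minimal, self-contained sketch of that pattern follows; SimpleLinearMapper is a hypothetical type loosely modeled on the BlasLinearMapper and CoeffLoader hits above, not code taken from any of the listed files.

  #include <Eigen/Core>  // brings in EIGEN_DEVICE_FUNC and EIGEN_ALWAYS_INLINE
  #include <iostream>

  template <typename Scalar>
  struct SimpleLinearMapper {
    // Usable from host and device code, and forced inline so the wrapper
    // disappears entirely from the generated code.
    EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE explicit SimpleLinearMapper(Scalar* data)
        : m_data(data) {}

    // Trivial element accessor: the kind of one-liner the files above mark
    // EIGEN_ALWAYS_INLINE so that indexing compiles down to a single load.
    EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE Scalar& operator()(Eigen::Index i) const {
      return m_data[i];
    }

   private:
    Scalar* m_data;
  };

  int main() {
    float buf[4] = {1.f, 2.f, 3.f, 4.f};
    SimpleLinearMapper<float> map(buf);
    std::cout << map(2) << "\n";  // prints 3
    return 0;
  }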
