1 /* Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2 
3 Licensed under the Apache License, Version 2.0 (the "License");
4 you may not use this file except in compliance with the License.
5 You may obtain a copy of the License at
6 
7     http://www.apache.org/licenses/LICENSE-2.0
8 
9 Unless required by applicable law or agreed to in writing, software
10 distributed under the License is distributed on an "AS IS" BASIS,
11 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 See the License for the specific language governing permissions and
13 limitations under the License.
14 ==============================================================================*/
15 
16 #include "tensorflow/core/framework/tensor.h"
17 
18 #include "tensorflow/core/framework/tensor.pb.h"
19 #include "tensorflow/core/framework/tensor_testutil.h"
20 #include "tensorflow/core/framework/tensor_util.h"
21 #include "tensorflow/core/framework/types.h"
22 #include "tensorflow/core/framework/variant.h"
23 #include "tensorflow/core/framework/variant_encode_decode.h"
24 #include "tensorflow/core/framework/variant_tensor_data.h"
25 #include "tensorflow/core/lib/math/math_util.h"
26 #include "tensorflow/core/lib/strings/strcat.h"
27 #include "tensorflow/core/platform/logging.h"
28 #include "tensorflow/core/platform/test.h"
29 #include "tensorflow/core/platform/test_benchmark.h"
30 
31 namespace tensorflow {
32 
// Test-only helper exposing the private Tensor::set_shape() mutator.
class TensorTestHelper {
 public:
  // This is an operation that can be done by VariableOp.
  static void set_shape(Tensor* t, const TensorShape& s) { t->set_shape(s); }
};
38 
39 // To make TestCopies do the right thing.
operator ==(const ResourceHandle & a,const ResourceHandle & b)40 bool operator==(const ResourceHandle& a, const ResourceHandle& b) {
41   return a.device() == b.device() && a.container() == b.container() &&
42          a.name() == b.name() && a.hash_code() == b.hash_code() &&
43          a.maybe_type_name() == b.maybe_type_name();
44 }
45 
operator ==(const Variant & a,const Variant & b)46 bool operator==(const Variant& a, const Variant& b) {
47   if (a.is_empty()) {
48     return b.is_empty();
49   }
50 
51   if (a.TypeId() != b.TypeId()) return false;
52   if (a.TypeName() != b.TypeName()) return false;
53 
54   VariantTensorData a_data, b_data;
55   a.Encode(&a_data);
56   b.Encode(&b_data);
57 
58   string a_metadata;
59   string b_metadata;
60   a_data.get_metadata(&a_metadata);
61   b_data.get_metadata(&b_metadata);
62   if (a_metadata != b_metadata) return false;
63 
64   if (a_data.tensors_size() != b_data.tensors_size()) return false;
65 
66   for (int i = 0; i < a_data.tensors_size(); ++i) {
67     TensorProto a_proto, b_proto;
68     a_data.tensors(i).AsProtoTensorContent(&a_proto);
69     b_data.tensors(i).AsProtoTensorContent(&b_proto);
70     string a_str, b_str;
71     a_proto.SerializeToString(&a_str);
72     b_proto.SerializeToString(&b_str);
73     if (a_str != b_str) return false;
74   }
75 
76   return true;
77 }
78 
79 namespace {
80 
// A default-constructed Tensor is a 1-D float tensor holding zero elements.
TEST(TensorTest, Default) {
  Tensor t;
  EXPECT_EQ(DT_FLOAT, t.dtype());
  EXPECT_EQ(1, t.dims());
  EXPECT_EQ(0, t.NumElements());
}
87 
// Verifies the triviality assumptions Tensor makes about its element types:
// trivial types can be bulk-copied without per-element ctor/dtor calls.
TEST(TensorTest, DataType_Traits) {
  EXPECT_TRUE(std::is_trivial<float>::value);
  EXPECT_TRUE(std::is_trivial<double>::value);
  EXPECT_TRUE(std::is_trivial<int32>::value);
  EXPECT_TRUE(std::is_trivial<uint8>::value);
  EXPECT_TRUE(std::is_trivial<uint16>::value);
  EXPECT_TRUE(std::is_trivial<int16>::value);
  EXPECT_TRUE(std::is_trivial<int8>::value);
  EXPECT_TRUE(std::is_trivial<int64>::value);
  EXPECT_TRUE(std::is_trivial<bool>::value);
  // String types manage heap storage, so they are not trivial.
  EXPECT_FALSE(std::is_trivial<tstring>::value);
  EXPECT_FALSE(std::is_trivial<string>::value);

  EXPECT_EQ(sizeof(bool), 1);

  // Unfortunately. std::complex::complex() initializes (0, 0).
  EXPECT_FALSE(std::is_trivial<complex64>::value);
  EXPECT_FALSE(std::is_trivial<complex128>::value);
  EXPECT_TRUE(std::is_trivial<float[2]>::value);
  EXPECT_TRUE(std::is_trivial<double[2]>::value);
  // Hand-rolled aggregates with the same layout as std::complex are trivial,
  // which is why they can stand in for complex64/complex128 internally.
  struct MyComplex64 {
    float re, im;
  };
  EXPECT_TRUE(std::is_trivial<MyComplex64>::value);
  struct MyComplex128 {
    double re, im;
  };
  EXPECT_TRUE(std::is_trivial<MyComplex128>::value);
}
117 
// Generic tensor-equality check: defers to test::ExpectEqual, which supports
// all the ordinary element types.
template <typename T>
void ExpectEqual(const Tensor& x, const Tensor& y) {
  test::ExpectEqual(x, y);
}
// test::ExpectEqual does not support ResourceHandle or Variant.
// Relies on the operator==(const ResourceHandle&, ...) defined above via
// implicit conversion of the tensors' contents through gtest's EXPECT_EQ.
template <>
void ExpectEqual<ResourceHandle>(const Tensor& x, const Tensor& y) {
  EXPECT_EQ(x, y);
}
// Variant tensors likewise compare through the operator==(const Variant&, ...)
// defined above.
template <>
void ExpectEqual<Variant>(const Tensor& x, const Tensor& y) {
  EXPECT_EQ(x, y);
}
131 
132 template <typename T>
TestCopies(const Tensor & t)133 void TestCopies(const Tensor& t) {
134   {
135     LOG(INFO) << "CopyFrom()";
136     Tensor t2(t.dtype());
137     EXPECT_TRUE(t2.CopyFrom(t, t.shape()));
138     ExpectEqual<T>(t, t2);
139   }
140   {
141     LOG(INFO) << "operator=()";
142     Tensor t2(t.dtype());
143     t2 = t;
144     ExpectEqual<T>(t, t2);
145   }
146   {
147     LOG(INFO) << "deep copy";
148     Tensor t2(t.dtype(), t.shape());
149     t2.flat<T>() = t.flat<T>();
150     ExpectEqual<T>(t, t2);
151   }
152   {
153     LOG(INFO) << "AsProtoField()";
154     TensorProto proto;
155     t.AsProtoField(&proto);
156     Tensor t2(t.dtype());
157     EXPECT_TRUE(t2.FromProto(proto));
158     ExpectEqual<T>(t, t2);
159   }
160   {
161     LOG(INFO) << "AsProtoTensorContent()";
162     TensorProto proto;
163     t.AsProtoTensorContent(&proto);
164     Tensor t2(t.dtype());
165     EXPECT_TRUE(t2.FromProto(proto));
166     ExpectEqual<T>(t, t2);
167     // Make another copy via tensor_content field.
168     *proto.mutable_tensor_content() = proto.tensor_content();
169     Tensor t3(t.dtype());
170     EXPECT_TRUE(t3.FromProto(proto));
171     ExpectEqual<T>(t, t2);
172   }
173   {
174     LOG(INFO) << "AsTensor";
175     gtl::ArraySlice<T> values(t.flat<T>().data(), t.NumElements());
176     Tensor t2 = test::AsTensor(values, t.shape());
177     ExpectEqual<T>(t, t2);
178   }
179   {
180     LOG(INFO) << "Move constructor";
181     Tensor t2 = t;
182     Tensor t3 = std::move(t2);
183     ExpectEqual<T>(t, t3);
184     EXPECT_TRUE(t3.IsInitialized());
185     EXPECT_FALSE(t2.IsInitialized());  // NOLINT(bugprone-use-after-move)
186   }
187   {
188     LOG(INFO) << "Move assignment";
189     Tensor t2 = t;
190     Tensor t3;
191     t3 = std::move(t2);
192     ExpectEqual<T>(t, t3);
193     EXPECT_TRUE(t3.IsInitialized());
194     EXPECT_FALSE(t2.IsInitialized());  // NOLINT(bugprone-use-after-move)
195   }
196   {
197     LOG(INFO) << "Move self-assignment";
198     Tensor t2 = t;
199     Tensor* t3 = &t2;
200     *t3 = std::move(t2);
201     ExpectEqual<Variant>(t, *t3);
202     EXPECT_TRUE(t3->IsInitialized());
203   }
204 }
205 
// Fills a 5x7 DT_HALF tensor with a*b products and runs every copy path.
TEST(Tensor_Half, Simple) {
  Tensor t(DT_HALF, TensorShape({5, 7}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({5, 7})));
  auto mat = t.matrix<Eigen::half>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      mat(r, c) = static_cast<Eigen::half>(r * c);
    }
  }
  TestCopies<Eigen::half>(t);
}
216 
// Fills a 5x7 DT_BFLOAT16 tensor with a*b products and runs every copy path.
TEST(Tensor_Bfloat16, Simple) {
  Tensor t(DT_BFLOAT16, TensorShape({5, 7}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({5, 7})));
  auto mat = t.matrix<bfloat16>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      mat(r, c) = static_cast<bfloat16>(r * c);
    }
  }
  TestCopies<bfloat16>(t);
}
227 
// Fills a 10x20 DT_FLOAT tensor with a*b products and runs every copy path.
TEST(Tensor_Float, Simple) {
  Tensor t(DT_FLOAT, TensorShape({10, 20}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({10, 20})));
  auto mat = t.matrix<float>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      mat(r, c) = static_cast<float>(r * c);
    }
  }
  TestCopies<float>(t);
}
238 
// A scalar DT_RESOURCE tensor must survive all the copy paths in TestCopies.
TEST(Tensor_ResourceHandle, Simple) {
  Tensor t(DT_RESOURCE, TensorShape({}));
  ResourceHandle handle;
  handle.set_name("a");
  t.flat<ResourceHandle>()(0) = handle;
  TestCopies<ResourceHandle>(t);
}
246 
// A scalar DT_VARIANT tensor wrapping a float tensor survives the non-proto
// copy paths.
TEST(Tensor_Variant, Simple) {
  Tensor t(DT_VARIANT, TensorShape({}));
  Tensor value(DT_FLOAT, TensorShape({}));
  value.flat<float>()(0) = 42.0f;
  t.flat<Variant>()(0) = value;
  // All the tests in TestCopies except the ones that serialize and deserialize
  // the tensor. The consumer of a serialized Variant Tensor should know what
  // type is stored in the Tensor, so not testing the generic
  // serialize/deserialize case here.
  {
    LOG(INFO) << "CopyFrom()";
    Tensor t2(t.dtype());
    EXPECT_TRUE(t2.CopyFrom(t, t.shape()));
    ExpectEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "operator=()";
    Tensor t2(t.dtype());
    t2 = t;
    ExpectEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "deep copy";
    Tensor t2(t.dtype(), t.shape());
    t2.flat<Variant>() = t.flat<Variant>();
    ExpectEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "AsTensor";
    gtl::ArraySlice<Variant> values(t.flat<Variant>().data(), t.NumElements());
    Tensor t2 = test::AsTensor(values, t.shape());
    ExpectEqual<Variant>(t, t2);
  }
  {
    LOG(INFO) << "Move constructor";
    Tensor t2 = t;
    Tensor t3 = std::move(t2);
    ExpectEqual<Variant>(t, t3);
    EXPECT_TRUE(t3.IsInitialized());
    // The moved-from tensor is left uninitialized.
    EXPECT_FALSE(t2.IsInitialized());  // NOLINT(bugprone-use-after-move)
  }
  {
    LOG(INFO) << "Move assignment";
    Tensor t2 = t;
    Tensor t3;
    t3 = std::move(t2);
    ExpectEqual<Variant>(t, t3);
    EXPECT_TRUE(t3.IsInitialized());
    EXPECT_FALSE(t2.IsInitialized());  // NOLINT(bugprone-use-after-move)
  }
  {
    LOG(INFO) << "Move self-assignment";
    Tensor t2 = t;
    Tensor* t3 = &t2;
    // Self-move must leave the tensor intact and initialized.
    *t3 = std::move(t2);
    ExpectEqual<Variant>(t, *t3);
    EXPECT_TRUE(t3->IsInitialized());
  }
}
306 
// A Variant tensor wrapping a float tensor round-trips through AsProtoField /
// FromProto (which performs the Variant decode) with its payload intact.
TEST(Tensor_Variant, Marshal) {
  Tensor t(DT_VARIANT, TensorShape({}));

  Tensor payload(DT_FLOAT, TensorShape({}));
  payload.flat<float>()(0) = 42.0f;
  t.flat<Variant>()(0) = payload;

  LOG(INFO) << "AsProtoField()";
  TensorProto proto;
  t.AsProtoField(&proto);

  // This performs a decode operation.
  Tensor decoded(t.dtype());
  EXPECT_TRUE(decoded.FromProto(proto));

  Tensor* result = decoded.flat<Variant>()(0).get<Tensor>();
  EXPECT_NE(result, nullptr);
  EXPECT_FLOAT_EQ(result->scalar<float>()(), 42.0f);
}
326 
// Fills a 2x2 DT_UINT16 tensor with a*b products and runs every copy path.
TEST(Tensor_UInt16, Simple) {
  Tensor t(DT_UINT16, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  auto mat = t.matrix<uint16>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      mat(r, c) = uint16(r * c);
    }
  }
  TestCopies<uint16>(t);
}
337 
// Fills a 2x2 DT_QINT8 tensor with a*b products and runs every copy path.
TEST(Tensor_QInt8, Simple) {
  Tensor t(DT_QINT8, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  auto mat = t.matrix<qint8>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      mat(r, c) = qint8(r * c);
    }
  }
  TestCopies<qint8>(t);
}
348 
// Fills a 2x2 DT_QUINT8 tensor with a*b products and runs every copy path.
// Consistency: use the TensorFlow typedef quint8 (an alias of Eigen::QUInt8)
// like the sibling tests use qint8/qint32, instead of the raw Eigen name.
TEST(Tensor_QUInt8, Simple) {
  Tensor t(DT_QUINT8, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  for (int64 a = 0; a < t.shape().dim_size(0); a++) {
    for (int64 b = 0; b < t.shape().dim_size(1); b++) {
      t.matrix<quint8>()(a, b) = quint8(a * b);
    }
  }
  TestCopies<quint8>(t);
}
359 
// Fills a 2x2 DT_QINT32 tensor with a*b products and runs every copy path.
TEST(Tensor_QInt32, Simple) {
  Tensor t(DT_QINT32, TensorShape({2, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 2})));
  auto mat = t.matrix<qint32>();
  const int64 rows = t.shape().dim_size(0);
  const int64 cols = t.shape().dim_size(1);
  for (int64 r = 0; r < rows; r++) {
    for (int64 c = 0; c < cols; c++) {
      mat(r, c) = qint32(static_cast<int32>(r * c));
    }
  }
  TestCopies<qint32>(t);
}
370 
// Fixture providing a 2x3x4x5 float tensor `t` whose first and last elements
// hold sentinel values (0.01f / 0.02f), plus a zero-element tensor `zero_t`,
// and helpers that exercise the various Tensor reshape accessors.
class TensorReshapeTest : public ::testing::Test {
 protected:
  Tensor t;
  Tensor zero_t;

  TensorReshapeTest()
      : t(DT_FLOAT, TensorShape({2, 3, 4, 5})),
        zero_t(DT_FLOAT, TensorShape({3, 0, 2, 0, 5})) {}

  void SetUp() override {
    EXPECT_TRUE(t.shape().IsSameSize(TensorShape({2, 3, 4, 5})));
    EXPECT_TRUE(zero_t.shape().IsSameSize(TensorShape({3, 0, 2, 0, 5})));

    auto tensor = t.tensor<float, 4>();
    EXPECT_EQ(2, tensor.dimension(0));
    EXPECT_EQ(3, tensor.dimension(1));
    EXPECT_EQ(4, tensor.dimension(2));
    EXPECT_EQ(5, tensor.dimension(3));

    // Set first and last elements.
    tensor(0, 0, 0, 0) = 0.01f;
    tensor(1, 2, 3, 4) = 0.02f;
  }

  // Pointer-to-member types matching the non-const and const reshape
  // accessors on Tensor (shaped, unaligned_shaped, bit_casted_shaped, ...).
  template <typename T>
  using ReshapeFunc = T (Tensor::*)(gtl::ArraySlice<int64>);
  template <typename T>
  using ConstReshapeFunc = T (Tensor::*)(gtl::ArraySlice<int64>) const;

  // Invokes the given non-const reshape member on `t` and validates the view.
  template <typename T, ReshapeFunc<T> Func>
  void TestReshape(std::initializer_list<int64> sizes) {
    T shaped = (t.*Func)(sizes);
    TestReshapeImpl(shaped, sizes);
  }

  // Same as above, but for the const overloads of the reshape accessors.
  template <typename T, ConstReshapeFunc<T> Func>
  void TestReshape(std::initializer_list<int64> sizes) {
    T shaped = (static_cast<const Tensor&>(t).*Func)(sizes);
    TestReshapeImpl(shaped, sizes);
  }

  // Checks that `shaped` has the requested dimensions and that the sentinel
  // first/last values written in SetUp() survive the reshape, including when
  // the view is bit-cast to a different scalar type.
  template <typename T>
  void TestReshapeImpl(T shaped, std::initializer_list<int64> sizes) {
    auto iter = sizes.begin();
    for (int i = 0; i < shaped.rank(); ++i, ++iter) {
      EXPECT_EQ(*iter, shaped.dimension(i));
    }

    using Index = typename T::Index;
    using Scalar = typename T::Scalar;
    constexpr int N = T::NumIndices;

    // To handle the cast when `shaped` is bit casted into a different type.
    const float expected_first = 0.01f;
    // Default-constructed coord is all zeros: the first element of the view.
    Eigen::DSizes<Index, N> coord;
    EXPECT_EQ(shaped(coord), *reinterpret_cast<const Scalar*>(&expected_first));

    // Move coord to the last element of the view.
    for (int i = 0; i < N; ++i) {
      coord[i] = shaped.dimension(i) - 1;
    }
    const float expected_last = 0.02f;
    constexpr int kNumScalarPerFloat =
        sizeof(float) / sizeof(Scalar);  // Assuming even divide.
    EXPECT_EQ(shaped(coord), reinterpret_cast<const Scalar*>(
                                 &expected_last)[kNumScalarPerFloat - 1]);
  }
};
438 
// Runs shaped/unaligned_shaped/bit_casted_shaped (const and non-const) over
// several same-size reshapes of the 120-element fixture tensor.
TEST_F(TensorReshapeTest, Reshape) {
  LOG(INFO) << "shaped";

// Expands to one TestReshape call per accessor flavor; N is deduced from the
// number of dimensions passed in.
#define TEST_RESHAPE(...)                                                  \
  {                                                                        \
    int _tmp[] = {__VA_ARGS__};                                            \
    constexpr int N = (sizeof(_tmp) / sizeof(int));                        \
    TestReshape<TTypes<float, N>::Tensor, &Tensor::shaped<float, N>>(      \
        {__VA_ARGS__});                                                    \
    TestReshape<TTypes<float, N>::ConstTensor, &Tensor::shaped<float, N>>( \
        {__VA_ARGS__});                                                    \
    TestReshape<TTypes<float, N>::UnalignedTensor,                         \
                &Tensor::unaligned_shaped<float, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<float, N>::UnalignedConstTensor,                    \
                &Tensor::unaligned_shaped<float, N>>({__VA_ARGS__});       \
    TestReshape<TTypes<float, N>::Tensor,                                  \
                &Tensor::bit_casted_shaped<float, N>>({__VA_ARGS__});      \
    TestReshape<TTypes<float, N>::ConstTensor,                             \
                &Tensor::bit_casted_shaped<float, N>>({__VA_ARGS__});      \
    TestReshape<TTypes<int32, N>::Tensor,                                  \
                &Tensor::bit_casted_shaped<int32, N>>({__VA_ARGS__});      \
    TestReshape<TTypes<int32, N>::ConstTensor,                             \
                &Tensor::bit_casted_shaped<int32, N>>({__VA_ARGS__});      \
  }

  // Each shape below covers the same 120 elements at a different rank.
  TEST_RESHAPE(120);
  TEST_RESHAPE(6, 20);
  TEST_RESHAPE(6, 4, 5);
  TEST_RESHAPE(2, 3, 4, 5);
#undef TEST_RESHAPE
}
470 
// bit_casted_shaped may change the element size: a float tensor of 120
// elements is viewed as 480 uint8s or 240 int16s with rescaled dimensions.
TEST_F(TensorReshapeTest, BitcastReshapeDifferentSize) {
// View the 480 bytes of the fixture tensor as uint8 at various ranks.
#define TEST_BITCAST8_RESHAPE(...)                                    \
  {                                                                   \
    int _tmp[] = {__VA_ARGS__};                                       \
    constexpr int N = (sizeof(_tmp) / sizeof(int));                   \
    TestReshape<TTypes<uint8, N>::Tensor,                             \
                &Tensor::bit_casted_shaped<uint8, N>>({__VA_ARGS__}); \
  }

  TEST_BITCAST8_RESHAPE(480);
  TEST_BITCAST8_RESHAPE(24, 20);
  TEST_BITCAST8_RESHAPE(6, 16, 5);
  TEST_BITCAST8_RESHAPE(2, 3, 4, 20);
#undef TEST_BITCAST8_RESHAPE
// View the same storage as int16 (two bytes per element, 240 total).
#define TEST_BITCAST16_RESHAPE(...)                                   \
  {                                                                   \
    int _tmp[] = {__VA_ARGS__};                                       \
    constexpr int N = (sizeof(_tmp) / sizeof(int));                   \
    TestReshape<TTypes<int16, N>::Tensor,                             \
                &Tensor::bit_casted_shaped<int16, N>>({__VA_ARGS__}); \
  }

  TEST_BITCAST16_RESHAPE(240);
  TEST_BITCAST16_RESHAPE(6, 40);
  TEST_BITCAST16_RESHAPE(12, 4, 5);
  TEST_BITCAST16_RESHAPE(2, 3, 8, 5);
  TEST_BITCAST16_RESHAPE(2, 3, 4, 1, 10);
#undef TEST_BITCAST16_RESHAPE
}
500 
// Reshaping to a mismatched element count must CHECK-fail; the death-test
// regexes match the "requested vs. actual" counts in the failure message.
TEST_F(TensorReshapeTest, ReshapeError) {
  EXPECT_DEATH((t.shaped<float, 0>({})), "1 vs. 120");
  EXPECT_DEATH((t.shaped<float, 1>({119})), "119 vs. 120");
  EXPECT_DEATH((t.shaped<float, 4>({2, 3, 4, 6})), "144 vs. 120");

  EXPECT_DEATH((t.unaligned_shaped<float, 0>({})), "1 vs. 120");
  EXPECT_DEATH((t.unaligned_shaped<float, 1>({119})), "119 vs. 120");
  EXPECT_DEATH((t.unaligned_shaped<float, 4>({2, 3, 4, 6})), "144 vs. 120");

  // bit_casted_shaped compares byte counts (120 floats == 480 bytes).
  EXPECT_DEATH((t.bit_casted_shaped<float, 0>({})), "4 vs. 480");
  EXPECT_DEATH((t.bit_casted_shaped<float, 1>({119})), "476 vs. 480");
  EXPECT_DEATH((t.bit_casted_shaped<float, 4>({2, 3, 4, 6})), "576 vs. 480");

  Tensor string_tensor{DT_STRING, {10}};
  // Note that the error message compare # of elements, not # of bytes.
  EXPECT_DEATH((string_tensor.bit_casted_shaped<tstring, 1>({9})), "9 vs. 10");
}
518 
// flat() collapses the 2x3x4x5 fixture tensor into a rank-1 view of 120
// elements; the SetUp() sentinels land at the two ends of the view.
// (Removed a duplicated EXPECT_EQ(flat(0), 0.01f) assertion.)
TEST_F(TensorReshapeTest, Flat) {
  LOG(INFO) << "flat";
  {
    auto flat = t.flat<float>();
    EXPECT_EQ(120, flat.dimension(0));
    EXPECT_EQ(flat(0), 0.01f);
    EXPECT_EQ(flat(119), 0.02f);
  }
}
529 
// flat_inner_dims<T, NDIMS>() keeps the last NDIMS-1 dimensions and collapses
// everything before them into the first dimension; when NDIMS exceeds the
// rank, leading dimensions of size 1 are added.
TEST_F(TensorReshapeTest, FlatInnerDims) {
  LOG(INFO) << "flat_inner_dims";
  {
    // Default NDIMS=2: {2,3,4} collapse to 24, last dim 5 kept.
    auto flat_inner_dims = t.flat_inner_dims<float>();
    EXPECT_EQ(24, flat_inner_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_dims.dimension(1));
    EXPECT_EQ(flat_inner_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(23, 4), 0.02f);
  }
  {
    // NDIMS=3: {2,3} collapse to 6, last dims {4,5} kept.
    auto flat_inner_dims = t.flat_inner_dims<float, 3>();
    EXPECT_EQ(6, flat_inner_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_dims.dimension(2));
    EXPECT_EQ(flat_inner_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(5, 3, 4), 0.02f);
  }
  {
    // NDIMS=5 on a rank-4 tensor: a leading 1 is prepended.
    auto flat_inner_dims = t.flat_inner_dims<float, 5>();
    EXPECT_EQ(1, flat_inner_dims.dimension(0));
    EXPECT_EQ(2, flat_inner_dims.dimension(1));
    EXPECT_EQ(3, flat_inner_dims.dimension(2));
    EXPECT_EQ(4, flat_inner_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_dims.dimension(4));
    EXPECT_EQ(flat_inner_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_dims(0, 1, 2, 3, 4), 0.02f);
  }
  // Zero-sized tensor {3,0,2,0,5}: collapsed leading dims multiply to 0.
  {
    auto flat_inner_dims = zero_t.flat_inner_dims<float>();
    EXPECT_EQ(0, flat_inner_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_dims.dimension(1));
  }
  {
    auto flat_inner_dims = zero_t.flat_inner_dims<float, 3>();
    EXPECT_EQ(0, flat_inner_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_dims.dimension(2));
  }
  {
    // NDIMS equals the rank: shape is returned unchanged.
    auto flat_inner_dims = zero_t.flat_inner_dims<float, 5>();
    EXPECT_EQ(3, flat_inner_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_dims.dimension(2));
    EXPECT_EQ(0, flat_inner_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_dims.dimension(4));
  }
}
577 
// flat_outer_dims<T, NDIMS>() keeps the first NDIMS-1 dimensions and collapses
// everything after them into the last dimension; when NDIMS exceeds the rank,
// trailing dimensions of size 1 are appended.
TEST_F(TensorReshapeTest, FlatOuterDims) {
  LOG(INFO) << "flat_outer_dims";
  {
    // Default NDIMS=2: first dim 2 kept, {3,4,5} collapse to 60.
    auto flat_outer_dims = t.flat_outer_dims<float>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(60, flat_outer_dims.dimension(1));
    EXPECT_EQ(flat_outer_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 59), 0.02f);
  }
  {
    // NDIMS=3: {2,3} kept, {4,5} collapse to 20.
    auto flat_outer_dims = t.flat_outer_dims<float, 3>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_outer_dims.dimension(1));
    EXPECT_EQ(20, flat_outer_dims.dimension(2));
    EXPECT_EQ(flat_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 2, 19), 0.02f);
  }
  {
    // NDIMS=5 on a rank-4 tensor: a trailing 1 is appended.
    auto flat_outer_dims = t.flat_outer_dims<float, 5>();
    EXPECT_EQ(2, flat_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_outer_dims.dimension(4));
    EXPECT_EQ(flat_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_outer_dims(1, 2, 3, 4, 0), 0.02f);
  }
  // Zero-sized tensor {3,0,2,0,5}: collapsed trailing dims multiply to 0.
  {
    auto flat_outer_dims = zero_t.flat_outer_dims<float>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
  }
  {
    auto flat_outer_dims = zero_t.flat_outer_dims<float, 3>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_outer_dims.dimension(2));
  }
  {
    // NDIMS equals the rank: shape is returned unchanged.
    auto flat_outer_dims = zero_t.flat_outer_dims<float, 5>();
    EXPECT_EQ(3, flat_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_outer_dims.dimension(2));
    EXPECT_EQ(0, flat_outer_dims.dimension(3));
    EXPECT_EQ(5, flat_outer_dims.dimension(4));
  }
}
625 
// flat_inner_outer_dims<T, NDIMS>(begin) keeps the dimensions starting at
// `begin` (negative values pad leading 1s), collapsing what lies before into
// the first kept dimension and what lies after into the last; trailing 1s pad
// out to NDIMS.
TEST_F(TensorReshapeTest, FlatInnerOuterDims) {
  LOG(INFO) << "flat_inner_outer_dims";
  {
    // begin=0, NDIMS=rank: identity view of {2,3,4,5}.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 4>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 3, 4), 0.02f);
  }
  {
    // begin=-2: two leading 1s are prepended.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 6>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 3, 4), 0.02f);
  }
  {
    // begin=0 with NDIMS>rank: trailing 1s are appended.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 6>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 3, 4, 0, 0), 0.02f);
  }
  {
    // begin=-2 with NDIMS=8: padded with 1s on both sides.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 8>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(5));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(6));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(7));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 3, 4, 0, 0), 0.02f);
  }
  {
    // begin=1, NDIMS=3: {2,3} collapse into the first kept dim (6).
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 3>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 3, 4), 0.02f);
  }
  {
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 5>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(4, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 3, 4, 0, 0), 0.02f);
  }
  {
    // begin=0, NDIMS=3: {4,5} collapse into the last kept dim (20).
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 3>(0);
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(1, 2, 19), 0.02f);
  }
  {
    // Padding in front and collapsing behind at the same time.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 5>(-2);
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(1, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(4));
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 0, 0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(0, 0, 1, 2, 19), 0.02f);
  }
  {
    // Collapsing on both sides: {2,3}->6 in front, {4,5}->20 behind.
    auto flat_inner_outer_dims = t.flat_inner_outer_dims<float, 2>(1);
    EXPECT_EQ(6, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(20, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(flat_inner_outer_dims(0, 0), 0.01f);
    EXPECT_EQ(flat_inner_outer_dims(5, 19), 0.02f);
  }
  // Zero-sized tensor {3,0,2,0,5}: any collapsed run containing a 0 is 0.
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 2>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(2));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 5>(0);
    EXPECT_EQ(3, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(2));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(3));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(4));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 2>(3);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(1));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(2);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(5, flat_inner_outer_dims.dimension(2));
  }
  {
    auto flat_inner_outer_dims = zero_t.flat_inner_outer_dims<float, 3>(1);
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(0));
    EXPECT_EQ(2, flat_inner_outer_dims.dimension(1));
    EXPECT_EQ(0, flat_inner_outer_dims.dimension(2));
  }
}
752 
// Views a qint8 NCHW_VECT_C tensor (innermost dimension of 4 packed channel
// values) as an int32 NCHW tensor and checks each packed quadruple matches,
// via both the non-const and const reinterpret_last_dimension accessors.
TEST(ReinterpretLastDimension, Reinterpret_NCHW_VECT_C_as_NCHW) {
  LOG(INFO) << "reinterpret_last_dimension";
  {
    Tensor t_nchw_vect_c(DT_QINT8, TensorShape({2, 3, 5, 7, 4}));
    auto nchw_vect_c = t_nchw_vect_c.tensor<qint8, 5>();
    Tensor t_expected_nchw(DT_INT32, TensorShape({2, 3, 5, 7}));
    auto expected_nchw = t_expected_nchw.tensor<int32, 4>();
    int8 val = 0;
    for (int n = 0; n < t_nchw_vect_c.shape().dim_size(0); ++n) {
      for (int c = 0; c < t_nchw_vect_c.shape().dim_size(1); ++c) {
        // NOTE(review): `++val` in this loop header skips one value per h
        // iteration on top of the per-element increments below. Both tensors
        // are filled from the same sequence so the comparison still holds —
        // confirm the extra increment is intentional.
        for (int h = 0; h < t_nchw_vect_c.shape().dim_size(2); ++h, ++val) {
          int8 packet[4];
          for (int w = 0; w < t_nchw_vect_c.shape().dim_size(3); ++w) {
            // Write each channel value into the qint8 tensor and keep a copy
            // in `packet` so four int8s can be viewed as one int32 below.
            packet[0] = nchw_vect_c(n, c, h, w, 0) = ++val;
            packet[1] = nchw_vect_c(n, c, h, w, 1) = ++val;
            packet[2] = nchw_vect_c(n, c, h, w, 2) = ++val;
            packet[3] = nchw_vect_c(n, c, h, w, 3) = ++val;
            expected_nchw(n, c, h, w) = *reinterpret_cast<int32*>(&packet[0]);
          }
        }
      }
    }
    auto actual_nchw = t_nchw_vect_c.reinterpret_last_dimension<int32, 4>();
    const auto& const_t_nchw_vect_c = t_nchw_vect_c;
    auto const_actual_nchw =
        const_t_nchw_vect_c.reinterpret_last_dimension<int32, 4>();
    for (int n = 0; n < t_nchw_vect_c.shape().dim_size(0); ++n) {
      for (int c = 0; c < t_nchw_vect_c.shape().dim_size(1); ++c) {
        for (int h = 0; h < t_nchw_vect_c.shape().dim_size(2); ++h) {
          for (int w = 0; w < t_nchw_vect_c.shape().dim_size(3); ++w) {
            EXPECT_EQ(expected_nchw(n, c, h, w), actual_nchw(n, c, h, w));
            EXPECT_EQ(expected_nchw(n, c, h, w), const_actual_nchw(n, c, h, w));
          }
        }
      }
    }
  }
}
791 
TEST(Tensor_Scalar,Basics)792 TEST(Tensor_Scalar, Basics) {
793   {
794     Tensor t(DT_BOOL, TensorShape({}));
795     EXPECT_EQ(1, t.NumElements());
796     auto Tt = t.scalar<bool>();
797     EXPECT_EQ(1, Tt.size());
798     EXPECT_EQ(0, Tt.rank());
799     t.scalar<bool>()() = true;
800     EXPECT_TRUE(Tt());
801   }
802   {
803     Tensor t(DT_FLOAT, TensorShape({}));
804     EXPECT_EQ(1, t.NumElements());
805     auto Tt = t.scalar<float>();
806     EXPECT_EQ(1, Tt.size());
807     EXPECT_EQ(0, Tt.rank());
808     t.scalar<float>()() = 123.45f;
809     EXPECT_FLOAT_EQ(123.45f, Tt());
810   }
811   {
812     Tensor t(DT_FLOAT, TensorShape({1}));
813     EXPECT_EQ(1, t.NumElements());
814     auto Tt = t.vec<float>();
815     EXPECT_EQ(1, Tt.size());
816     t.vec<float>()(0) = 123.45f;
817     EXPECT_FLOAT_EQ(123.45f, Tt(0));
818   }
819   {
820     Tensor t(DT_FLOAT, TensorShape({1, 1, 1}));
821     EXPECT_EQ(1, t.NumElements());
822     auto Tt = t.scalar<float>();
823     EXPECT_EQ(1, Tt.size());
824     EXPECT_EQ(0, Tt.rank());
825     t.flat<float>()(0) = 123.45f;
826     EXPECT_FLOAT_EQ(123.45f, Tt());
827   }
828   {
829     Tensor t(DT_STRING, TensorShape({}));
830     EXPECT_EQ(1, t.NumElements());
831     auto Tt = t.scalar<tstring>();
832     EXPECT_EQ(1, Tt.size());
833     EXPECT_EQ(0, Tt.rank());
834     t.scalar<tstring>()() = "foo";
835     EXPECT_EQ("foo", Tt());
836   }
837   {
838     Tensor t(DT_STRING, TensorShape({1}));
839     EXPECT_EQ(1, t.NumElements());
840     auto Tt = t.vec<tstring>();
841     EXPECT_EQ(1, Tt.size());
842     t.flat<tstring>()(0) = "foo";
843     EXPECT_EQ("foo", Tt(0));
844   }
845   {
846     Tensor t(DT_STRING, TensorShape({1, 1, 1}));
847     EXPECT_EQ(1, t.NumElements());
848     auto Tt = t.scalar<tstring>();
849     EXPECT_EQ(1, Tt.size());
850     EXPECT_EQ(0, Tt.rank());
851     t.flat<tstring>()(0) = "bar";
852     EXPECT_EQ("bar", Tt());
853   }
854   {
855     Tensor t(DT_FLOAT, TensorShape({0, 1}));
856     EXPECT_EQ(0, t.NumElements());
857     auto Tt = t.flat<float>();
858     EXPECT_EQ(0, Tt.size());
859     auto Tm = t.matrix<float>();
860     EXPECT_EQ(0, Tm.size());
861     EXPECT_EQ(0, Tm.dimensions()[0]);
862     EXPECT_EQ(1, Tm.dimensions()[1]);
863   }
864 }
865 
TEST(Tensor_HostScalar, Basics) {
  {
    // Host-scalar constructor infers DT_BOOL from a bool literal.
    Tensor t(true);
    EXPECT_EQ(DT_BOOL, t.dtype());
    EXPECT_EQ(1, t.NumElements());
    auto view = t.scalar<bool>();
    EXPECT_EQ(1, view.size());
    EXPECT_EQ(0, view.rank());
    EXPECT_TRUE(view());
    view() = false;
    EXPECT_FALSE(view());
  }
  {
    // Host-scalar constructor infers DT_FLOAT from a float literal.
    Tensor t(123.45f);
    EXPECT_EQ(DT_FLOAT, t.dtype());
    EXPECT_EQ(1, t.NumElements());
    auto view = t.scalar<float>();
    EXPECT_EQ(1, view.size());
    EXPECT_EQ(0, view.rank());
    EXPECT_FLOAT_EQ(123.45f, view());
    view() = 42.0f;
    EXPECT_FLOAT_EQ(42.0f, view());
  }
  {
    // NOTE(mrry): Use long enough strings so that the contents are dynamically
    // allocated, and the absence of a call to the string destructor would
    // cause a memory leak.
    Tensor t("fooooooooooooooooooooooooooooooooooooo");
    EXPECT_EQ(DT_STRING, t.dtype());
    EXPECT_EQ(1, t.NumElements());
    auto view = t.scalar<tstring>();
    EXPECT_EQ(1, view.size());
    EXPECT_EQ(0, view.rank());
    EXPECT_EQ("fooooooooooooooooooooooooooooooooooooo", view());
    view() = "baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar";
    EXPECT_EQ("baaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaar", view());
  }
}
904 
TEST(Tensor_Float, Reshape_And_Slice_Assignment) {
  // Experiment: assign to a subset of a tensor by writing through a reshaped
  // 2-D view of the same buffer.
  Tensor t(DT_FLOAT, TensorShape({10, 4, 3, 2}));
  EXPECT_TRUE(t.shape().IsSameSize(TensorShape({10, 4, 3, 2})));

  // Full-rank view (N == 4) and a flattened 10 x 24 view of the same data.
  auto full = t.tensor<float, 4>();
  auto flat2d = t.shaped<float, 2>({10, 4 * 3 * 2});
  for (int row = 0; row < 10; row++) {
    // Build a constant 1 x 24 matrix and copy it into row `row` of the
    // flattened view.
    Eigen::Tensor<float, 2, Eigen::RowMajor> fill(1, 4 * 3 * 2);
    fill.setConstant(row * 2.0);

    Eigen::DSizes<Eigen::DenseIndex, 2> start(row, 0);
    Eigen::DSizes<Eigen::DenseIndex, 2> extent(1, 4 * 3 * 2);
    flat2d.slice(start, extent) = fill;
  }
  // Every element of slice i in the 4-D view must now equal i * 2.
  for (int i = 0; i < 10; i++) {
    for (int j = 0; j < 4; j++) {
      for (int k = 0; k < 3; k++) {
        for (int l = 0; l < 2; l++) {
          EXPECT_EQ(full(i, j, k, l), i * 2.0f);
          LOG(INFO) << i << "," << j << "," << k << "," << l
                    << " &e_t(i, j, k, l): " << &full(i, j, k, l) << " = "
                    << full(i, j, k, l);
        }
      }
    }
  }
}
937 
TEST(Tensor_String, Simple) {
  Tensor t = test::AsTensor<tstring>(
      {"hello", "world", "machine", "learning", "new", "york"},
      TensorShape({3, 2}));
  const TensorShape& shape = t.shape();
  ASSERT_EQ(shape.dims(), 2);
  ASSERT_EQ(shape.dim_size(0), 3);
  ASSERT_EQ(shape.dim_size(1), 2);
  auto mat = t.matrix<tstring>();
  // TotalBytes = per-element tstring overhead plus the payload lengths.
  EXPECT_EQ(t.TotalBytes(), 3 * 2 * sizeof(tstring) + 5 + 5 + 7 + 8 + 3 + 4);

  // Elements come back in row-major order.
  EXPECT_EQ(mat(0, 0), "hello");
  EXPECT_EQ(mat(0, 1), "world");
  EXPECT_EQ(mat(1, 0), "machine");
  EXPECT_EQ(mat(1, 1), "learning");
  EXPECT_EQ(mat(2, 0), "new");
  EXPECT_EQ(mat(2, 1), "york");

  TestCopies<tstring>(t);
}
958 
TEST(Tensor_Float, SimpleWithHelper) {
  // Doubling each element via an Eigen expression must match the
  // hand-written expected tensor.
  Tensor src = test::AsTensor<float>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<float>() = src.flat<float>() * 2.0f;
  Tensor expected = test::AsTensor<float>({0, 2, 4, 6, 8, 10}, src.shape());
  ExpectEqual<float>(doubled, expected);
}
966 
TEST(Tensor_Int32, SimpleWithHelper) {
  // Element-wise doubling for int32 tensors.
  Tensor src = test::AsTensor<int32>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<int32>() = src.flat<int32>() * 2;
  Tensor expected = test::AsTensor<int32>({0, 2, 4, 6, 8, 10}, src.shape());
  ExpectEqual<int32>(doubled, expected);
}
974 
TEST(Tensor_UInt16, SimpleWithHelper) {
  // Element-wise doubling for uint16 tensors.
  Tensor src = test::AsTensor<uint16>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<uint16>() = src.flat<uint16>() * uint16(2);
  Tensor expected = test::AsTensor<uint16>({0, 2, 4, 6, 8, 10}, src.shape());
  ExpectEqual<uint16>(doubled, expected);
}
982 
TEST(Tensor_QInt8, SimpleWithHelper) {
  // Element-wise addition of a negative constant for qint8 tensors.
  Tensor src = test::AsTensor<qint8>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor shifted(src.dtype(), src.shape());
  shifted.flat<qint8>() = src.flat<qint8>() + qint8(-2);
  Tensor expected = test::AsTensor<qint8>({-2, -1, 0, 1, 2, 3}, {2, 3});
  ExpectEqual<qint8>(shifted, expected);
}
990 
TEST(Tensor_QUInt8, SimpleWithHelper) {
  // Element-wise addition of a constant for quint8 tensors.
  Tensor src = test::AsTensor<quint8>({0, 1, 2, 3, 4, 5}, {2, 3});
  Tensor shifted(src.dtype(), src.shape());
  shifted.flat<quint8>() = src.flat<quint8>() + quint8(2);
  Tensor expected = test::AsTensor<quint8>({2, 3, 4, 5, 6, 7}, {2, 3});
  ExpectEqual<quint8>(shifted, expected);
}
998 
TEST(Tensor_Int64, SimpleWithHelper) {
  // Values use the upper 16 bits so the doubling exercises genuinely 64-bit
  // arithmetic.
  Tensor src = test::AsTensor<int64>(
      {0LL << 48, 1LL << 48, 2LL << 48, 3LL << 48, 4LL << 48, 5LL << 48},
      {2, 3});
  Tensor doubled(src.dtype(), src.shape());
  doubled.flat<int64>() = src.flat<int64>() * static_cast<int64>(2);
  Tensor expected = test::AsTensor<int64>(
      {0LL << 48, 2LL << 48, 4LL << 48, 6LL << 48, 8LL << 48, 10LL << 48},
      {2, 3});
  ExpectEqual<int64>(doubled, expected);
}
1010 
TEST(Tensor_String, SimpleWithHelper) {
  Tensor expected =
      test::AsTensor<tstring>({"0", "1", "2", "3", "4", "5"}, {2, 3});
  // Build the same contents by formatting the row-major flat index.
  Tensor built(DT_STRING, {2, 3});
  for (int row = 0; row < 2; ++row) {
    for (int col = 0; col < 3; ++col) {
      built.matrix<tstring>()(row, col) = strings::StrCat(row * 3 + col);
    }
  }

  // Test with helper.
  ExpectEqual<tstring>(expected, built);
}
1023 
TEST(Tensor_Bool, SimpleWithHelper) {
  Tensor expected =
      test::AsTensor<bool>({false, true, false, true, false, true}, {2, 3});

  // Build a checkerboard-like pattern: true wherever (row + col) is odd.
  Tensor built(DT_BOOL, {2, 3});
  for (int row = 0; row < 2; ++row) {
    for (int col = 0; col < 3; ++col) {
      built.matrix<bool>()(row, col) = (((row + col) % 2) != 0);
    }
  }

  // Test with helper.
  ExpectEqual<bool>(expected, built);
}
1038 
TEST(Tensor_Complex, Simple64) {
  // A randomly-initialized complex64 tensor must survive every copy path.
  Tensor src(DT_COMPLEX64, {4, 5, 3, 7});
  src.flat<complex64>().setRandom();
  TestCopies<complex64>(src);
}
1044 
TEST(Tensor_Complex, Simple128) {
  // Same as Simple64, but for double-precision complex values.
  Tensor src(DT_COMPLEX128, {4, 5, 3, 7});
  src.flat<complex128>().setRandom();
  TestCopies<complex128>(src);
}
1050 
TEST(Tensor_Complex, SimpleWithHelper64) {
  {
    // Element-wise multiplication by the imaginary constant 2i; the inputs
    // exercise all the ways a complex64 literal can be spelled.
    Tensor t1 = test::AsTensor<complex64>({0,
                                           {1, 1},
                                           complex64(2),
                                           complex64(3, 3),
                                           complex64(0, 4),
                                           complex64(2, 5)},
                                          {2, 3});
    Tensor t2(t1.dtype(), t1.shape());
    t2.flat<complex64>() = t1.flat<complex64>() * complex64(0, 2);
    Tensor t3 = test::AsTensor<complex64>(
        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
        // shape
        {2, 3});
    ExpectEqual<complex64>(t2, t3);
  }

  // Does some numeric operations for complex64 numbers.
  {
    const float PI = std::acos(-1);
    // Unit-modulus rotation of 45 degrees; its powers enumerate the 8-th
    // roots of unity.
    const complex64 rotate_45 = std::polar(1.0f, PI / 4);

    // x contains all the 8-th root of unity.
    Tensor x(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      x.vec<complex64>()(i) = MathUtil::IPow(rotate_45, i);
    }

    // Shift the roots by 45 degree.
    Tensor y(DT_COMPLEX64, TensorShape({8}));
    y.vec<complex64>() = x.vec<complex64>() * rotate_45;
    Tensor y_expected(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      y_expected.vec<complex64>()(i) = MathUtil::IPow(rotate_45, i + 1);
    }
    // Near-comparison because polar/pow accumulate rounding error.
    test::ExpectTensorNear<complex64>(y, y_expected, 1e-5);

    // Raise roots to the power of 8.
    Tensor z(DT_COMPLEX64, TensorShape({8}));
    z.vec<complex64>() = x.vec<complex64>().pow(8);
    Tensor z_expected(DT_COMPLEX64, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      // Every 8-th root of unity raised to the 8-th power is 1.
      z_expected.vec<complex64>()(i) = 1;
    }
    test::ExpectTensorNear<complex64>(z, z_expected, 1e-5);
  }
}
1099 
TEST(Tensor_Complex, SimpleWithHelper128) {
  {
    // Element-wise multiplication by the imaginary constant 2i; the inputs
    // exercise all the ways a complex128 literal can be spelled.
    Tensor t1 = test::AsTensor<complex128>({0,
                                            {1, 1},
                                            complex128(2),
                                            complex128(3, 3),
                                            complex128(0, 4),
                                            complex128(2, 5)},
                                           {2, 3});
    Tensor t2(t1.dtype(), t1.shape());
    t2.flat<complex128>() = t1.flat<complex128>() * complex128(0, 2);
    Tensor t3 = test::AsTensor<complex128>(
        {0, {-2, 2}, {0, 4}, {-6, 6}, {-8, 0}, {-10, 4}},
        // shape
        {2, 3});
    ExpectEqual<complex128>(t2, t3);
  }

  // Does some numeric operations for complex128 numbers.
  {
    const double PI = std::acos(-1);
    // Unit-modulus rotation of 45 degrees; its powers enumerate the 8-th
    // roots of unity.
    const complex128 rotate_45 = std::polar(1.0, PI / 4);

    // x contains all the 8-th root of unity.
    Tensor x(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      x.vec<complex128>()(i) = MathUtil::IPow(rotate_45, i);
    }

    // Shift the roots by 45 degree.
    Tensor y(DT_COMPLEX128, TensorShape({8}));
    y.vec<complex128>() = x.vec<complex128>() * rotate_45;
    Tensor y_expected(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      y_expected.vec<complex128>()(i) = MathUtil::IPow(rotate_45, i + 1);
    }
    // Near-comparison because polar/pow accumulate rounding error.
    test::ExpectTensorNear<complex128>(y, y_expected, 1e-5);

    // Raise roots to the power of 8.
    Tensor z(DT_COMPLEX128, TensorShape({8}));
    z.vec<complex128>() = x.vec<complex128>().pow(8);
    Tensor z_expected(DT_COMPLEX128, TensorShape({8}));
    for (int i = 0; i < 8; ++i) {
      // Every 8-th root of unity raised to the 8-th power is 1.
      z_expected.vec<complex128>()(i) = 1;
    }
    test::ExpectTensorNear<complex128>(z, z_expected, 1e-5);
  }
}
1148 
1149 // An allocator that always returns nullptr, for testing
1150 // failures to allocate.
class DummyCPUAllocator : public Allocator {
 public:
  DummyCPUAllocator() = default;
  // Reports itself as "cpu" so it can stand in for the default allocator.
  string Name() override { return "cpu"; }
  // Always fails: returns nullptr regardless of the requested size, so
  // callers can exercise their out-of-memory paths.
  void* AllocateRaw(size_t alignment, size_t num_bytes) override {
    return nullptr;
  }
  // Nothing to free, since AllocateRaw never hands out memory.
  void DeallocateRaw(void* ptr) override {}
};
1160 
TEST(Tensor, SharesBufferWith) {
  Tensor unallocated1;
  Tensor unallocated2;
  Tensor first(DT_FLOAT, TensorShape({1}));
  Tensor second(DT_FLOAT, TensorShape({1}));
  Tensor alias(first);  // Copy-construction shares the underlying buffer.
  // Tensors without a buffer never share, not even with themselves.
  EXPECT_FALSE(unallocated1.SharesBufferWith(unallocated1));
  EXPECT_FALSE(unallocated1.SharesBufferWith(unallocated2));
  EXPECT_FALSE(unallocated1.SharesBufferWith(first));
  EXPECT_FALSE(unallocated1.SharesBufferWith(alias));
  // Allocated tensors share with themselves and their copies, but not with
  // independently-allocated tensors.
  EXPECT_TRUE(first.SharesBufferWith(first));
  EXPECT_FALSE(first.SharesBufferWith(second));
  EXPECT_TRUE(first.SharesBufferWith(alias));
}
1175 
TEST(Tensor, FailureToAllocate) {
  TensorShape shape({1});
  DummyCPUAllocator allocator;
  // Direct construction through a failing allocator yields an
  // uninitialized tensor rather than crashing.
  {
    Tensor a(&allocator, DT_FLOAT, shape);
    ASSERT_FALSE(a.IsInitialized());
  }

  // Float: round-tripping through a proto must fail cleanly when the
  // destination allocator cannot provide memory.
  {
    Tensor src(DT_FLOAT, TensorShape({1}));
    src.vec<float>()(0) = 1.0;
    TensorProto proto;
    src.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor dst(&allocator, DT_FLOAT, TensorShape({1}));
    ASSERT_FALSE(dst.FromProto(&allocator, proto));
  }

  // String
  {
    Tensor src(DT_STRING, TensorShape({1}));
    src.vec<tstring>()(0) = "foo";
    TensorProto proto;
    src.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor dst(&allocator, DT_STRING, TensorShape({1}));
    ASSERT_FALSE(dst.FromProto(&allocator, proto));
  }

  // Half
  {
    Tensor src(DT_HALF, TensorShape({1}));
    src.vec<Eigen::half>()(0) = Eigen::half(1.0);
    TensorProto proto;
    src.AsProtoField(&proto);

    // FromProto should fail nicely.
    Tensor dst(&allocator, DT_HALF, TensorShape({1}));
    ASSERT_FALSE(dst.FromProto(&allocator, proto));
  }
}
1220 
1221 // On the alignment.
1222 //
1223 // As of 2018/5, tensorflow::Tensor allocates its buffer with 64-byte
1224 // alignment. Tensor::tensor/flat/vec/matrix methods requires the
1225 // buffer satisfies Eigen::Aligned (e.g., 16-bytes aligned usually,
1226 // 32-bytes for AVX, and 64-bytes for AVX512). Tensor::Slice requires
1227 // the caller to ensure its result is aligned if the caller intends
1228 // to use those methods. In this test case, we simply make sure each
1229 // slice is 64-byte aligned: sizeof(float) * 4 * 36 = 576.  576 % 64 = 0.
TEST(Tensor, Slice_Basic) {
  Tensor saved;
  {  // General
    Tensor x(DT_FLOAT, TensorShape({10, 4, 36}));
    // Fills in known values: slice i along dim0 is filled with the value i.
    for (int i = 0; i < 10; ++i) {
      x.Slice(i, i + 1).flat<float>().setConstant(i * 1.f);
    }
    // A simple slice along dim0.
    Tensor y = x.Slice(4, 8);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 4, 36})));
    auto tx = x.tensor<float, 3>();
    auto ty = y.tensor<float, 3>();
    for (int i = 0; i < 4; ++i) {
      for (int j = 0; j < 4; ++j) {
        for (int k = 0; k < 36; ++k) {
          EXPECT_EQ(ty(i, j, k), 4.0 + i);
          // The slice is a view, not a copy: the addresses must coincide.
          EXPECT_EQ(&tx(4 + i, j, k), &ty(i, j, k));
        }
      }
    }
    // A simple slice equivalent to identity.
    TestCopies<float>(y);
    y = x.Slice(0, 10);
    ExpectEqual<float>(x, y);
    // Full-range slice shares the exact same buffer as the original.
    EXPECT_EQ(x.flat<float>().data(), y.flat<float>().data());

    // A slice of a slice.
    auto z = x.Slice(4, 8).Slice(2, 3);
    auto tz = z.tensor<float, 3>();
    EXPECT_EQ(1, z.dim_size(0));
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 36; ++k) {
        // Offsets compose: slice 2 of [4, 8) is slice 6 of x, value 6.0.
        EXPECT_EQ(tz(0, j, k), 6.0);
      }
    }

    // x and y will be out of scope. But 'saved' should be alive.
    saved = z;
  }
  {
    // The slice keeps the underlying buffer alive after its source dies.
    EXPECT_EQ(1, saved.dim_size(0));
    auto tsaved = saved.tensor<float, 3>();
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 36; ++k) {
        EXPECT_EQ(tsaved(0, j, k), 6.0);
      }
    }
  }
  {  // Empty
    Tensor x(DT_FLOAT, TensorShape({10, 0, 36}));
    x.flat<float>().setRandom();
    Tensor y = x.Slice(4, 8);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 0, 36})));
  }

  {
    // Test unaligned access via a Slice.
    Tensor x(DT_FLOAT, TensorShape({30}));
    x.flat<float>().setConstant(0.0);

    // Take an unaligned slice: starting at element 1 offsets the data
    // pointer by sizeof(float), breaking Eigen's alignment requirement.
    Tensor y = x.Slice(1, 13);
#if EIGEN_MAX_ALIGN_BYTES > 0
    EXPECT_FALSE(y.IsAligned());
#endif
    // unaligned_flat is the only safe accessor for an unaligned tensor.
    y.unaligned_flat<float>().setConstant(1.0);
    for (int64 i = 0; i < y.NumElements(); ++i) {
      EXPECT_EQ(1.0, y.unaligned_flat<float>()(i));
    }
  }
}
1302 
TEST(Tensor, SubSlice_Basic) {
  {  // General
    Tensor x(DT_FLOAT, TensorShape({10, 4, 36}));
    // Fills in known values: sub-slice i along dim0 is filled with value i.
    for (int i = 0; i < 10; ++i) {
      x.SubSlice(i).flat<float>().setConstant(i * 1.f);
    }
    // A simple sub-slice along dim0; unlike Slice, SubSlice drops dim0.
    Tensor y = x.SubSlice(5);
    EXPECT_TRUE(y.shape().IsSameSize(TensorShape({4, 36})));
    auto tx = x.tensor<float, 3>();
    auto ty = y.tensor<float, 2>();
    for (int j = 0; j < 4; ++j) {
      for (int k = 0; k < 36; ++k) {
        EXPECT_EQ(ty(j, k), 5.0);
        // The sub-slice is a view, not a copy: addresses must coincide.
        EXPECT_EQ(&tx(5, j, k), &ty(j, k));
      }
    }
    // Sub-slicing twice collapses down to a single element (scalar view).
    Tensor z = y.SubSlice(3).SubSlice(31);
    auto tz = z.unaligned_flat<float>();
    EXPECT_EQ(*tz.data(), 5.0);
  }
  {
    // Test unaligned access via a SubSlice.
    Tensor x(DT_FLOAT, TensorShape({30, 5}));
    x.flat<float>().setConstant(0.0);

    // Take an unaligned subslice: row 1 starts 5 floats into the buffer,
    // which is not a multiple of Eigen's alignment.
    Tensor y = x.SubSlice(1);
#if EIGEN_MAX_ALIGN_BYTES > 0
    EXPECT_FALSE(y.IsAligned());
#endif
    // unaligned_flat is the only safe accessor for an unaligned tensor.
    y.unaligned_flat<float>().setConstant(1.0);
    for (int64 i = 0; i < y.NumElements(); ++i) {
      EXPECT_EQ(1.0, y.unaligned_flat<float>()(i));
    }
  }
}
1341 
1342 template <typename T>
MkTensor(DataType dt,const TensorShape & shape,std::vector<T> init_values)1343 Tensor MkTensor(DataType dt, const TensorShape& shape,
1344                 std::vector<T> init_values) {
1345   Tensor x(dt, shape);
1346   const int limit = x.NumElements();
1347   int vi = 0;
1348   for (int i = 0; i < limit; ++i) {
1349     x.flat<T>()(i) = init_values[vi++];
1350     if (vi >= init_values.size()) vi = 0;
1351   }
1352   return x;
1353 }
1354 
TEST(SummarizeValue, Uninitialized) {
  // A tensor with a shape but no allocated buffer reports itself as
  // uninitialized rather than printing garbage.
  Tensor t(DT_INT32);
  TensorTestHelper::set_shape(&t, TensorShape({4, 4}));
  EXPECT_EQ(
      strings::StrCat("uninitialized Tensor of 16 elements of type ", DT_INT32),
      t.SummarizeValue(16));
}
1362 
TEST(SummarizeValue, INT32) {
  // Rank 1: plain space-separated values.
  Tensor t = MkTensor<int>(DT_INT32, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("1 2 3 4 0", t.SummarizeValue(16));
  // Rank 2: one bracket pair per row.
  t = MkTensor<int>(DT_INT32, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2][3 4]", t.SummarizeValue(16));
  // Higher rank: nested brackets; truncation appends "...".
  t = MkTensor<int>(DT_INT32, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[1]][[2]]][[[3]][[4]]]", t.SummarizeValue(16));
  EXPECT_EQ("[[[1]][[2]]][[[3]]]...", t.SummarizeValue(3));
  // Empty tensor: empty summary.
  t = MkTensor<int>(DT_INT32, TensorShape({0}), {});
  EXPECT_EQ("", t.SummarizeValue(16));
}
1374 
TEST(SummarizeValue, INT32Dims) {
  // Truncation applies both within a row and across rows.
  Tensor t = MkTensor<int>(DT_INT32, TensorShape({3, 4}),
                           {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  EXPECT_EQ("[1 2 3...]...", t.SummarizeValue(3));
  EXPECT_EQ("[1 2 3 4][5 6 7 8][9 10...]...", t.SummarizeValue(10));
}
1381 
TEST(SummarizeValue, FLOAT) {
  // Float formatting mirrors the int32 cases exactly for whole values.
  Tensor t = MkTensor<float>(DT_FLOAT, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("1 2 3 4 0", t.SummarizeValue(16));
  t = MkTensor<float>(DT_FLOAT, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2][3 4]", t.SummarizeValue(16));
  t = MkTensor<float>(DT_FLOAT, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[1]][[2]]][[[3]][[4]]]", t.SummarizeValue(16));
  EXPECT_EQ("[[[1]][[2]]][[[3]]]...", t.SummarizeValue(3));
  t = MkTensor<float>(DT_FLOAT, TensorShape({0}), {});
  EXPECT_EQ("", t.SummarizeValue(16));
}
1393 
TEST(SummarizeValue, BOOL) {
  // Three init values cycle to fill five elements: 0 1 1 0 1.
  Tensor t = MkTensor<bool>(DT_BOOL, TensorShape({5}), {false, true, true});
  EXPECT_EQ("0 1 1 0 1", t.SummarizeValue(16));
  EXPECT_EQ("0 1 1...", t.SummarizeValue(3));
}
1399 
TEST(SummarizeValue, STRING) {
  Tensor t = MkTensor<tstring>(DT_STRING, TensorShape({5}),
                               {"one", "two", "three", "four", "five"});
  EXPECT_EQ("one two three four five", t.SummarizeValue(16));
  // Values cycle to fill the larger shape; truncation appends "...".
  t = MkTensor<tstring>(DT_STRING, TensorShape({5, 1, 5}),
                        {"one", "two", "three", "four", "five"});
  EXPECT_EQ("[[one two three four five]][[one...]]...", t.SummarizeValue(6));
}
1408 
TEST(SummarizeValue, INT32_PRINT_V2) {
  // print_v2 formatting brackets the whole tensor and elides the middle.
  Tensor t = MkTensor<int>(DT_INT32, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2 3 4 0]", t.SummarizeValue(16, true));
  EXPECT_EQ("[1 2 3 4 0]", t.SummarizeValue(-1, true));  // -1 == no limit.
  EXPECT_EQ("[1 2 ... 4 0]", t.SummarizeValue(2, true));
  EXPECT_EQ("[1 ... 0]", t.SummarizeValue(1, true));
  // Rank 2 uses newline-separated rows.
  t = MkTensor<int>(DT_INT32, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[1 2]\n [3 4]]", t.SummarizeValue(16, true));
  // Higher rank adds blank lines between nested sub-tensors.
  t = MkTensor<int>(DT_INT32, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[[1]]\n\n  [[2]]]\n\n\n [[[3]]\n\n  [[4]]]]",
            t.SummarizeValue(16, true));
  // Empty tensor prints as empty brackets.
  t = MkTensor<int>(DT_INT32, TensorShape({0}), {});
  EXPECT_EQ("[]", t.SummarizeValue(16, true));
}
1423 
TEST(SummarizeValue, INT32Dims_PRINT_V2) {
  Tensor t = MkTensor<int>(DT_INT32, TensorShape({3, 4}),
                           {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12});
  // Tight limit elides both columns and rows.
  EXPECT_EQ("[[1 ... 4]\n ...\n [9 ... 12]]", t.SummarizeValue(1, true));
  // Generous or unlimited limits print everything.
  EXPECT_EQ("[[1 2 3 4]\n [5 6 7 8]\n [9 10 11 12]]",
            t.SummarizeValue(10, true));
  EXPECT_EQ("[[1 2 3 4]\n [5 6 7 8]\n [9 10 11 12]]",
            t.SummarizeValue(-1, true));
}
1433 
TEST(SummarizeValue, FLOAT_PRINT_V2) {
  // Float print_v2 output matches the int32 cases for whole values.
  Tensor t = MkTensor<float>(DT_FLOAT, TensorShape({5}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[1 2 3 4 0]", t.SummarizeValue(16, true));
  EXPECT_EQ("[1 2 3 4 0]", t.SummarizeValue(-1, true));
  EXPECT_EQ("[1 2 ... 4 0]", t.SummarizeValue(2, true));
  EXPECT_EQ("[1 ... 0]", t.SummarizeValue(1, true));
  t = MkTensor<float>(DT_FLOAT, TensorShape({2, 2}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[1 2]\n [3 4]]", t.SummarizeValue(16, true));
  t = MkTensor<float>(DT_FLOAT, TensorShape({2, 2, 1, 1}), {1, 2, 3, 4, 0});
  EXPECT_EQ("[[[[1]]\n\n  [[2]]]\n\n\n [[[3]]\n\n  [[4]]]]",
            t.SummarizeValue(16, true));
  t = MkTensor<float>(DT_FLOAT, TensorShape({0}), {});
  EXPECT_EQ("[]", t.SummarizeValue(16, true));
}
1448 
TEST(SummarizeValue, BOOL_PRINT_V2) {
  // Three init values cycle to fill five elements: 0 1 1 0 1.
  Tensor t = MkTensor<bool>(DT_BOOL, TensorShape({5}), {false, true, true});
  EXPECT_EQ("[0 1 1 0 1]", t.SummarizeValue(16, true));
  EXPECT_EQ("[0 1 1 0 1]", t.SummarizeValue(-1, true));
  EXPECT_EQ("[0 1 ... 0 1]", t.SummarizeValue(2, true));
}
1455 
TEST(SummarizeValue, STRING_PRINT_V2) {
  // print_v2 quotes string elements.
  Tensor t = MkTensor<tstring>(DT_STRING, TensorShape({5}),
                               {"one", "two", "three", "four", "five"});
  EXPECT_EQ("[\"one\" \"two\" \"three\" \"four\" \"five\"]",
            t.SummarizeValue(16, true));
  EXPECT_EQ("[\"one\" \"two\" \"three\" \"four\" \"five\"]",
            t.SummarizeValue(-1, true));
  EXPECT_EQ("[\"one\" \"two\" ... \"four\" \"five\"]",
            t.SummarizeValue(2, true));
  // Rank 2 splits rows onto separate lines; the five values cycle.
  t = MkTensor<tstring>(DT_STRING, TensorShape({2, 2}),
                        {"one", "two", "three", "four", "five"});
  EXPECT_EQ("[[\"one\" \"two\"]\n [\"three\" \"four\"]]",
            t.SummarizeValue(16, true));
}
1470 
BM_CreateAndDestroy(::testing::benchmark::State & state)1471 void BM_CreateAndDestroy(::testing::benchmark::State& state) {
1472   TensorShape shape({10, 20});
1473   for (auto s : state) {
1474     Tensor t(DT_FLOAT, shape);
1475   }
1476 }
1477 BENCHMARK(BM_CreateAndDestroy);
1478 
BM_Assign(::testing::benchmark::State & state)1479 void BM_Assign(::testing::benchmark::State& state) {
1480   Tensor a(DT_FLOAT, TensorShape({10, 20}));
1481   Tensor b(DT_FLOAT, TensorShape({10, 20}));
1482   bool a_to_b = true;
1483   for (auto s : state) {
1484     if (a_to_b) {
1485       b = a;
1486     } else {
1487       a = b;
1488     }
1489     a_to_b = !a_to_b;
1490   }
1491 }
1492 BENCHMARK(BM_Assign);
1493 
1494 // Ensure tensor_data() works on empty tensors
TEST(Tensor, EmptyTensorData) {
  // tensor_data() on a never-allocated tensor must be a valid empty view.
  Tensor uninitialized;
  EXPECT_EQ(uninitialized.tensor_data().size(), 0);
}
1499 
1500 // Benchmark create and destroy a tensor, with an allocated buffer.
BM_CreateAndDestroyWithBuf(::testing::benchmark::State & state)1501 void BM_CreateAndDestroyWithBuf(::testing::benchmark::State& state) {
1502   TensorShape shape({10, 20});
1503   Allocator* allocator = cpu_allocator();
1504   for (auto s : state) {
1505     Tensor a(allocator, DT_FLOAT, shape);
1506   }
1507 }
1508 BENCHMARK(BM_CreateAndDestroyWithBuf);
1509 
1510 // Benchmark create+copy a tensor, with an allocated buffer.
BM_CreateAndCopyCtrWithBuf(::testing::benchmark::State & state)1511 void BM_CreateAndCopyCtrWithBuf(::testing::benchmark::State& state) {
1512   TensorShape shape({10, 20});
1513   Allocator* allocator = cpu_allocator();
1514   for (auto s : state) {
1515     Tensor a(allocator, DT_FLOAT, shape);
1516     Tensor b(a);
1517   }
1518 }
1519 BENCHMARK(BM_CreateAndCopyCtrWithBuf);
1520 
1521 // Benchmark create+move a tensor, with an allocated buffer.
BM_CreateAndMoveCtrWithBuf(::testing::benchmark::State & state)1522 void BM_CreateAndMoveCtrWithBuf(::testing::benchmark::State& state) {
1523   TensorShape shape({10, 20});
1524   Allocator* allocator = cpu_allocator();
1525   for (auto s : state) {
1526     Tensor a(allocator, DT_FLOAT, shape);
1527     Tensor b(std::move(a));
1528   }
1529 }
1530 BENCHMARK(BM_CreateAndMoveCtrWithBuf);
1531 
1532 // Benchmark creating and destroy a host-scalar tensor, using the allocator
1533 // interface.
BM_CreateAndDestroyHostScalarNonOptimized(::testing::benchmark::State & state)1534 void BM_CreateAndDestroyHostScalarNonOptimized(
1535     ::testing::benchmark::State& state) {
1536   TensorShape shape({});
1537   Allocator* allocator = cpu_allocator();
1538   for (auto s : state) {
1539     Tensor a(allocator, DT_FLOAT, shape);
1540     a.scalar<float>()() = 37.0;
1541   }
1542 }
1543 BENCHMARK(BM_CreateAndDestroyHostScalarNonOptimized);
1544 
1545 // Benchmark creating and destroy a host-scalar tensor, using the specialized
1546 // constructor.
BM_CreateAndDestroyHostScalarOptimized(::testing::benchmark::State & state)1547 void BM_CreateAndDestroyHostScalarOptimized(
1548     ::testing::benchmark::State& state) {
1549   for (auto s : state) {
1550     Tensor a(37.0);
1551   }
1552 }
1553 BENCHMARK(BM_CreateAndDestroyHostScalarOptimized);
1554 
BM_FromProto(::testing::benchmark::State & state)1555 void BM_FromProto(::testing::benchmark::State& state) {
1556   const int size = state.range(0);
1557 
1558   TensorShape shape({size});
1559   Allocator* allocator = cpu_allocator();
1560   Tensor a(allocator, DT_FLOAT, shape);
1561   std::fill_n(a.flat<float>().data(), size, 42.0);
1562   TensorProto p;
1563   a.AsProtoField(&p);
1564   for (auto s : state) {
1565     Tensor b;
1566     ASSERT_TRUE(b.FromProto(p));
1567   }
1568 }
1569 BENCHMARK(BM_FromProto)->Range(1, 1 << 20);
1570 
BM_FromProtoCompressed(::testing::benchmark::State & state)1571 void BM_FromProtoCompressed(::testing::benchmark::State& state) {
1572   const int size = state.range(0);
1573 
1574   TensorShape shape({size});
1575   Allocator* allocator = cpu_allocator();
1576   Tensor a(allocator, DT_FLOAT, shape);
1577   std::fill_n(a.flat<float>().data(), size, 42.0f);
1578   TensorProto p;
1579   a.AsProtoField(&p);
1580   tensor::CompressTensorProtoInPlace(&p);
1581   for (auto s : state) {
1582     Tensor b;
1583     ASSERT_TRUE(b.FromProto(p));
1584   }
1585 }
1586 BENCHMARK(BM_FromProtoCompressed)->Range(1, 1 << 20);
1587 
BM_FromProtoCompressedZero(::testing::benchmark::State & state)1588 void BM_FromProtoCompressedZero(::testing::benchmark::State& state) {
1589   const int size = state.range(0);
1590 
1591   TensorShape shape({size});
1592   Allocator* allocator = cpu_allocator();
1593   Tensor a(allocator, DT_FLOAT, shape);
1594   std::fill_n(a.flat<float>().data(), size, 0);
1595   a.flat<float>()(0) = 1;
1596   TensorProto p;
1597   a.AsProtoField(&p);
1598   tensor::CompressTensorProtoInPlace(&p);
1599   for (auto s : state) {
1600     Tensor b;
1601     ASSERT_TRUE(b.FromProto(p));
1602   }
1603 }
1604 BENCHMARK(BM_FromProtoCompressedZero)->Range(1, 1 << 20);
1605 
1606 }  // namespace
1607 }  // namespace tensorflow
1608