# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for shape-related tensorflow ops: shape, rank, size, expand_dims,
squeeze, and tile."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from absl.testing import parameterized
import numpy as np

from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import importer
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import gradients_impl
from tensorflow.python.platform import test


# TODO(zongheng): it'd be great to factor out this function and various random
# SparseTensor gen funcs.
def _sparsify(x, thresh=0.5, index_dtype=np.int64):
  x[x < thresh] = 0

  non_zero = np.where(x)
  x_indices = np.vstack(non_zero).astype(index_dtype).T
  x_values = x[non_zero]
  x_shape = x.shape

  return sparse_tensor.SparseTensor(
      indices=x_indices, values=x_values, dense_shape=x_shape), len(x_values)
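
# Illustrative example of _sparsify (assumes values lie in [0, 1)):
#
#   sp, nnz = _sparsify(np.random.rand(2, 3))
#
# zeroes out, in place, every entry below `thresh` (0.5 by default) and returns
# the surviving entries as a tf.SparseTensor `sp` with indices of shape
# [nnz, 2], values of shape [nnz], and dense_shape (2, 3), together with the
# count `nnz` of nonzero values.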


class ShapeOpsTest(test.TestCase):

  def _compareShape(self, x, use_gpu=False):
    np_ans = np.array(np.shape(x))
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.shape(x)
      tf_ans_64 = array_ops.shape(x, out_type=dtypes.int64)
      result = self.evaluate(tf_ans)
      result_64 = self.evaluate(tf_ans_64)
      self.assertAllEqual(np_ans, result)
      self.assertAllEqual(np_ans, result_64)
      self.assertShapeEqual(np_ans, tf_ans)

  def _compareShapeSparse(self, x_np, use_gpu=False):
    np_ans = np.array(np.shape(x_np))
    x_tf, unused_nnz = _sparsify(x_np)
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.shape(x_tf)
      result = self.evaluate(tf_ans)
      self.assertAllEqual(np_ans, result)
      self.assertShapeEqual(np_ans, tf_ans)

  def _compareShapeN(self, x, use_gpu=False):
    np_ans = np.array(np.shape(x))
    with self.cached_session(use_gpu=use_gpu) as sess:
      tf_ans = array_ops.shape_n([x, x, x])
      tf_ans_64 = array_ops.shape_n([x, x, x], out_type=dtypes.int64)
      result = self.evaluate(tf_ans)
      result_64 = self.evaluate(tf_ans_64)
      for i in range(3):
        self.assertAllEqual(np_ans, result[i])
        self.assertAllEqual(np_ans, result_64[i])
        self.assertShapeEqual(np_ans, tf_ans[i])

  def _compareRank(self, x, use_gpu=False):
    np_ans = np.asarray(np.ndim(x))
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.rank(x)
      result = self.evaluate(tf_ans)
      self.assertAllEqual(np_ans, result)
      self.assertShapeEqual(np_ans, tf_ans)

  def _compareRankSparse(self, x_np, use_gpu=False):
    np_ans = np.asarray(np.ndim(x_np))
    x_tf, unused_nnz = _sparsify(x_np)
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.rank(x_tf)
      result = self.evaluate(tf_ans)
      self.assertAllEqual(np_ans, result)
      self.assertShapeEqual(np_ans, tf_ans)

  def _compareSize(self, x, use_gpu=False):
    np_ans = np.asarray(np.size(x))
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.size(x)
      result = self.evaluate(tf_ans)
      tf_ans_64 = array_ops.size(x, out_type=dtypes.int64)
      result_64 = self.evaluate(tf_ans_64)
      self.assertAllEqual(np_ans, result)
      self.assertAllEqual(np_ans, result_64)
      self.assertShapeEqual(np_ans, tf_ans)

  def _compareSizeSparse(self, x_np, use_gpu=False):
    np_ans = np.asarray(np.size(x_np))
    x_tf, unused_nnz = _sparsify(x_np)
    with self.cached_session(use_gpu=use_gpu):
      tf_ans = array_ops.size(x_tf)
      result = self.evaluate(tf_ans)
      self.assertAllEqual(np_ans, result)
      self.assertShapeEqual(np_ans, tf_ans)

  def _testCpu(self, x):
    self._compareShape(x, use_gpu=False)
    self._compareShapeN(x, use_gpu=False)
    self._compareRank(x, use_gpu=False)
    self._compareSize(x, use_gpu=False)
    self._compareShapeSparse(x, use_gpu=False)
    self._compareRankSparse(x, use_gpu=False)
    self._compareSizeSparse(x, use_gpu=False)

  def _testGpu(self, x):
    self._compareShape(x, use_gpu=True)
    self._compareShapeN(x, use_gpu=True)
    self._compareRank(x, use_gpu=True)
    self._compareSize(x, use_gpu=True)
    self._compareShapeSparse(x, use_gpu=True)
    self._compareRankSparse(x, use_gpu=True)
    self._compareSizeSparse(x, use_gpu=True)

  def _testAll(self, x):
    self._testCpu(x)
    self._testGpu(x)

  def testBasic(self):
    self._testAll(np.random.randn(2))
    self._testAll(np.random.randn(2, 3))
    self._testAll(np.random.randn(2, 3, 5))
    self._testAll(np.random.randn(2, 3, 5, 7))
    self._testAll(np.random.randn(2, 3, 5, 7, 11))
    self._testAll(np.random.randn(2, 3, 5, 7, 11, 13))

  def testBool(self):
    self._testAll(np.random.choice((False, True), size=(2,)))
    self._testAll(np.random.choice((False, True), size=(2, 3)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11)))
    self._testAll(np.random.choice((False, True), size=(2, 3, 5, 7, 11, 13)))

  # Disabled because it takes too long to run, but manually verified
  # as passing at time of writing.
  def _test64BitOutput(self):
    with self.cached_session():
      inp = array_ops.zeros([2**31])
      num_elements = array_ops.size_internal(
          inp, optimize=False, out_type=dtypes.int64)
      self.assertEqual(2**31, self.evaluate(num_elements))

    # Too large for tf.int32 output.
    with self.assertRaises(errors_impl.InvalidArgumentError):
      with self.cached_session():
        inp = array_ops.zeros([2**31])
        num_elements = array_ops.size_internal(
            inp, optimize=False, out_type=dtypes.int32)
        self.assertEqual(2**31, self.evaluate(num_elements))

  def _compareExpandDims(self, x, dim, use_gpu):
    np_ans = np.expand_dims(x, axis=dim)
    with self.cached_session(use_gpu=use_gpu):
      tensor = array_ops.expand_dims(x, dim)
      tf_ans = self.evaluate(tensor)
      self.assertShapeEqual(np_ans, tensor)
      self.assertAllEqual(np_ans, tf_ans)

  def _compareExpandDimsAll(self, x, dim):
    self._compareExpandDims(x, dim, False)
    self._compareExpandDims(x, dim, True)

  def testExpandDims(self):
    self._compareExpandDimsAll(np.zeros([2]), 0)
    self._compareExpandDimsAll(np.zeros([2]), 1)
    self._compareExpandDimsAll(np.zeros([2]), -1)

    self._compareExpandDimsAll(np.zeros([2, 3]), 0)
    self._compareExpandDimsAll(np.zeros([2, 3]), 1)
    self._compareExpandDimsAll(np.zeros([2, 3]), 2)
    self._compareExpandDimsAll(np.zeros([2, 3]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3]), -2)

    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)

    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)

  def testExpandDimsBool(self):
    choice = lambda s: np.random.choice((False, True), size=s)
    self._compareExpandDimsAll(choice([2]), 0)
    self._compareExpandDimsAll(choice([2]), 1)
    self._compareExpandDimsAll(choice([2]), -1)

    self._compareExpandDimsAll(choice([2, 3]), 0)
    self._compareExpandDimsAll(choice([2, 3]), 1)
    self._compareExpandDimsAll(choice([2, 3]), 2)
    self._compareExpandDimsAll(choice([2, 3]), -1)
    self._compareExpandDimsAll(choice([2, 3]), -2)

    self._compareExpandDimsAll(choice([2, 3, 5]), 0)
    self._compareExpandDimsAll(choice([2, 3, 5]), 1)
    self._compareExpandDimsAll(choice([2, 3, 5]), 2)
    self._compareExpandDimsAll(choice([2, 3, 5]), 3)

    self._compareExpandDimsAll(choice([2, 3, 5]), -1)
    self._compareExpandDimsAll(choice([2, 3, 5]), -2)
    self._compareExpandDimsAll(choice([2, 3, 5]), -3)
    self._compareExpandDimsAll(choice([2, 3, 5]), -4)

  @test_util.run_deprecated_v1
  def testExpandDimsErrors(self):
    with self.cached_session():
      self.assertRaises(ValueError, array_ops.expand_dims,
                        np.zeros([2, 3, 5]), -5)
      self.assertRaises(ValueError, array_ops.expand_dims,
                        [False, True, True], -5)
      self.assertRaises(ValueError, array_ops.expand_dims,
                        np.zeros([2, 3, 5]), 4)
      self.assertRaises(ValueError, array_ops.expand_dims,
                        [False, True, True], 4)

  @test_util.run_deprecated_v1
  def testExpandDimsGradient(self):
    with self.cached_session():
      inp = constant_op.constant(
          np.random.rand(4, 2).astype("f"), dtype=dtypes.float32)
      squeezed = array_ops.expand_dims(inp, 1)

      err = gradient_checker.compute_gradient_error(inp, [4, 2], squeezed,
                                                    [4, 1, 2])
      self.assertLess(err, 1e-3)

  @test_util.run_deprecated_v1
  def testExpandDimsScalar(self):
    with self.cached_session():
      inp = constant_op.constant(7)
      self.assertAllEqual([7], array_ops.expand_dims(inp, 0).eval())
      self.assertAllEqual([7], array_ops.expand_dims(inp, -1).eval())

      inp = constant_op.constant(True)
      self.assertAllEqual([True], array_ops.expand_dims(inp, 0).eval())
      self.assertAllEqual([True], array_ops.expand_dims(inp, -1).eval())

  def testExpandDimsDimType(self):
    for dtype in [dtypes.int32, dtypes.int64]:
      x = np.zeros([2])
      np_ans = np.expand_dims(x, axis=0)
      with self.cached_session(use_gpu=True):
        tensor = array_ops.expand_dims(x, constant_op.constant(0, dtype))
        tf_ans = self.evaluate(tensor)
        self.assertShapeEqual(np_ans, tensor)
        self.assertAllEqual(np_ans, tf_ans)

  def _compareSqueeze(self, x, squeeze_dims, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      if squeeze_dims:
        np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
        tensor = array_ops.squeeze(x, squeeze_dims)
        tf_ans = self.evaluate(tensor)
      else:
        np_ans = np.squeeze(x)
        tensor = array_ops.squeeze(x)
        tf_ans = self.evaluate(tensor)
      self.assertShapeEqual(np_ans, tensor)
      self.assertAllEqual(np_ans, tf_ans)

  def _compareSqueezeAll(self, x, squeeze_dims=None):
    if squeeze_dims is None:
      squeeze_dims = []
    self._compareSqueeze(x, squeeze_dims, False)
    self._compareSqueeze(x, squeeze_dims, True)

  def testSqueeze(self):
    # Nothing to squeeze.
    self._compareSqueezeAll(np.zeros([2]))
    self._compareSqueezeAll(np.zeros([2, 3]))

    # Squeeze the middle element away.
    self._compareSqueezeAll(np.zeros([2, 1, 2]))

    # Squeeze on both ends.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))

  def testSqueezeBool(self):
    choice = lambda s: np.random.choice((False, True), size=s)
    # Nothing to squeeze.
    self._compareSqueezeAll(choice([2]))
    self._compareSqueezeAll(choice([2, 3]))

    # Squeeze the middle element away.
    self._compareSqueezeAll(choice([2, 1, 2]))

    # Squeeze on both ends.
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]))

  def testSqueezeSpecificDimension(self):
    # Positive squeeze dim index.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])

    # Negative squeeze dim index.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])

  def testSqueezeSpecificDimensionBool(self):
    choice = lambda s: np.random.choice((False, True), size=s)
    # Positive squeeze dim index.
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [2, 4])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [0, 4, 2])

    # Negative squeeze dim index.
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-1])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5])
    self._compareSqueezeAll(choice([1, 2, 1, 3, 1]), [-3, -5, -1])

  def testSqueezeAllOnes(self):
    # Numpy squeezes a 1 element tensor into a zero dimensional tensor.
    # Verify that we do the same.
    for use_gpu in [False, True]:
      with self.cached_session(use_gpu=use_gpu):
        tensor = array_ops.squeeze(np.zeros([1, 1, 1]), [])
        self.assertEqual(np.shape(1), tensor.get_shape())
        tf_ans = self.evaluate(tensor)
        self.assertEqual(np.shape(1), tf_ans.shape)

  def testSqueezeAllOnesBool(self):
    # Numpy squeezes a 1 element tensor into a zero dimensional tensor.
    # Verify that we do the same.
    for use_gpu in [False, True]:
      with self.cached_session(use_gpu=use_gpu):
        tensor = array_ops.squeeze([[[False]]], [])
        self.assertEqual(np.shape(1), tensor.get_shape())
        tf_ans = self.evaluate(tensor)
        self.assertEqual(np.shape(1), tf_ans.shape)

  @test_util.run_deprecated_v1
  def testSqueezeOnlyOnes(self):
    for use_gpu in [False, True]:
      with self.cached_session(use_gpu=use_gpu):
        input_1x1x3 = np.zeros([1, 1, 3])
        self._compareSqueezeAll(input_1x1x3)
        self._compareSqueezeAll(input_1x1x3, [0])
        self._compareSqueezeAll(input_1x1x3, [1])
        self.assertRaises(ValueError, array_ops.squeeze, input_1x1x3, [2])

  @test_util.run_deprecated_v1
  def testSqueezeErrors(self):
    for use_gpu in [False, True]:
      with self.cached_session(use_gpu=use_gpu):
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [-4])
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [0, -4])
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [3])
        self.assertRaises(ValueError, array_ops.squeeze,
                          np.zeros([1, 2, 1]), [2, 3])

  @test_util.run_deprecated_v1
  def testSqueezeGradient(self):
    with self.cached_session():
      inp = np.random.rand(4, 2).astype("f")
      a = array_ops.reshape(inp, [4, 1, 2])
      squeezed = array_ops.squeeze(a, [])

      err = gradient_checker.compute_gradient_error(a, [4, 1, 2], squeezed,
                                                    [4, 2])
      self.assertLess(err, 1e-3)

  @test_util.run_deprecated_v1
  def testSqueezeGradientWithSqueezeDims(self):
    with self.cached_session():
      inp = np.random.rand(4, 2).astype("f")
      a = array_ops.reshape(inp, [4, 1, 2, 1])
      squeezed = array_ops.squeeze(a, [1])

      err = gradient_checker.compute_gradient_error(a, [4, 1, 2, 1], squeezed,
                                                    [4, 2, 1])
      self.assertLess(err, 1e-3)

  @test_util.run_deprecated_v1
  def testSqueezeWithUnknownShape(self):
    with self.cached_session():
      a = array_ops.placeholder(dtypes.float32, shape=[2, None])

      squeezed = array_ops.squeeze(a, [1])
      self.assertEqual([2], squeezed.get_shape().as_list())

      squeezed = array_ops.squeeze(a)
      self.assertEqual(None, squeezed.get_shape())

      self.assertRaises(ValueError, array_ops.squeeze, a, [0])
      self.assertRaises(ValueError, array_ops.squeeze, a, [100])


class TileTest(test.TestCase, parameterized.TestCase):

  def testScalar(self):
    for use_gpu in False, True:
      with self.cached_session(use_gpu=use_gpu):
        a = constant_op.constant(7, shape=[], dtype=dtypes.float32)
        tiled = array_ops.tile(a, [])
        result = self.evaluate(tiled)
        self.assertEqual(result.shape, ())
        self.assertEqual([], tiled.get_shape())
        self.assertEqual(7, result)

  def testSimple(self):
    # multiples could be int32 or int64
    for dtype in [dtypes.int32, dtypes.int64]:
      with self.cached_session(use_gpu=True):
        inp = np.random.rand(4, 1).astype(np.float32)
        a = constant_op.constant(inp)
        tiled = array_ops.tile(a, constant_op.constant([1, 4], dtype=dtype))
        result = self.evaluate(tiled)
        self.assertEqual(result.shape, (4, 4))
        self.assertEqual([4, 4], tiled.get_shape())
        self.assertTrue((result == np.tile(inp, (1, 4))).all())

  def testIdentityTileAndGrad(self):
    with self.cached_session():
      inp = np.random.rand(4, 1).astype(np.float32)
      a = constant_op.constant(inp)
      tiled = array_ops.tile(a, [1, 1])
      result = self.evaluate(tiled)
      self.assertEqual(result.shape, (4, 1))
      self.assertEqual([4, 1], tiled.get_shape())
      self.assertTrue((result == np.tile(inp, (1, 1))).all())

  def testEmpty(self):
    with self.cached_session():
      inp = np.random.rand(2, 3).astype(np.float32)
      a = constant_op.constant(inp)
      tiled = array_ops.tile(a, [5, 0])
      result = self.evaluate(tiled)
      self.assertEqual(result.shape, (10, 0))
      self.assertEqual([10, 0], tiled.get_shape())

  @test_util.run_deprecated_v1
  def testUnknownInputShape(self):
    """Importing can call _TileShape without shape of <multiples> known."""
    with self.cached_session():
      inp = array_ops.placeholder(dtypes.float32)  # unknown shape
      multiples = constant_op.constant([1, 2, 3, 4], dtype=np.int32)
      tiled = array_ops.tile(inp, multiples)
      gdef = tiled.graph.as_graph_def()

      # Move the tile op to the start of the graph so that shapes of its
      # inputs are not available when the shape function runs on import.
      swapped = False
      for i, n in enumerate(gdef.node):
        if n.op == "Tile":
          # Swap tile op to be first in gdef.node
          assert i != 0
          new_node = node_def_pb2.NodeDef()
          new_node.CopyFrom(gdef.node[i])
          gdef.node[i].CopyFrom(gdef.node[0])
          gdef.node[0].CopyFrom(new_node)
          swapped = True
      assert swapped

      tiled_imported, = importer.import_graph_def(
          gdef, return_elements=[tiled.name])
      self.assertEqual(4, tiled_imported.get_shape().ndims)

  def testTypes(self):
    types_to_test = {
        "bool": (dtypes.bool, bool),
        "float32": (dtypes.float32, float),
        "float64": (dtypes.float64, float),
        "complex64": (dtypes.complex64, complex),
        "complex128": (dtypes.complex128, complex),
        "uint8": (dtypes.uint8, int),
        "int32": (dtypes.int32, int),
        "int64": (dtypes.int64, int),
        bytes: (dtypes.string, bytes)
    }
    for dtype_np, (dtype_tf, cast) in types_to_test.items():
      with self.cached_session(use_gpu=True):
        inp = np.random.rand(4, 1).astype(dtype_np)
        a = constant_op.constant(
            [cast(x) for x in inp.ravel(order="C")],
            shape=[4, 1],
            dtype=dtype_tf)
        tiled = array_ops.tile(a, [1, 4])
        result = self.evaluate(tiled)
        self.assertEqual(result.shape, (4, 4))
        self.assertEqual([4, 4], tiled.get_shape())
        self.assertAllEqual(result, np.tile(inp, (1, 4)))

  @test_util.run_deprecated_v1
  def testInvalidDim(self):
    with self.cached_session():
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=[4, 1],
          dtype=dtypes.float32)
      # Wrong length of multiples.
      with self.assertRaises(ValueError):
        array_ops.tile(a, [1, 4, 2])
      # Wrong rank for multiples.
      with self.assertRaises(ValueError):
        array_ops.tile(a, [[2, 3], [3, 4]]).eval()

  def _RunAndVerifyResult(self, rank, use_gpu):
    with self.cached_session(use_gpu=use_gpu):
      # Random dims of given rank
      input_shape = np.random.randint(1, 4, size=rank)
      inp = np.random.rand(*input_shape).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.ravel(order="C")],
          shape=input_shape,
          dtype=dtypes.float32)
      multiples = np.random.randint(1, 4, size=rank).astype(np.int32)
      tiled = array_ops.tile(a, multiples)
      result = self.evaluate(tiled)
      self.assertTrue((np.array(multiples) * np.array(inp.shape) == np.array(
          result.shape)).all())
      self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
      self.assertShapeEqual(result, tiled)

  def testRandom(self):
    # test low rank, like 5
    for _ in range(5):
      self._RunAndVerifyResult(5, use_gpu=False)
    for _ in range(5):
      self._RunAndVerifyResult(5, use_gpu=True)
    # test high rank, like 10
    for _ in range(5):
      self._RunAndVerifyResult(10, use_gpu=False)
    for _ in range(5):
      self._RunAndVerifyResult(10, use_gpu=True)
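
  # Note on the gradient tests below: tiling copies the input, so the gradient
  # of tile() with respect to its input is the upstream gradient summed over
  # the tiled copies. For example, tiling a [4, 1] tensor with multiples
  # [1, 4] gives a [4, 4] output, and the input gradient is the row-wise sum
  # of the [4, 4] output gradient reshaped back to [4, 1], which is exactly
  # what np.sum(grad_inp, axis=1).reshape(4, 1) computes.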

  @parameterized.parameters(dtypes.int32, dtypes.int64)
  @test_util.run_deprecated_v1
  def testGradientSimpleReduction(self, multiples_dtype):
    with self.cached_session():
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
      multiples = constant_op.constant([1, 4], dtype=multiples_dtype)
      tiled = array_ops.tile(a, multiples)
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      self.assertShapeEqual(inp, grad)
      result = self.evaluate(grad)
      self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)

  @test_util.run_deprecated_v1
  def testGradientStridedReduction(self):
    with self.cached_session():
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 2])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      self.assertShapeEqual(inp, grad)
      result = self.evaluate(grad)
      expected_shape = [4, 2]
      expected = np.zeros(expected_shape)
      expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
      expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
      self.assertTrue((np.abs(expected - result) < 1e-3).all())

  @test_util.run_deprecated_v1
  def testGradientSimpleReductionOnGPU(self):
    with self.session(use_gpu=True):
      inp = np.random.rand(4, 1).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 1], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 4])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      result = self.evaluate(grad)
      self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)

  @test_util.run_deprecated_v1
  def testGradientStridedReductionOnGPU(self):
    with self.session(use_gpu=True):
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 2])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = constant_op.constant(
          [float(x) for x in grad_inp.flatten()], shape=grad_shape)
      grad = gradients_impl.gradients([tiled], [a], [grad_tensor])[0]
      result = self.evaluate(grad)
      expected_shape = [4, 2]
      expected = np.zeros(expected_shape)
      expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
      expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
      self.assertAllClose(expected, result, 1e-3)

  def _RunAndVerifyGradientResult(self, input_shape, multiples):
    for use_gpu in False, True:
      with self.cached_session(use_gpu=use_gpu):
        # Random values
        inp = np.asarray(np.random.rand(*input_shape))
        a = constant_op.constant(inp, dtype=dtypes.float64)
        tiled = array_ops.tile(a, multiples)
        grad_shape = list(np.array(multiples) * np.array(inp.shape))
        err = gradient_checker.compute_gradient_error(
            a, list(input_shape), tiled, grad_shape, x_init_value=inp)
        print("tile(float) error = ", err)
        self.assertLess(err, 1e-3)

  @test_util.run_deprecated_v1
  def testGradientRandomScalar(self):
    self._RunAndVerifyGradientResult([], [])

  @test_util.run_deprecated_v1
  def testGradientRandom(self):
    self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 1, 1, 1, 1])
    self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
    self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
    self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])

  @test_util.run_deprecated_v1
  def testGradientStridedReductionGC(self):
    with self.cached_session():
      inp = np.random.rand(4, 2).astype("f")
      a = constant_op.constant(
          [float(x) for x in inp.flatten()], shape=[4, 2], dtype=dtypes.float32)
      tiled = array_ops.tile(a, [1, 2])
      err = gradient_checker.compute_gradient_error(a, [4, 2], tiled, [4, 4])
      self.assertLess(err, 1e-3)

  @parameterized.parameters(dtypes.int32, dtypes.int64)
  @test_util.run_deprecated_v1
  def testGradientWithSparseGradWithRank1(self, multiples_dtype):
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
                                  dtype=dtypes.float32)
    # Use the parameterized dtype for the multiples; the original hard-coded
    # int64 left multiples_dtype unused.
    multiples = constant_op.constant([3], dtype=multiples_dtype)
    outputs = array_ops.gather(array_ops.tile(inputs, multiples),
                               [1, 5, 9, 3, 7, 2, 2, 2])
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testGradientWithSparseGradWithRank3(self):
    inputs = constant_op.constant([1.0, 2.0, 3.0, 4.0],
                                  dtype=dtypes.float32)
    inputs = array_ops.reshape(inputs, [-1, 1, 1])
    outputs = array_ops.gather(array_ops.tile(inputs, [3, 4, 2]),
                               [1, 5, 9, 3, 7, 2, 2, 2])
    with self.cached_session():
      error = gradient_checker.compute_gradient_error(
          inputs, inputs.get_shape().as_list(),
          outputs, outputs.get_shape().as_list())
      self.assertLess(error, 1e-4)

  @test_util.run_deprecated_v1
  def testShapeFunctionEdgeCases(self):
    # Unknown multiples shape.
    inp = constant_op.constant(0.0, shape=[4, 4, 4, 4])
    tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())

    # Unknown input shape.
    inp = array_ops.placeholder(dtypes.float32)
    tiled = array_ops.tile(inp, [2, 2, 2, 2])
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())

    # Unknown input and multiples shape.
    inp = array_ops.placeholder(dtypes.float32)
    tiled = array_ops.tile(inp, array_ops.placeholder(dtypes.int32))
    self.assertIs(None, tiled.get_shape().ndims)

    # Known input and partially known multiples.
    inp = constant_op.constant(0.0, shape=[1, 1])
    tiled = array_ops.tile(inp, [array_ops.placeholder(dtypes.int32), 7])
    self.assertEqual([None, 7], tiled.get_shape().as_list())

    # Mismatched input rank and multiples length.
    inp = array_ops.placeholder(dtypes.float32, shape=[None, None])
    with self.assertRaises(ValueError):
      tiled = array_ops.tile(
          inp, array_ops.placeholder(dtypes.int32, shape=[3]))


if __name__ == "__main__":
  test.main()