# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training routines."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python import keras
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers.convolutional import Conv2D
from tensorflow.python.platform import test


class TrainingGPUTest(test.TestCase):

  @test_util.run_in_graph_and_eager_modes
  def test_model_with_crossentropy_losses_channels_first(self):
    """Tests use of all crossentropy losses with `channels_first`.

    Tests `sparse_categorical_crossentropy`, `categorical_crossentropy`,
    and `binary_crossentropy`.
    Verifies that evaluate gives the same result with either `channels_first`
    or `channels_last` image_data_format.
    """
    def prepare_simple_model(input_tensor, loss_name, target):
      axis = 1 if K.image_data_format() == 'channels_first' else -1
      loss = None
      num_channels = None
      activation = None
      if loss_name == 'sparse_categorical_crossentropy':
        loss = lambda y_true, y_pred: K.sparse_categorical_crossentropy(  # pylint: disable=g-long-lambda
            y_true, y_pred, axis=axis)
        num_channels = np.amax(target) + 1
        activation = 'softmax'
      elif loss_name == 'categorical_crossentropy':
        loss = lambda y_true, y_pred: K.categorical_crossentropy(  # pylint: disable=g-long-lambda
            y_true, y_pred, axis=axis)
        num_channels = target.shape[axis]
        activation = 'softmax'
      elif loss_name == 'binary_crossentropy':
        loss = lambda y_true, y_pred: K.binary_crossentropy(y_true, y_pred)  # pylint: disable=unnecessary-lambda
        num_channels = target.shape[axis]
        activation = 'sigmoid'
      predictions = Conv2D(num_channels,
                           1,
                           activation=activation,
                           kernel_initializer='ones',
                           bias_initializer='ones')(input_tensor)
      simple_model = keras.models.Model(inputs=input_tensor,
                                        outputs=predictions)
      simple_model.compile(optimizer='rmsprop', loss=loss)
      return simple_model

    if test.is_gpu_available(cuda_only=True):
      with test_util.use_gpu():
        losses_to_test = ['sparse_categorical_crossentropy',
                          'categorical_crossentropy', 'binary_crossentropy']

        data_channels_first = np.array([[[[8., 7.1, 0.], [4.5, 2.6, 0.55],
                                          [0.9, 4.2, 11.2]]]], dtype=np.float32)
        # Labels for testing 4-class sparse_categorical_crossentropy, 4-class
        # categorical_crossentropy, and 2-class binary_crossentropy:
        labels_channels_first = [
            np.array([[[[0, 1, 3], [2, 1, 0], [2, 2, 1]]]], dtype=np.float32),
            np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 0]],
                       [[1, 0, 0], [0, 0, 1], [0, 1, 0]],
                       [[0, 0, 0], [1, 0, 0], [0, 0, 1]],
                       [[0, 0, 1], [0, 0, 0], [1, 0, 0]]]], dtype=np.float32),
            np.array([[[[0, 1, 0], [0, 1, 0], [0, 0, 1]],
                       [[1, 0, 1], [1, 0, 1], [1, 1, 0]]]], dtype=np.float32)]
        # Compute one loss for each loss function in the list `losses_to_test`:
        loss_channels_last = [0., 0., 0.]
        loss_channels_first = [0., 0., 0.]

        old_data_format = K.image_data_format()

        # Evaluate a simple network with channels last, with all three loss
        # functions:
        K.set_image_data_format('channels_last')
        data = np.moveaxis(data_channels_first, 1, -1)
        for index, loss_function in enumerate(losses_to_test):
          labels = np.moveaxis(labels_channels_first[index], 1, -1)
          inputs = keras.Input(shape=(3, 3, 1))
          model = prepare_simple_model(inputs, loss_function, labels)
          loss_channels_last[index] = model.evaluate(x=data, y=labels,
                                                     batch_size=1, verbose=0)

        # Evaluate the same network with channels first, with all three loss
        # functions:
        K.set_image_data_format('channels_first')
        data = data_channels_first
        for index, loss_function in enumerate(losses_to_test):
          labels = labels_channels_first[index]
          inputs = keras.Input(shape=(1, 3, 3))
          model = prepare_simple_model(inputs, loss_function, labels)
          loss_channels_first[index] = model.evaluate(x=data, y=labels,
                                                      batch_size=1, verbose=0)

        K.set_image_data_format(old_data_format)

        np.testing.assert_allclose(loss_channels_first,
                                   loss_channels_last,
                                   err_msg='{}{}'.format(
                                       'Computed different losses for ',
                                       'channels_first and channels_last'))


if __name__ == '__main__':
  test.main()
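

# A minimal illustrative sketch, not part of the original test: it shows the
# `np.moveaxis` conversion between `channels_first` (NCHW) and `channels_last`
# (NHWC) layouts that the test above relies on when comparing the two data
# formats. The helper name and the zero-filled example array are assumptions
# added only for illustration; nothing in the test calls this function.
def _moveaxis_layout_sketch():
  # A single 1-channel 3x3 image in channels_first layout: (batch, C, H, W).
  data_nchw = np.zeros((1, 1, 3, 3), dtype=np.float32)
  # Shift the channels axis to the end to get channels_last: (batch, H, W, C).
  data_nhwc = np.moveaxis(data_nchw, 1, -1)
  assert data_nhwc.shape == (1, 3, 3, 1)
  return data_nhwc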