/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#define EIGEN_USE_THREADS

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <tuple>
#include <vector>

#include "tensorflow/cc/client/client_session.h"
#include "tensorflow/cc/ops/array_ops.h"
#include "tensorflow/cc/ops/const_op.h"
#include "tensorflow/cc/ops/image_ops.h"
#include "tensorflow/core/framework/node_def_builder.h"
#include "tensorflow/core/framework/node_def_util.h"
#include "tensorflow/core/framework/shape_inference_testutil.h"
#include "tensorflow/core/framework/tensor_testutil.h"
#include "tensorflow/core/graph/gradients.h"
#include "tensorflow/core/kernels/quantization_utils.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/test.h"

namespace tensorflow {

namespace {
constexpr const float RESIZE_VAL_TOLERANCE = 1.0e-8;

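// Builds a {batch, height, width, channels} tensor whose flattened elements
// form the ramp i / ratio, quantized into [min, max] for quantized types T.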
template <typename T>
Tensor BuildTensor(const int batch_size, const int height, const int width,
                   const int channels, const float ratio, const float min,
                   const float max) {
  Tensor tensor(DataTypeToEnum<T>::value,
                TensorShape({batch_size, height, width, channels}));
  for (int64 i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<T>()(i) =
        FloatToQuantized<T>(static_cast<float>(i) / ratio, min, max);
  }
  return tensor;
}

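// Float specialization: stores the raw ramp values without quantization, so
// min and max are unused.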
template <>
Tensor BuildTensor<float>(const int batch_size, const int height,
                          const int width, const int channels,
                          const float ratio, const float min,
                          const float max) {
  Tensor tensor(DT_FLOAT, TensorShape({batch_size, height, width, channels}));
  for (int64 i = 0; i < tensor.NumElements(); ++i) {
    tensor.flat<float>()(i) = static_cast<float>(i) / ratio;
  }
  return tensor;
}

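// Maps an output size back to an input-space scale factor; with align_corners
// the corner pixels of input and output are aligned exactly.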
float CalculateResizeScale(int64 in_size, int64 out_size, bool align_corners) {
  return (align_corners && out_size > 1)
             ? (in_size - 1) / static_cast<float>(out_size - 1)
             : in_size / static_cast<float>(out_size);
}

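// Returns (lower offset, upper offset, lerp fraction) for one output
// coordinate: the offsets are the two source indices scaled by `step`, and
// the fraction is the interpolation weight toward the upper index.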
inline std::tuple<int64, int64, float> GetReferenceWeight(
    const bool half_pixel_centers, const int64 out_size, const int64 in_size,
    const int step, const int index, const float scale) {
  const float in = half_pixel_centers
                       ? (static_cast<float>(index) + 0.5f) * scale - 0.5f
                       : index * scale;
  const float in_f = std::floor(in);
  const int64 lower = std::max(static_cast<int64>(in_f), static_cast<int64>(0));
  const int64 upper = std::min(static_cast<int64>(std::ceil(in)), in_size - 1);
  return std::make_tuple(lower * step, upper * step, in - in_f);
}

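// Reference bilinear interpolation for quantized types: dequantizes the four
// corners, lerps in float, and requantizes the result into [min, max].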
template <typename T>
T ComputeLerpReference(const T in_top_left, const T in_top_right,
                       const T in_bottom_left, const T in_bottom_right,
                       const float x_lerp, const float y_lerp, const float min,
                       const float max) {
  const float top_left = QuantizedToFloat<T>(in_top_left, min, max);
  const float top_right = QuantizedToFloat<T>(in_top_right, min, max);
  const float bottom_left = QuantizedToFloat<T>(in_bottom_left, min, max);
  const float bottom_right = QuantizedToFloat<T>(in_bottom_right, min, max);
  const float top = top_left + (top_right - top_left) * x_lerp;
  const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp;
  const float out = top + (bottom - top) * y_lerp;
  return FloatToQuantized<T>(out, min, max);
}

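// Float specialization: plain bilinear interpolation; min and max are unused.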
template <>
float ComputeLerpReference<float>(const float in_top_left,
                                  const float in_top_right,
                                  const float in_bottom_left,
                                  const float in_bottom_right,
                                  const float x_lerp, const float y_lerp,
                                  const float min, const float max) {
  const float top = in_top_left + (in_top_right - in_top_left) * x_lerp;
  const float bottom =
      in_bottom_left + (in_bottom_right - in_bottom_left) * x_lerp;
  return top + (bottom - top) * y_lerp;
}

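// Computes the reference value for output pixel (b, y, x, c) directly from
// the input image data, mirroring the op's bilinear interpolation.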
template <typename T>
T CalcReferenceResizedVal(const T* image_data, const bool half_pixel_centers,
                          const int batch_size, const int64 in_height,
                          const int64 in_width, const int64 out_height,
                          const int64 out_width, const int channels,
                          const float height_scale, const float width_scale,
                          const float min, const float max, const int b,
                          const int64 x, const int64 y, const int c) {
  const std::tuple<int64, int64, float> x_weight = GetReferenceWeight(
      half_pixel_centers, out_width, in_width, channels, x, width_scale);
  const std::tuple<int64, int64, float> y_weight = GetReferenceWeight(
      half_pixel_centers, out_height, in_height, 1, y, height_scale);

  const int64 in_row_size = in_width * channels;
  const int64 in_batch_num_values = in_height * in_row_size;

  const int y_lower_index =
      b * in_batch_num_values + std::get<0>(y_weight) * in_row_size;
  const int y_upper_index =
      b * in_batch_num_values + std::get<1>(y_weight) * in_row_size;

  const int64 xs_lower = std::get<0>(x_weight);
  const int64 xs_upper = std::get<1>(x_weight);
  const float xs_lerp = std::get<2>(x_weight);
  const float ys_lerp = std::get<2>(y_weight);
  const float top_left = image_data[y_lower_index + xs_lower + c];
  const float top_right = image_data[y_lower_index + xs_upper + c];
  const float bottom_left = image_data[y_upper_index + xs_lower + c];
  const float bottom_right = image_data[y_upper_index + xs_upper + c];
  const float val =
      ComputeLerpReference<T>(top_left, top_right, bottom_left, bottom_right,
                              xs_lerp, ys_lerp, min, max);
  return val;
}

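// Compares every element of the op's output against the reference
// implementation. When `relative` is false the comparison uses an absolute
// tolerance on the quantized values; otherwise it uses a relative tolerance
// on the dequantized float values.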
template <typename T>
void CheckTensorValue(const T* in_data, const T* out_data, const int batch_size,
                      const int64 in_height, const int64 in_width,
                      const int64 out_height, const int64 out_width,
                      const int channels, const bool align_corners,
                      const bool half_pixel_centers, const float min,
                      const float max, const float tolerance,
                      const bool relative) {
  const int64 out_row_size = out_width * channels;
  const float height_scale =
      CalculateResizeScale(in_height, out_height, align_corners);
  const float width_scale =
      CalculateResizeScale(in_width, out_width, align_corners);

  for (int b = 0; b < batch_size; ++b) {
    for (int64 y = 0; y < out_height; ++y) {
      for (int64 x = 0; x < out_width; ++x) {
        for (int c = 0; c < channels; ++c) {
          const T ref_qval = CalcReferenceResizedVal<T>(
              in_data, half_pixel_centers, batch_size, in_height, in_width,
              out_height, out_width, channels, height_scale, width_scale, min,
              max, b, x, y, c);
          const T qval =
              out_data[(b * out_height + y) * out_row_size + x * channels + c];
          const float ref_val = QuantizedToFloat<T>(ref_qval, min, max);
          const float val = QuantizedToFloat<T>(qval, min, max);
          if (!relative) {
            const int q_tolerance = std::round(tolerance);
            EXPECT_TRUE(std::abs(static_cast<int32>(ref_qval) -
                                 static_cast<int32>(qval)) <= q_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", qval = " << qval
                << ", ref qval = " << ref_qval << ", " << q_tolerance;
          } else {
            const float rel_tolerance = std::max(ref_val, 1.0f) * tolerance;
            EXPECT_NEAR(ref_val, val, rel_tolerance)
                << "ref = " << ref_val << ", val = " << val << ", " << b << ", "
                << y << ", " << x << ", " << c << ", qval = " << qval;
          }
        }
      }
    }
  }
}

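// Builds a small graph around QuantizedResizeBilinear (fed through a
// placeholder), runs it `iterations` times in a ClientSession, stores the
// three outputs (resized image, out_min, out_max), and optionally logs
// throughput.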
void TestResizeBilinear(const Tensor& image_tensor, const DataType dt,
                        const Input::Initializer& new_size,
                        const bool show_time, const int64 iterations,
                        const float min, const float max,
                        const bool half_pixel_centers,
                        std::vector<Tensor>* outputs) {
  Scope root = Scope::NewRootScope();

  Output placeholder = ops::Placeholder(root.WithOpName("placeholder"), dt);
  Output size = ops::Const<int32>(root.WithOpName("size"), new_size);
  Output in_min = ops::Const<float>(root.WithOpName("min"), min);
  Output in_max = ops::Const<float>(root.WithOpName("max"), max);

  ops::QuantizedResizeBilinear qrb = ops::QuantizedResizeBilinear(
      root.WithOpName("qrb"), placeholder, size, in_min, in_max,
      ops::QuantizedResizeBilinear::HalfPixelCenters(half_pixel_centers));

  TF_EXPECT_OK(root.status());

  ClientSession session(root);

  int64 total_duration = 0;
  outputs->clear();

  for (int i = 0; i < iterations; ++i) {
    const int64 start_time = Env::Default()->NowMicros();
    TF_EXPECT_OK(session.Run({{placeholder, image_tensor}},
                             {qrb.resized_images, qrb.out_min, qrb.out_max},
                             outputs));
    const int64 end_time = Env::Default()->NowMicros();
    total_duration += end_time - start_time;
  }
  const int64 one_run_duration = total_duration / iterations;

  const int64 num_ops = outputs->at(0).NumElements();

  const double million_ops_per_second =
      (iterations * num_ops) / static_cast<double>(total_duration);

  if (show_time) {
    LOG(INFO) << "Time resize bilinear: "
              << TensorShape(image_tensor.shape()).DebugString()
              << ": iterations=" << iterations
              << ", MOps/s=" << million_ops_per_second
              << ", one_run_duration=" << one_run_duration
              << ", total_duration=" << total_duration;
  }
}

}  // namespace

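// Resizes a quantized 1 x 1 x 128 x 1 ramp image to width 256 and checks the
// result both against hand-computed expectations (even outputs copy an input
// pixel, odd outputs average their two neighbors) and against the reference
// implementation.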
void TestResizeBilinearOneDim() {
  constexpr float TOLERANCE = 1.0e-5;
  constexpr int IN_WIDTH = 128;
  constexpr int OUT_WIDTH = 256;
  constexpr float MIN = 0.0f;
  constexpr float MAX = 256.0f;
  constexpr float SCALE = static_cast<float>(IN_WIDTH) / OUT_WIDTH;
  Tensor image_quantized_tensor(DT_QINT32, TensorShape({1, 1, IN_WIDTH, 1}));

  for (int64 i = 0; i < image_quantized_tensor.NumElements(); ++i) {
    image_quantized_tensor.flat<qint32>()(i) =
        FloatToQuantized<qint32>(static_cast<float>(i), MIN, MAX);
  }

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DT_QINT32, {1, OUT_WIDTH}, false,
                     1, MIN, MAX, false, &outputs);
  ASSERT_EQ(3, outputs.size());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).NumElements());
  ASSERT_EQ(4, outputs.at(0).shape().dims());
  ASSERT_EQ(OUT_WIDTH, outputs.at(0).shape().dim_size(2));

  // Manual value testing
  for (int64 i = 0; i < outputs.at(0).NumElements(); ++i) {
    const float resized_image_val =
        QuantizedToFloat<qint32>(outputs.at(0).flat<qint32>()(i), MIN, MAX);
    float expected_val = 0.0f;
    if (i == 0 || i == outputs.at(0).NumElements() - 1 || i % 2 == 0) {
      expected_val = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
    } else {
      const float image_val0 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2), MIN, MAX);
      const float image_val1 = QuantizedToFloat<qint32>(
          image_quantized_tensor.flat<qint32>()(i / 2 + 1), MIN, MAX);
      expected_val = (image_val0 + image_val1) * SCALE;
    }
    VLOG(1) << "(" << i << ") " << expected_val << ", " << resized_image_val;
    EXPECT_NEAR(expected_val, resized_image_val, RESIZE_VAL_TOLERANCE)
        << expected_val << ", " << resized_image_val;
  }

  // Value testing with reference implementation
  CheckTensorValue<qint32>(image_quantized_tensor.flat<qint32>().data(),
                           outputs.at(0).flat<qint32>().data(),
                           /*batch_size=*/1,
                           /*in_height=*/IN_WIDTH,
                           /*in_width=*/1,
                           /*out_height=*/OUT_WIDTH,
                           /*out_width=*/1,
                           /*channels=*/1,
                           /*align_corners=*/false,
                           /*half_pixel_centers=*/false, MIN, MAX, TOLERANCE,
                           /*relative=*/true);
}

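// Resizes a generated quantized image of the given shape and validates every
// output element against the reference implementation.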
template <typename T>
void RunTestResizeBilinearTwoDims(int batch_size, int in_height, int in_width,
                                  int out_height, int out_width, int channels,
                                  float tolerance, bool relative,
                                  const bool half_pixel_centers) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, false, 1, min, max,
                     half_pixel_centers, &outputs);
  CheckTensorValue<T>(
      image_quantized_tensor.flat<T>().data(), outputs.at(0).flat<T>().data(),
      batch_size, in_height, in_width, out_height, out_width, channels,
      /*align_corners=*/false,
      /*half_pixel_centers=*/half_pixel_centers, min, max, tolerance, relative);
}

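// Benchmarks the quantized resize for the given shape and iteration count;
// timing is logged by TestResizeBilinear.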
template <typename T>
void RunBenchmarkResizeBilinearTwoDims(int batch_size, int in_height,
                                       int in_width, int out_height,
                                       int out_width, int channels,
                                       int iteration,
                                       const bool half_pixel_centers) {
  constexpr float RATIO = 100.0f;
  const float min = 0.0f;
  const float max = batch_size * in_height * in_width * channels / RATIO;

  const Tensor image_quantized_tensor = BuildTensor<T>(
      batch_size, in_height, in_width, channels, RATIO, min, max);

  std::vector<Tensor> outputs;
  TestResizeBilinear(image_quantized_tensor, DataTypeToEnum<T>::value,
                     {out_height, out_width}, true, iteration, min, max,
                     half_pixel_centers, &outputs);
}

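// Runs the two-dimensional resize test over a range of shapes and channel
// counts for a single element type T.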
template <typename T>
void TestResizeBilinearTwoDimsType(const float tolerance, const bool relative,
                                   const bool half_pixel_centers) {
  RunTestResizeBilinearTwoDims<T>(1, 1, 1, 1, 1, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 1, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 1, 256, 1, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 2, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 16, 1, 32, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 1, 128, 1, 256, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, tolerance, relative,
                                  half_pixel_centers);
  RunTestResizeBilinearTwoDims<T>(1, 256, 256, 128, 128, 3, tolerance, relative,
                                  half_pixel_centers);
}

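// Exercises quint8 (absolute quantized tolerance of 1), qint32, and float
// (relative tolerance of 1e-5), both with and without half-pixel centers.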
void TestResizeBilinearTwoDims() {
  for (const bool half_pixel_centers : {false, true}) {
    TestResizeBilinearTwoDimsType<quint8>(1.0f, false, half_pixel_centers);
    TestResizeBilinearTwoDimsType<qint32>(1.0e-5, true, half_pixel_centers);
    TestResizeBilinearTwoDimsType<float>(1.0e-5, true, half_pixel_centers);
  }
}

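// Benchmarks a handful of representative shapes for one element type.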
template <typename T>
void RunBenchmarkResizeBilinearTwoDimsType() {
  constexpr int ITER = 100;
  RunBenchmarkResizeBilinearTwoDims<T>(1, 1, 1, 2, 2, 1, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 1, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 128, 128, 256, 256, 3, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 64, 64, 128, 128, 2, ITER, false);
  RunBenchmarkResizeBilinearTwoDims<T>(1, 32, 32, 64, 64, 16, ITER, false);
}

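// Runs the benchmark suite for each supported element type.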
void RunBenchmarkResizeBilinearTwoDims() {
  LOG(INFO) << "Benchmark quint8";
  RunBenchmarkResizeBilinearTwoDimsType<quint8>();
  LOG(INFO) << "Benchmark qint32";
  RunBenchmarkResizeBilinearTwoDimsType<qint32>();
  LOG(INFO) << "Benchmark float";
  RunBenchmarkResizeBilinearTwoDimsType<float>();
}

}  // namespace tensorflow

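// Wraps each free function above in a googletest TEST so it is picked up by
// the test runner.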
#define RUN_TEST(t) \
  TEST(QuantizationResizeBilinearTest, t) { tensorflow::t(); }

RUN_TEST(TestResizeBilinearOneDim);
RUN_TEST(TestResizeBilinearTwoDims);

#if defined(__ANDROID__)

RUN_TEST(RunBenchmarkResizeBilinearTwoDims);

#endif  // __ANDROID__

int main(int argc, char** argv) {
  // On Linux, add: FLAGS_logtostderr = true;
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}