/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "test_precomp.hpp"

#ifdef HAVE_CUDA

using namespace cvtest;

////////////////////////////////////////////////////////////////////////////////
// Norm

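// cv::cuda::norm is checked against the CPU reference cv::norm on random input
// with a random 0/1 mask; integer depths must match exactly, CV_32F is allowed
// a tolerance of 1.0. The Async test exercises the stream-based
// cv::cuda::calcNorm, which writes its result into a cv::cuda::HostMem instead
// of returning a double. A minimal sketch of that pattern (names here are
// illustrative, not part of the fixture):
//
//     cv::cuda::Stream stream;
//     cv::cuda::HostMem result;
//     cv::cuda::calcNorm(gpuSrc, result, cv::NORM_L2, cv::noArray(), stream);
//     stream.waitForCompletion();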
PARAM_TEST_CASE(Norm, cv::cuda::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    int normCode;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        normCode = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(Norm, Accuracy)
{
    cv::Mat src = randomMat(size, depth);
    cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);

    double val = cv::cuda::norm(loadMat(src, useRoi), normCode, loadMat(mask, useRoi));

    double val_gold = cv::norm(src, normCode, mask);

    EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
}

CUDA_TEST_P(Norm, Async)
{
    cv::Mat src = randomMat(size, depth);
    cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);

    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::calcNorm(loadMat(src, useRoi), dst, normCode, loadMat(mask, useRoi), stream);

    stream.waitForCompletion();

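    // Download the result through a Mat header wrapping a stack variable: the
    // header already has the required size and type, so convertTo writes
    // straight into `val` instead of reallocating.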
    double val;
    const cv::Mat val_mat(1, 1, CV_64FC1, &val);
    dst.createMatHeader().convertTo(val_mat, CV_64F);

    double val_gold = cv::norm(src, normCode, mask);

    EXPECT_NEAR(val_gold, val, depth < CV_32F ? 0.0 : 1.0);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Norm, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_8S),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32S),
                    MatDepth(CV_32F)),
    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// normDiff

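// The norm-of-difference overload cv::cuda::norm(src1, src2, normCode) and its
// stream-based counterpart cv::cuda::calcNormDiff are compared against
// cv::norm(src1, src2, normCode) on CV_8UC1 inputs; results must match exactly.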
PARAM_TEST_CASE(NormDiff, cv::cuda::DeviceInfo, cv::Size, NormCode, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int normCode;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        normCode = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(NormDiff, Accuracy)
{
    cv::Mat src1 = randomMat(size, CV_8UC1);
    cv::Mat src2 = randomMat(size, CV_8UC1);

    double val = cv::cuda::norm(loadMat(src1, useRoi), loadMat(src2, useRoi), normCode);

    double val_gold = cv::norm(src1, src2, normCode);

    EXPECT_NEAR(val_gold, val, 0.0);
}

CUDA_TEST_P(NormDiff, Async)
{
    cv::Mat src1 = randomMat(size, CV_8UC1);
    cv::Mat src2 = randomMat(size, CV_8UC1);

    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::calcNormDiff(loadMat(src1, useRoi), loadMat(src2, useRoi), dst, normCode, stream);

    stream.waitForCompletion();

    double val;
    const cv::Mat val_mat(1, 1, CV_64FC1, &val);
    dst.createMatHeader().convertTo(val_mat, CV_64F);

    double val_gold = cv::norm(src1, src2, normCode);

    EXPECT_NEAR(val_gold, val, 0.0);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, NormDiff, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF)),
    WHOLE_SUBMAT));

//////////////////////////////////////////////////////////////////////////////
// Sum

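// cv::sum has no absolute or squared variant on the CPU side, so the gold
// values for cv::cuda::absSum and cv::cuda::sqrSum come from the per-channel
// reference loops below. Each *Gold dispatcher indexes a function table by
// src.depth(), relying on the depth codes CV_8U..CV_64F being 0..6.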
namespace
{
    template <typename T>
    cv::Scalar absSumImpl(const cv::Mat& src)
    {
        const int cn = src.channels();

        cv::Scalar sum = cv::Scalar::all(0);

        for (int y = 0; y < src.rows; ++y)
        {
            for (int x = 0; x < src.cols; ++x)
            {
                for (int c = 0; c < cn; ++c)
                    sum[c] += std::abs(src.at<T>(y, x * cn + c));
            }
        }

        return sum;
    }

    cv::Scalar absSumGold(const cv::Mat& src)
    {
        typedef cv::Scalar (*func_t)(const cv::Mat& src);

        static const func_t funcs[] =
        {
            absSumImpl<uchar>,
            absSumImpl<schar>,
            absSumImpl<ushort>,
            absSumImpl<short>,
            absSumImpl<int>,
            absSumImpl<float>,
            absSumImpl<double>
        };

        return funcs[src.depth()](src);
    }

    template <typename T>
    cv::Scalar sqrSumImpl(const cv::Mat& src)
    {
        const int cn = src.channels();

        cv::Scalar sum = cv::Scalar::all(0);

        for (int y = 0; y < src.rows; ++y)
        {
            for (int x = 0; x < src.cols; ++x)
            {
                for (int c = 0; c < cn; ++c)
                {
                    const T val = src.at<T>(y, x * cn + c);
                    sum[c] += val * val;
                }
            }
        }

        return sum;
    }

    cv::Scalar sqrSumGold(const cv::Mat& src)
    {
        typedef cv::Scalar (*func_t)(const cv::Mat& src);

        static const func_t funcs[] =
        {
            sqrSumImpl<uchar>,
            sqrSumImpl<schar>,
            sqrSumImpl<ushort>,
            sqrSumImpl<short>,
            sqrSumImpl<int>,
            sqrSumImpl<float>,
            sqrSumImpl<double>
        };

        return funcs[src.depth()](src);
    }
}

PARAM_TEST_CASE(Sum, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int type;
    bool useRoi;

    cv::Mat src;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        type = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());

        src = randomMat(size, type, -128.0, 128.0);
    }
};

CUDA_TEST_P(Sum, Simple)
{
    cv::Scalar val = cv::cuda::sum(loadMat(src, useRoi));

    cv::Scalar val_gold = cv::sum(src);

    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}

CUDA_TEST_P(Sum, Simple_Async)
{
    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::calcSum(loadMat(src, useRoi), dst, cv::noArray(), stream);

    stream.waitForCompletion();

    cv::Scalar val;
    cv::Mat val_mat(dst.size(), CV_64FC(dst.channels()), val.val);
    dst.createMatHeader().convertTo(val_mat, CV_64F);

    cv::Scalar val_gold = cv::sum(src);

    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}

CUDA_TEST_P(Sum, Abs)
{
    cv::Scalar val = cv::cuda::absSum(loadMat(src, useRoi));

    cv::Scalar val_gold = absSumGold(src);

    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}

CUDA_TEST_P(Sum, Abs_Async)
{
    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::calcAbsSum(loadMat(src, useRoi), dst, cv::noArray(), stream);

    stream.waitForCompletion();

    cv::Scalar val;
    cv::Mat val_mat(dst.size(), CV_64FC(dst.channels()), val.val);
    dst.createMatHeader().convertTo(val_mat, CV_64F);

    cv::Scalar val_gold = absSumGold(src);

    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}

CUDA_TEST_P(Sum, Sqr)
{
    cv::Scalar val = cv::cuda::sqrSum(loadMat(src, useRoi));

    cv::Scalar val_gold = sqrSumGold(src);

    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}

CUDA_TEST_P(Sum, Sqr_Async)
{
    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::calcSqrSum(loadMat(src, useRoi), dst, cv::noArray(), stream);

    stream.waitForCompletion();

    cv::Scalar val;
    cv::Mat val_mat(dst.size(), CV_64FC(dst.channels()), val.val);
    dst.createMatHeader().convertTo(val_mat, CV_64F);

    cv::Scalar val_gold = sqrSumGold(src);

    EXPECT_SCALAR_NEAR(val_gold, val, CV_MAT_DEPTH(type) < CV_32F ? 0.0 : 0.5);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Sum, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    TYPES(CV_8U, CV_64F, 1, 4),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// MinMax

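// On devices without NATIVE_DOUBLE support, cv::cuda::minMax is expected to
// reject CV_64F input by throwing a cv::Exception with code
// StsUnsupportedFormat; the try/catch branches below assert only on that code.
// On supported configurations the results must match minMaxLocGold exactly.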
PARAM_TEST_CASE(MinMax, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(MinMax, WithoutMask)
{
    cv::Mat src = randomMat(size, depth);

    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            double minVal, maxVal;
            cv::cuda::minMax(loadMat(src), &minVal, &maxVal);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        double minVal, maxVal;
        cv::cuda::minMax(loadMat(src, useRoi), &minVal, &maxVal);

        double minVal_gold, maxVal_gold;
        minMaxLocGold(src, &minVal_gold, &maxVal_gold);

        EXPECT_DOUBLE_EQ(minVal_gold, minVal);
        EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
    }
}

CUDA_TEST_P(MinMax, Async)
{
    cv::Mat src = randomMat(size, depth);

    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::findMinMax(loadMat(src, useRoi), dst, cv::noArray(), stream);

    stream.waitForCompletion();

    double vals[2];
    const cv::Mat vals_mat(1, 2, CV_64FC1, &vals[0]);
    dst.createMatHeader().convertTo(vals_mat, CV_64F);

    double minVal_gold, maxVal_gold;
    minMaxLocGold(src, &minVal_gold, &maxVal_gold);

    EXPECT_DOUBLE_EQ(minVal_gold, vals[0]);
    EXPECT_DOUBLE_EQ(maxVal_gold, vals[1]);
}

CUDA_TEST_P(MinMax, WithMask)
{
    cv::Mat src = randomMat(size, depth);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);

    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            double minVal, maxVal;
            cv::cuda::minMax(loadMat(src), &minVal, &maxVal, loadMat(mask));
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        double minVal, maxVal;
        cv::cuda::minMax(loadMat(src, useRoi), &minVal, &maxVal, loadMat(mask, useRoi));

        double minVal_gold, maxVal_gold;
        minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0, mask);

        EXPECT_DOUBLE_EQ(minVal_gold, minVal);
        EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
    }
}

CUDA_TEST_P(MinMax, NullPtr)
{
    cv::Mat src = randomMat(size, depth);

    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            double minVal, maxVal;
            cv::cuda::minMax(loadMat(src), &minVal, 0);
            cv::cuda::minMax(loadMat(src), 0, &maxVal);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        double minVal, maxVal;
        cv::cuda::minMax(loadMat(src, useRoi), &minVal, 0);
        cv::cuda::minMax(loadMat(src, useRoi), 0, &maxVal);

        double minVal_gold, maxVal_gold;
        minMaxLocGold(src, &minVal_gold, &maxVal_gold, 0, 0);

        EXPECT_DOUBLE_EQ(minVal_gold, minVal);
        EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMax, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// MinMaxLoc

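// An extremum may occur at several positions, so GPU and CPU locations are not
// compared directly; expectEqual instead checks that the source values at the
// gold location and at the reported location are equal.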
namespace
{
    template <typename T>
    void expectEqualImpl(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
    {
        EXPECT_EQ(src.at<T>(loc_gold.y, loc_gold.x), src.at<T>(loc.y, loc.x));
    }

    void expectEqual(const cv::Mat& src, cv::Point loc_gold, cv::Point loc)
    {
        typedef void (*func_t)(const cv::Mat& src, cv::Point loc_gold, cv::Point loc);

        static const func_t funcs[] =
        {
            expectEqualImpl<uchar>,
            expectEqualImpl<schar>,
            expectEqualImpl<ushort>,
            expectEqualImpl<short>,
            expectEqualImpl<int>,
            expectEqualImpl<float>,
            expectEqualImpl<double>
        };

        funcs[src.depth()](src, loc_gold, loc);
    }
}

PARAM_TEST_CASE(MinMaxLoc, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(MinMaxLoc, WithoutMask)
{
    cv::Mat src = randomMat(size, depth);

    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            double minVal, maxVal;
            cv::Point minLoc, maxLoc;
            cv::cuda::minMaxLoc(loadMat(src), &minVal, &maxVal, &minLoc, &maxLoc);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        double minVal, maxVal;
        cv::Point minLoc, maxLoc;
        cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc);

        double minVal_gold, maxVal_gold;
        cv::Point minLoc_gold, maxLoc_gold;
        minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);

        EXPECT_DOUBLE_EQ(minVal_gold, minVal);
        EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);

        expectEqual(src, minLoc_gold, minLoc);
        expectEqual(src, maxLoc_gold, maxLoc);
    }
}

CUDA_TEST_P(MinMaxLoc, OneRowMat)
{
    cv::Mat src = randomMat(cv::Size(size.width, 1), depth);

    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc);

    double minVal_gold, maxVal_gold;
    cv::Point minLoc_gold, maxLoc_gold;
    minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);

    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);

    expectEqual(src, minLoc_gold, minLoc);
    expectEqual(src, maxLoc_gold, maxLoc);
}

CUDA_TEST_P(MinMaxLoc, OneColumnMat)
{
    cv::Mat src = randomMat(cv::Size(1, size.height), depth);

    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc);

    double minVal_gold, maxVal_gold;
    cv::Point minLoc_gold, maxLoc_gold;
    minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);

    EXPECT_DOUBLE_EQ(minVal_gold, minVal);
    EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);

    expectEqual(src, minLoc_gold, minLoc);
    expectEqual(src, maxLoc_gold, maxLoc);
}

CUDA_TEST_P(MinMaxLoc, Async)
{
    cv::Mat src = randomMat(size, depth);

    cv::cuda::Stream stream;

    cv::cuda::HostMem minMaxVals, locVals;
    cv::cuda::findMinMaxLoc(loadMat(src, useRoi), minMaxVals, locVals, cv::noArray(), stream);

    stream.waitForCompletion();

    double vals[2];
    const cv::Mat vals_mat(2, 1, CV_64FC1, &vals[0]);
    minMaxVals.createMatHeader().convertTo(vals_mat, CV_64F);

    int locs[2];
    const cv::Mat locs_mat(2, 1, CV_32SC1, &locs[0]);
    locVals.createMatHeader().copyTo(locs_mat);

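    // findMinMaxLoc reports flat (row-major) indices; convert them back to 2D
    // coordinates before comparing against the gold locations.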
    cv::Point locs2D[] = {
        cv::Point(locs[0] % src.cols, locs[0] / src.cols),
        cv::Point(locs[1] % src.cols, locs[1] / src.cols),
    };

    double minVal_gold, maxVal_gold;
    cv::Point minLoc_gold, maxLoc_gold;
    minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);

    EXPECT_DOUBLE_EQ(minVal_gold, vals[0]);
    EXPECT_DOUBLE_EQ(maxVal_gold, vals[1]);

    expectEqual(src, minLoc_gold, locs2D[0]);
    expectEqual(src, maxLoc_gold, locs2D[1]);
}

CUDA_TEST_P(MinMaxLoc, WithMask)
{
    cv::Mat src = randomMat(size, depth);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);

    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            double minVal, maxVal;
            cv::Point minLoc, maxLoc;
            cv::cuda::minMaxLoc(loadMat(src), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask));
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        double minVal, maxVal;
        cv::Point minLoc, maxLoc;
        cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, &maxVal, &minLoc, &maxLoc, loadMat(mask, useRoi));

        double minVal_gold, maxVal_gold;
        cv::Point minLoc_gold, maxLoc_gold;
        minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold, mask);

        EXPECT_DOUBLE_EQ(minVal_gold, minVal);
        EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);

        expectEqual(src, minLoc_gold, minLoc);
        expectEqual(src, maxLoc_gold, maxLoc);
    }
}

CUDA_TEST_P(MinMaxLoc, NullPtr)
{
    cv::Mat src = randomMat(size, depth);

    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            double minVal, maxVal;
            cv::Point minLoc, maxLoc;
            cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, 0, 0, 0);
            cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, &maxVal, 0, 0);
            cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, &minLoc, 0);
            cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, &maxLoc);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        double minVal, maxVal;
        cv::Point minLoc, maxLoc;
        cv::cuda::minMaxLoc(loadMat(src, useRoi), &minVal, 0, 0, 0);
        cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, &maxVal, 0, 0);
        cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, &minLoc, 0);
        cv::cuda::minMaxLoc(loadMat(src, useRoi), 0, 0, 0, &maxLoc);

        double minVal_gold, maxVal_gold;
        cv::Point minLoc_gold, maxLoc_gold;
        minMaxLocGold(src, &minVal_gold, &maxVal_gold, &minLoc_gold, &maxLoc_gold);

        EXPECT_DOUBLE_EQ(minVal_gold, minVal);
        EXPECT_DOUBLE_EQ(maxVal_gold, maxVal);

        expectEqual(src, minLoc_gold, minLoc);
        expectEqual(src, maxLoc_gold, maxLoc);
    }
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MinMaxLoc, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////
// CountNonZero

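// The source is generated as CV_8U values in [0, 1.5) (i.e. zeros and ones)
// and then converted to the tested depth, so roughly half the elements are
// non-zero. Results must match cv::countNonZero exactly.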
PARAM_TEST_CASE(CountNonZero, cv::cuda::DeviceInfo, cv::Size, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    bool useRoi;

    cv::Mat src;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        useRoi = GET_PARAM(3);

        cv::cuda::setDevice(devInfo.deviceID());

        cv::Mat srcBase = randomMat(size, CV_8U, 0.0, 1.5);
        srcBase.convertTo(src, depth);
    }
};

CUDA_TEST_P(CountNonZero, Accuracy)
{
    if (depth == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        try
        {
            cv::cuda::countNonZero(loadMat(src));
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        int val = cv::cuda::countNonZero(loadMat(src, useRoi));

        int val_gold = cv::countNonZero(src);

        ASSERT_EQ(val_gold, val);
    }
}

CUDA_TEST_P(CountNonZero, Async)
{
    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::countNonZero(loadMat(src, useRoi), dst, stream);

    stream.waitForCompletion();

    int val;
    const cv::Mat val_mat(1, 1, CV_32SC1, &val);
    dst.createMatHeader().copyTo(val_mat);

    int val_gold = cv::countNonZero(src);

    ASSERT_EQ(val_gold, val);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, CountNonZero, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    WHOLE_SUBMAT));


//////////////////////////////////////////////////////////////////////////////
// Reduce

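// The fixture mirrors cv::reduce's output-depth promotion when building
// dst_type: REDUCE_MIN/MAX keep the source depth, REDUCE_SUM promotes CV_8U to
// CV_32S and the remaining sub-CV_64F depths to CV_32F, and REDUCE_AVG
// promotes everything below CV_32F to CV_32F.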
CV_ENUM(ReduceCode, cv::REDUCE_SUM, cv::REDUCE_AVG, cv::REDUCE_MAX, cv::REDUCE_MIN)
#define ALL_REDUCE_CODES testing::Values(ReduceCode(cv::REDUCE_SUM), ReduceCode(cv::REDUCE_AVG), ReduceCode(cv::REDUCE_MAX), ReduceCode(cv::REDUCE_MIN))

PARAM_TEST_CASE(Reduce, cv::cuda::DeviceInfo, cv::Size, MatDepth, Channels, ReduceCode, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int depth;
    int channels;
    int reduceOp;
    bool useRoi;

    int type;
    int dst_depth;
    int dst_type;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        depth = GET_PARAM(2);
        channels = GET_PARAM(3);
        reduceOp = GET_PARAM(4);
        useRoi = GET_PARAM(5);

        cv::cuda::setDevice(devInfo.deviceID());

        type = CV_MAKE_TYPE(depth, channels);

        if (reduceOp == cv::REDUCE_MAX || reduceOp == cv::REDUCE_MIN)
            dst_depth = depth;
        else if (reduceOp == cv::REDUCE_SUM)
            dst_depth = depth == CV_8U ? CV_32S : depth < CV_64F ? CV_32F : depth;
        else
            dst_depth = depth < CV_32F ? CV_32F : depth;

        dst_type = CV_MAKE_TYPE(dst_depth, channels);
    }
};

CUDA_TEST_P(Reduce, Rows)
{
    cv::Mat src = randomMat(size, type);

    cv::cuda::GpuMat dst = createMat(cv::Size(src.cols, 1), dst_type, useRoi);
    cv::cuda::reduce(loadMat(src, useRoi), dst, 0, reduceOp, dst_depth);

    cv::Mat dst_gold;
    cv::reduce(src, dst_gold, 0, reduceOp, dst_depth);

    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
}

CUDA_TEST_P(Reduce, Cols)
{
    cv::Mat src = randomMat(size, type);

    cv::cuda::GpuMat dst = createMat(cv::Size(src.rows, 1), dst_type, useRoi);
    cv::cuda::reduce(loadMat(src, useRoi), dst, 1, reduceOp, dst_depth);

    cv::Mat dst_gold;
    cv::reduce(src, dst_gold, 1, reduceOp, dst_depth);
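    // cv::reduce along dim 1 yields an Nx1 column, while the CUDA version was
    // asked for a 1xN row; reinterpret the gold result in place (the data is
    // contiguous, so swapping rows/cols and recomputing step is safe here).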
    dst_gold.cols = dst_gold.rows;
    dst_gold.rows = 1;
    dst_gold.step = dst_gold.cols * dst_gold.elemSize();

    EXPECT_MAT_NEAR(dst_gold, dst, dst_depth < CV_32F ? 0.0 : 0.02);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Reduce, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    testing::Values(MatDepth(CV_8U),
                    MatDepth(CV_16U),
                    MatDepth(CV_16S),
                    MatDepth(CV_32F),
                    MatDepth(CV_64F)),
    ALL_CHANNELS,
    ALL_REDUCE_CODES,
    WHOLE_SUBMAT));

//////////////////////////////////////////////////////////////////////////////
// Normalize

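// With alpha = 1 and beta = 0, NORM_L1/L2/INF rescale the input to unit norm
// and NORM_MINMAX stretches it to [0, 1]. Integer outputs get a tolerance of
// 1.0 to absorb rounding; floating-point outputs must agree to 1e-4.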
PARAM_TEST_CASE(Normalize, cv::cuda::DeviceInfo, cv::Size, MatDepth, NormCode, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    int type;
    int norm_type;
    bool useRoi;

    double alpha;
    double beta;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        type = GET_PARAM(2);
        norm_type = GET_PARAM(3);
        useRoi = GET_PARAM(4);

        cv::cuda::setDevice(devInfo.deviceID());

        alpha = 1;
        beta = 0;
    }
};

CUDA_TEST_P(Normalize, WithOutMask)
{
    cv::Mat src = randomMat(size, type);

    cv::cuda::GpuMat dst = createMat(size, type, useRoi);
    cv::cuda::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type);

    cv::Mat dst_gold;
    cv::normalize(src, dst_gold, alpha, beta, norm_type, type);

    EXPECT_MAT_NEAR(dst_gold, dst, type < CV_32F ? 1.0 : 1e-4);
}

CUDA_TEST_P(Normalize, WithMask)
{
    cv::Mat src = randomMat(size, type);
    cv::Mat mask = randomMat(size, CV_8UC1, 0, 2);

    cv::cuda::GpuMat dst = createMat(size, type, useRoi);
    dst.setTo(cv::Scalar::all(0));
    cv::cuda::normalize(loadMat(src, useRoi), dst, alpha, beta, norm_type, type, loadMat(mask, useRoi));

    cv::Mat dst_gold(size, type);
    dst_gold.setTo(cv::Scalar::all(0));
    cv::normalize(src, dst_gold, alpha, beta, norm_type, type, mask);

    EXPECT_MAT_NEAR(dst_gold, dst, type < CV_32F ? 1.0 : 1e-4);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Normalize, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    ALL_DEPTH,
    testing::Values(NormCode(cv::NORM_L1), NormCode(cv::NORM_L2), NormCode(cv::NORM_INF), NormCode(cv::NORM_MINMAX)),
    WHOLE_SUBMAT));

////////////////////////////////////////////////////////////////////////////////
// MeanStdDev

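// cv::cuda::meanStdDev requires FEATURE_SET_COMPUTE_13; on older devices it is
// expected to throw StsNotImplemented, which the Accuracy test asserts. The
// Async variant packs mean and stddev into one two-element CV_64F buffer.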
PARAM_TEST_CASE(MeanStdDev, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(MeanStdDev, Accuracy)
{
    cv::Mat src = randomMat(size, CV_8UC1);

    if (!supportFeature(devInfo, cv::cuda::FEATURE_SET_COMPUTE_13))
    {
        try
        {
            cv::Scalar mean;
            cv::Scalar stddev;
            cv::cuda::meanStdDev(loadMat(src, useRoi), mean, stddev);
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsNotImplemented, e.code);
        }
    }
    else
    {
        cv::Scalar mean;
        cv::Scalar stddev;
        cv::cuda::meanStdDev(loadMat(src, useRoi), mean, stddev);

        cv::Scalar mean_gold;
        cv::Scalar stddev_gold;
        cv::meanStdDev(src, mean_gold, stddev_gold);

        EXPECT_SCALAR_NEAR(mean_gold, mean, 1e-5);
        EXPECT_SCALAR_NEAR(stddev_gold, stddev, 1e-5);
    }
}

CUDA_TEST_P(MeanStdDev, Async)
{
    cv::Mat src = randomMat(size, CV_8UC1);

    cv::cuda::Stream stream;

    cv::cuda::HostMem dst;
    cv::cuda::meanStdDev(loadMat(src, useRoi), dst, stream);

    stream.waitForCompletion();

    double vals[2];
    dst.createMatHeader().copyTo(cv::Mat(1, 2, CV_64FC1, &vals[0]));

    cv::Scalar mean_gold;
    cv::Scalar stddev_gold;
    cv::meanStdDev(src, mean_gold, stddev_gold);

    EXPECT_SCALAR_NEAR(mean_gold, cv::Scalar(vals[0]), 1e-5);
    EXPECT_SCALAR_NEAR(stddev_gold, cv::Scalar(vals[1]), 1e-5);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, MeanStdDev, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));

///////////////////////////////////////////////////////////////////////////////////////////////////////
// Integral

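// The integral image is one row and one column larger than the source and is
// accumulated as CV_32S; results must match cv::integral exactly.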
PARAM_TEST_CASE(Integral, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(Integral, Accuracy)
{
    cv::Mat src = randomMat(size, CV_8UC1);

    cv::cuda::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_32SC1, useRoi);
    cv::cuda::integral(loadMat(src, useRoi), dst);

    cv::Mat dst_gold;
    cv::integral(src, dst_gold, CV_32S);

    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, Integral, testing::Combine(
    ALL_DEVICES,
    testing::Values(cv::Size(128, 128), cv::Size(113, 113), cv::Size(768, 1066)),
    WHOLE_SUBMAT));

///////////////////////////////////////////////////////////////////////////////////////////////////////
// IntegralSqr

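// cv::cuda::sqrIntegral produces the CV_64F squared integral image. The gold
// value uses the cv::integral overload that returns both the plain and squared
// sums, discarding the plain integral in `temp`.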
PARAM_TEST_CASE(IntegralSqr, cv::cuda::DeviceInfo, cv::Size, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;
    cv::Size size;
    bool useRoi;

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size = GET_PARAM(1);
        useRoi = GET_PARAM(2);

        cv::cuda::setDevice(devInfo.deviceID());
    }
};

CUDA_TEST_P(IntegralSqr, Accuracy)
{
    cv::Mat src = randomMat(size, CV_8UC1);

    cv::cuda::GpuMat dst = createMat(cv::Size(src.cols + 1, src.rows + 1), CV_64FC1, useRoi);
    cv::cuda::sqrIntegral(loadMat(src, useRoi), dst);

    cv::Mat dst_gold, temp;
    cv::integral(src, temp, dst_gold);

    EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
}

INSTANTIATE_TEST_CASE_P(CUDA_Arithm, IntegralSqr, testing::Combine(
    ALL_DEVICES,
    DIFFERENT_SIZES,
    WHOLE_SUBMAT));

#endif // HAVE_CUDA