/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
42
43 #include "perf_precomp.hpp"
44
45 using namespace std;
46 using namespace testing;
47 using namespace perf;
48
49 #define ARITHM_MAT_DEPTH Values(CV_8U, CV_16U, CV_32F, CV_64F)
50
51 //////////////////////////////////////////////////////////////////////
52 // AddMat
53
PERF_TEST_P(Sz_Depth, AddMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark element-wise matrix addition: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::add(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result, 1e-10);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::add(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
86
87 //////////////////////////////////////////////////////////////////////
88 // AddScalar
89
PERF_TEST_P(Sz_Depth, AddScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark matrix-plus-scalar addition: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::add(gpuInput, val, result);

        CUDA_SANITY_CHECK(result, 1e-10);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::add(input, val, result);

        CPU_SANITY_CHECK(result);
    }
}
121
122 //////////////////////////////////////////////////////////////////////
123 // SubtractMat
124
PERF_TEST_P(Sz_Depth, SubtractMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark element-wise matrix subtraction: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::subtract(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result, 1e-10);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::subtract(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
157
158 //////////////////////////////////////////////////////////////////////
159 // SubtractScalar
160
PERF_TEST_P(Sz_Depth, SubtractScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark matrix-minus-scalar subtraction: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::subtract(gpuInput, val, result);

        CUDA_SANITY_CHECK(result, 1e-10);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::subtract(input, val, result);

        CPU_SANITY_CHECK(result);
    }
}
192
193 //////////////////////////////////////////////////////////////////////
194 // MultiplyMat
195
PERF_TEST_P(Sz_Depth, MultiplyMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark element-wise matrix multiplication: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::multiply(gpuLhs, gpuRhs, result);

        // Looser tolerance than add/subtract: products amplify rounding error.
        CUDA_SANITY_CHECK(result, 1e-6);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::multiply(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
228
229 //////////////////////////////////////////////////////////////////////
230 // MultiplyScalar
231
PERF_TEST_P(Sz_Depth, MultiplyScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark matrix-times-scalar multiplication: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::multiply(gpuInput, val, result);

        CUDA_SANITY_CHECK(result, 1e-6);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::multiply(input, val, result);

        CPU_SANITY_CHECK(result);
    }
}
263
264 //////////////////////////////////////////////////////////////////////
265 // DivideMat
266
PERF_TEST_P(Sz_Depth, DivideMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark element-wise matrix division: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::divide(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result, 1e-6);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::divide(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
299
300 //////////////////////////////////////////////////////////////////////
301 // DivideScalar
302
PERF_TEST_P(Sz_Depth, DivideScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark matrix-divided-by-scalar: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::divide(gpuInput, val, result);

        CUDA_SANITY_CHECK(result, 1e-6);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::divide(input, val, result);

        CPU_SANITY_CHECK(result);
    }
}
334
335 //////////////////////////////////////////////////////////////////////
336 // DivideScalarInv
337
PERF_TEST_P(Sz_Depth, DivideScalarInv,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark scalar-divided-by-matrix (inverse division): CUDA vs. CPU.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        // CUDA overload takes a single double numerator; use the first channel.
        TEST_CYCLE() cv::cuda::divide(val[0], gpuInput, result);

        CUDA_SANITY_CHECK(result, 1e-6);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::divide(val, input, result);

        CPU_SANITY_CHECK(result);
    }
}
369
370 //////////////////////////////////////////////////////////////////////
371 // AbsDiffMat
372
PERF_TEST_P(Sz_Depth, AbsDiffMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark element-wise absolute difference of two matrices: CUDA vs. CPU.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::absdiff(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result, 1e-10);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::absdiff(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
405
406 //////////////////////////////////////////////////////////////////////
407 // AbsDiffScalar
408
PERF_TEST_P(Sz_Depth, AbsDiffScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH))
{
    // Benchmark absolute difference between a matrix and a scalar: CUDA vs. CPU.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::absdiff(gpuInput, val, result);

        CUDA_SANITY_CHECK(result, 1e-10);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::absdiff(input, val, result);

        CPU_SANITY_CHECK(result);
    }
}
440
441 //////////////////////////////////////////////////////////////////////
442 // Abs
443
PERF_TEST_P(Sz_Depth, Abs,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_16S, CV_32F)))
{
    // Benchmark cv::cuda::abs; no matching CPU function in this suite.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::abs(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        FAIL_NO_CPU();
    }
}
468
469 //////////////////////////////////////////////////////////////////////
470 // Sqr
471
PERF_TEST_P(Sz_Depth, Sqr,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16S, CV_32F)))
{
    // Benchmark cv::cuda::sqr; no matching CPU function in this suite.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::sqr(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        FAIL_NO_CPU();
    }
}
496
497 //////////////////////////////////////////////////////////////////////
498 // Sqrt
499
PERF_TEST_P(Sz_Depth, Sqrt,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16S, CV_32F)))
{
    // Benchmark element-wise square root: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    // Non-negative operands only, so sqrt is well defined.
    cv::randu(input, 0, 100000);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::sqrt(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::sqrt(input, result);

        CPU_SANITY_CHECK(result);
    }
}
528
529 //////////////////////////////////////////////////////////////////////
530 // Log
531
PERF_TEST_P(Sz_Depth, Log,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16S, CV_32F)))
{
    // Benchmark element-wise natural logarithm: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    // Non-negative operands only, so log is well defined.
    cv::randu(input, 0, 100000);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::log(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::log(input, result);

        CPU_SANITY_CHECK(result);
    }
}
560
561 //////////////////////////////////////////////////////////////////////
562 // Exp
563
PERF_TEST_P(Sz_Depth, Exp,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16S, CV_32F)))
{
    // Benchmark element-wise exponential: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    // Small operand range keeps exp() away from overflow.
    cv::randu(input, 0, 10);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::exp(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::exp(input, result);

        CPU_SANITY_CHECK(result);
    }
}
592
593 //////////////////////////////////////////////////////////////////////
594 // Pow
595
596 DEF_PARAM_TEST(Sz_Depth_Power, cv::Size, MatDepth, double);
597
598 PERF_TEST_P(Sz_Depth_Power, Pow,
599 Combine(CUDA_TYPICAL_MAT_SIZES,
600 Values(CV_8U, CV_16S, CV_32F),
601 Values(0.3, 2.0, 2.4)))
602 {
603 const cv::Size size = GET_PARAM(0);
604 const int depth = GET_PARAM(1);
605 const double power = GET_PARAM(2);
606
607 cv::Mat src(size, depth);
608 declare.in(src, WARMUP_RNG);
609
610 if (PERF_RUN_CUDA())
611 {
612 const cv::cuda::GpuMat d_src(src);
613 cv::cuda::GpuMat dst;
614
615 TEST_CYCLE() cv::cuda::pow(d_src, power, dst);
616
617 CUDA_SANITY_CHECK(dst);
618 }
619 else
620 {
621 cv::Mat dst;
622
623 TEST_CYCLE() cv::pow(src, power, dst);
624
625 CPU_SANITY_CHECK(dst);
626 }
627 }
628
629 //////////////////////////////////////////////////////////////////////
630 // CompareMat
631
632 CV_ENUM(CmpCode, cv::CMP_EQ, cv::CMP_GT, cv::CMP_GE, cv::CMP_LT, cv::CMP_LE, cv::CMP_NE)
633
634 DEF_PARAM_TEST(Sz_Depth_Code, cv::Size, MatDepth, CmpCode);
635
PERF_TEST_P(Sz_Depth_Code,CompareMat,Combine (CUDA_TYPICAL_MAT_SIZES,ARITHM_MAT_DEPTH,CmpCode::all ()))636 PERF_TEST_P(Sz_Depth_Code, CompareMat,
637 Combine(CUDA_TYPICAL_MAT_SIZES,
638 ARITHM_MAT_DEPTH,
639 CmpCode::all()))
640 {
641 const cv::Size size = GET_PARAM(0);
642 const int depth = GET_PARAM(1);
643 const int cmp_code = GET_PARAM(2);
644
645 cv::Mat src1(size, depth);
646 declare.in(src1, WARMUP_RNG);
647
648 cv::Mat src2(size, depth);
649 declare.in(src2, WARMUP_RNG);
650
651 if (PERF_RUN_CUDA())
652 {
653 const cv::cuda::GpuMat d_src1(src1);
654 const cv::cuda::GpuMat d_src2(src2);
655 cv::cuda::GpuMat dst;
656
657 TEST_CYCLE() cv::cuda::compare(d_src1, d_src2, dst, cmp_code);
658
659 CUDA_SANITY_CHECK(dst);
660 }
661 else
662 {
663 cv::Mat dst;
664
665 TEST_CYCLE() cv::compare(src1, src2, dst, cmp_code);
666
667 CPU_SANITY_CHECK(dst);
668 }
669 }
670
671 //////////////////////////////////////////////////////////////////////
672 // CompareScalar
673
PERF_TEST_P(Sz_Depth_Code, CompareScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    ARITHM_MAT_DEPTH,
                    CmpCode::all()))
{
    // Benchmark matrix-vs-scalar comparison for every comparison operator:
    // CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int cmp_code = GET_PARAM(2);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::compare(gpuInput, val, result, cmp_code);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::compare(input, val, result, cmp_code);

        CPU_SANITY_CHECK(result);
    }
}
707
708 //////////////////////////////////////////////////////////////////////
709 // BitwiseNot
710
PERF_TEST_P(Sz_Depth, BitwiseNot,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S)))
{
    // Benchmark bitwise NOT on integer matrices: CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_not(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_not(input, result);

        CPU_SANITY_CHECK(result);
    }
}
739
740 //////////////////////////////////////////////////////////////////////
741 // BitwiseAndMat
742
PERF_TEST_P(Sz_Depth, BitwiseAndMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S)))
{
    // Benchmark bitwise AND of two integer matrices: CUDA vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_and(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_and(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
775
776 //////////////////////////////////////////////////////////////////////
777 // BitwiseAndScalar
778
PERF_TEST_P(Sz_Depth_Cn, BitwiseAndScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S),
                    CUDA_CHANNELS_1_3_4))
{
    // Benchmark bitwise AND of a multi-channel matrix with an integer scalar:
    // CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat input(size, type);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);
    // Bitwise ops need integer operands; truncate the random scalar.
    cv::Scalar_<int> ival = val;

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_and(gpuInput, ival, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_and(input, ival, result);

        CPU_SANITY_CHECK(result);
    }
}
815
816 //////////////////////////////////////////////////////////////////////
817 // BitwiseOrMat
818
PERF_TEST_P(Sz_Depth, BitwiseOrMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S)))
{
    // Benchmark bitwise OR of two integer matrices: CUDA vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_or(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_or(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
851
852 //////////////////////////////////////////////////////////////////////
853 // BitwiseOrScalar
854
PERF_TEST_P(Sz_Depth_Cn, BitwiseOrScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S),
                    CUDA_CHANNELS_1_3_4))
{
    // Benchmark bitwise OR of a multi-channel matrix with an integer scalar:
    // CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat input(size, type);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);
    // Bitwise ops need integer operands; truncate the random scalar.
    cv::Scalar_<int> ival = val;

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_or(gpuInput, ival, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_or(input, ival, result);

        CPU_SANITY_CHECK(result);
    }
}
891
892 //////////////////////////////////////////////////////////////////////
893 // BitwiseXorMat
894
PERF_TEST_P(Sz_Depth, BitwiseXorMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S)))
{
    // Benchmark bitwise XOR of two integer matrices: CUDA vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_xor(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_xor(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
927
928 //////////////////////////////////////////////////////////////////////
929 // BitwiseXorScalar
930
PERF_TEST_P(Sz_Depth_Cn, BitwiseXorScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S),
                    CUDA_CHANNELS_1_3_4))
{
    // Benchmark bitwise XOR of a multi-channel matrix with an integer scalar:
    // CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat input(size, type);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);
    // Bitwise ops need integer operands; truncate the random scalar.
    cv::Scalar_<int> ival = val;

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::bitwise_xor(gpuInput, ival, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::bitwise_xor(input, ival, result);

        CPU_SANITY_CHECK(result);
    }
}
967
968 //////////////////////////////////////////////////////////////////////
969 // RShift
970
PERF_TEST_P(Sz_Depth_Cn, RShift,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S),
                    CUDA_CHANNELS_1_3_4))
{
    // Benchmark cv::cuda::rshift; no matching CPU function in this suite.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat input(size, type);
    declare.in(input, WARMUP_RNG);

    // Shift every channel right by a fixed 4 bits.
    const cv::Scalar_<int> shift = cv::Scalar_<int>::all(4);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::rshift(gpuInput, shift, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        FAIL_NO_CPU();
    }
}
1001
1002 //////////////////////////////////////////////////////////////////////
1003 // LShift
1004
PERF_TEST_P(Sz_Depth_Cn, LShift,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32S),
                    CUDA_CHANNELS_1_3_4))
{
    // Benchmark cv::cuda::lshift; no matching CPU function in this suite.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);
    const int channels = GET_PARAM(2);

    const int type = CV_MAKE_TYPE(depth, channels);

    cv::Mat input(size, type);
    declare.in(input, WARMUP_RNG);

    // Shift every channel left by a fixed 4 bits.
    const cv::Scalar_<int> shift = cv::Scalar_<int>::all(4);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::lshift(gpuInput, shift, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        FAIL_NO_CPU();
    }
}
1035
1036 //////////////////////////////////////////////////////////////////////
1037 // MinMat
1038
PERF_TEST_P(Sz_Depth, MinMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F)))
{
    // Benchmark element-wise minimum of two matrices: CUDA vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::min(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::min(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
1071
1072 //////////////////////////////////////////////////////////////////////
1073 // MinScalar
1074
PERF_TEST_P(Sz_Depth, MinScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F)))
{
    // Benchmark element-wise minimum of a matrix and a scalar:
    // CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        // Both overloads take a single double; use the first channel.
        TEST_CYCLE() cv::cuda::min(gpuInput, val[0], result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::min(input, val[0], result);

        CPU_SANITY_CHECK(result);
    }
}
1106
1107 //////////////////////////////////////////////////////////////////////
1108 // MaxMat
1109
PERF_TEST_P(Sz_Depth, MaxMat,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F)))
{
    // Benchmark element-wise maximum of two matrices: CUDA vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat lhs(size, depth);
    declare.in(lhs, WARMUP_RNG);

    cv::Mat rhs(size, depth);
    declare.in(rhs, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuLhs(lhs);
        const cv::cuda::GpuMat gpuRhs(rhs);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::max(gpuLhs, gpuRhs, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::max(lhs, rhs, result);

        CPU_SANITY_CHECK(result);
    }
}
1142
1143 //////////////////////////////////////////////////////////////////////
1144 // MaxScalar
1145
PERF_TEST_P(Sz_Depth, MaxScalar,
            Combine(CUDA_TYPICAL_MAT_SIZES,
                    Values(CV_8U, CV_16U, CV_32F)))
{
    // Benchmark element-wise maximum of a matrix and a scalar:
    // CUDA path vs. CPU reference.
    const cv::Size size = GET_PARAM(0);
    const int depth = GET_PARAM(1);

    cv::Mat input(size, depth);
    declare.in(input, WARMUP_RNG);

    cv::Scalar val;
    declare.in(val, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        // Both overloads take a single double; use the first channel.
        TEST_CYCLE() cv::cuda::max(gpuInput, val[0], result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat result;

        TEST_CYCLE() cv::max(input, val[0], result);

        CPU_SANITY_CHECK(result);
    }
}
1177
1178 //////////////////////////////////////////////////////////////////////
1179 // AddWeighted
1180
1181 DEF_PARAM_TEST(Sz_3Depth, cv::Size, MatDepth, MatDepth, MatDepth);
1182
PERF_TEST_P(Sz_3Depth,AddWeighted,Combine (CUDA_TYPICAL_MAT_SIZES,Values (CV_8U,CV_16U,CV_32F,CV_64F),Values (CV_8U,CV_16U,CV_32F,CV_64F),Values (CV_8U,CV_16U,CV_32F,CV_64F)))1183 PERF_TEST_P(Sz_3Depth, AddWeighted,
1184 Combine(CUDA_TYPICAL_MAT_SIZES,
1185 Values(CV_8U, CV_16U, CV_32F, CV_64F),
1186 Values(CV_8U, CV_16U, CV_32F, CV_64F),
1187 Values(CV_8U, CV_16U, CV_32F, CV_64F)))
1188 {
1189 const cv::Size size = GET_PARAM(0);
1190 const int depth1 = GET_PARAM(1);
1191 const int depth2 = GET_PARAM(2);
1192 const int dst_depth = GET_PARAM(3);
1193
1194 cv::Mat src1(size, depth1);
1195 declare.in(src1, WARMUP_RNG);
1196
1197 cv::Mat src2(size, depth2);
1198 declare.in(src2, WARMUP_RNG);
1199
1200 if (PERF_RUN_CUDA())
1201 {
1202 const cv::cuda::GpuMat d_src1(src1);
1203 const cv::cuda::GpuMat d_src2(src2);
1204 cv::cuda::GpuMat dst;
1205
1206 TEST_CYCLE() cv::cuda::addWeighted(d_src1, 0.5, d_src2, 0.5, 10.0, dst, dst_depth);
1207
1208 CUDA_SANITY_CHECK(dst, 1e-10);
1209 }
1210 else
1211 {
1212 cv::Mat dst;
1213
1214 TEST_CYCLE() cv::addWeighted(src1, 0.5, src2, 0.5, 10.0, dst, dst_depth);
1215
1216 CPU_SANITY_CHECK(dst);
1217 }
1218 }
1219
1220 //////////////////////////////////////////////////////////////////////
1221 // MagnitudeComplex
1222
PERF_TEST_P(Sz, MagnitudeComplex,
            CUDA_TYPICAL_MAT_SIZES)
{
    // Benchmark magnitude of an interleaved complex (CV_32FC2) matrix.
    // The CPU reference has no interleaved overload, so it splits channels
    // first and times only the magnitude computation.
    const cv::Size size = GetParam();

    cv::Mat input(size, CV_32FC2);
    declare.in(input, WARMUP_RNG);

    if (PERF_RUN_CUDA())
    {
        const cv::cuda::GpuMat gpuInput(input);
        cv::cuda::GpuMat result;

        TEST_CYCLE() cv::cuda::magnitude(gpuInput, result);

        CUDA_SANITY_CHECK(result);
    }
    else
    {
        cv::Mat planes[2];
        cv::split(input, planes);

        cv::Mat result;

        TEST_CYCLE() cv::magnitude(planes[0], planes[1], result);

        CPU_SANITY_CHECK(result);
    }
}
1252
1253 //////////////////////////////////////////////////////////////////////
1254 // MagnitudeSqrComplex
1255
PERF_TEST_P(Sz,MagnitudeSqrComplex,CUDA_TYPICAL_MAT_SIZES)1256 PERF_TEST_P(Sz, MagnitudeSqrComplex,
1257 CUDA_TYPICAL_MAT_SIZES)
1258 {
1259 const cv::Size size = GetParam();
1260
1261 cv::Mat src(size, CV_32FC2);
1262 declare.in(src, WARMUP_RNG);
1263
1264 if (PERF_RUN_CUDA())
1265 {
1266 const cv::cuda::GpuMat d_src(src);
1267 cv::cuda::GpuMat dst;
1268
1269 TEST_CYCLE() cv::cuda::magnitudeSqr(d_src, dst);
1270
1271 CUDA_SANITY_CHECK(dst);
1272 }
1273 else
1274 {
1275 FAIL_NO_CPU();
1276 }
1277 }
1278
1279 //////////////////////////////////////////////////////////////////////
1280 // Magnitude
1281
PERF_TEST_P(Sz,Magnitude,CUDA_TYPICAL_MAT_SIZES)1282 PERF_TEST_P(Sz, Magnitude,
1283 CUDA_TYPICAL_MAT_SIZES)
1284 {
1285 const cv::Size size = GetParam();
1286
1287 cv::Mat src1(size, CV_32FC1);
1288 declare.in(src1, WARMUP_RNG);
1289
1290 cv::Mat src2(size, CV_32FC1);
1291 declare.in(src2, WARMUP_RNG);
1292
1293 if (PERF_RUN_CUDA())
1294 {
1295 const cv::cuda::GpuMat d_src1(src1);
1296 const cv::cuda::GpuMat d_src2(src2);
1297 cv::cuda::GpuMat dst;
1298
1299 TEST_CYCLE() cv::cuda::magnitude(d_src1, d_src2, dst);
1300
1301 CUDA_SANITY_CHECK(dst);
1302 }
1303 else
1304 {
1305 cv::Mat dst;
1306
1307 TEST_CYCLE() cv::magnitude(src1, src2, dst);
1308
1309 CPU_SANITY_CHECK(dst);
1310 }
1311 }
1312
1313 //////////////////////////////////////////////////////////////////////
1314 // MagnitudeSqr
1315
PERF_TEST_P(Sz,MagnitudeSqr,CUDA_TYPICAL_MAT_SIZES)1316 PERF_TEST_P(Sz, MagnitudeSqr,
1317 CUDA_TYPICAL_MAT_SIZES)
1318 {
1319 const cv::Size size = GetParam();
1320
1321 cv::Mat src1(size, CV_32FC1);
1322 declare.in(src1, WARMUP_RNG);
1323
1324 cv::Mat src2(size, CV_32FC1);
1325 declare.in(src2, WARMUP_RNG);
1326
1327 if (PERF_RUN_CUDA())
1328 {
1329 const cv::cuda::GpuMat d_src1(src1);
1330 const cv::cuda::GpuMat d_src2(src2);
1331 cv::cuda::GpuMat dst;
1332
1333 TEST_CYCLE() cv::cuda::magnitudeSqr(d_src1, d_src2, dst);
1334
1335 CUDA_SANITY_CHECK(dst);
1336 }
1337 else
1338 {
1339 FAIL_NO_CPU();
1340 }
1341 }
1342
1343 //////////////////////////////////////////////////////////////////////
1344 // Phase
1345
1346 DEF_PARAM_TEST(Sz_AngleInDegrees, cv::Size, bool);
1347
PERF_TEST_P(Sz_AngleInDegrees,Phase,Combine (CUDA_TYPICAL_MAT_SIZES,Bool ()))1348 PERF_TEST_P(Sz_AngleInDegrees, Phase,
1349 Combine(CUDA_TYPICAL_MAT_SIZES,
1350 Bool()))
1351 {
1352 const cv::Size size = GET_PARAM(0);
1353 const bool angleInDegrees = GET_PARAM(1);
1354
1355 cv::Mat src1(size, CV_32FC1);
1356 declare.in(src1, WARMUP_RNG);
1357
1358 cv::Mat src2(size, CV_32FC1);
1359 declare.in(src2, WARMUP_RNG);
1360
1361 if (PERF_RUN_CUDA())
1362 {
1363 const cv::cuda::GpuMat d_src1(src1);
1364 const cv::cuda::GpuMat d_src2(src2);
1365 cv::cuda::GpuMat dst;
1366
1367 TEST_CYCLE() cv::cuda::phase(d_src1, d_src2, dst, angleInDegrees);
1368
1369 CUDA_SANITY_CHECK(dst, 1e-6, ERROR_RELATIVE);
1370 }
1371 else
1372 {
1373 cv::Mat dst;
1374
1375 TEST_CYCLE() cv::phase(src1, src2, dst, angleInDegrees);
1376
1377 CPU_SANITY_CHECK(dst);
1378 }
1379 }
1380
1381 //////////////////////////////////////////////////////////////////////
1382 // CartToPolar
1383
PERF_TEST_P(Sz_AngleInDegrees,CartToPolar,Combine (CUDA_TYPICAL_MAT_SIZES,Bool ()))1384 PERF_TEST_P(Sz_AngleInDegrees, CartToPolar,
1385 Combine(CUDA_TYPICAL_MAT_SIZES,
1386 Bool()))
1387 {
1388 const cv::Size size = GET_PARAM(0);
1389 const bool angleInDegrees = GET_PARAM(1);
1390
1391 cv::Mat src1(size, CV_32FC1);
1392 declare.in(src1, WARMUP_RNG);
1393
1394 cv::Mat src2(size, CV_32FC1);
1395 declare.in(src2, WARMUP_RNG);
1396
1397 if (PERF_RUN_CUDA())
1398 {
1399 const cv::cuda::GpuMat d_src1(src1);
1400 const cv::cuda::GpuMat d_src2(src2);
1401 cv::cuda::GpuMat magnitude;
1402 cv::cuda::GpuMat angle;
1403
1404 TEST_CYCLE() cv::cuda::cartToPolar(d_src1, d_src2, magnitude, angle, angleInDegrees);
1405
1406 CUDA_SANITY_CHECK(magnitude);
1407 CUDA_SANITY_CHECK(angle, 1e-6, ERROR_RELATIVE);
1408 }
1409 else
1410 {
1411 cv::Mat magnitude;
1412 cv::Mat angle;
1413
1414 TEST_CYCLE() cv::cartToPolar(src1, src2, magnitude, angle, angleInDegrees);
1415
1416 CPU_SANITY_CHECK(magnitude);
1417 CPU_SANITY_CHECK(angle);
1418 }
1419 }
1420
1421 //////////////////////////////////////////////////////////////////////
1422 // PolarToCart
1423
PERF_TEST_P(Sz_AngleInDegrees,PolarToCart,Combine (CUDA_TYPICAL_MAT_SIZES,Bool ()))1424 PERF_TEST_P(Sz_AngleInDegrees, PolarToCart,
1425 Combine(CUDA_TYPICAL_MAT_SIZES,
1426 Bool()))
1427 {
1428 const cv::Size size = GET_PARAM(0);
1429 const bool angleInDegrees = GET_PARAM(1);
1430
1431 cv::Mat magnitude(size, CV_32FC1);
1432 declare.in(magnitude, WARMUP_RNG);
1433
1434 cv::Mat angle(size, CV_32FC1);
1435 declare.in(angle, WARMUP_RNG);
1436
1437 if (PERF_RUN_CUDA())
1438 {
1439 const cv::cuda::GpuMat d_magnitude(magnitude);
1440 const cv::cuda::GpuMat d_angle(angle);
1441 cv::cuda::GpuMat x;
1442 cv::cuda::GpuMat y;
1443
1444 TEST_CYCLE() cv::cuda::polarToCart(d_magnitude, d_angle, x, y, angleInDegrees);
1445
1446 CUDA_SANITY_CHECK(x);
1447 CUDA_SANITY_CHECK(y);
1448 }
1449 else
1450 {
1451 cv::Mat x;
1452 cv::Mat y;
1453
1454 TEST_CYCLE() cv::polarToCart(magnitude, angle, x, y, angleInDegrees);
1455
1456 CPU_SANITY_CHECK(x);
1457 CPU_SANITY_CHECK(y);
1458 }
1459 }
1460
1461 //////////////////////////////////////////////////////////////////////
1462 // Threshold
1463
1464 CV_ENUM(ThreshOp, cv::THRESH_BINARY, cv::THRESH_BINARY_INV, cv::THRESH_TRUNC, cv::THRESH_TOZERO, cv::THRESH_TOZERO_INV)
1465
1466 DEF_PARAM_TEST(Sz_Depth_Op, cv::Size, MatDepth, ThreshOp);
1467
PERF_TEST_P(Sz_Depth_Op,Threshold,Combine (CUDA_TYPICAL_MAT_SIZES,Values (CV_8U,CV_16U,CV_32F,CV_64F),ThreshOp::all ()))1468 PERF_TEST_P(Sz_Depth_Op, Threshold,
1469 Combine(CUDA_TYPICAL_MAT_SIZES,
1470 Values(CV_8U, CV_16U, CV_32F, CV_64F),
1471 ThreshOp::all()))
1472 {
1473 const cv::Size size = GET_PARAM(0);
1474 const int depth = GET_PARAM(1);
1475 const int threshOp = GET_PARAM(2);
1476
1477 cv::Mat src(size, depth);
1478 declare.in(src, WARMUP_RNG);
1479
1480 if (PERF_RUN_CUDA())
1481 {
1482 const cv::cuda::GpuMat d_src(src);
1483 cv::cuda::GpuMat dst;
1484
1485 TEST_CYCLE() cv::cuda::threshold(d_src, dst, 100.0, 255.0, threshOp);
1486
1487 CUDA_SANITY_CHECK(dst, 1e-10);
1488 }
1489 else
1490 {
1491 cv::Mat dst;
1492
1493 TEST_CYCLE() cv::threshold(src, dst, 100.0, 255.0, threshOp);
1494
1495 CPU_SANITY_CHECK(dst);
1496 }
1497 }
1498