1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
22 //
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
26 //
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42
43 #include "test_precomp.hpp"
44
45 #ifdef HAVE_CUDA
46
47 #include "opencv2/core/cuda.hpp"
48 #include "opencv2/ts/cuda_test.hpp"
49
50 using namespace cvtest;
51
52 ////////////////////////////////////////////////////////////////////////////////
53 // SetTo
54
// Fixture for GpuMat::setTo tests, parameterized over device, size, type and ROI usage.
PARAM_TEST_CASE(GpuMat_SetTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix dimensions
    int type;                      // OpenCV matrix type (depth + channels)
    bool useRoi;                   // whether to exercise the submatrix (ROI) path

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size    = GET_PARAM(1);
        type    = GET_PARAM(2);
        useRoi  = GET_PARAM(3);

        // Make the selected device current for all subsequent CUDA calls.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
72
// Filling with zeros must work for every type, with or without an ROI.
CUDA_TEST_P(GpuMat_SetTo, Zero)
{
    cv::cuda::GpuMat mat = createMat(size, type, useRoi);
    mat.setTo(cv::Scalar::all(0));

    EXPECT_MAT_NEAR(cv::Mat::zeros(size, type), mat, 0.0);
}
82
// Fill the matrix with one random value replicated in every channel.
CUDA_TEST_P(GpuMat_SetTo, SameVal)
{
    cv::Scalar val = cv::Scalar::all(randomDouble(0.0, 255.0));

    if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        // Devices without native double support must reject CV_64F data.
        try
        {
            cv::cuda::GpuMat mat = createMat(size, type, useRoi);
            mat.setTo(val);
            // Previously the test passed silently when no exception was thrown;
            // fail explicitly so a missing rejection is reported.
            FAIL() << "expected cv::Exception (StsUnsupportedFormat) was not thrown";
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat mat = createMat(size, type, useRoi);
        mat.setTo(val);

        EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
    }
}
107
// Fill the matrix with a scalar that has a different random value per channel.
CUDA_TEST_P(GpuMat_SetTo, DifferentVal)
{
    cv::Scalar val = randomScalar(0.0, 255.0);

    if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        // Devices without native double support must reject CV_64F data.
        try
        {
            cv::cuda::GpuMat mat = createMat(size, type, useRoi);
            mat.setTo(val);
            // Previously the test passed silently when no exception was thrown;
            // fail explicitly so a missing rejection is reported.
            FAIL() << "expected cv::Exception (StsUnsupportedFormat) was not thrown";
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat mat = createMat(size, type, useRoi);
        mat.setTo(val);

        EXPECT_MAT_NEAR(cv::Mat(size, type, val), mat, 0.0);
    }
}
132
// setTo with a mask must only overwrite elements where the mask is non-zero;
// the result is compared against cv::Mat::setTo on the host.
CUDA_TEST_P(GpuMat_SetTo, Masked)
{
    cv::Scalar val = randomScalar(0.0, 255.0);
    cv::Mat mat_gold = randomMat(size, type);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);  // values in {0, 1}

    if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        // Devices without native double support must reject CV_64F data.
        try
        {
            cv::cuda::GpuMat mat = createMat(size, type, useRoi);
            mat.setTo(val, loadMat(mask));
            // Previously the test passed silently when no exception was thrown;
            // fail explicitly so a missing rejection is reported.
            FAIL() << "expected cv::Exception (StsUnsupportedFormat) was not thrown";
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat mat = loadMat(mat_gold, useRoi);
        mat.setTo(val, loadMat(mask, useRoi));

        mat_gold.setTo(val, mask);

        EXPECT_MAT_NEAR(mat_gold, mat, 0.0);
    }
}
161
162 INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_SetTo, testing::Combine(
163 ALL_DEVICES,
164 DIFFERENT_SIZES,
165 ALL_TYPES,
166 WHOLE_SUBMAT));
167
168 ////////////////////////////////////////////////////////////////////////////////
169 // CopyTo
170
// Fixture for GpuMat::copyTo tests, parameterized over device, size, type and ROI usage.
PARAM_TEST_CASE(GpuMat_CopyTo, cv::cuda::DeviceInfo, cv::Size, MatType, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix dimensions
    int type;                      // OpenCV matrix type (depth + channels)
    bool useRoi;                   // whether to exercise the submatrix (ROI) path

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size    = GET_PARAM(1);
        type    = GET_PARAM(2);
        useRoi  = GET_PARAM(3);

        // Make the selected device current for all subsequent CUDA calls.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
189
// An unmasked device copy must reproduce the source exactly.
CUDA_TEST_P(GpuMat_CopyTo, WithOutMask)
{
    cv::Mat src = randomMat(size, type);

    cv::cuda::GpuMat d_src = loadMat(src, useRoi);
    cv::cuda::GpuMat d_dst = createMat(size, type, useRoi);
    d_src.copyTo(d_dst);

    EXPECT_MAT_NEAR(src, d_dst, 0.0);
}
200
// copyTo with a mask must only copy elements where the mask is non-zero;
// the result is compared against cv::Mat::copyTo on the host.
CUDA_TEST_P(GpuMat_CopyTo, Masked)
{
    cv::Mat src = randomMat(size, type);
    cv::Mat mask = randomMat(size, CV_8UC1, 0.0, 2.0);  // values in {0, 1}

    if (CV_MAT_DEPTH(type) == CV_64F && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        // Devices without native double support must reject CV_64F data.
        try
        {
            cv::cuda::GpuMat d_src = loadMat(src);
            cv::cuda::GpuMat dst;
            d_src.copyTo(dst, loadMat(mask, useRoi));
            // Previously the test passed silently when no exception was thrown;
            // fail explicitly so a missing rejection is reported.
            FAIL() << "expected cv::Exception (StsUnsupportedFormat) was not thrown";
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat d_src = loadMat(src, useRoi);
        cv::cuda::GpuMat dst = loadMat(cv::Mat::zeros(size, type), useRoi);
        d_src.copyTo(dst, loadMat(mask, useRoi));

        cv::Mat dst_gold = cv::Mat::zeros(size, type);
        src.copyTo(dst_gold, mask);

        EXPECT_MAT_NEAR(dst_gold, dst, 0.0);
    }
}
231
232 INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_CopyTo, testing::Combine(
233 ALL_DEVICES,
234 DIFFERENT_SIZES,
235 ALL_TYPES,
236 WHOLE_SUBMAT));
237
238 ////////////////////////////////////////////////////////////////////////////////
239 // ConvertTo
240
// Fixture for GpuMat::convertTo tests, parameterized over device, size,
// source depth, destination depth, and ROI usage.
PARAM_TEST_CASE(GpuMat_ConvertTo, cv::cuda::DeviceInfo, cv::Size, MatDepth, MatDepth, UseRoi)
{
    cv::cuda::DeviceInfo devInfo;  // CUDA device under test
    cv::Size size;                 // matrix dimensions
    int depth1;                    // source depth
    int depth2;                    // destination depth
    bool useRoi;                   // whether to exercise the submatrix (ROI) path

    virtual void SetUp()
    {
        devInfo = GET_PARAM(0);
        size    = GET_PARAM(1);
        depth1  = GET_PARAM(2);
        depth2  = GET_PARAM(3);
        useRoi  = GET_PARAM(4);

        // Make the selected device current for all subsequent CUDA calls.
        cv::cuda::setDevice(devInfo.deviceID());
    }
};
260
// Depth conversion without scaling, compared against cv::Mat::convertTo on the host.
CUDA_TEST_P(GpuMat_ConvertTo, WithOutScaling)
{
    cv::Mat src = randomMat(size, depth1);

    if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        // Devices without native double support must reject CV_64F data.
        try
        {
            cv::cuda::GpuMat d_src = loadMat(src);
            cv::cuda::GpuMat dst;
            d_src.convertTo(dst, depth2);
            // Previously the test passed silently when no exception was thrown;
            // fail explicitly so a missing rejection is reported.
            FAIL() << "expected cv::Exception (StsUnsupportedFormat) was not thrown";
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat d_src = loadMat(src, useRoi);
        cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
        d_src.convertTo(dst, depth2);

        cv::Mat dst_gold;
        src.convertTo(dst_gold, depth2);

        // Integer destinations may round differently by at most one unit;
        // floating-point destinations must match to 1e-4.
        EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
    }
}
290
// Depth conversion with random scale/offset (dst = src * a + b), compared
// against cv::Mat::convertTo on the host.
CUDA_TEST_P(GpuMat_ConvertTo, WithScaling)
{
    cv::Mat src = randomMat(size, depth1);
    double a = randomDouble(0.0, 1.0);    // scale factor
    double b = randomDouble(-10.0, 10.0); // additive offset

    if ((depth1 == CV_64F || depth2 == CV_64F) && !supportFeature(devInfo, cv::cuda::NATIVE_DOUBLE))
    {
        // Devices without native double support must reject CV_64F data.
        try
        {
            cv::cuda::GpuMat d_src = loadMat(src);
            cv::cuda::GpuMat dst;
            d_src.convertTo(dst, depth2, a, b);
            // Previously the test passed silently when no exception was thrown;
            // fail explicitly so a missing rejection is reported.
            FAIL() << "expected cv::Exception (StsUnsupportedFormat) was not thrown";
        }
        catch (const cv::Exception& e)
        {
            ASSERT_EQ(cv::Error::StsUnsupportedFormat, e.code);
        }
    }
    else
    {
        cv::cuda::GpuMat d_src = loadMat(src, useRoi);
        cv::cuda::GpuMat dst = createMat(size, depth2, useRoi);
        d_src.convertTo(dst, depth2, a, b);

        cv::Mat dst_gold;
        src.convertTo(dst_gold, depth2, a, b);

        // Integer destinations may round differently by at most one unit;
        // floating-point destinations must match to 1e-4.
        EXPECT_MAT_NEAR(dst_gold, dst, depth2 < CV_32F ? 1.0 : 1e-4);
    }
}
322
323 INSTANTIATE_TEST_CASE_P(CUDA, GpuMat_ConvertTo, testing::Combine(
324 ALL_DEVICES,
325 DIFFERENT_SIZES,
326 ALL_DEPTH,
327 ALL_DEPTH,
328 WHOLE_SUBMAT));
329
330 ////////////////////////////////////////////////////////////////////////////////
331 // ensureSizeIsEnough
332
333 struct EnsureSizeIsEnough : testing::TestWithParam<cv::cuda::DeviceInfo>
334 {
SetUpEnsureSizeIsEnough335 virtual void SetUp()
336 {
337 cv::cuda::DeviceInfo devInfo = GetParam();
338 cv::cuda::setDevice(devInfo.deviceID());
339 }
340 };
341
// ensureSizeIsEnough must reuse the existing allocation when the requested
// matrix fits inside it: the data pointer stays the same while the header shrinks.
CUDA_TEST_P(EnsureSizeIsEnough, BufferReuse)
{
    cv::cuda::GpuMat buffer(100, 100, CV_8U);
    cv::cuda::GpuMat original = buffer;  // keeps a reference to the first allocation

    // 10x20 fits inside 100x100: no reallocation expected.
    cv::cuda::ensureSizeIsEnough(10, 20, CV_8U, buffer);
    EXPECT_EQ(10, buffer.rows);
    EXPECT_EQ(20, buffer.cols);
    EXPECT_EQ(CV_8UC1, buffer.type());
    EXPECT_EQ(reinterpret_cast<intptr_t>(original.data), reinterpret_cast<intptr_t>(buffer.data));

    // Growing again, but still within 100x100: no reallocation expected either.
    cv::cuda::ensureSizeIsEnough(20, 30, CV_8U, buffer);
    EXPECT_EQ(20, buffer.rows);
    EXPECT_EQ(30, buffer.cols);
    EXPECT_EQ(CV_8UC1, buffer.type());
    EXPECT_EQ(reinterpret_cast<intptr_t>(original.data), reinterpret_cast<intptr_t>(buffer.data));
}
361
362 INSTANTIATE_TEST_CASE_P(CUDA, EnsureSizeIsEnough, ALL_DEVICES);
363
364 #endif // HAVE_CUDA
365