/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

using namespace cv;
using namespace cv::cuda;

#if !defined(HAVE_CUDA) || defined(CUDA_DISABLER) || !defined(HAVE_OPENCV_IMGPROC) || !defined(HAVE_OPENCV_CUDAARITHM) || !defined(HAVE_OPENCV_CUDAIMGPROC)

cv::cuda::FGDParams::FGDParams() { throw_no_cuda(); }

Ptr<cuda::BackgroundSubtractorFGD> cv::cuda::createBackgroundSubtractorFGD(const FGDParams&) { throw_no_cuda(); return Ptr<cuda::BackgroundSubtractorFGD>(); }

#else

#include "cuda/fgd.hpp"
#include "opencv2/imgproc/imgproc_c.h"

/////////////////////////////////////////////////////////////////////////
// FGDParams

namespace
{
    // Default parameters of the foreground detection algorithm:
    const int BGFG_FGD_LC  = 128;
    const int BGFG_FGD_N1C = 15;
    const int BGFG_FGD_N2C = 25;

    const int BGFG_FGD_LCC  = 64;
    const int BGFG_FGD_N1CC = 25;
    const int BGFG_FGD_N2CC = 40;

    // Background reference image update parameter:
    const float BGFG_FGD_ALPHA_1 = 0.1f;

    // Statistical model update parameter:
    // 0.002f corresponds to ~1000 frames (~45 sec), 0.005f to ~18 sec (at 25 fps with an absolutely static background).
    const float BGFG_FGD_ALPHA_2 = 0.005f;

    // Start value for the alpha parameter (to quickly initialize the statistical model):
    const float BGFG_FGD_ALPHA_3 = 0.1f;

    const float BGFG_FGD_DELTA = 2.0f;

    const float BGFG_FGD_T = 0.9f;

    const float BGFG_FGD_MINAREA = 15.0f;
}

cv::cuda::FGDParams::FGDParams()
{
    Lc = BGFG_FGD_LC;
    N1c = BGFG_FGD_N1C;
    N2c = BGFG_FGD_N2C;

    Lcc = BGFG_FGD_LCC;
    N1cc = BGFG_FGD_N1CC;
    N2cc = BGFG_FGD_N2CC;

    delta = BGFG_FGD_DELTA;

    alpha1 = BGFG_FGD_ALPHA_1;
    alpha2 = BGFG_FGD_ALPHA_2;
    alpha3 = BGFG_FGD_ALPHA_3;

    T = BGFG_FGD_T;
    minArea = BGFG_FGD_MINAREA;

    is_obj_without_holes = true;
    perform_morphing = 1;
}

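// A minimal usage sketch (kept in a comment so it does not affect this translation unit).
// It assumes a CUDA-capable device and an 8UC3/8UC4 frame already uploaded to the GPU;
// names such as d_frame and d_fgmask are illustrative only.
//
//     cv::cuda::FGDParams params;          // defaults from the constants above
//     params.minArea = 30.0f;              // e.g. discard smaller blobs
//     cv::Ptr<cv::cuda::BackgroundSubtractorFGD> fgd =
//         cv::cuda::createBackgroundSubtractorFGD(params);
//
//     cv::cuda::GpuMat d_frame, d_fgmask;
//     d_frame.upload(frame);               // frame: cv::Mat of type CV_8UC3 or CV_8UC4
//     fgd->apply(d_frame, d_fgmask);       // the first call only initializes the model
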
/////////////////////////////////////////////////////////////////////////
// copyChannels

namespace
{
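    // Copies src into dst, converting the channel count to dst_cn when it differs
    // (via cuda::cvtColor); dst_cn < 0 keeps the source channel count.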
    void copyChannels(const GpuMat& src, GpuMat& dst, int dst_cn = -1)
    {
        const int src_cn = src.channels();

        if (dst_cn < 0)
            dst_cn = src_cn;

        cuda::ensureSizeIsEnough(src.size(), CV_MAKE_TYPE(src.depth(), dst_cn), dst);

        if (src_cn == dst_cn)
        {
            src.copyTo(dst);
        }
        else
        {
            static const int cvt_codes[4][4] =
            {
                {-1, -1, COLOR_GRAY2BGR, COLOR_GRAY2BGRA},
                {-1, -1, -1, -1},
                {COLOR_BGR2GRAY, -1, -1, COLOR_BGR2BGRA},
                {COLOR_BGRA2GRAY, -1, COLOR_BGRA2BGR, -1}
            };

            const int cvt_code = cvt_codes[src_cn - 1][dst_cn - 1];
            CV_DbgAssert( cvt_code >= 0 );

            cuda::cvtColor(src, dst, cvt_code, dst_cn);
        }
    }
}

/////////////////////////////////////////////////////////////////////////
// changeDetection

namespace
{
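    // Builds per-channel 256-bin histograms of the difference between prevFrame and curFrame
    // on the GPU; histBuf holds the per-block partial histograms used by the kernel.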
    void calcDiffHistogram(const GpuMat& prevFrame, const GpuMat& curFrame, GpuMat& hist, GpuMat& histBuf)
    {
        typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame,
                               unsigned int* hist0, unsigned int* hist1, unsigned int* hist2,
                               unsigned int* partialBuf0, unsigned int* partialBuf1, unsigned int* partialBuf2,
                               bool cc20, cudaStream_t stream);
        static const func_t funcs[4][4] =
        {
            {0,0,0,0},
            {0,0,0,0},
            {0,0,fgd::calcDiffHistogram_gpu<uchar3, uchar3>,fgd::calcDiffHistogram_gpu<uchar3, uchar4>},
            {0,0,fgd::calcDiffHistogram_gpu<uchar4, uchar3>,fgd::calcDiffHistogram_gpu<uchar4, uchar4>}
        };

        hist.create(3, 256, CV_32SC1);
        histBuf.create(3, fgd::PARTIAL_HISTOGRAM_COUNT * fgd::HISTOGRAM_BIN_COUNT, CV_32SC1);

        funcs[prevFrame.channels() - 1][curFrame.channels() - 1](
            prevFrame, curFrame,
            hist.ptr<unsigned int>(0), hist.ptr<unsigned int>(1), hist.ptr<unsigned int>(2),
            histBuf.ptr<unsigned int>(0), histBuf.ptr<unsigned int>(1), histBuf.ptr<unsigned int>(2),
            deviceSupports(FEATURE_SET_COMPUTE_20), 0);
    }

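    // For each candidate threshold, computes per channel the standard deviation of the
    // difference values falling in bins at or above that threshold; changeDetection uses
    // this to pick the change-detection threshold.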
    void calcRelativeVariance(unsigned int hist[3 * 256], double relativeVariance[3][fgd::HISTOGRAM_BIN_COUNT])
    {
        std::memset(relativeVariance, 0, 3 * fgd::HISTOGRAM_BIN_COUNT * sizeof(double));

        for (int thres = fgd::HISTOGRAM_BIN_COUNT - 2; thres >= 0; --thres)
        {
            Vec3d sum(0.0, 0.0, 0.0);
            Vec3d sqsum(0.0, 0.0, 0.0);
            Vec3i count(0, 0, 0);

            for (int j = thres; j < fgd::HISTOGRAM_BIN_COUNT; ++j)
            {
                sum[0]   += static_cast<double>(j) * hist[j];
                sqsum[0] += static_cast<double>(j * j) * hist[j];
                count[0] += hist[j];

                sum[1]   += static_cast<double>(j) * hist[j + 256];
                sqsum[1] += static_cast<double>(j * j) * hist[j + 256];
                count[1] += hist[j + 256];

                sum[2]   += static_cast<double>(j) * hist[j + 512];
                sqsum[2] += static_cast<double>(j * j) * hist[j + 512];
                count[2] += hist[j + 512];
            }

            count[0] = std::max(count[0], 1);
            count[1] = std::max(count[1], 1);
            count[2] = std::max(count[2], 1);

            Vec3d my(
                sum[0] / count[0],
                sum[1] / count[1],
                sum[2] / count[2]
            );

            relativeVariance[0][thres] = std::sqrt(sqsum[0] / count[0] - my[0] * my[0]);
            relativeVariance[1][thres] = std::sqrt(sqsum[1] / count[1] - my[1] * my[1]);
            relativeVariance[2][thres] = std::sqrt(sqsum[2] / count[2] - my[2] * my[2]);
        }
    }

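    // Thresholds the per-channel frame difference with bestThres to produce the binary
    // change mask (GPU kernel fgd::calcDiffThreshMask_gpu).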
    void calcDiffThreshMask(const GpuMat& prevFrame, const GpuMat& curFrame, Vec3d bestThres, GpuMat& changeMask)
    {
        typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, uchar3 bestThres, PtrStepSzb changeMask, cudaStream_t stream);
        static const func_t funcs[4][4] =
        {
            {0,0,0,0},
            {0,0,0,0},
            {0,0,fgd::calcDiffThreshMask_gpu<uchar3, uchar3>,fgd::calcDiffThreshMask_gpu<uchar3, uchar4>},
            {0,0,fgd::calcDiffThreshMask_gpu<uchar4, uchar3>,fgd::calcDiffThreshMask_gpu<uchar4, uchar4>}
        };

        changeMask.setTo(Scalar::all(0));

        funcs[prevFrame.channels() - 1][curFrame.channels() - 1](prevFrame, curFrame,
                                                                 make_uchar3((uchar)bestThres[0], (uchar)bestThres[1], (uchar)bestThres[2]),
                                                                 changeMask, 0);
    }

    // Performs change detection for the foreground detection algorithm.
    void changeDetection(const GpuMat& prevFrame, const GpuMat& curFrame, GpuMat& changeMask, GpuMat& hist, GpuMat& histBuf)
    {
        calcDiffHistogram(prevFrame, curFrame, hist, histBuf);

        unsigned int histData[3 * 256];
        Mat h_hist(3, 256, CV_32SC1, histData);
        hist.download(h_hist);

        double relativeVariance[3][fgd::HISTOGRAM_BIN_COUNT];
        calcRelativeVariance(histData, relativeVariance);

        // Find the maximum:
        Vec3d bestThres(10.0, 10.0, 10.0);
        for (int i = 0; i < fgd::HISTOGRAM_BIN_COUNT; ++i)
        {
            bestThres[0] = std::max(bestThres[0], relativeVariance[0][i]);
            bestThres[1] = std::max(bestThres[1], relativeVariance[1][i]);
            bestThres[2] = std::max(bestThres[2], relativeVariance[2][i]);
        }

        calcDiffThreshMask(prevFrame, curFrame, bestThres, changeMask);
    }
}

/////////////////////////////////////////////////////////////////////////
// bgfgClassification

namespace
{
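    // Classifies each pixel as foreground or background from the change masks Ftd
    // (frame-to-frame) and Fbd (background-to-frame) using the learned color and color
    // co-occurrence statistics, scales the mask to 0/255 and returns the number of
    // foreground pixels.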
    int bgfgClassification(const GpuMat& prevFrame, const GpuMat& curFrame,
                           const GpuMat& Ftd, const GpuMat& Fbd,
                           GpuMat& foreground,
                           const FGDParams& params, int out_cn)
    {
        typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd, PtrStepSzb foreground,
                               int deltaC, int deltaCC, float alpha2, int N1c, int N1cc, cudaStream_t stream);
        static const func_t funcs[4][4][4] =
        {
            {
                {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}
            },
            {
                {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}
            },
            {
                {0,0,0,0}, {0,0,0,0},
                {0,0,fgd::bgfgClassification_gpu<uchar3, uchar3, uchar3>,fgd::bgfgClassification_gpu<uchar3, uchar3, uchar4>},
                {0,0,fgd::bgfgClassification_gpu<uchar3, uchar4, uchar3>,fgd::bgfgClassification_gpu<uchar3, uchar4, uchar4>}
            },
            {
                {0,0,0,0}, {0,0,0,0},
                {0,0,fgd::bgfgClassification_gpu<uchar4, uchar3, uchar3>,fgd::bgfgClassification_gpu<uchar4, uchar3, uchar4>},
                {0,0,fgd::bgfgClassification_gpu<uchar4, uchar4, uchar3>,fgd::bgfgClassification_gpu<uchar4, uchar4, uchar4>}
            }
        };

        const int deltaC  = cvRound(params.delta * 256 / params.Lc);
        const int deltaCC = cvRound(params.delta * 256 / params.Lcc);

        funcs[prevFrame.channels() - 1][curFrame.channels() - 1][out_cn - 1](prevFrame, curFrame, Ftd, Fbd, foreground,
                                                                             deltaC, deltaCC, params.alpha2,
                                                                             params.N1c, params.N1cc, 0);

        int count = cuda::countNonZero(foreground);

        cuda::multiply(foreground, Scalar::all(255), foreground);

        return count;
    }
}

/////////////////////////////////////////////////////////////////////////
// smoothForeground

#ifdef HAVE_OPENCV_CUDAFILTERS

namespace
{
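    // Pads src with a constant border (brdVal) and runs the morphology filter on the
    // interior ROI so the structuring element reads well-defined values at image edges.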
    void morphology(const GpuMat& src, GpuMat& dst, GpuMat& filterBrd, int brd, Ptr<cuda::Filter>& filter, Scalar brdVal)
    {
        cuda::copyMakeBorder(src, filterBrd, brd, brd, brd, brd, BORDER_CONSTANT, brdVal);
        filter->apply(filterBrd(Rect(brd, brd, src.cols, src.rows)), dst);
    }

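    // Cleans the foreground mask in place: a morphological opening (erode then dilate)
    // removes small speckles, and a closing (dilate then erode) fills small holes.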
    void smoothForeground(GpuMat& foreground, GpuMat& filterBrd, GpuMat& buf,
                          Ptr<cuda::Filter>& erodeFilter, Ptr<cuda::Filter>& dilateFilter,
                          const FGDParams& params)
    {
        const int brd = params.perform_morphing;

        const Scalar erodeBrdVal = Scalar::all(UCHAR_MAX);
        const Scalar dilateBrdVal = Scalar::all(0);

        // MORPH_OPEN
        morphology(foreground, buf, filterBrd, brd, erodeFilter, erodeBrdVal);
        morphology(buf, foreground, filterBrd, brd, dilateFilter, dilateBrdVal);

        // MORPH_CLOSE
        morphology(foreground, buf, filterBrd, brd, dilateFilter, dilateBrdVal);
        morphology(buf, foreground, filterBrd, brd, erodeFilter, erodeBrdVal);
    }
}

#endif

/////////////////////////////////////////////////////////////////////////
// findForegroundRegions

namespace
{
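    // Converts the legacy CvSeq contour list produced by cvFindContours into a
    // vector-of-points representation through the OutputArrayOfArrays interface.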
    void seqToContours(CvSeq* _ccontours, CvMemStorage* storage, OutputArrayOfArrays _contours)
    {
        Seq<CvSeq*> all_contours(cvTreeToNodeSeq(_ccontours, sizeof(CvSeq), storage));

        size_t total = all_contours.size();

        _contours.create((int) total, 1, 0, -1, true);

        SeqIterator<CvSeq*> it = all_contours.begin();
        for (size_t i = 0; i < total; ++i, ++it)
        {
            CvSeq* c = *it;
            ((CvContour*)c)->color = (int)i;
            _contours.create((int)c->total, 1, CV_32SC2, (int)i, true);
            Mat ci = _contours.getMat((int)i);
            CV_Assert( ci.isContinuous() );
            cvCvtSeqToArray(c, ci.data);
        }
    }

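    // Downloads the foreground mask, extracts contours on the CPU, removes regions smaller
    // than params.minArea (and holes when is_obj_without_holes is set), redraws the surviving
    // regions as filled contours and uploads the cleaned mask back to the GPU.
    // Returns the number of remaining regions.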
    int findForegroundRegions(GpuMat& d_foreground, Mat& h_foreground, std::vector< std::vector<Point> >& foreground_regions,
                              CvMemStorage* storage, const FGDParams& params)
    {
        int region_count = 0;

        // Discard under-size foreground regions:

        d_foreground.download(h_foreground);
        IplImage ipl_foreground = h_foreground;
        CvSeq* first_seq = 0;

        cvFindContours(&ipl_foreground, storage, &first_seq, sizeof(CvContour), CV_RETR_LIST);

        for (CvSeq* seq = first_seq; seq; seq = seq->h_next)
        {
            CvContour* cnt = reinterpret_cast<CvContour*>(seq);

            if (cnt->rect.width * cnt->rect.height < params.minArea || (params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)))
            {
                // Delete under-size contour:
                CvSeq* prev_seq = seq->h_prev;
                if (prev_seq)
                {
                    prev_seq->h_next = seq->h_next;

                    if (seq->h_next)
                        seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;

                    if (seq->h_next)
                        seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }

        seqToContours(first_seq, storage, foreground_regions);
        h_foreground.setTo(0);

        drawContours(h_foreground, foreground_regions, -1, Scalar::all(255), -1);

        d_foreground.upload(h_foreground);

        return region_count;
    }
}

/////////////////////////////////////////////////////////////////////////
// updateBackgroundModel

namespace
{
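    // Updates the per-pixel color / color co-occurrence statistics and the background
    // reference image from the current frame, the change masks and the foreground mask
    // (GPU kernel fgd::updateBackgroundModel_gpu).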
    void updateBackgroundModel(const GpuMat& prevFrame, const GpuMat& curFrame, const GpuMat& Ftd, const GpuMat& Fbd,
                               const GpuMat& foreground, GpuMat& background,
                               const FGDParams& params)
    {
        typedef void (*func_t)(PtrStepSzb prevFrame, PtrStepSzb curFrame, PtrStepSzb Ftd, PtrStepSzb Fbd,
                               PtrStepSzb foreground, PtrStepSzb background,
                               int deltaC, int deltaCC, float alpha1, float alpha2, float alpha3, int N1c, int N1cc, int N2c, int N2cc, float T, cudaStream_t stream);
        static const func_t funcs[4][4][4] =
        {
            {
                {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}
            },
            {
                {0,0,0,0}, {0,0,0,0}, {0,0,0,0}, {0,0,0,0}
            },
            {
                {0,0,0,0}, {0,0,0,0},
                {0,0,fgd::updateBackgroundModel_gpu<uchar3, uchar3, uchar3>,fgd::updateBackgroundModel_gpu<uchar3, uchar3, uchar4>},
                {0,0,fgd::updateBackgroundModel_gpu<uchar3, uchar4, uchar3>,fgd::updateBackgroundModel_gpu<uchar3, uchar4, uchar4>}
            },
            {
                {0,0,0,0}, {0,0,0,0},
                {0,0,fgd::updateBackgroundModel_gpu<uchar4, uchar3, uchar3>,fgd::updateBackgroundModel_gpu<uchar4, uchar3, uchar4>},
                {0,0,fgd::updateBackgroundModel_gpu<uchar4, uchar4, uchar3>,fgd::updateBackgroundModel_gpu<uchar4, uchar4, uchar4>}
            }
        };

        const int deltaC  = cvRound(params.delta * 256 / params.Lc);
        const int deltaCC = cvRound(params.delta * 256 / params.Lcc);

        funcs[prevFrame.channels() - 1][curFrame.channels() - 1][background.channels() - 1](
            prevFrame, curFrame, Ftd, Fbd, foreground, background,
            deltaC, deltaCC, params.alpha1, params.alpha2, params.alpha3,
            params.N1c, params.N1cc, params.N2c, params.N2cc, params.T,
            0);
    }
}


namespace
{
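    // GPU storage for the per-pixel FGD statistics: background probabilities (Pbc_/Pbcc_),
    // "trained" flags and the color (ctable_*) and color co-occurrence (cctable_*) tables.
    // The conversion operator packs the raw pointers and steps into the device-side
    // fgd::BGPixelStat descriptor.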
    class BGPixelStat
    {
    public:
        void create(Size size, const FGDParams& params);

        void setTrained();

        operator fgd::BGPixelStat();

    private:
        GpuMat Pbc_;
        GpuMat Pbcc_;
        GpuMat is_trained_st_model_;
        GpuMat is_trained_dyn_model_;

        GpuMat ctable_Pv_;
        GpuMat ctable_Pvb_;
        GpuMat ctable_v_;

        GpuMat cctable_Pv_;
        GpuMat cctable_Pvb_;
        GpuMat cctable_v1_;
        GpuMat cctable_v2_;
    };

    void BGPixelStat::create(Size size, const FGDParams& params)
    {
        cuda::ensureSizeIsEnough(size, CV_32FC1, Pbc_);
        Pbc_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(size, CV_32FC1, Pbcc_);
        Pbcc_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(size, CV_8UC1, is_trained_st_model_);
        is_trained_st_model_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(size, CV_8UC1, is_trained_dyn_model_);
        is_trained_dyn_model_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_32FC1, ctable_Pv_);
        ctable_Pv_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_32FC1, ctable_Pvb_);
        ctable_Pvb_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2c * size.height, size.width, CV_8UC4, ctable_v_);
        ctable_v_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_32FC1, cctable_Pv_);
        cctable_Pv_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_32FC1, cctable_Pvb_);
        cctable_Pvb_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_8UC4, cctable_v1_);
        cctable_v1_.setTo(Scalar::all(0));

        cuda::ensureSizeIsEnough(params.N2cc * size.height, size.width, CV_8UC4, cctable_v2_);
        cctable_v2_.setTo(Scalar::all(0));
    }

    void BGPixelStat::setTrained()
    {
        is_trained_st_model_.setTo(Scalar::all(1));
        is_trained_dyn_model_.setTo(Scalar::all(1));
    }

    BGPixelStat::operator fgd::BGPixelStat()
    {
        fgd::BGPixelStat stat;

        stat.rows_ = Pbc_.rows;

        stat.Pbc_data_ = Pbc_.data;
        stat.Pbc_step_ = Pbc_.step;

        stat.Pbcc_data_ = Pbcc_.data;
        stat.Pbcc_step_ = Pbcc_.step;

        stat.is_trained_st_model_data_ = is_trained_st_model_.data;
        stat.is_trained_st_model_step_ = is_trained_st_model_.step;

        stat.is_trained_dyn_model_data_ = is_trained_dyn_model_.data;
        stat.is_trained_dyn_model_step_ = is_trained_dyn_model_.step;

        stat.ctable_Pv_data_ = ctable_Pv_.data;
        stat.ctable_Pv_step_ = ctable_Pv_.step;

        stat.ctable_Pvb_data_ = ctable_Pvb_.data;
        stat.ctable_Pvb_step_ = ctable_Pvb_.step;

        stat.ctable_v_data_ = ctable_v_.data;
        stat.ctable_v_step_ = ctable_v_.step;

        stat.cctable_Pv_data_ = cctable_Pv_.data;
        stat.cctable_Pv_step_ = cctable_Pv_.step;

        stat.cctable_Pvb_data_ = cctable_Pvb_.data;
        stat.cctable_Pvb_step_ = cctable_Pvb_.step;

        stat.cctable_v1_data_ = cctable_v1_.data;
        stat.cctable_v1_step_ = cctable_v1_.step;

        stat.cctable_v2_data_ = cctable_v2_.data;
        stat.cctable_v2_step_ = cctable_v2_.step;

        return stat;
    }

    class FGDImpl : public cuda::BackgroundSubtractorFGD
    {
    public:
        explicit FGDImpl(const FGDParams& params);
        ~FGDImpl();

        void apply(InputArray image, OutputArray fgmask, double learningRate=-1);

        void getBackgroundImage(OutputArray backgroundImage) const;

        void getForegroundRegions(OutputArrayOfArrays foreground_regions);

    private:
        void initialize(const GpuMat& firstFrame);

        FGDParams params_;
        Size frameSize_;

        GpuMat background_;
        GpuMat foreground_;
        std::vector< std::vector<Point> > foreground_regions_;

        Mat h_foreground_;

        GpuMat prevFrame_;
        GpuMat Ftd_;
        GpuMat Fbd_;
        BGPixelStat stat_;

        GpuMat hist_;
        GpuMat histBuf_;

        GpuMat buf_;
        GpuMat filterBrd_;

#ifdef HAVE_OPENCV_CUDAFILTERS
        Ptr<cuda::Filter> dilateFilter_;
        Ptr<cuda::Filter> erodeFilter_;
#endif

        CvMemStorage* storage_;
    };

    FGDImpl::FGDImpl(const FGDParams& params) : params_(params), frameSize_(0, 0)
    {
        storage_ = cvCreateMemStorage();
        CV_Assert( storage_ != 0 );
    }

    FGDImpl::~FGDImpl()
    {
        cvReleaseMemStorage(&storage_);
    }

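    // Per-frame processing pipeline: change detection against the previous frame and the
    // background image, foreground classification, optional morphological smoothing, region
    // filtering by minArea, then background model update. The first frame (or a change of
    // frame size) only (re)initializes the model.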
    void FGDImpl::apply(InputArray _frame, OutputArray fgmask, double)
    {
        GpuMat curFrame = _frame.getGpuMat();

        if (curFrame.size() != frameSize_)
        {
            initialize(curFrame);
            return;
        }

        CV_Assert( curFrame.type() == CV_8UC3 || curFrame.type() == CV_8UC4 );
        CV_Assert( curFrame.size() == prevFrame_.size() );

        cvClearMemStorage(storage_);
        foreground_regions_.clear();
        foreground_.setTo(Scalar::all(0));

        changeDetection(prevFrame_, curFrame, Ftd_, hist_, histBuf_);
        changeDetection(background_, curFrame, Fbd_, hist_, histBuf_);

        int FG_pixels_count = bgfgClassification(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, params_, 4);

#ifdef HAVE_OPENCV_CUDAFILTERS
        if (params_.perform_morphing > 0)
            smoothForeground(foreground_, filterBrd_, buf_, erodeFilter_, dilateFilter_, params_);
#endif

        if (params_.minArea > 0 || params_.is_obj_without_holes)
            findForegroundRegions(foreground_, h_foreground_, foreground_regions_, storage_, params_);

        // Check ALL BG update condition:
        const double BGFG_FGD_BG_UPDATE_TRESH = 0.5;
        if (static_cast<double>(FG_pixels_count) / Ftd_.size().area() > BGFG_FGD_BG_UPDATE_TRESH)
            stat_.setTrained();

        updateBackgroundModel(prevFrame_, curFrame, Ftd_, Fbd_, foreground_, background_, params_);

        copyChannels(curFrame, prevFrame_, 4);

        foreground_.copyTo(fgmask);
    }

    void FGDImpl::getBackgroundImage(OutputArray backgroundImage) const
    {
        cuda::cvtColor(background_, backgroundImage, COLOR_BGRA2BGR);
    }

    void FGDImpl::getForegroundRegions(OutputArrayOfArrays dst)
    {
        size_t total = foreground_regions_.size();

        dst.create((int) total, 1, 0, -1, true);

        for (size_t i = 0; i < total; ++i)
        {
            std::vector<Point>& c = foreground_regions_[i];

            dst.create((int) c.size(), 1, CV_32SC2, (int) i, true);
            Mat ci = dst.getMat((int) i);

            Mat(ci.size(), ci.type(), &c[0]).copyTo(ci);
        }
    }

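    // (Re)allocates all buffers for the new frame size, seeds the background and previous
    // frame with the first frame (converted to 4 channels), resets the pixel statistics and
    // creates the morphology filters used by smoothForeground.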
    void FGDImpl::initialize(const GpuMat& firstFrame)
    {
        CV_Assert( firstFrame.type() == CV_8UC3 || firstFrame.type() == CV_8UC4 );

        frameSize_ = firstFrame.size();

        cuda::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, foreground_);

        copyChannels(firstFrame, background_, 4);
        copyChannels(firstFrame, prevFrame_, 4);

        cuda::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, Ftd_);
        cuda::ensureSizeIsEnough(firstFrame.size(), CV_8UC1, Fbd_);

        stat_.create(firstFrame.size(), params_);
        fgd::setBGPixelStat(stat_);

#ifdef HAVE_OPENCV_CUDAFILTERS
        if (params_.perform_morphing > 0)
        {
            Mat kernel = getStructuringElement(MORPH_RECT, Size(1 + params_.perform_morphing * 2, 1 + params_.perform_morphing * 2));
            Point anchor(params_.perform_morphing, params_.perform_morphing);

            dilateFilter_ = cuda::createMorphologyFilter(MORPH_DILATE, CV_8UC1, kernel, anchor);
            erodeFilter_ = cuda::createMorphologyFilter(MORPH_ERODE, CV_8UC1, kernel, anchor);
        }
#endif
    }
}

Ptr<cuda::BackgroundSubtractorFGD> cv::cuda::createBackgroundSubtractorFGD(const FGDParams& params)
{
    return makePtr<FGDImpl>(params);
}

#endif // HAVE_CUDA