/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __OPENCV_TRACKING_HPP__
#define __OPENCV_TRACKING_HPP__

#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"

namespace cv
{

//! @addtogroup video_track
//! @{

enum { OPTFLOW_USE_INITIAL_FLOW     = 4,
       OPTFLOW_LK_GET_MIN_EIGENVALS = 8,
       OPTFLOW_FARNEBACK_GAUSSIAN   = 256
     };

/** @brief Finds an object center, size, and orientation.

@param probImage Back projection of the object histogram. See calcBackProject.
@param window Initial search window.
@param criteria Stop criteria for the underlying meanShift.
@returns (in the old interfaces) Number of iterations CAMSHIFT took to converge.

The function implements the CAMSHIFT object tracking algorithm @cite Bradski98 . First, it finds an
object center using meanShift and then adjusts the window size and finds the optimal rotation. The
function returns the rotated rectangle structure that includes the object position, size, and
orientation. The next position of the search window can be obtained with RotatedRect::boundingRect().

See the OpenCV sample camshiftdemo.c that tracks colored objects.

@note
-   (Python) A sample explaining the camshift tracking algorithm can be found at
    opencv_source_code/samples/python2/camshift.py
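
A minimal usage sketch (a BGR frame `frame`, a hue histogram `hist` of the target, and the
previous-frame location `trackWindow` are assumed to exist; all names are illustrative):
@code{.cpp}
    cv::Mat hsv, backproj;
    cv::cvtColor(frame, hsv, cv::COLOR_BGR2HSV);
    const float hueRange[] = {0, 180};
    const float* ranges[] = {hueRange};
    int channels[] = {0};
    cv::calcBackProject(&hsv, 1, channels, hist, backproj, ranges);
    cv::RotatedRect box = cv::CamShift(backproj, trackWindow,
        cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
    // box describes the tracked object; trackWindow has been updated in place
@endcode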
 */
CV_EXPORTS_W RotatedRect CamShift( InputArray probImage, CV_IN_OUT Rect& window,
                                   TermCriteria criteria );

/** @brief Finds an object on a back projection image.

@param probImage Back projection of the object histogram. See calcBackProject for details.
@param window Initial search window.
@param criteria Stop criteria for the iterative search algorithm.
@returns Number of iterations the algorithm took to converge.

The function implements the iterative object search algorithm. It takes the input back projection of
an object and the initial position. The mass center of the back projection image inside window is
computed and the search window center is shifted to the mass center. The procedure is repeated until
the specified number of iterations criteria.maxCount is reached or until the window center shifts by
less than criteria.epsilon. The algorithm is used inside CamShift and, unlike CamShift, neither the
search window size nor its orientation changes during the search. You can simply pass the output of
calcBackProject to this function, but better results can be obtained if you pre-filter the back
projection and remove the noise. For example, you can do this by retrieving connected components
with findContours, throwing away contours with small area ( contourArea ), and rendering the
remaining contours with drawContours.

@note
-   A mean-shift tracking sample can be found at opencv_source_code/samples/cpp/camshiftdemo.cpp
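
A minimal usage sketch (`backproj` is a back projection computed with calcBackProject and
`window` holds the object location from the previous frame; names are illustrative):
@code{.cpp}
    int iters = cv::meanShift(backproj, window,
        cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
    // window now contains the updated object location; iters is the iteration count
@endcode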
 */
CV_EXPORTS_W int meanShift( InputArray probImage, CV_IN_OUT Rect& window, TermCriteria criteria );

/** @brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.

@param img 8-bit input image.
@param pyramid output pyramid.
@param winSize window size of the optical flow algorithm. Must not be less than the winSize argument
of calcOpticalFlowPyrLK. It is needed to calculate the required padding for pyramid levels.
@param maxLevel 0-based maximal pyramid level number.
@param withDerivatives set to precompute gradients for every pyramid level. If the pyramid is
constructed without the gradients, then calcOpticalFlowPyrLK will calculate them internally.
@param pyrBorder the border mode for pyramid layers.
@param derivBorder the border mode for gradients.
@param tryReuseInputImage put ROI of the input image into the pyramid if possible. You can pass false
to force data copying.
@return number of levels in the constructed pyramid. Can be less than maxLevel.
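
A minimal usage sketch (`prevGray` is an 8-bit grayscale frame that will be tracked against
several later frames; names are illustrative):
@code{.cpp}
    std::vector<cv::Mat> prevPyr;
    int levels = cv::buildOpticalFlowPyramid(prevGray, prevPyr, cv::Size(21, 21), 3);
    // prevPyr can now be passed as prevImg to calcOpticalFlowPyrLK and reused for
    // every subsequent frame, as long as maxLevel does not exceed levels
@endcode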
 */
CV_EXPORTS_W int buildOpticalFlowPyramid( InputArray img, OutputArrayOfArrays pyramid,
                                          Size winSize, int maxLevel, bool withDerivatives = true,
                                          int pyrBorder = BORDER_REFLECT_101,
                                          int derivBorder = BORDER_CONSTANT,
                                          bool tryReuseInputImage = true );

/** @brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
pyramids.

@param prevImg first 8-bit input image or pyramid constructed by buildOpticalFlowPyramid.
@param nextImg second input image or pyramid of the same size and the same type as prevImg.
@param prevPts vector of 2D points for which the flow needs to be found; point coordinates must be
single-precision floating-point numbers.
@param nextPts output vector of 2D points (with single-precision floating-point coordinates)
containing the calculated new positions of input features in the second image; when
OPTFLOW_USE_INITIAL_FLOW flag is passed, the vector must have the same size as in the input.
@param status output status vector (of unsigned chars); each element of the vector is set to 1 if
the flow for the corresponding features has been found, otherwise, it is set to 0.
@param err output vector of errors; each element of the vector is set to an error for the
corresponding feature; the type of the error measure can be set in the flags parameter; if the flow
wasn't found then the error is not defined (use the status parameter to find such cases).
@param winSize size of the search window at each pyramid level.
@param maxLevel 0-based maximal pyramid level number; if set to 0, pyramids are not used (single
level), if set to 1, two levels are used, and so on; if pyramids are passed to input then the
algorithm will use as many levels as the pyramids have but no more than maxLevel.
@param criteria parameter, specifying the termination criteria of the iterative search algorithm
(after the specified maximum number of iterations criteria.maxCount or when the search window
moves by less than criteria.epsilon).
@param flags operation flags:
 -   **OPTFLOW_USE_INITIAL_FLOW** uses initial estimations, stored in nextPts; if the flag is
     not set, then prevPts is copied to nextPts and is considered the initial estimate.
 -   **OPTFLOW_LK_GET_MIN_EIGENVALS** use minimum eigenvalues as an error measure (see
     the minEigThreshold description); if the flag is not set, then the L1 distance between patches
     around the original and the moved point, divided by the number of pixels in a window, is used
     as an error measure.
@param minEigThreshold the algorithm calculates the minimum eigenvalue of a 2x2 normal matrix of
optical flow equations (this matrix is called a spatial gradient matrix in @cite Bouguet00), divided
by the number of pixels in a window; if this value is less than minEigThreshold, then the
corresponding feature is filtered out and its flow is not processed, which allows bad points to be
removed and gives a performance boost.

The function implements a sparse iterative version of the Lucas-Kanade optical flow in pyramids. See
@cite Bouguet00 . The function is parallelized with the TBB library.

@note

-   An example using the Lucas-Kanade optical flow algorithm can be found at
    opencv_source_code/samples/cpp/lkdemo.cpp
-   (Python) An example using the Lucas-Kanade optical flow algorithm can be found at
    opencv_source_code/samples/python2/lk_track.py
-   (Python) An example using the Lucas-Kanade tracker for homography matching can be found at
    opencv_source_code/samples/python2/lk_homography.py
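
A minimal usage sketch (two consecutive 8-bit grayscale frames `prevGray` and `nextGray` are
assumed; names are illustrative):
@code{.cpp}
    std::vector<cv::Point2f> prevPts, nextPts;
    cv::goodFeaturesToTrack(prevGray, prevPts, 500, 0.01, 10);
    std::vector<uchar> status;
    std::vector<float> err;
    cv::calcOpticalFlowPyrLK(prevGray, nextGray, prevPts, nextPts, status, err);
    for (size_t i = 0; i < prevPts.size(); i++)
        if (status[i])
        {
            // nextPts[i] is the tracked position of prevPts[i] in nextGray
        }
@endcode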
 */
CV_EXPORTS_W void calcOpticalFlowPyrLK( InputArray prevImg, InputArray nextImg,
                                        InputArray prevPts, InputOutputArray nextPts,
                                        OutputArray status, OutputArray err,
                                        Size winSize = Size(21,21), int maxLevel = 3,
                                        TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01),
                                        int flags = 0, double minEigThreshold = 1e-4 );

/** @brief Computes a dense optical flow using Gunnar Farneback's algorithm.

@param prev first 8-bit single-channel input image.
@param next second input image of the same size and the same type as prev.
@param flow computed flow image that has the same size as prev and type CV_32FC2.
@param pyr_scale parameter, specifying the image scale (\<1) to build pyramids for each image;
pyr_scale=0.5 means a classical pyramid, where each next layer is half the size of the previous
one.
@param levels number of pyramid layers including the initial image; levels=1 means that no extra
layers are created and only the original images are used.
@param winsize averaging window size; larger values increase the algorithm robustness to image
noise and give more chances for fast motion detection, but yield a more blurred motion field.
@param iterations number of iterations the algorithm does at each pyramid level.
@param poly_n size of the pixel neighborhood used to find polynomial expansion in each pixel;
larger values mean that the image will be approximated with smoother surfaces, yielding a more
robust algorithm and a more blurred motion field; typically poly_n = 5 or 7.
@param poly_sigma standard deviation of the Gaussian that is used to smooth derivatives used as a
basis for the polynomial expansion; for poly_n=5, you can set poly_sigma=1.1, for poly_n=7, a
good value would be poly_sigma=1.5.
@param flags operation flags that can be a combination of the following:
 -   **OPTFLOW_USE_INITIAL_FLOW** uses the input flow as an initial flow approximation.
 -   **OPTFLOW_FARNEBACK_GAUSSIAN** uses the Gaussian \f$\texttt{winsize}\times\texttt{winsize}\f$
     filter instead of a box filter of the same size for optical flow estimation; usually, this
     option gives more accurate flow than a box filter, at the cost of lower speed;
     normally, winsize for a Gaussian window should be set to a larger value to achieve the same
     level of robustness.

The function finds an optical flow for each prev pixel using the @cite Farneback2003 algorithm so that

\f[\texttt{prev} (y,x)  \sim \texttt{next} ( y + \texttt{flow} (y,x)[1],  x + \texttt{flow} (y,x)[0])\f]

@note

-   An example using the optical flow algorithm described by Gunnar Farneback can be found at
    opencv_source_code/samples/cpp/fback.cpp
-   (Python) An example using the optical flow algorithm described by Gunnar Farneback can be
    found at opencv_source_code/samples/python2/opt_flow.py
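
A minimal usage sketch (8-bit single-channel frames `prevGray` and `nextGray` are assumed; the
parameter values follow the fback.cpp sample and are only a reasonable starting point):
@code{.cpp}
    cv::Mat flow;
    cv::calcOpticalFlowFarneback(prevGray, nextGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
    // flow is CV_32FC2; flow.at<cv::Point2f>(y, x) is the (dx, dy) displacement of pixel (x, y)
@endcode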
 */
CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next, InputOutputArray flow,
                                            double pyr_scale, int levels, int winsize,
                                            int iterations, int poly_n, double poly_sigma,
                                            int flags );

/** @brief Computes an optimal affine transformation between two 2D point sets.

@param src First input 2D point set stored in std::vector or Mat, or an image stored in Mat.
@param dst Second input 2D point set of the same size and the same type as src, or another image.
@param fullAffine If true, the function finds an optimal affine transformation with no additional
restrictions (6 degrees of freedom). Otherwise, the class of transformations to choose from is
limited to combinations of translation, rotation, and uniform scaling (4 degrees of freedom).

The function finds an optimal affine transform *[A|b]* (a 2 x 3 floating-point matrix) that
best approximates the affine transformation between:

*   Two point sets
*   Two raster images. In this case, the function first finds some features in the src image and
    finds the corresponding features in the dst image. After that, the problem is reduced to the
    first case.
In the case of point sets, the problem is formulated as follows: you need to find a 2x2 matrix *A*
and a 2x1 vector *b* so that:

\f[[A^*|b^*] = arg  \min _{[A|b]}  \sum _i  \| \texttt{dst}[i] - A { \texttt{src}[i]}^T - b  \| ^2\f]

where src[i] and dst[i] are the i-th points in src and dst, respectively.
\f$[A|b]\f$ can be either arbitrary (when fullAffine=true ) or have a form of
\f[\begin{bmatrix} a_{11} & a_{12} & b_1  \\ -a_{12} & a_{11} & b_2  \end{bmatrix}\f]
when fullAffine=false.

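A minimal sketch for the point-set case (the coordinates are made up for illustration and must
be corresponding points in a real application):
@code{.cpp}
    std::vector<cv::Point2f> srcPts, dstPts;
    srcPts.push_back(cv::Point2f(0.f, 0.f));     dstPts.push_back(cv::Point2f(10.f, 5.f));
    srcPts.push_back(cv::Point2f(100.f, 0.f));   dstPts.push_back(cv::Point2f(110.f, 5.f));
    srcPts.push_back(cv::Point2f(100.f, 100.f)); dstPts.push_back(cv::Point2f(110.f, 105.f));
    srcPts.push_back(cv::Point2f(0.f, 100.f));   dstPts.push_back(cv::Point2f(10.f, 105.f));
    cv::Mat M = cv::estimateRigidTransform(srcPts, dstPts, false);
    // M is a 2x3 CV_64F matrix (empty if the estimation failed); it can be applied
    // with cv::warpAffine or cv::transform
@endcode
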
@sa
getAffineTransform, getPerspectiveTransform, findHomography
 */
CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst, bool fullAffine );


enum
{
    MOTION_TRANSLATION = 0,
    MOTION_EUCLIDEAN   = 1,
    MOTION_AFFINE      = 2,
    MOTION_HOMOGRAPHY  = 3
};

/** @brief Finds the geometric transform (warp) between two images in terms of the ECC criterion @cite EP08 .

@param templateImage single-channel template image; CV_8U or CV_32F array.
@param inputImage single-channel input image which should be warped with the final warpMatrix in
order to provide an image similar to templateImage, same type as templateImage.
@param warpMatrix floating-point \f$2\times 3\f$ or \f$3\times 3\f$ mapping matrix (warp).
@param motionType parameter, specifying the type of motion:
 -   **MOTION_TRANSLATION** sets a translational motion model; warpMatrix is \f$2\times 3\f$ with
     the first \f$2\times 2\f$ part being the identity matrix and the remaining two parameters
     being estimated.
 -   **MOTION_EUCLIDEAN** sets a Euclidean (rigid) transformation as motion model; three
     parameters are estimated; warpMatrix is \f$2\times 3\f$.
 -   **MOTION_AFFINE** sets an affine motion model (DEFAULT); six parameters are estimated;
     warpMatrix is \f$2\times 3\f$.
 -   **MOTION_HOMOGRAPHY** sets a homography as a motion model; eight parameters are
     estimated; warpMatrix is \f$3\times 3\f$.
@param criteria parameter, specifying the termination criteria of the ECC algorithm;
criteria.epsilon defines the threshold of the increment in the correlation coefficient between two
iterations (a negative criteria.epsilon makes criteria.maxCount the only termination criterion).
Default values are shown in the declaration above.
@param inputMask An optional mask to indicate valid values of inputImage.

The function estimates the optimum transformation (warpMatrix) with respect to the ECC criterion
(@cite EP08), that is

\f[\texttt{warpMatrix} = \arg\max_{W} \texttt{ECC}(\texttt{templateImage}(x,y),\texttt{inputImage}(x',y'))\f]

where

\f[\begin{bmatrix} x' \\ y' \end{bmatrix} = W \cdot \begin{bmatrix} x \\ y \\ 1 \end{bmatrix}\f]

(the equation holds with homogeneous coordinates for homography). It returns the final enhanced
correlation coefficient, that is the correlation coefficient between the template image and the
final warped input image. When a \f$3\times 3\f$ matrix is given with motionType = 0, 1 or 2, the
third row is ignored.

Unlike findHomography and estimateRigidTransform, the function findTransformECC implements an
area-based alignment that builds on intensity similarities. In essence, the function updates the
initial transformation that roughly aligns the images. If this information is missing, the identity
warp (an identity matrix) should be given as input. Note that if the images undergo strong
displacements/rotations, an initial transformation that roughly aligns the images is necessary
(e.g., a simple Euclidean/similarity transform that allows the images to show approximately the
same content). Use inverse warping in the second image to take an image close to the first
one, i.e. use the flag WARP_INVERSE_MAP with warpAffine or warpPerspective. See also the OpenCV
sample image_alignment.cpp that demonstrates the use of the function. Note that the function throws
an exception if the algorithm does not converge.

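A minimal usage sketch (`templateImage` and `inputImage` are single-channel images that already
overlap roughly; names are illustrative):
@code{.cpp}
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);   // identity initial guess for a 2x3 warp
    double cc = cv::findTransformECC(templateImage, inputImage, warp, cv::MOTION_EUCLIDEAN);
    cv::Mat aligned;
    cv::warpAffine(inputImage, aligned, warp, templateImage.size(),
                   cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
    // cc is the final enhanced correlation coefficient between templateImage and aligned
@endcode
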
@sa
estimateRigidTransform, findHomography
 */
CV_EXPORTS_W double findTransformECC( InputArray templateImage, InputArray inputImage,
                                      InputOutputArray warpMatrix, int motionType = MOTION_AFFINE,
                                      TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 50, 0.001),
                                      InputArray inputMask = noArray());

/** @brief Kalman filter class.

The class implements a standard Kalman filter <http://en.wikipedia.org/wiki/Kalman_filter>,
@cite Welch95 . However, you can modify transitionMatrix, controlMatrix, and measurementMatrix to
get extended Kalman filter functionality. See the OpenCV sample kalman.cpp.

@note

-   An example using the standard Kalman filter can be found at
    opencv_source_code/samples/cpp/kalman.cpp
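
A minimal constant-velocity sketch (one spatial dimension, state = [position, velocity],
measurement = position; the noise magnitudes are illustrative):
@code{.cpp}
    cv::KalmanFilter kf(2, 1, 0);
    kf.transitionMatrix = (cv::Mat_<float>(2, 2) << 1, 1, 0, 1);      // A
    cv::setIdentity(kf.measurementMatrix);                            // H
    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-5));       // Q
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-1));   // R
    cv::setIdentity(kf.errorCovPost, cv::Scalar::all(1));             // P(0)

    cv::Mat prediction = kf.predict();                     // predicted state x'(k)
    cv::Mat measurement = (cv::Mat_<float>(1, 1) << 0.3f);
    cv::Mat estimate = kf.correct(measurement);            // corrected state x(k)
@endcode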
 */
class CV_EXPORTS_W KalmanFilter
{
public:
    /** @brief The constructors.

    @note In the C API, when the CvKalman\* kalmanFilter structure is no longer needed, it should be
    released with cvReleaseKalman(&kalmanFilter).
     */
    CV_WRAP KalmanFilter();
    /** @overload
    @param dynamParams Dimensionality of the state.
    @param measureParams Dimensionality of the measurement.
    @param controlParams Dimensionality of the control vector.
    @param type Type of the created matrices that should be CV_32F or CV_64F.
    */
    CV_WRAP KalmanFilter( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );

    /** @brief Re-initializes the Kalman filter. The previous content is destroyed.

    @param dynamParams Dimensionality of the state.
    @param measureParams Dimensionality of the measurement.
    @param controlParams Dimensionality of the control vector.
    @param type Type of the created matrices that should be CV_32F or CV_64F.
     */
    void init( int dynamParams, int measureParams, int controlParams = 0, int type = CV_32F );

    /** @brief Computes a predicted state.

    @param control The optional input control
     */
    CV_WRAP const Mat& predict( const Mat& control = Mat() );

    /** @brief Updates the predicted state from the measurement.

    @param measurement The measured system parameters
     */
    CV_WRAP const Mat& correct( const Mat& measurement );

    CV_PROP_RW Mat statePre;           //!< predicted state (x'(k)): x'(k)=A*x(k-1)+B*u(k)
    CV_PROP_RW Mat statePost;          //!< corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
    CV_PROP_RW Mat transitionMatrix;   //!< state transition matrix (A)
    CV_PROP_RW Mat controlMatrix;      //!< control matrix (B) (not used if there is no control)
    CV_PROP_RW Mat measurementMatrix;  //!< measurement matrix (H)
    CV_PROP_RW Mat processNoiseCov;    //!< process noise covariance matrix (Q)
    CV_PROP_RW Mat measurementNoiseCov;//!< measurement noise covariance matrix (R)
    CV_PROP_RW Mat errorCovPre;        //!< a priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q
    CV_PROP_RW Mat gain;               //!< Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
    CV_PROP_RW Mat errorCovPost;       //!< a posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)

    // temporary matrices
    Mat temp1;
    Mat temp2;
    Mat temp3;
    Mat temp4;
    Mat temp5;
};


class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
{
public:
    /** @brief Calculates an optical flow.

    @param I0 first 8-bit single-channel input image.
    @param I1 second input image of the same size and the same type as I0.
    @param flow computed flow image that has the same size as I0 and type CV_32FC2.
     */
    CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
    /** @brief Releases all inner buffers.
    */
    CV_WRAP virtual void collectGarbage() = 0;
};

/** @brief "Dual TV L1" Optical Flow Algorithm.

The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and
@cite Javier2012 .
Here are important members of the class that control the algorithm, which you can set after
constructing the class instance:

-   member double tau
    Time step of the numerical scheme.

-   member double lambda
    Weight parameter for the data term, attachment parameter. This is the most relevant
    parameter, which determines the smoothness of the output. The smaller this parameter is,
    the smoother the solutions we obtain. It depends on the range of motions of the images, so
    its value should be adapted to each image sequence.

-   member double theta
    Weight parameter for (u - v)\^2, tightness parameter. It serves as a link between the
    attachment and the regularization terms. In theory, it should have a small value in order
    to maintain both parts in correspondence. The method is stable for a large range of values
    of this parameter.

-   member int nscales
    Number of scales used to create the pyramid of images.

-   member int warps
    Number of warpings per scale. Represents the number of times that I1(x+u0) and grad(
    I1(x+u0) ) are computed per scale. This is a parameter that assures the stability of the
    method. It also affects the running time, so it is a compromise between speed and
    accuracy.

-   member double epsilon
    Stopping criterion threshold used in the numerical scheme, which is a trade-off between
    precision and running time. A small value will yield more accurate solutions at the
    expense of a slower convergence.

-   member int iterations
    Stopping criterion iterations number used in the numerical scheme.

C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
*/
class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
    //! @brief Time step of the numerical scheme
    /** @see setTau */
    virtual double getTau() const = 0;
    /** @copybrief getTau @see getTau */
    virtual void setTau(double val) = 0;
    //! @brief Weight parameter for the data term, attachment parameter
    /** @see setLambda */
    virtual double getLambda() const = 0;
    /** @copybrief getLambda @see getLambda */
    virtual void setLambda(double val) = 0;
    //! @brief Weight parameter for (u - v)^2, tightness parameter
    /** @see setTheta */
    virtual double getTheta() const = 0;
    /** @copybrief getTheta @see getTheta */
    virtual void setTheta(double val) = 0;
    //! @brief coefficient for additional illumination variation term
    /** @see setGamma */
    virtual double getGamma() const = 0;
    /** @copybrief getGamma @see getGamma */
    virtual void setGamma(double val) = 0;
    //! @brief Number of scales used to create the pyramid of images
    /** @see setScalesNumber */
    virtual int getScalesNumber() const = 0;
    /** @copybrief getScalesNumber @see getScalesNumber */
    virtual void setScalesNumber(int val) = 0;
    //! @brief Number of warpings per scale
    /** @see setWarpingsNumber */
    virtual int getWarpingsNumber() const = 0;
    /** @copybrief getWarpingsNumber @see getWarpingsNumber */
    virtual void setWarpingsNumber(int val) = 0;
    //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
    /** @see setEpsilon */
    virtual double getEpsilon() const = 0;
    /** @copybrief getEpsilon @see getEpsilon */
    virtual void setEpsilon(double val) = 0;
    //! @brief Inner iterations (between outlier filtering) used in the numerical scheme
    /** @see setInnerIterations */
    virtual int getInnerIterations() const = 0;
    /** @copybrief getInnerIterations @see getInnerIterations */
    virtual void setInnerIterations(int val) = 0;
    //! @brief Outer iterations (number of inner loops) used in the numerical scheme
    /** @see setOuterIterations */
    virtual int getOuterIterations() const = 0;
    /** @copybrief getOuterIterations @see getOuterIterations */
    virtual void setOuterIterations(int val) = 0;
    //! @brief Use initial flow
    /** @see setUseInitialFlow */
    virtual bool getUseInitialFlow() const = 0;
    /** @copybrief getUseInitialFlow @see getUseInitialFlow */
    virtual void setUseInitialFlow(bool val) = 0;
    //! @brief Step between scales (<1)
    /** @see setScaleStep */
    virtual double getScaleStep() const = 0;
    /** @copybrief getScaleStep @see getScaleStep */
    virtual void setScaleStep(double val) = 0;
    //! @brief Median filter kernel size (1 = no filter) (3 or 5)
    /** @see setMedianFiltering */
    virtual int getMedianFiltering() const = 0;
    /** @copybrief getMedianFiltering @see getMedianFiltering */
    virtual void setMedianFiltering(int val) = 0;
};

/** @brief Creates an instance of cv::DualTVL1OpticalFlow (the "Dual TV L1" cv::DenseOpticalFlow implementation).
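
A minimal usage sketch (8-bit single-channel frames `prevGray` and `nextGray` are assumed):
@code{.cpp}
    cv::Ptr<cv::DualTVL1OpticalFlow> tvl1 = cv::createOptFlow_DualTVL1();
    tvl1->setLambda(0.1);                    // optional tuning; see DualTVL1OpticalFlow above
    cv::Mat flow;
    tvl1->calc(prevGray, nextGray, flow);    // flow has the same size as prevGray, type CV_32FC2
@endcode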
*/
CV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();

//! @} video_track

} // cv

#endif