/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
41 #include "precomp.hpp"
42 #include "opencl_kernels_imgproc.hpp"
43
44 namespace cv
45 {
46
// The function computes the center of gravity and the central moments up to the 3rd order,
// as well as the normalized central moments, from the raw spatial moments already stored in *moments.
static void completeMomentState( Moments* moments )
{
    double cx = 0, cy = 0;
    double mu20, mu11, mu02;
    double inv_m00 = 0.0;
    assert( moments != 0 );

    if( fabs(moments->m00) > DBL_EPSILON )
    {
        inv_m00 = 1. / moments->m00;
        cx = moments->m10 * inv_m00;
        cy = moments->m01 * inv_m00;
    }

    // mu20 = m20 - m10*cx
    mu20 = moments->m20 - moments->m10 * cx;
    // mu11 = m11 - m10*cy
    mu11 = moments->m11 - moments->m10 * cy;
    // mu02 = m02 - m01*cy
    mu02 = moments->m02 - moments->m01 * cy;

    moments->mu20 = mu20;
    moments->mu11 = mu11;
    moments->mu02 = mu02;

    // mu30 = m30 - cx*(3*mu20 + cx*m10)
    moments->mu30 = moments->m30 - cx * (3 * mu20 + cx * moments->m10);
    mu11 += mu11;
    // mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20
    moments->mu21 = moments->m21 - cx * (mu11 + cx * moments->m01) - cy * mu20;
    // mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02
    moments->mu12 = moments->m12 - cy * (mu11 + cy * moments->m10) - cx * mu02;
    // mu03 = m03 - cy*(3*mu02 + cy*m01)
    moments->mu03 = moments->m03 - cy * (3 * mu02 + cy * moments->m01);

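    // Normalized central moments: nu_pq = mu_pq / m00^((p+q)/2 + 1);
    // s2 and s3 below are the corresponding scale factors for the 2nd- and 3rd-order moments.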
    double inv_sqrt_m00 = std::sqrt(std::abs(inv_m00));
    double s2 = inv_m00*inv_m00, s3 = s2*inv_sqrt_m00;

    moments->nu20 = moments->mu20*s2; moments->nu11 = moments->mu11*s2; moments->nu02 = moments->mu02*s2;
    moments->nu30 = moments->mu30*s3; moments->nu21 = moments->mu21*s3; moments->nu12 = moments->mu12*s3; moments->nu03 = moments->mu03*s3;
}


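// Computes the moments of a polygon (contour) by integrating along its edges (Green's theorem):
// each edge (x_{i-1}, y_{i-1}) -> (x_i, y_i) contributes through the cross term
// dxy = x_{i-1}*y_i - x_i*y_{i-1}; the a** accumulators below are the unscaled edge sums
// that are converted into the raw moments m00..m03 at the end.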
static Moments contourMoments( const Mat& contour )
{
    Moments m;
    int lpt = contour.checkVector(2);
    int is_float = contour.depth() == CV_32F;
    const Point* ptsi = contour.ptr<Point>();
    const Point2f* ptsf = contour.ptr<Point2f>();

    CV_Assert( contour.depth() == CV_32S || contour.depth() == CV_32F );

    if( lpt == 0 )
        return m;

    double a00 = 0, a10 = 0, a01 = 0, a20 = 0, a11 = 0, a02 = 0, a30 = 0, a21 = 0, a12 = 0, a03 = 0;
    double xi, yi, xi2, yi2, xi_1, yi_1, xi_12, yi_12, dxy, xii_1, yii_1;

    if( !is_float )
    {
        xi_1 = ptsi[lpt-1].x;
        yi_1 = ptsi[lpt-1].y;
    }
    else
    {
        xi_1 = ptsf[lpt-1].x;
        yi_1 = ptsf[lpt-1].y;
    }

    xi_12 = xi_1 * xi_1;
    yi_12 = yi_1 * yi_1;

    for( int i = 0; i < lpt; i++ )
    {
        if( !is_float )
        {
            xi = ptsi[i].x;
            yi = ptsi[i].y;
        }
        else
        {
            xi = ptsf[i].x;
            yi = ptsf[i].y;
        }

        xi2 = xi * xi;
        yi2 = yi * yi;
        dxy = xi_1 * yi - xi * yi_1;
        xii_1 = xi_1 + xi;
        yii_1 = yi_1 + yi;

        a00 += dxy;
        a10 += dxy * xii_1;
        a01 += dxy * yii_1;
        a20 += dxy * (xi_1 * xii_1 + xi2);
        a11 += dxy * (xi_1 * (yii_1 + yi_1) + xi * (yii_1 + yi));
        a02 += dxy * (yi_1 * yii_1 + yi2);
        a30 += dxy * xii_1 * (xi_12 + xi2);
        a03 += dxy * yii_1 * (yi_12 + yi2);
        a21 += dxy * (xi_12 * (3 * yi_1 + yi) + 2 * xi * xi_1 * yii_1 +
                      xi2 * (yi_1 + 3 * yi));
        a12 += dxy * (yi_12 * (3 * xi_1 + xi) + 2 * yi * yi_1 * xii_1 +
                      yi2 * (xi_1 + 3 * xi));
        xi_1 = xi;
        yi_1 = yi;
        xi_12 = xi2;
        yi_12 = yi2;
    }

    if( fabs(a00) > FLT_EPSILON )
    {
        double db1_2, db1_6, db1_12, db1_24, db1_20, db1_60;

        if( a00 > 0 )
        {
            db1_2 = 0.5;
            db1_6 = 0.16666666666666666666666666666667;
            db1_12 = 0.083333333333333333333333333333333;
            db1_24 = 0.041666666666666666666666666666667;
            db1_20 = 0.05;
            db1_60 = 0.016666666666666666666666666666667;
        }
        else
        {
            db1_2 = -0.5;
            db1_6 = -0.16666666666666666666666666666667;
            db1_12 = -0.083333333333333333333333333333333;
            db1_24 = -0.041666666666666666666666666666667;
            db1_20 = -0.05;
            db1_60 = -0.016666666666666666666666666666667;
        }

        // spatial moments
        m.m00 = a00 * db1_2;
        m.m10 = a10 * db1_6;
        m.m01 = a01 * db1_6;
        m.m20 = a20 * db1_12;
        m.m11 = a11 * db1_24;
        m.m02 = a02 * db1_12;
        m.m30 = a30 * db1_20;
        m.m21 = a21 * db1_60;
        m.m12 = a12 * db1_60;
        m.m03 = a03 * db1_20;

        completeMomentState( &m );
    }
    return m;
}


/****************************************************************************************\
*                                Spatial Raster Moments                                  *
\****************************************************************************************/

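// Vectorized helper used by momentsInTile: for one image row it accumulates
//   x0 = sum(p), x1 = sum(x*p), x2 = sum(x^2*p), x3 = sum(x^3*p)
// over the pixels it processes and returns how many pixels were consumed;
// the caller finishes the row with a scalar loop. The generic template does
// nothing (returns 0); the specializations below use SSE2, NEON or SSE4.1.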
template<typename T, typename WT, typename MT>
struct MomentsInTile_SIMD
{
    int operator() (const T *, int, WT &, WT &, WT &, MT &)
    {
        return 0;
    }
};

#if CV_SSE2

template <>
struct MomentsInTile_SIMD<uchar, int, int>
{
    MomentsInTile_SIMD()
    {
        useSIMD = checkHardwareSupport(CV_CPU_SSE2);
    }

    int operator() (const uchar * ptr, int len, int & x0, int & x1, int & x2, int & x3)
    {
        int x = 0;

        if( useSIMD )
        {
            __m128i qx_init = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);
            __m128i dx = _mm_set1_epi16(8);
            __m128i z = _mm_setzero_si128(), qx0 = z, qx1 = z, qx2 = z, qx3 = z, qx = qx_init;

            for( ; x <= len - 8; x += 8 )
            {
                __m128i p = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(ptr + x)), z);
                __m128i sx = _mm_mullo_epi16(qx, qx);

                qx0 = _mm_add_epi32(qx0, _mm_sad_epu8(p, z));
                qx1 = _mm_add_epi32(qx1, _mm_madd_epi16(p, qx));
                qx2 = _mm_add_epi32(qx2, _mm_madd_epi16(p, sx));
                qx3 = _mm_add_epi32(qx3, _mm_madd_epi16( _mm_mullo_epi16(p, qx), sx));

                qx = _mm_add_epi16(qx, dx);
            }

            _mm_store_si128((__m128i*)buf, qx0);
            x0 = buf[0] + buf[1] + buf[2] + buf[3];
            _mm_store_si128((__m128i*)buf, qx1);
            x1 = buf[0] + buf[1] + buf[2] + buf[3];
            _mm_store_si128((__m128i*)buf, qx2);
            x2 = buf[0] + buf[1] + buf[2] + buf[3];
            _mm_store_si128((__m128i*)buf, qx3);
            x3 = buf[0] + buf[1] + buf[2] + buf[3];
        }

        return x;
    }

    int CV_DECL_ALIGNED(16) buf[4];
    bool useSIMD;
};

#elif CV_NEON

template <>
struct MomentsInTile_SIMD<uchar, int, int>
{
    MomentsInTile_SIMD()
    {
        ushort CV_DECL_ALIGNED(8) init[4] = { 0, 1, 2, 3 };
        qx_init = vld1_u16(init);
        v_step = vdup_n_u16(4);
    }

    int operator() (const uchar * ptr, int len, int & x0, int & x1, int & x2, int & x3)
    {
        int x = 0;

        uint32x4_t v_z = vdupq_n_u32(0), v_x0 = v_z, v_x1 = v_z,
                   v_x2 = v_z, v_x3 = v_z;
        uint16x4_t qx = qx_init;

        for( ; x <= len - 8; x += 8 )
        {
            uint16x8_t v_src = vmovl_u8(vld1_u8(ptr + x));

            // first part
            uint32x4_t v_qx = vmovl_u16(qx);
            uint16x4_t v_p = vget_low_u16(v_src);
            uint32x4_t v_px = vmull_u16(qx, v_p);

            v_x0 = vaddw_u16(v_x0, v_p);
            v_x1 = vaddq_u32(v_x1, v_px);
            v_px = vmulq_u32(v_px, v_qx);
            v_x2 = vaddq_u32(v_x2, v_px);
            v_x3 = vaddq_u32(v_x3, vmulq_u32(v_px, v_qx));
            qx = vadd_u16(qx, v_step);

            // second part
            v_qx = vmovl_u16(qx);
            v_p = vget_high_u16(v_src);
            v_px = vmull_u16(qx, v_p);

            v_x0 = vaddw_u16(v_x0, v_p);
            v_x1 = vaddq_u32(v_x1, v_px);
            v_px = vmulq_u32(v_px, v_qx);
            v_x2 = vaddq_u32(v_x2, v_px);
            v_x3 = vaddq_u32(v_x3, vmulq_u32(v_px, v_qx));

            qx = vadd_u16(qx, v_step);
        }

        vst1q_u32(buf, v_x0);
        x0 = buf[0] + buf[1] + buf[2] + buf[3];
        vst1q_u32(buf, v_x1);
        x1 = buf[0] + buf[1] + buf[2] + buf[3];
        vst1q_u32(buf, v_x2);
        x2 = buf[0] + buf[1] + buf[2] + buf[3];
        vst1q_u32(buf, v_x3);
        x3 = buf[0] + buf[1] + buf[2] + buf[3];

        return x;
    }

    uint CV_DECL_ALIGNED(16) buf[4];
    uint16x4_t qx_init, v_step;
};

#endif

#if CV_SSE4_1

template <>
struct MomentsInTile_SIMD<ushort, int, int64>
{
    MomentsInTile_SIMD()
    {
        useSIMD = checkHardwareSupport(CV_CPU_SSE4_1);
    }

    int operator() (const ushort * ptr, int len, int & x0, int & x1, int & x2, int64 & x3)
    {
        int x = 0;

        if (useSIMD)
        {
            __m128i vx_init0 = _mm_setr_epi32(0, 1, 2, 3), vx_init1 = _mm_setr_epi32(4, 5, 6, 7),
                    v_delta = _mm_set1_epi32(8), v_zero = _mm_setzero_si128(), v_x0 = v_zero,
                    v_x1 = v_zero, v_x2 = v_zero, v_x3 = v_zero, v_ix0 = vx_init0, v_ix1 = vx_init1;

            for( ; x <= len - 8; x += 8 )
            {
                __m128i v_src = _mm_loadu_si128((const __m128i *)(ptr + x));
                __m128i v_src0 = _mm_unpacklo_epi16(v_src, v_zero), v_src1 = _mm_unpackhi_epi16(v_src, v_zero);

                v_x0 = _mm_add_epi32(v_x0, _mm_add_epi32(v_src0, v_src1));
                __m128i v_x1_0 = _mm_mullo_epi32(v_src0, v_ix0), v_x1_1 = _mm_mullo_epi32(v_src1, v_ix1);
                v_x1 = _mm_add_epi32(v_x1, _mm_add_epi32(v_x1_0, v_x1_1));

                __m128i v_2ix0 = _mm_mullo_epi32(v_ix0, v_ix0), v_2ix1 = _mm_mullo_epi32(v_ix1, v_ix1);
                v_x2 = _mm_add_epi32(v_x2, _mm_add_epi32(_mm_mullo_epi32(v_2ix0, v_src0), _mm_mullo_epi32(v_2ix1, v_src1)));

                __m128i t = _mm_add_epi32(_mm_mullo_epi32(v_2ix0, v_x1_0), _mm_mullo_epi32(v_2ix1, v_x1_1));
                v_x3 = _mm_add_epi64(v_x3, _mm_add_epi64(_mm_unpacklo_epi32(t, v_zero), _mm_unpackhi_epi32(t, v_zero)));

                v_ix0 = _mm_add_epi32(v_ix0, v_delta);
                v_ix1 = _mm_add_epi32(v_ix1, v_delta);
            }

            _mm_store_si128((__m128i*)buf, v_x0);
            x0 = buf[0] + buf[1] + buf[2] + buf[3];
            _mm_store_si128((__m128i*)buf, v_x1);
            x1 = buf[0] + buf[1] + buf[2] + buf[3];
            _mm_store_si128((__m128i*)buf, v_x2);
            x2 = buf[0] + buf[1] + buf[2] + buf[3];

            _mm_store_si128((__m128i*)buf64, v_x3);
            x3 = buf64[0] + buf64[1];
        }

        return x;
    }

    int CV_DECL_ALIGNED(16) buf[4];
    int64 CV_DECL_ALIGNED(16) buf64[2];
    bool useSIMD;
};

#endif

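// Accumulates the 10 raw spatial moments (m00, m10, m01, m20, m11, m02, m30, m21, m12, m03)
// of a single tile. Each row contributes the sums x0..x3 of p, x*p, x^2*p and x^3*p
// (see MomentsInTile_SIMD above), which are then combined with powers of the row index y.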
template<typename T, typename WT, typename MT>
#if defined __GNUC__ && __GNUC__ == 4 && __GNUC_MINOR__ >= 5 && __GNUC_MINOR__ < 9
// Workaround for http://gcc.gnu.org/bugzilla/show_bug.cgi?id=60196
__attribute__((optimize("no-tree-vectorize")))
#endif
static void momentsInTile( const Mat& img, double* moments )
{
    Size size = img.size();
    int x, y;
    MT mom[10] = {0,0,0,0,0,0,0,0,0,0};
    MomentsInTile_SIMD<T, WT, MT> vop;

    for( y = 0; y < size.height; y++ )
    {
        const T* ptr = img.ptr<T>(y);
        WT x0 = 0, x1 = 0, x2 = 0;
        MT x3 = 0;
        x = vop(ptr, size.width, x0, x1, x2, x3);

        for( ; x < size.width; x++ )
        {
            WT p = ptr[x];
            WT xp = x * p, xxp;

            x0 += p;
            x1 += xp;
            xxp = xp * x;
            x2 += xxp;
            x3 += xxp * x;
        }

        WT py = y * x0, sy = y*y;

        mom[9] += ((MT)py) * sy;  // m03
        mom[8] += ((MT)x1) * sy;  // m12
        mom[7] += ((MT)x2) * y;   // m21
        mom[6] += x3;             // m30
        mom[5] += x0 * sy;        // m02
        mom[4] += x1 * y;         // m11
        mom[3] += x2;             // m20
        mom[2] += py;             // m01
        mom[1] += x1;             // m10
        mom[0] += x0;             // m00
    }

    for( x = 0; x < 10; x++ )
        moments[x] = (double)mom[x];
}

typedef void (*MomentsInTileFunc)(const Mat& img, double* moments);

Moments::Moments()
{
    m00 = m10 = m01 = m20 = m11 = m02 = m30 = m21 = m12 = m03 =
    mu20 = mu11 = mu02 = mu30 = mu21 = mu12 = mu03 =
    nu20 = nu11 = nu02 = nu30 = nu21 = nu12 = nu03 = 0.;
}

Moments::Moments( double _m00, double _m10, double _m01, double _m20, double _m11,
                  double _m02, double _m30, double _m21, double _m12, double _m03 )
{
    m00 = _m00; m10 = _m10; m01 = _m01;
    m20 = _m20; m11 = _m11; m02 = _m02;
    m30 = _m30; m21 = _m21; m12 = _m12; m03 = _m03;

    double cx = 0, cy = 0, inv_m00 = 0;
    if( std::abs(m00) > DBL_EPSILON )
    {
        inv_m00 = 1./m00;
        cx = m10*inv_m00; cy = m01*inv_m00;
    }

    mu20 = m20 - m10*cx;
    mu11 = m11 - m10*cy;
    mu02 = m02 - m01*cy;

    mu30 = m30 - cx*(3*mu20 + cx*m10);
    mu21 = m21 - cx*(2*mu11 + cx*m01) - cy*mu20;
    mu12 = m12 - cy*(2*mu11 + cy*m10) - cx*mu02;
    mu03 = m03 - cy*(3*mu02 + cy*m01);

    double inv_sqrt_m00 = std::sqrt(std::abs(inv_m00));
    double s2 = inv_m00*inv_m00, s3 = s2*inv_sqrt_m00;

    nu20 = mu20*s2; nu11 = mu11*s2; nu02 = mu02*s2;
    nu30 = mu30*s3; nu21 = mu21*s3; nu12 = mu12*s3; nu03 = mu03*s3;
}

#ifdef HAVE_OPENCL

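// OpenCL path: the "moments" kernel computes the 10 raw moments of every
// TILE_SIZE x TILE_SIZE tile in 32-bit integer precision; the per-tile results
// are then merged on the host by shifting each tile to its global (x, y) offset,
// using the same accumulation formulas as the CPU branch of cv::moments below.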
static bool ocl_moments( InputArray _src, Moments& m, bool binary)
{
    const int TILE_SIZE = 32;
    const int K = 10;

    ocl::Kernel k = ocl::Kernel("moments", ocl::imgproc::moments_oclsrc,
        format("-D TILE_SIZE=%d%s",
               TILE_SIZE,
               binary ? " -D OP_MOMENTS_BINARY" : ""));

    if( k.empty() )
        return false;

    UMat src = _src.getUMat();
    Size sz = src.size();
    int xtiles = (sz.width + TILE_SIZE-1)/TILE_SIZE;
    int ytiles = (sz.height + TILE_SIZE-1)/TILE_SIZE;
    int ntiles = xtiles*ytiles;
    UMat umbuf(1, ntiles*K, CV_32S);

    size_t globalsize[] = {(size_t)xtiles, (size_t)sz.height}, localsize[] = {1, TILE_SIZE};
    bool ok = k.args(ocl::KernelArg::ReadOnly(src),
                     ocl::KernelArg::PtrWriteOnly(umbuf),
                     xtiles).run(2, globalsize, localsize, true);
    if(!ok)
        return false;
    Mat mbuf = umbuf.getMat(ACCESS_READ);
    for( int i = 0; i < ntiles; i++ )
    {
        double x = (i % xtiles)*TILE_SIZE, y = (i / xtiles)*TILE_SIZE;
        const int* mom = mbuf.ptr<int>() + i*K;
        double xm = x * mom[0], ym = y * mom[0];

        // accumulate moments computed in each tile

        // + m00 ( = m00' )
        m.m00 += mom[0];

        // + m10 ( = m10' + x*m00' )
        m.m10 += mom[1] + xm;

        // + m01 ( = m01' + y*m00' )
        m.m01 += mom[2] + ym;

        // + m20 ( = m20' + 2*x*m10' + x*x*m00' )
        m.m20 += mom[3] + x * (mom[1] * 2 + xm);

        // + m11 ( = m11' + x*m01' + y*m10' + x*y*m00' )
        m.m11 += mom[4] + x * (mom[2] + ym) + y * mom[1];

        // + m02 ( = m02' + 2*y*m01' + y*y*m00' )
        m.m02 += mom[5] + y * (mom[2] * 2 + ym);

        // + m30 ( = m30' + 3*x*m20' + 3*x*x*m10' + x*x*x*m00' )
        m.m30 += mom[6] + x * (3. * mom[3] + x * (3. * mom[1] + xm));

        // + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20')
        m.m21 += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3];

        // + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02')
        m.m12 += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5];

        // + m03 ( = m03' + 3*y*m02' + 3*y*y*m01' + y*y*y*m00' )
        m.m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));
    }

    return true;
}

#endif

}

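/*  A minimal usage sketch (illustration only, not compiled as part of this file);
    the mask and its contents are hypothetical:

        #include <opencv2/imgproc.hpp>

        cv::Mat mask = cv::Mat::zeros(100, 100, CV_8UC1);
        cv::circle(mask, cv::Point(50, 50), 20, cv::Scalar(255), -1);

        cv::Moments m = cv::moments(mask, true);       // binary == true: treat non-zeros as 1
        cv::Point2d c(m.m10 / m.m00, m.m01 / m.m00);   // centroid from the raw moments

        double hu[7];
        cv::HuMoments(m, hu);                          // the 7 Hu invariants
*/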
cv::Moments cv::moments( InputArray _src, bool binary )
{
    const int TILE_SIZE = 32;
    MomentsInTileFunc func = 0;
    uchar nzbuf[TILE_SIZE*TILE_SIZE];
    Moments m;
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
    Size size = _src.size();

    if( size.width <= 0 || size.height <= 0 )
        return m;

#ifdef HAVE_OPENCL
    if( !(ocl::useOpenCL() && type == CV_8UC1 &&
          _src.isUMat() && ocl_moments(_src, m, binary)) )
#endif
    {
        Mat mat = _src.getMat();
        if( mat.checkVector(2) >= 0 && (depth == CV_32F || depth == CV_32S))
            return contourMoments(mat);

        if( cn > 1 )
            CV_Error( CV_StsBadArg, "Invalid image type (must be single-channel)" );

#if IPP_VERSION_X100 >= 801 && 0
        CV_IPP_CHECK()
        {
            if (!binary)
            {
                IppiSize roi = { mat.cols, mat.rows };
                IppiMomentState_64f * moment = NULL;
                // ippiMomentInitAlloc_64f and ippiMomentFree_64f are deprecated in IPP 8.1, but there is
                // currently no other way to initialize an IppiMomentState_64f. Once GetStateSize/Init
                // counterparts become available, this code should be updated to use them.
                CV_SUPPRESS_DEPRECATED_START
                if (ippiMomentInitAlloc_64f(&moment, ippAlgHintAccurate) >= 0)
                {
                    typedef IppStatus (CV_STDCALL * ippiMoments)(const void * pSrc, int srcStep, IppiSize roiSize, IppiMomentState_64f* pCtx);
                    ippiMoments ippFunc =
                        type == CV_8UC1 ? (ippiMoments)ippiMoments64f_8u_C1R :
                        type == CV_16UC1 ? (ippiMoments)ippiMoments64f_16u_C1R :
                        type == CV_32FC1? (ippiMoments)ippiMoments64f_32f_C1R : 0;

                    if (ippFunc)
                    {
                        if (ippFunc(mat.data, (int)mat.step, roi, moment) >= 0)
                        {
                            IppiPoint point = { 0, 0 };
                            ippiGetSpatialMoment_64f(moment, 0, 0, 0, point, &m.m00);
                            ippiGetSpatialMoment_64f(moment, 1, 0, 0, point, &m.m10);
                            ippiGetSpatialMoment_64f(moment, 0, 1, 0, point, &m.m01);

                            ippiGetSpatialMoment_64f(moment, 2, 0, 0, point, &m.m20);
                            ippiGetSpatialMoment_64f(moment, 1, 1, 0, point, &m.m11);
                            ippiGetSpatialMoment_64f(moment, 0, 2, 0, point, &m.m02);

                            ippiGetSpatialMoment_64f(moment, 3, 0, 0, point, &m.m30);
                            ippiGetSpatialMoment_64f(moment, 2, 1, 0, point, &m.m21);
                            ippiGetSpatialMoment_64f(moment, 1, 2, 0, point, &m.m12);
                            ippiGetSpatialMoment_64f(moment, 0, 3, 0, point, &m.m03);
                            ippiGetCentralMoment_64f(moment, 2, 0, 0, &m.mu20);
                            ippiGetCentralMoment_64f(moment, 1, 1, 0, &m.mu11);
                            ippiGetCentralMoment_64f(moment, 0, 2, 0, &m.mu02);
                            ippiGetCentralMoment_64f(moment, 3, 0, 0, &m.mu30);
                            ippiGetCentralMoment_64f(moment, 2, 1, 0, &m.mu21);
                            ippiGetCentralMoment_64f(moment, 1, 2, 0, &m.mu12);
                            ippiGetCentralMoment_64f(moment, 0, 3, 0, &m.mu03);
                            ippiGetNormalizedCentralMoment_64f(moment, 2, 0, 0, &m.nu20);
                            ippiGetNormalizedCentralMoment_64f(moment, 1, 1, 0, &m.nu11);
                            ippiGetNormalizedCentralMoment_64f(moment, 0, 2, 0, &m.nu02);
                            ippiGetNormalizedCentralMoment_64f(moment, 3, 0, 0, &m.nu30);
                            ippiGetNormalizedCentralMoment_64f(moment, 2, 1, 0, &m.nu21);
                            ippiGetNormalizedCentralMoment_64f(moment, 1, 2, 0, &m.nu12);
                            ippiGetNormalizedCentralMoment_64f(moment, 0, 3, 0, &m.nu03);

                            ippiMomentFree_64f(moment);
                            CV_IMPL_ADD(CV_IMPL_IPP);
                            return m;
                        }
                        setIppErrorStatus();
                    }
                    ippiMomentFree_64f(moment);
                }
                else
                    setIppErrorStatus();
                CV_SUPPRESS_DEPRECATED_END
            }
        }
#endif

        if( binary || depth == CV_8U )
            func = momentsInTile<uchar, int, int>;
        else if( depth == CV_16U )
            func = momentsInTile<ushort, int, int64>;
        else if( depth == CV_16S )
            func = momentsInTile<short, int, int64>;
        else if( depth == CV_32F )
            func = momentsInTile<float, double, double>;
        else if( depth == CV_64F )
            func = momentsInTile<double, double, double>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        Mat src0(mat);

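        // Process the image in TILE_SIZE x TILE_SIZE tiles: within a tile the x and y
        // offsets stay small, so the per-tile sums fit in the integer accumulators of
        // momentsInTile; each tile's raw moments are then shifted to the global origin
        // and accumulated into m below.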
        for( int y = 0; y < size.height; y += TILE_SIZE )
        {
            Size tileSize;
            tileSize.height = std::min(TILE_SIZE, size.height - y);

            for( int x = 0; x < size.width; x += TILE_SIZE )
            {
                tileSize.width = std::min(TILE_SIZE, size.width - x);
                Mat src(src0, cv::Rect(x, y, tileSize.width, tileSize.height));

                if( binary )
                {
                    cv::Mat tmp(tileSize, CV_8U, nzbuf);
                    cv::compare( src, 0, tmp, CV_CMP_NE );
                    src = tmp;
                }

                double mom[10];
                func( src, mom );

                if( binary )
                {
                    double s = 1./255;
                    for( int k = 0; k < 10; k++ )
                        mom[k] *= s;
                }

                double xm = x * mom[0], ym = y * mom[0];

                // accumulate moments computed in each tile

                // + m00 ( = m00' )
                m.m00 += mom[0];

                // + m10 ( = m10' + x*m00' )
                m.m10 += mom[1] + xm;

                // + m01 ( = m01' + y*m00' )
                m.m01 += mom[2] + ym;

                // + m20 ( = m20' + 2*x*m10' + x*x*m00' )
                m.m20 += mom[3] + x * (mom[1] * 2 + xm);

                // + m11 ( = m11' + x*m01' + y*m10' + x*y*m00' )
                m.m11 += mom[4] + x * (mom[2] + ym) + y * mom[1];

                // + m02 ( = m02' + 2*y*m01' + y*y*m00' )
                m.m02 += mom[5] + y * (mom[2] * 2 + ym);

                // + m30 ( = m30' + 3*x*m20' + 3*x*x*m10' + x*x*x*m00' )
                m.m30 += mom[6] + x * (3. * mom[3] + x * (3. * mom[1] + xm));

                // + m21 ( = m21' + x*(2*m11' + 2*y*m10' + x*m01' + x*y*m00') + y*m20')
                m.m21 += mom[7] + x * (2 * (mom[4] + y * mom[1]) + x * (mom[2] + ym)) + y * mom[3];

                // + m12 ( = m12' + y*(2*m11' + 2*x*m01' + y*m10' + x*y*m00') + x*m02')
                m.m12 += mom[8] + y * (2 * (mom[4] + x * mom[2]) + y * (mom[1] + xm)) + x * mom[5];

                // + m03 ( = m03' + 3*y*m02' + 3*y*y*m01' + y*y*y*m00' )
                m.m03 += mom[9] + y * (3. * mom[5] + y * (3. * mom[2] + ym));
            }
        }
    }

    completeMomentState( &m );
    return m;
}


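// Computes Hu's seven invariant moments I1..I7 (stored as hu[0]..hu[6]) from the
// normalized central moments of m; they are invariant to translation, scaling and
// rotation of the underlying shape.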
void cv::HuMoments( const Moments& m, double hu[7] )
{
    double t0 = m.nu30 + m.nu12;
    double t1 = m.nu21 + m.nu03;

    double q0 = t0 * t0, q1 = t1 * t1;

    double n4 = 4 * m.nu11;
    double s = m.nu20 + m.nu02;
    double d = m.nu20 - m.nu02;

    hu[0] = s;
    hu[1] = d * d + n4 * m.nu11;
    hu[3] = q0 + q1;
    hu[5] = d * (q0 - q1) + n4 * t0 * t1;

    t0 *= q0 - 3 * q1;
    t1 *= 3 * q0 - q1;

    q0 = m.nu30 - 3 * m.nu12;
    q1 = 3 * m.nu21 - m.nu03;

    hu[2] = q0 * q0 + q1 * q1;
    hu[4] = q0 * t0 + q1 * t1;
    hu[6] = q1 * t0 - q0 * t1;
}

void cv::HuMoments( const Moments& m, OutputArray _hu )
{
    _hu.create(7, 1, CV_64F);
    Mat hu = _hu.getMat();
    CV_Assert( hu.isContinuous() );
    HuMoments(m, hu.ptr<double>());
}


CV_IMPL void cvMoments( const CvArr* arr, CvMoments* moments, int binary )
{
    const IplImage* img = (const IplImage*)arr;
    cv::Mat src;
    if( CV_IS_IMAGE(arr) && img->roi && img->roi->coi > 0 )
        cv::extractImageCOI(arr, src, img->roi->coi-1);
    else
        src = cv::cvarrToMat(arr);
    cv::Moments m = cv::moments(src, binary != 0);
    CV_Assert( moments != 0 );
    *moments = m;
}


CV_IMPL double cvGetSpatialMoment( CvMoments * moments, int x_order, int y_order )
{
    int order = x_order + y_order;

    if( !moments )
        CV_Error( CV_StsNullPtr, "" );
    if( (x_order | y_order) < 0 || order > 3 )
        CV_Error( CV_StsOutOfRange, "" );

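    // CvMoments stores the raw moments contiguously as
    //   m00, m10, m01, m20, m11, m02, m30, m21, m12, m03;
    // the expression below maps (x_order, y_order) to the offset of m{x_order}{y_order}
    // within that sequence.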
789 return (&(moments->m00))[order + (order >> 1) + (order > 2) * 2 + y_order];
790 }
791
792
cvGetCentralMoment(CvMoments * moments,int x_order,int y_order)793 CV_IMPL double cvGetCentralMoment( CvMoments * moments, int x_order, int y_order )
794 {
795 int order = x_order + y_order;
796
797 if( !moments )
798 CV_Error( CV_StsNullPtr, "" );
799 if( (x_order | y_order) < 0 || order > 3 )
800 CV_Error( CV_StsOutOfRange, "" );
801
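    // The central moments follow the ten raw ones in CvMoments (mu20, mu11, mu02, mu30, mu21,
    // mu12, mu03 start at offset 10), hence the index 4 + order*3 + y_order below. The 0th-order
    // central moment equals m00 and the 1st-order ones are zero by definition.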
    return order >= 2 ? (&(moments->m00))[4 + order * 3 + y_order] :
        order == 0 ? moments->m00 : 0;
}


CV_IMPL double cvGetNormalizedCentralMoment( CvMoments * moments, int x_order, int y_order )
{
    int order = x_order + y_order;

    double mu = cvGetCentralMoment( moments, x_order, y_order );
    double m00s = moments->inv_sqrt_m00;

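    // nu_pq = mu_pq / m00^((p+q)/2 + 1). Since m00s == 1/sqrt(m00), the loop applies one
    // factor of m00s per order and the final expression supplies the remaining factor of 1/m00.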
    while( --order >= 0 )
        mu *= m00s;
    return mu * m00s * m00s;
}


CV_IMPL void cvGetHuMoments( CvMoments * mState, CvHuMoments * HuState )
{
    if( !mState || !HuState )
        CV_Error( CV_StsNullPtr, "" );

    double m00s = mState->inv_sqrt_m00, m00 = m00s * m00s, s2 = m00 * m00, s3 = s2 * m00s;

    double nu20 = mState->mu20 * s2,
           nu11 = mState->mu11 * s2,
           nu02 = mState->mu02 * s2,
           nu30 = mState->mu30 * s3,
           nu21 = mState->mu21 * s3, nu12 = mState->mu12 * s3, nu03 = mState->mu03 * s3;

    double t0 = nu30 + nu12;
    double t1 = nu21 + nu03;

    double q0 = t0 * t0, q1 = t1 * t1;

    double n4 = 4 * nu11;
    double s = nu20 + nu02;
    double d = nu20 - nu02;

    HuState->hu1 = s;
    HuState->hu2 = d * d + n4 * nu11;
    HuState->hu4 = q0 + q1;
    HuState->hu6 = d * (q0 - q1) + n4 * t0 * t1;

    t0 *= q0 - 3 * q1;
    t1 *= 3 * q0 - q1;

    q0 = nu30 - 3 * nu12;
    q1 = 3 * nu21 - nu03;

    HuState->hu3 = q0 * q0 + q1 * q1;
    HuState->hu5 = q0 * t0 + q1 * t1;
    HuState->hu7 = q1 * t0 - q0 * t1;
}


/* End of file. */