1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
22 //
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
26 //
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42
43 #include "precomp.hpp"
44 #include "opencl_kernels_core.hpp"
45
46 #include "bufferpool.impl.hpp"
47
48 /****************************************************************************************\
49 * [scaled] Identity matrix initialization *
50 \****************************************************************************************/
51
52 namespace cv {
53
54 void MatAllocator::map(UMatData*, int) const
55 {
56 }
57
58 void MatAllocator::unmap(UMatData* u) const
59 {
60 if(u->urefcount == 0 && u->refcount == 0)
61 {
62 deallocate(u);
63 u = NULL;
64 }
65 }
66
67 void MatAllocator::download(UMatData* u, void* dstptr,
68 int dims, const size_t sz[],
69 const size_t srcofs[], const size_t srcstep[],
70 const size_t dststep[]) const
71 {
72 if(!u)
73 return;
74 int isz[CV_MAX_DIM];
75 uchar* srcptr = u->data;
76 for( int i = 0; i < dims; i++ )
77 {
78 CV_Assert( sz[i] <= (size_t)INT_MAX );
79 if( sz[i] == 0 )
80 return;
81 if( srcofs )
82 srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
83 isz[i] = (int)sz[i];
84 }
85
86 Mat src(dims, isz, CV_8U, srcptr, srcstep);
87 Mat dst(dims, isz, CV_8U, dstptr, dststep);
88
89 const Mat* arrays[] = { &src, &dst };
90 uchar* ptrs[2];
91 NAryMatIterator it(arrays, ptrs, 2);
92 size_t j, planesz = it.size;
93
94 for( j = 0; j < it.nplanes; j++, ++it )
95 memcpy(ptrs[1], ptrs[0], planesz);
96 }
97
98
99 void MatAllocator::upload(UMatData* u, const void* srcptr, int dims, const size_t sz[],
100 const size_t dstofs[], const size_t dststep[],
101 const size_t srcstep[]) const
102 {
103 if(!u)
104 return;
105 int isz[CV_MAX_DIM];
106 uchar* dstptr = u->data;
107 for( int i = 0; i < dims; i++ )
108 {
109 CV_Assert( sz[i] <= (size_t)INT_MAX );
110 if( sz[i] == 0 )
111 return;
112 if( dstofs )
113 dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
114 isz[i] = (int)sz[i];
115 }
116
117 Mat src(dims, isz, CV_8U, (void*)srcptr, srcstep);
118 Mat dst(dims, isz, CV_8U, dstptr, dststep);
119
120 const Mat* arrays[] = { &src, &dst };
121 uchar* ptrs[2];
122 NAryMatIterator it(arrays, ptrs, 2);
123 size_t j, planesz = it.size;
124
125 for( j = 0; j < it.nplanes; j++, ++it )
126 memcpy(ptrs[1], ptrs[0], planesz);
127 }
128
129 void MatAllocator::copy(UMatData* usrc, UMatData* udst, int dims, const size_t sz[],
130 const size_t srcofs[], const size_t srcstep[],
131 const size_t dstofs[], const size_t dststep[], bool /*sync*/) const
132 {
133 if(!usrc || !udst)
134 return;
135 int isz[CV_MAX_DIM];
136 uchar* srcptr = usrc->data;
137 uchar* dstptr = udst->data;
138 for( int i = 0; i < dims; i++ )
139 {
140 CV_Assert( sz[i] <= (size_t)INT_MAX );
141 if( sz[i] == 0 )
142 return;
143 if( srcofs )
144 srcptr += srcofs[i]*(i <= dims-2 ? srcstep[i] : 1);
145 if( dstofs )
146 dstptr += dstofs[i]*(i <= dims-2 ? dststep[i] : 1);
147 isz[i] = (int)sz[i];
148 }
149
150 Mat src(dims, isz, CV_8U, srcptr, srcstep);
151 Mat dst(dims, isz, CV_8U, dstptr, dststep);
152
153 const Mat* arrays[] = { &src, &dst };
154 uchar* ptrs[2];
155 NAryMatIterator it(arrays, ptrs, 2);
156 size_t j, planesz = it.size;
157
158 for( j = 0; j < it.nplanes; j++, ++it )
159 memcpy(ptrs[1], ptrs[0], planesz);
160 }
161
162 BufferPoolController* MatAllocator::getBufferPoolController(const char* id) const
163 {
164 (void)id;
165 static DummyBufferPoolController dummy;
166 return &dummy;
167 }
168
169 class StdMatAllocator : public MatAllocator
170 {
171 public:
172 UMatData* allocate(int dims, const int* sizes, int type,
173 void* data0, size_t* step, int /*flags*/, UMatUsageFlags /*usageFlags*/) const
174 {
175 size_t total = CV_ELEM_SIZE(type);
176 for( int i = dims-1; i >= 0; i-- )
177 {
178 if( step )
179 {
180 if( data0 && step[i] != CV_AUTOSTEP )
181 {
182 CV_Assert(total <= step[i]);
183 total = step[i];
184 }
185 else
186 step[i] = total;
187 }
188 total *= sizes[i];
189 }
190 uchar* data = data0 ? (uchar*)data0 : (uchar*)fastMalloc(total);
191 UMatData* u = new UMatData(this);
192 u->data = u->origdata = data;
193 u->size = total;
194 if(data0)
195 u->flags |= UMatData::USER_ALLOCATED;
196
197 return u;
198 }
199
200 bool allocate(UMatData* u, int /*accessFlags*/, UMatUsageFlags /*usageFlags*/) const
201 {
202 if(!u) return false;
203 return true;
204 }
205
206 void deallocate(UMatData* u) const
207 {
208 if(!u)
209 return;
210
211 CV_Assert(u->urefcount >= 0);
212 CV_Assert(u->refcount >= 0);
213 if(u->refcount == 0)
214 {
215 if( !(u->flags & UMatData::USER_ALLOCATED) )
216 {
217 fastFree(u->origdata);
218 u->origdata = 0;
219 }
220 delete u;
221 }
222 }
223 };
224
225 MatAllocator* Mat::getStdAllocator()
226 {
227 static StdMatAllocator allocator;
228 return &allocator;
229 }
230
231 void swap( Mat& a, Mat& b )
232 {
233 std::swap(a.flags, b.flags);
234 std::swap(a.dims, b.dims);
235 std::swap(a.rows, b.rows);
236 std::swap(a.cols, b.cols);
237 std::swap(a.data, b.data);
238 std::swap(a.datastart, b.datastart);
239 std::swap(a.dataend, b.dataend);
240 std::swap(a.datalimit, b.datalimit);
241 std::swap(a.allocator, b.allocator);
242 std::swap(a.u, b.u);
243
244 std::swap(a.size.p, b.size.p);
245 std::swap(a.step.p, b.step.p);
246 std::swap(a.step.buf[0], b.step.buf[0]);
247 std::swap(a.step.buf[1], b.step.buf[1]);
248
249 if( a.step.p == b.step.buf )
250 {
251 a.step.p = a.step.buf;
252 a.size.p = &a.rows;
253 }
254
255 if( b.step.p == a.step.buf )
256 {
257 b.step.p = b.step.buf;
258 b.size.p = &b.rows;
259 }
260 }
261
262
263 static inline void setSize( Mat& m, int _dims, const int* _sz,
264 const size_t* _steps, bool autoSteps=false )
265 {
266 CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM );
267 if( m.dims != _dims )
268 {
269 if( m.step.p != m.step.buf )
270 {
271 fastFree(m.step.p);
272 m.step.p = m.step.buf;
273 m.size.p = &m.rows;
274 }
275 if( _dims > 2 )
276 {
277 m.step.p = (size_t*)fastMalloc(_dims*sizeof(m.step.p[0]) + (_dims+1)*sizeof(m.size.p[0]));
278 m.size.p = (int*)(m.step.p + _dims) + 1;
279 m.size.p[-1] = _dims;
280 m.rows = m.cols = -1;
281 }
282 }
283
284 m.dims = _dims;
285 if( !_sz )
286 return;
287
288 size_t esz = CV_ELEM_SIZE(m.flags), esz1 = CV_ELEM_SIZE1(m.flags), total = esz;
289 int i;
290 for( i = _dims-1; i >= 0; i-- )
291 {
292 int s = _sz[i];
293 CV_Assert( s >= 0 );
294 m.size.p[i] = s;
295
296 if( _steps )
297 {
298 if (_steps[i] % esz1 != 0)
299 {
300 CV_Error(Error::BadStep, "Step must be a multiple of esz1");
301 }
302
303 m.step.p[i] = i < _dims-1 ? _steps[i] : esz;
304 }
305 else if( autoSteps )
306 {
307 m.step.p[i] = total;
308 int64 total1 = (int64)total*s;
309 if( (uint64)total1 != (size_t)total1 )
310 CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
311 total = (size_t)total1;
312 }
313 }
314
315 if( _dims == 1 )
316 {
317 m.dims = 2;
318 m.cols = 1;
319 m.step[1] = esz;
320 }
321 }
322
323 static void updateContinuityFlag(Mat& m)
324 {
325 int i, j;
326 for( i = 0; i < m.dims; i++ )
327 {
328 if( m.size[i] > 1 )
329 break;
330 }
331
332 for( j = m.dims-1; j > i; j-- )
333 {
334 if( m.step[j]*m.size[j] < m.step[j-1] )
335 break;
336 }
337
338 uint64 t = (uint64)m.step[0]*m.size[0];
339 if( j <= i && t == (size_t)t )
340 m.flags |= Mat::CONTINUOUS_FLAG;
341 else
342 m.flags &= ~Mat::CONTINUOUS_FLAG;
343 }
344
345 static void finalizeHdr(Mat& m)
346 {
347 updateContinuityFlag(m);
348 int d = m.dims;
349 if( d > 2 )
350 m.rows = m.cols = -1;
351 if(m.u)
352 m.datastart = m.data = m.u->data;
353 if( m.data )
354 {
355 m.datalimit = m.datastart + m.size[0]*m.step[0];
356 if( m.size[0] > 0 )
357 {
358 m.dataend = m.ptr() + m.size[d-1]*m.step[d-1];
359 for( int i = 0; i < d-1; i++ )
360 m.dataend += (m.size[i] - 1)*m.step[i];
361 }
362 else
363 m.dataend = m.datalimit;
364 }
365 else
366 m.dataend = m.datalimit = 0;
367 }
368
369
370 void Mat::create(int d, const int* _sizes, int _type)
371 {
372 int i;
373 CV_Assert(0 <= d && d <= CV_MAX_DIM && _sizes);
374 _type = CV_MAT_TYPE(_type);
375
376 if( data && (d == dims || (d == 1 && dims <= 2)) && _type == type() )
377 {
378 if( d == 2 && rows == _sizes[0] && cols == _sizes[1] )
379 return;
380 for( i = 0; i < d; i++ )
381 if( size[i] != _sizes[i] )
382 break;
383 if( i == d && (d > 1 || size[1] == 1))
384 return;
385 }
386
387 release();
388 if( d == 0 )
389 return;
390 flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL;
391 setSize(*this, d, _sizes, 0, true);
392
393 if( total() > 0 )
394 {
395 MatAllocator *a = allocator, *a0 = getStdAllocator();
396 #ifdef HAVE_TGPU
397 if( !a || a == tegra::getAllocator() )
398 a = tegra::getAllocator(d, _sizes, _type);
399 #endif
400 if(!a)
401 a = a0;
402 try
403 {
404 u = a->allocate(dims, size, _type, 0, step.p, 0, USAGE_DEFAULT);
405 CV_Assert(u != 0);
406 }
407 catch(...)
408 {
409 if(a != a0)
410 u = a0->allocate(dims, size, _type, 0, step.p, 0, USAGE_DEFAULT);
411 CV_Assert(u != 0);
412 }
413 CV_Assert( step[dims-1] == (size_t)CV_ELEM_SIZE(flags) );
414 }
415
416 addref();
417 finalizeHdr(*this);
418 }
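/* Illustrative usage sketch (not part of the original source): allocating a
   3-dimensional array via Mat::create; the sizes and type below are arbitrary
   example values.

       int sz[] = {4, 5, 6};
       cv::Mat a;
       a.create(3, sz, CV_32FC1);   // allocates 4x5x6 float storage
       a = cv::Scalar(0);           // fill all elements with zero
*/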
419
420 void Mat::copySize(const Mat& m)
421 {
422 setSize(*this, m.dims, 0, 0);
423 for( int i = 0; i < dims; i++ )
424 {
425 size[i] = m.size[i];
426 step[i] = m.step[i];
427 }
428 }
429
430 void Mat::deallocate()
431 {
432 if(u)
433 (u->currAllocator ? u->currAllocator : allocator ? allocator : getStdAllocator())->unmap(u);
434 u = NULL;
435 }
436
437 Mat::Mat(const Mat& m, const Range& _rowRange, const Range& _colRange)
438 : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
439 datalimit(0), allocator(0), u(0), size(&rows)
440 {
441 CV_Assert( m.dims >= 2 );
442 if( m.dims > 2 )
443 {
444 AutoBuffer<Range> rs(m.dims);
445 rs[0] = _rowRange;
446 rs[1] = _colRange;
447 for( int i = 2; i < m.dims; i++ )
448 rs[i] = Range::all();
449 *this = m(rs);
450 return;
451 }
452
453 *this = m;
454 if( _rowRange != Range::all() && _rowRange != Range(0,rows) )
455 {
456 CV_Assert( 0 <= _rowRange.start && _rowRange.start <= _rowRange.end && _rowRange.end <= m.rows );
457 rows = _rowRange.size();
458 data += step*_rowRange.start;
459 flags |= SUBMATRIX_FLAG;
460 }
461
462 if( _colRange != Range::all() && _colRange != Range(0,cols) )
463 {
464 CV_Assert( 0 <= _colRange.start && _colRange.start <= _colRange.end && _colRange.end <= m.cols );
465 cols = _colRange.size();
466 data += _colRange.start*elemSize();
467 flags &= cols < m.cols ? ~CONTINUOUS_FLAG : -1;
468 flags |= SUBMATRIX_FLAG;
469 }
470
471 if( rows == 1 )
472 flags |= CONTINUOUS_FLAG;
473
474 if( rows <= 0 || cols <= 0 )
475 {
476 release();
477 rows = cols = 0;
478 }
479 }
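/* Illustrative usage sketch (not part of the original source): taking a
   row/column range view with this constructor; the sizes below are example
   values.

       cv::Mat img(100, 100, CV_8UC3, cv::Scalar::all(0));
       cv::Mat roi(img, cv::Range(10, 20), cv::Range(30, 60)); // 10x30 view, shares data
       roi.setTo(cv::Scalar(255, 0, 0));  // writes into the corresponding region of img
*/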
480
481
482 Mat::Mat(const Mat& m, const Rect& roi)
483 : flags(m.flags), dims(2), rows(roi.height), cols(roi.width),
484 data(m.data + roi.y*m.step[0]),
485 datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit),
486 allocator(m.allocator), u(m.u), size(&rows)
487 {
488 CV_Assert( m.dims <= 2 );
489 flags &= roi.width < m.cols ? ~CONTINUOUS_FLAG : -1;
490 flags |= roi.height == 1 ? CONTINUOUS_FLAG : 0;
491
492 size_t esz = CV_ELEM_SIZE(flags);
493 data += roi.x*esz;
494 CV_Assert( 0 <= roi.x && 0 <= roi.width && roi.x + roi.width <= m.cols &&
495 0 <= roi.y && 0 <= roi.height && roi.y + roi.height <= m.rows );
496 if( u )
497 CV_XADD(&u->refcount, 1);
498 if( roi.width < m.cols || roi.height < m.rows )
499 flags |= SUBMATRIX_FLAG;
500
501 step[0] = m.step[0]; step[1] = esz;
502
503 if( rows <= 0 || cols <= 0 )
504 {
505 release();
506 rows = cols = 0;
507 }
508 }
509
510
511 Mat::Mat(int _dims, const int* _sizes, int _type, void* _data, const size_t* _steps)
512 : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
513 datalimit(0), allocator(0), u(0), size(&rows)
514 {
515 flags |= CV_MAT_TYPE(_type);
516 datastart = data = (uchar*)_data;
517 setSize(*this, _dims, _sizes, _steps, true);
518 finalizeHdr(*this);
519 }
520
521
522 Mat::Mat(const Mat& m, const Range* ranges)
523 : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0),
524 datalimit(0), allocator(0), u(0), size(&rows)
525 {
526 int i, d = m.dims;
527
528 CV_Assert(ranges);
529 for( i = 0; i < d; i++ )
530 {
531 Range r = ranges[i];
532 CV_Assert( r == Range::all() || (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );
533 }
534 *this = m;
535 for( i = 0; i < d; i++ )
536 {
537 Range r = ranges[i];
538 if( r != Range::all() && r != Range(0, size.p[i]))
539 {
540 size.p[i] = r.end - r.start;
541 data += r.start*step.p[i];
542 flags |= SUBMATRIX_FLAG;
543 }
544 }
545 updateContinuityFlag(*this);
546 }
547
548
549 static Mat cvMatNDToMat(const CvMatND* m, bool copyData)
550 {
551 Mat thiz;
552
553 if( !m )
554 return thiz;
555 thiz.datastart = thiz.data = m->data.ptr;
556 thiz.flags |= CV_MAT_TYPE(m->type);
557 int _sizes[CV_MAX_DIM];
558 size_t _steps[CV_MAX_DIM];
559
560 int i, d = m->dims;
561 for( i = 0; i < d; i++ )
562 {
563 _sizes[i] = m->dim[i].size;
564 _steps[i] = m->dim[i].step;
565 }
566
567 setSize(thiz, d, _sizes, _steps);
568 finalizeHdr(thiz);
569
570 if( copyData )
571 {
572 Mat temp(thiz);
573 thiz.release();
574 temp.copyTo(thiz);
575 }
576
577 return thiz;
578 }
579
580 static Mat cvMatToMat(const CvMat* m, bool copyData)
581 {
582 Mat thiz;
583
584 if( !m )
585 return thiz;
586
587 if( !copyData )
588 {
589 thiz.flags = Mat::MAGIC_VAL + (m->type & (CV_MAT_TYPE_MASK|CV_MAT_CONT_FLAG));
590 thiz.dims = 2;
591 thiz.rows = m->rows;
592 thiz.cols = m->cols;
593 thiz.datastart = thiz.data = m->data.ptr;
594 size_t esz = CV_ELEM_SIZE(m->type), minstep = thiz.cols*esz, _step = m->step;
595 if( _step == 0 )
596 _step = minstep;
597 thiz.datalimit = thiz.datastart + _step*thiz.rows;
598 thiz.dataend = thiz.datalimit - _step + minstep;
599 thiz.step[0] = _step; thiz.step[1] = esz;
600 }
601 else
602 {
603 thiz.datastart = thiz.dataend = thiz.data = 0;
604 Mat(m->rows, m->cols, m->type, m->data.ptr, m->step).copyTo(thiz);
605 }
606
607 return thiz;
608 }
609
610
611 static Mat iplImageToMat(const IplImage* img, bool copyData)
612 {
613 Mat m;
614
615 if( !img )
616 return m;
617
618 m.dims = 2;
619 CV_DbgAssert(CV_IS_IMAGE(img) && img->imageData != 0);
620
621 int imgdepth = IPL2CV_DEPTH(img->depth);
622 size_t esz;
623 m.step[0] = img->widthStep;
624
625 if(!img->roi)
626 {
627 CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL);
628 m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, img->nChannels);
629 m.rows = img->height;
630 m.cols = img->width;
631 m.datastart = m.data = (uchar*)img->imageData;
632 esz = CV_ELEM_SIZE(m.flags);
633 }
634 else
635 {
636 CV_Assert(img->dataOrder == IPL_DATA_ORDER_PIXEL || img->roi->coi != 0);
637 bool selectedPlane = img->roi->coi && img->dataOrder == IPL_DATA_ORDER_PLANE;
638 m.flags = Mat::MAGIC_VAL + CV_MAKETYPE(imgdepth, selectedPlane ? 1 : img->nChannels);
639 m.rows = img->roi->height;
640 m.cols = img->roi->width;
641 esz = CV_ELEM_SIZE(m.flags);
642 m.datastart = m.data = (uchar*)img->imageData +
643 (selectedPlane ? (img->roi->coi - 1)*m.step*img->height : 0) +
644 img->roi->yOffset*m.step[0] + img->roi->xOffset*esz;
645 }
646 m.datalimit = m.datastart + m.step.p[0]*m.rows;
647 m.dataend = m.datastart + m.step.p[0]*(m.rows-1) + esz*m.cols;
648 m.flags |= (m.cols*esz == m.step.p[0] || m.rows == 1 ? Mat::CONTINUOUS_FLAG : 0);
649 m.step[1] = esz;
650
651 if( copyData )
652 {
653 Mat m2 = m;
654 m.release();
655 if( !img->roi || !img->roi->coi ||
656 img->dataOrder == IPL_DATA_ORDER_PLANE)
657 m2.copyTo(m);
658 else
659 {
660 int ch[] = {img->roi->coi - 1, 0};
661 m.create(m2.rows, m2.cols, m2.type());
662 mixChannels(&m2, 1, &m, 1, ch, 1);
663 }
664 }
665
666 return m;
667 }
668
669 Mat Mat::diag(int d) const
670 {
671 CV_Assert( dims <= 2 );
672 Mat m = *this;
673 size_t esz = elemSize();
674 int len;
675
676 if( d >= 0 )
677 {
678 len = std::min(cols - d, rows);
679 m.data += esz*d;
680 }
681 else
682 {
683 len = std::min(rows + d, cols);
684 m.data -= step[0]*d;
685 }
686 CV_DbgAssert( len > 0 );
687
688 m.size[0] = m.rows = len;
689 m.size[1] = m.cols = 1;
690 m.step[0] += (len > 1 ? esz : 0);
691
692 if( m.rows > 1 )
693 m.flags &= ~CONTINUOUS_FLAG;
694 else
695 m.flags |= CONTINUOUS_FLAG;
696
697 if( size() != Size(1,1) )
698 m.flags |= SUBMATRIX_FLAG;
699
700 return m;
701 }
702
703 void Mat::pop_back(size_t nelems)
704 {
705 CV_Assert( nelems <= (size_t)size.p[0] );
706
707 if( isSubmatrix() )
708 *this = rowRange(0, size.p[0] - (int)nelems);
709 else
710 {
711 size.p[0] -= (int)nelems;
712 dataend -= nelems*step.p[0];
713 /*if( size.p[0] <= 1 )
714 {
715 if( dims <= 2 )
716 flags |= CONTINUOUS_FLAG;
717 else
718 updateContinuityFlag(*this);
719 }*/
720 }
721 }
722
723
724 void Mat::push_back_(const void* elem)
725 {
726 int r = size.p[0];
727 if( isSubmatrix() || dataend + step.p[0] > datalimit )
728 reserve( std::max(r + 1, (r*3+1)/2) );
729
730 size_t esz = elemSize();
731 memcpy(data + r*step.p[0], elem, esz);
732 size.p[0] = r + 1;
733 dataend += step.p[0];
734 if( esz < step.p[0] )
735 flags &= ~CONTINUOUS_FLAG;
736 }
737
738 void Mat::reserve(size_t nelems)
739 {
740 const size_t MIN_SIZE = 64;
741
742 CV_Assert( (int)nelems >= 0 );
743 if( !isSubmatrix() && data + step.p[0]*nelems <= datalimit )
744 return;
745
746 int r = size.p[0];
747
748 if( (size_t)r >= nelems )
749 return;
750
751 size.p[0] = std::max((int)nelems, 1);
752 size_t newsize = total()*elemSize();
753
754 if( newsize < MIN_SIZE )
755 size.p[0] = (int)((MIN_SIZE + newsize - 1)*nelems/newsize);
756
757 Mat m(dims, size.p, type());
758 size.p[0] = r;
759 if( r > 0 )
760 {
761 Mat mpart = m.rowRange(0, r);
762 copyTo(mpart);
763 }
764
765 *this = m;
766 size.p[0] = r;
767 dataend = data + step.p[0]*r;
768 }
769
770
771 void Mat::resize(size_t nelems)
772 {
773 int saveRows = size.p[0];
774 if( saveRows == (int)nelems )
775 return;
776 CV_Assert( (int)nelems >= 0 );
777
778 if( isSubmatrix() || data + step.p[0]*nelems > datalimit )
779 reserve(nelems);
780
781 size.p[0] = (int)nelems;
782 dataend += (size.p[0] - saveRows)*step.p[0];
783
784 //updateContinuityFlag(*this);
785 }
786
787
788 void Mat::resize(size_t nelems, const Scalar& s)
789 {
790 int saveRows = size.p[0];
791 resize(nelems);
792
793 if( size.p[0] > saveRows )
794 {
795 Mat part = rowRange(saveRows, size.p[0]);
796 part = s;
797 }
798 }
799
800 void Mat::push_back(const Mat& elems)
801 {
802 int r = size.p[0], delta = elems.size.p[0];
803 if( delta == 0 )
804 return;
805 if( this == &elems )
806 {
807 Mat tmp = elems;
808 push_back(tmp);
809 return;
810 }
811 if( !data )
812 {
813 *this = elems.clone();
814 return;
815 }
816
817 size.p[0] = elems.size.p[0];
818 bool eq = size == elems.size;
819 size.p[0] = r;
820 if( !eq )
821 CV_Error(CV_StsUnmatchedSizes, "");
822 if( type() != elems.type() )
823 CV_Error(CV_StsUnmatchedFormats, "");
824
825 if( isSubmatrix() || dataend + step.p[0]*delta > datalimit )
826 reserve( std::max(r + delta, (r*3+1)/2) );
827
828 size.p[0] += delta;
829 dataend += step.p[0]*delta;
830
831 //updateContinuityFlag(*this);
832
833 if( isContinuous() && elems.isContinuous() )
834 memcpy(data + r*step.p[0], elems.data, elems.total()*elems.elemSize());
835 else
836 {
837 Mat part = rowRange(r, r + delta);
838 elems.copyTo(part);
839 }
840 }
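/* Illustrative usage sketch (not part of the original source): appending rows
   with Mat::push_back; the element values are arbitrary examples.

       cv::Mat table(0, 3, CV_32F);                               // 0x3, nothing allocated yet
       cv::Mat row = (cv::Mat_<float>(1, 3) << 1.f, 2.f, 3.f);
       table.push_back(row);                                      // now 1x3
       table.push_back(row);                                      // now 2x3
*/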
841
842
843 Mat cvarrToMat(const CvArr* arr, bool copyData,
844 bool /*allowND*/, int coiMode, AutoBuffer<double>* abuf )
845 {
846 if( !arr )
847 return Mat();
848 if( CV_IS_MAT_HDR_Z(arr) )
849 return cvMatToMat((const CvMat*)arr, copyData);
850 if( CV_IS_MATND(arr) )
851 return cvMatNDToMat((const CvMatND*)arr, copyData );
852 if( CV_IS_IMAGE(arr) )
853 {
854 const IplImage* iplimg = (const IplImage*)arr;
855 if( coiMode == 0 && iplimg->roi && iplimg->roi->coi > 0 )
856 CV_Error(CV_BadCOI, "COI is not supported by the function");
857 return iplImageToMat(iplimg, copyData);
858 }
859 if( CV_IS_SEQ(arr) )
860 {
861 CvSeq* seq = (CvSeq*)arr;
862 int total = seq->total, type = CV_MAT_TYPE(seq->flags), esz = seq->elem_size;
863 if( total == 0 )
864 return Mat();
865 CV_Assert(total > 0 && CV_ELEM_SIZE(seq->flags) == esz);
866 if(!copyData && seq->first->next == seq->first)
867 return Mat(total, 1, type, seq->first->data);
868 if( abuf )
869 {
870 abuf->allocate(((size_t)total*esz + sizeof(double)-1)/sizeof(double));
871 double* bufdata = *abuf;
872 cvCvtSeqToArray(seq, bufdata, CV_WHOLE_SEQ);
873 return Mat(total, 1, type, bufdata);
874 }
875
876 Mat buf(total, 1, type);
877 cvCvtSeqToArray(seq, buf.ptr(), CV_WHOLE_SEQ);
878 return buf;
879 }
880 CV_Error(CV_StsBadArg, "Unknown array type");
881 return Mat();
882 }
883
884 void Mat::locateROI( Size& wholeSize, Point& ofs ) const
885 {
886 CV_Assert( dims <= 2 && step[0] > 0 );
887 size_t esz = elemSize(), minstep;
888 ptrdiff_t delta1 = data - datastart, delta2 = dataend - datastart;
889
890 if( delta1 == 0 )
891 ofs.x = ofs.y = 0;
892 else
893 {
894 ofs.y = (int)(delta1/step[0]);
895 ofs.x = (int)((delta1 - step[0]*ofs.y)/esz);
896 CV_DbgAssert( data == datastart + ofs.y*step[0] + ofs.x*esz );
897 }
898 minstep = (ofs.x + cols)*esz;
899 wholeSize.height = (int)((delta2 - minstep)/step[0] + 1);
900 wholeSize.height = std::max(wholeSize.height, ofs.y + rows);
901 wholeSize.width = (int)((delta2 - step*(wholeSize.height-1))/esz);
902 wholeSize.width = std::max(wholeSize.width, ofs.x + cols);
903 }
904
905 Mat& Mat::adjustROI( int dtop, int dbottom, int dleft, int dright )
906 {
907 CV_Assert( dims <= 2 && step[0] > 0 );
908 Size wholeSize; Point ofs;
909 size_t esz = elemSize();
910 locateROI( wholeSize, ofs );
911 int row1 = std::max(ofs.y - dtop, 0), row2 = std::min(ofs.y + rows + dbottom, wholeSize.height);
912 int col1 = std::max(ofs.x - dleft, 0), col2 = std::min(ofs.x + cols + dright, wholeSize.width);
913 data += (row1 - ofs.y)*step + (col1 - ofs.x)*esz;
914 rows = row2 - row1; cols = col2 - col1;
915 size.p[0] = rows; size.p[1] = cols;
916 if( esz*cols == step[0] || rows == 1 )
917 flags |= CONTINUOUS_FLAG;
918 else
919 flags &= ~CONTINUOUS_FLAG;
920 return *this;
921 }
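/* Illustrative usage sketch (not part of the original source): locating and
   growing a submatrix ROI; the sizes below are example values.

       cv::Mat big(10, 10, CV_8U, cv::Scalar(0));
       cv::Mat roi = big(cv::Rect(2, 2, 4, 4));   // 4x4 view into 'big'
       cv::Size wholeSize; cv::Point ofs;
       roi.locateROI(wholeSize, ofs);             // wholeSize = 10x10, ofs = (2, 2)
       roi.adjustROI(1, 1, 1, 1);                 // expand by 1 pixel on each side -> 6x6
*/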
922
923 }
924
925 void cv::extractImageCOI(const CvArr* arr, OutputArray _ch, int coi)
926 {
927 Mat mat = cvarrToMat(arr, false, true, 1);
928 _ch.create(mat.dims, mat.size, mat.depth());
929 Mat ch = _ch.getMat();
930 if(coi < 0)
931 {
932 CV_Assert( CV_IS_IMAGE(arr) );
933 coi = cvGetImageCOI((const IplImage*)arr)-1;
934 }
935 CV_Assert(0 <= coi && coi < mat.channels());
936 int _pairs[] = { coi, 0 };
937 mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
938 }
939
940 void cv::insertImageCOI(InputArray _ch, CvArr* arr, int coi)
941 {
942 Mat ch = _ch.getMat(), mat = cvarrToMat(arr, false, true, 1);
943 if(coi < 0)
944 {
945 CV_Assert( CV_IS_IMAGE(arr) );
946 coi = cvGetImageCOI((const IplImage*)arr)-1;
947 }
948 CV_Assert(ch.size == mat.size && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels());
949 int _pairs[] = { 0, coi };
950 mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
951 }
952
953 namespace cv
954 {
955
956 Mat Mat::reshape(int new_cn, int new_rows) const
957 {
958 int cn = channels();
959 Mat hdr = *this;
960
961 if( dims > 2 && new_rows == 0 && new_cn != 0 && size[dims-1]*cn % new_cn == 0 )
962 {
963 hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
964 hdr.step[dims-1] = CV_ELEM_SIZE(hdr.flags);
965 hdr.size[dims-1] = hdr.size[dims-1]*cn / new_cn;
966 return hdr;
967 }
968
969 CV_Assert( dims <= 2 );
970
971 if( new_cn == 0 )
972 new_cn = cn;
973
974 int total_width = cols * cn;
975
976 if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
977 new_rows = rows * total_width / new_cn;
978
979 if( new_rows != 0 && new_rows != rows )
980 {
981 int total_size = total_width * rows;
982 if( !isContinuous() )
983 CV_Error( CV_BadStep,
984 "The matrix is not continuous, thus its number of rows can not be changed" );
985
986 if( (unsigned)new_rows > (unsigned)total_size )
987 CV_Error( CV_StsOutOfRange, "Bad new number of rows" );
988
989 total_width = total_size / new_rows;
990
991 if( total_width * new_rows != total_size )
992 CV_Error( CV_StsBadArg, "The total number of matrix elements "
993 "is not divisible by the new number of rows" );
994
995 hdr.rows = new_rows;
996 hdr.step[0] = total_width * elemSize1();
997 }
998
999 int new_width = total_width / new_cn;
1000
1001 if( new_width * new_cn != total_width )
1002 CV_Error( CV_BadNumChannels,
1003 "The total width is not divisible by the new number of channels" );
1004
1005 hdr.cols = new_width;
1006 hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
1007 hdr.step[1] = CV_ELEM_SIZE(hdr.flags);
1008 return hdr;
1009 }
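/* Illustrative usage sketch (not part of the original source): reinterpreting
   channels/rows with Mat::reshape; no data is copied. Sizes are example values.

       cv::Mat rgb(4, 4, CV_8UC3);
       cv::Mat flat = rgb.reshape(1);      // 4x12, single channel, same buffer
       cv::Mat col  = rgb.reshape(3, 16);  // 16x1, 3 channels (matrix must be continuous)
*/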
1010
1011 Mat Mat::diag(const Mat& d)
1012 {
1013 CV_Assert( d.cols == 1 || d.rows == 1 );
1014 int len = d.rows + d.cols - 1;
1015 Mat m(len, len, d.type(), Scalar(0));
1016 Mat md = m.diag();
1017 if( d.cols == 1 )
1018 d.copyTo(md);
1019 else
1020 transpose(d, md);
1021 return m;
1022 }
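/* Illustrative usage sketch (not part of the original source): building a
   diagonal matrix from a vector; the values are example inputs.

       cv::Mat d = (cv::Mat_<double>(3, 1) << 1, 2, 3);
       cv::Mat D = cv::Mat::diag(d);   // 3x3 with 1, 2, 3 on the main diagonal
*/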
1023
1024 int Mat::checkVector(int _elemChannels, int _depth, bool _requireContinuous) const
1025 {
1026 return (depth() == _depth || _depth <= 0) &&
1027 (isContinuous() || !_requireContinuous) &&
1028 ((dims == 2 && (((rows == 1 || cols == 1) && channels() == _elemChannels) ||
1029 (cols == _elemChannels && channels() == 1))) ||
1030 (dims == 3 && channels() == 1 && size.p[2] == _elemChannels && (size.p[0] == 1 || size.p[1] == 1) &&
1031 (isContinuous() || step.p[1] == step.p[2]*size.p[2])))
1032 ? (int)(total()*channels()/_elemChannels) : -1;
1033 }
1034
1035
1036 void scalarToRawData(const Scalar& s, void* _buf, int type, int unroll_to)
1037 {
1038 int i, depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
1039 CV_Assert(cn <= 4);
1040 switch(depth)
1041 {
1042 case CV_8U:
1043 {
1044 uchar* buf = (uchar*)_buf;
1045 for(i = 0; i < cn; i++)
1046 buf[i] = saturate_cast<uchar>(s.val[i]);
1047 for(; i < unroll_to; i++)
1048 buf[i] = buf[i-cn];
1049 }
1050 break;
1051 case CV_8S:
1052 {
1053 schar* buf = (schar*)_buf;
1054 for(i = 0; i < cn; i++)
1055 buf[i] = saturate_cast<schar>(s.val[i]);
1056 for(; i < unroll_to; i++)
1057 buf[i] = buf[i-cn];
1058 }
1059 break;
1060 case CV_16U:
1061 {
1062 ushort* buf = (ushort*)_buf;
1063 for(i = 0; i < cn; i++)
1064 buf[i] = saturate_cast<ushort>(s.val[i]);
1065 for(; i < unroll_to; i++)
1066 buf[i] = buf[i-cn];
1067 }
1068 break;
1069 case CV_16S:
1070 {
1071 short* buf = (short*)_buf;
1072 for(i = 0; i < cn; i++)
1073 buf[i] = saturate_cast<short>(s.val[i]);
1074 for(; i < unroll_to; i++)
1075 buf[i] = buf[i-cn];
1076 }
1077 break;
1078 case CV_32S:
1079 {
1080 int* buf = (int*)_buf;
1081 for(i = 0; i < cn; i++)
1082 buf[i] = saturate_cast<int>(s.val[i]);
1083 for(; i < unroll_to; i++)
1084 buf[i] = buf[i-cn];
1085 }
1086 break;
1087 case CV_32F:
1088 {
1089 float* buf = (float*)_buf;
1090 for(i = 0; i < cn; i++)
1091 buf[i] = saturate_cast<float>(s.val[i]);
1092 for(; i < unroll_to; i++)
1093 buf[i] = buf[i-cn];
1094 }
1095 break;
1096 case CV_64F:
1097 {
1098 double* buf = (double*)_buf;
1099 for(i = 0; i < cn; i++)
1100 buf[i] = saturate_cast<double>(s.val[i]);
1101 for(; i < unroll_to; i++)
1102 buf[i] = buf[i-cn];
1103 break;
1104 }
1105 default:
1106 CV_Error(CV_StsUnsupportedFormat,"");
1107 }
1108 }
1109
1110
1111 /*************************************************************************************************\
1112 Input/Output Array
1113 \*************************************************************************************************/
1114
1115 Mat _InputArray::getMat_(int i) const
1116 {
1117 int k = kind();
1118 int accessFlags = flags & ACCESS_MASK;
1119
1120 if( k == MAT )
1121 {
1122 const Mat* m = (const Mat*)obj;
1123 if( i < 0 )
1124 return *m;
1125 return m->row(i);
1126 }
1127
1128 if( k == UMAT )
1129 {
1130 const UMat* m = (const UMat*)obj;
1131 if( i < 0 )
1132 return m->getMat(accessFlags);
1133 return m->getMat(accessFlags).row(i);
1134 }
1135
1136 if( k == EXPR )
1137 {
1138 CV_Assert( i < 0 );
1139 return (Mat)*((const MatExpr*)obj);
1140 }
1141
1142 if( k == MATX )
1143 {
1144 CV_Assert( i < 0 );
1145 return Mat(sz, flags, obj);
1146 }
1147
1148 if( k == STD_VECTOR )
1149 {
1150 CV_Assert( i < 0 );
1151 int t = CV_MAT_TYPE(flags);
1152 const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
1153
1154 return !v.empty() ? Mat(size(), t, (void*)&v[0]) : Mat();
1155 }
1156
1157 if( k == STD_BOOL_VECTOR )
1158 {
1159 CV_Assert( i < 0 );
1160 int t = CV_8U;
1161 const std::vector<bool>& v = *(const std::vector<bool>*)obj;
1162 int j, n = (int)v.size();
1163 if( n == 0 )
1164 return Mat();
1165 Mat m(1, n, t);
1166 uchar* dst = m.data;
1167 for( j = 0; j < n; j++ )
1168 dst[j] = (uchar)v[j];
1169 return m;
1170 }
1171
1172 if( k == NONE )
1173 return Mat();
1174
1175 if( k == STD_VECTOR_VECTOR )
1176 {
1177 int t = type(i);
1178 const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
1179 CV_Assert( 0 <= i && i < (int)vv.size() );
1180 const std::vector<uchar>& v = vv[i];
1181
1182 return !v.empty() ? Mat(size(i), t, (void*)&v[0]) : Mat();
1183 }
1184
1185 if( k == STD_VECTOR_MAT )
1186 {
1187 const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
1188 CV_Assert( 0 <= i && i < (int)v.size() );
1189
1190 return v[i];
1191 }
1192
1193 if( k == STD_VECTOR_UMAT )
1194 {
1195 const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
1196 CV_Assert( 0 <= i && i < (int)v.size() );
1197
1198 return v[i].getMat(accessFlags);
1199 }
1200
1201 if( k == OPENGL_BUFFER )
1202 {
1203 CV_Assert( i < 0 );
1204 CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapHost/unmapHost methods for ogl::Buffer object");
1205 return Mat();
1206 }
1207
1208 if( k == CUDA_GPU_MAT )
1209 {
1210 CV_Assert( i < 0 );
1211 CV_Error(cv::Error::StsNotImplemented, "You should explicitly call download method for cuda::GpuMat object");
1212 return Mat();
1213 }
1214
1215 if( k == CUDA_HOST_MEM )
1216 {
1217 CV_Assert( i < 0 );
1218
1219 const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
1220
1221 return cuda_mem->createMatHeader();
1222 }
1223
1224 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1225 return Mat();
1226 }
1227
1228 UMat _InputArray::getUMat(int i) const
1229 {
1230 int k = kind();
1231 int accessFlags = flags & ACCESS_MASK;
1232
1233 if( k == UMAT )
1234 {
1235 const UMat* m = (const UMat*)obj;
1236 if( i < 0 )
1237 return *m;
1238 return m->row(i);
1239 }
1240
1241 if( k == STD_VECTOR_UMAT )
1242 {
1243 const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
1244 CV_Assert( 0 <= i && i < (int)v.size() );
1245
1246 return v[i];
1247 }
1248
1249 if( k == MAT )
1250 {
1251 const Mat* m = (const Mat*)obj;
1252 if( i < 0 )
1253 return m->getUMat(accessFlags);
1254 return m->row(i).getUMat(accessFlags);
1255 }
1256
1257 return getMat(i).getUMat(accessFlags);
1258 }
1259
1260 void _InputArray::getMatVector(std::vector<Mat>& mv) const
1261 {
1262 int k = kind();
1263 int accessFlags = flags & ACCESS_MASK;
1264
1265 if( k == MAT )
1266 {
1267 const Mat& m = *(const Mat*)obj;
1268 int i, n = (int)m.size[0];
1269 mv.resize(n);
1270
1271 for( i = 0; i < n; i++ )
1272 mv[i] = m.dims == 2 ? Mat(1, m.cols, m.type(), (void*)m.ptr(i)) :
1273 Mat(m.dims-1, &m.size[1], m.type(), (void*)m.ptr(i), &m.step[1]);
1274 return;
1275 }
1276
1277 if( k == EXPR )
1278 {
1279 Mat m = *(const MatExpr*)obj;
1280 int i, n = m.size[0];
1281 mv.resize(n);
1282
1283 for( i = 0; i < n; i++ )
1284 mv[i] = m.row(i);
1285 return;
1286 }
1287
1288 if( k == MATX )
1289 {
1290 size_t i, n = sz.height, esz = CV_ELEM_SIZE(flags);
1291 mv.resize(n);
1292
1293 for( i = 0; i < n; i++ )
1294 mv[i] = Mat(1, sz.width, CV_MAT_TYPE(flags), (uchar*)obj + esz*sz.width*i);
1295 return;
1296 }
1297
1298 if( k == STD_VECTOR )
1299 {
1300 const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
1301
1302 size_t i, n = v.size(), esz = CV_ELEM_SIZE(flags);
1303 int t = CV_MAT_DEPTH(flags), cn = CV_MAT_CN(flags);
1304 mv.resize(n);
1305
1306 for( i = 0; i < n; i++ )
1307 mv[i] = Mat(1, cn, t, (void*)(&v[0] + esz*i));
1308 return;
1309 }
1310
1311 if( k == NONE )
1312 {
1313 mv.clear();
1314 return;
1315 }
1316
1317 if( k == STD_VECTOR_VECTOR )
1318 {
1319 const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
1320 int i, n = (int)vv.size();
1321 int t = CV_MAT_TYPE(flags);
1322 mv.resize(n);
1323
1324 for( i = 0; i < n; i++ )
1325 {
1326 const std::vector<uchar>& v = vv[i];
1327 mv[i] = Mat(size(i), t, (void*)&v[0]);
1328 }
1329 return;
1330 }
1331
1332 if( k == STD_VECTOR_MAT )
1333 {
1334 const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
1335 size_t i, n = v.size();
1336 mv.resize(n);
1337
1338 for( i = 0; i < n; i++ )
1339 mv[i] = v[i];
1340 return;
1341 }
1342
1343 if( k == STD_VECTOR_UMAT )
1344 {
1345 const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
1346 size_t i, n = v.size();
1347 mv.resize(n);
1348
1349 for( i = 0; i < n; i++ )
1350 mv[i] = v[i].getMat(accessFlags);
1351 return;
1352 }
1353
1354 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1355 }
1356
1357 void _InputArray::getUMatVector(std::vector<UMat>& umv) const
1358 {
1359 int k = kind();
1360 int accessFlags = flags & ACCESS_MASK;
1361
1362 if( k == NONE )
1363 {
1364 umv.clear();
1365 return;
1366 }
1367
1368 if( k == STD_VECTOR_MAT )
1369 {
1370 const std::vector<Mat>& v = *(const std::vector<Mat>*)obj;
1371 size_t i, n = v.size();
1372 umv.resize(n);
1373
1374 for( i = 0; i < n; i++ )
1375 umv[i] = v[i].getUMat(accessFlags);
1376 return;
1377 }
1378
1379 if( k == STD_VECTOR_UMAT )
1380 {
1381 const std::vector<UMat>& v = *(const std::vector<UMat>*)obj;
1382 size_t i, n = v.size();
1383 umv.resize(n);
1384
1385 for( i = 0; i < n; i++ )
1386 umv[i] = v[i];
1387 return;
1388 }
1389
1390 if( k == UMAT )
1391 {
1392 UMat& v = *(UMat*)obj;
1393 umv.resize(1);
1394 umv[0] = v;
1395 return;
1396 }
1397 if( k == MAT )
1398 {
1399 Mat& v = *(Mat*)obj;
1400 umv.resize(1);
1401 umv[0] = v.getUMat(accessFlags);
1402 return;
1403 }
1404
1405 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1406 }
1407
1408 cuda::GpuMat _InputArray::getGpuMat() const
1409 {
1410 int k = kind();
1411
1412 if (k == CUDA_GPU_MAT)
1413 {
1414 const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
1415 return *d_mat;
1416 }
1417
1418 if (k == CUDA_HOST_MEM)
1419 {
1420 const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
1421 return cuda_mem->createGpuMatHeader();
1422 }
1423
1424 if (k == OPENGL_BUFFER)
1425 {
1426 CV_Error(cv::Error::StsNotImplemented, "You should explicitly call mapDevice/unmapDevice methods for ogl::Buffer object");
1427 return cuda::GpuMat();
1428 }
1429
1430 if (k == NONE)
1431 return cuda::GpuMat();
1432
1433 CV_Error(cv::Error::StsNotImplemented, "getGpuMat is available only for cuda::GpuMat and cuda::HostMem");
1434 return cuda::GpuMat();
1435 }
1436
1437 ogl::Buffer _InputArray::getOGlBuffer() const
1438 {
1439 int k = kind();
1440
1441 CV_Assert(k == OPENGL_BUFFER);
1442
1443 const ogl::Buffer* gl_buf = (const ogl::Buffer*)obj;
1444 return *gl_buf;
1445 }
1446
1447 int _InputArray::kind() const
1448 {
1449 return flags & KIND_MASK;
1450 }
1451
1452 int _InputArray::rows(int i) const
1453 {
1454 return size(i).height;
1455 }
1456
1457 int _InputArray::cols(int i) const
1458 {
1459 return size(i).width;
1460 }
1461
1462 Size _InputArray::size(int i) const
1463 {
1464 int k = kind();
1465
1466 if( k == MAT )
1467 {
1468 CV_Assert( i < 0 );
1469 return ((const Mat*)obj)->size();
1470 }
1471
1472 if( k == EXPR )
1473 {
1474 CV_Assert( i < 0 );
1475 return ((const MatExpr*)obj)->size();
1476 }
1477
1478 if( k == UMAT )
1479 {
1480 CV_Assert( i < 0 );
1481 return ((const UMat*)obj)->size();
1482 }
1483
1484 if( k == MATX )
1485 {
1486 CV_Assert( i < 0 );
1487 return sz;
1488 }
1489
1490 if( k == STD_VECTOR )
1491 {
1492 CV_Assert( i < 0 );
1493 const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
1494 const std::vector<int>& iv = *(const std::vector<int>*)obj;
1495 size_t szb = v.size(), szi = iv.size();
1496 return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
1497 }
1498
1499 if( k == STD_BOOL_VECTOR )
1500 {
1501 CV_Assert( i < 0 );
1502 const std::vector<bool>& v = *(const std::vector<bool>*)obj;
1503 return Size((int)v.size(), 1);
1504 }
1505
1506 if( k == NONE )
1507 return Size();
1508
1509 if( k == STD_VECTOR_VECTOR )
1510 {
1511 const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
1512 if( i < 0 )
1513 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1514 CV_Assert( i < (int)vv.size() );
1515 const std::vector<std::vector<int> >& ivv = *(const std::vector<std::vector<int> >*)obj;
1516
1517 size_t szb = vv[i].size(), szi = ivv[i].size();
1518 return szb == szi ? Size((int)szb, 1) : Size((int)(szb/CV_ELEM_SIZE(flags)), 1);
1519 }
1520
1521 if( k == STD_VECTOR_MAT )
1522 {
1523 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1524 if( i < 0 )
1525 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1526 CV_Assert( i < (int)vv.size() );
1527
1528 return vv[i].size();
1529 }
1530
1531 if( k == STD_VECTOR_UMAT )
1532 {
1533 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1534 if( i < 0 )
1535 return vv.empty() ? Size() : Size((int)vv.size(), 1);
1536 CV_Assert( i < (int)vv.size() );
1537
1538 return vv[i].size();
1539 }
1540
1541 if( k == OPENGL_BUFFER )
1542 {
1543 CV_Assert( i < 0 );
1544 const ogl::Buffer* buf = (const ogl::Buffer*)obj;
1545 return buf->size();
1546 }
1547
1548 if( k == CUDA_GPU_MAT )
1549 {
1550 CV_Assert( i < 0 );
1551 const cuda::GpuMat* d_mat = (const cuda::GpuMat*)obj;
1552 return d_mat->size();
1553 }
1554
1555 if( k == CUDA_HOST_MEM )
1556 {
1557 CV_Assert( i < 0 );
1558 const cuda::HostMem* cuda_mem = (const cuda::HostMem*)obj;
1559 return cuda_mem->size();
1560 }
1561
1562 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1563 return Size();
1564 }
1565
1566 int _InputArray::sizend(int* arrsz, int i) const
1567 {
1568 int j, d=0, k = kind();
1569
1570 if( k == NONE )
1571 ;
1572 else if( k == MAT )
1573 {
1574 CV_Assert( i < 0 );
1575 const Mat& m = *(const Mat*)obj;
1576 d = m.dims;
1577 if(arrsz)
1578 for(j = 0; j < d; j++)
1579 arrsz[j] = m.size.p[j];
1580 }
1581 else if( k == UMAT )
1582 {
1583 CV_Assert( i < 0 );
1584 const UMat& m = *(const UMat*)obj;
1585 d = m.dims;
1586 if(arrsz)
1587 for(j = 0; j < d; j++)
1588 arrsz[j] = m.size.p[j];
1589 }
1590 else if( k == STD_VECTOR_MAT && i >= 0 )
1591 {
1592 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1593 CV_Assert( i < (int)vv.size() );
1594 const Mat& m = vv[i];
1595 d = m.dims;
1596 if(arrsz)
1597 for(j = 0; j < d; j++)
1598 arrsz[j] = m.size.p[j];
1599 }
1600 else if( k == STD_VECTOR_UMAT && i >= 0 )
1601 {
1602 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1603 CV_Assert( i < (int)vv.size() );
1604 const UMat& m = vv[i];
1605 d = m.dims;
1606 if(arrsz)
1607 for(j = 0; j < d; j++)
1608 arrsz[j] = m.size.p[j];
1609 }
1610 else
1611 {
1612 Size sz2d = size(i);
1613 d = 2;
1614 if(arrsz)
1615 {
1616 arrsz[0] = sz2d.height;
1617 arrsz[1] = sz2d.width;
1618 }
1619 }
1620
1621 return d;
1622 }
1623
1624 bool _InputArray::sameSize(const _InputArray& arr) const
1625 {
1626 int k1 = kind(), k2 = arr.kind();
1627 Size sz1;
1628
1629 if( k1 == MAT )
1630 {
1631 const Mat* m = ((const Mat*)obj);
1632 if( k2 == MAT )
1633 return m->size == ((const Mat*)arr.obj)->size;
1634 if( k2 == UMAT )
1635 return m->size == ((const UMat*)arr.obj)->size;
1636 if( m->dims > 2 )
1637 return false;
1638 sz1 = m->size();
1639 }
1640 else if( k1 == UMAT )
1641 {
1642 const UMat* m = ((const UMat*)obj);
1643 if( k2 == MAT )
1644 return m->size == ((const Mat*)arr.obj)->size;
1645 if( k2 == UMAT )
1646 return m->size == ((const UMat*)arr.obj)->size;
1647 if( m->dims > 2 )
1648 return false;
1649 sz1 = m->size();
1650 }
1651 else
1652 sz1 = size();
1653 if( arr.dims() > 2 )
1654 return false;
1655 return sz1 == arr.size();
1656 }
1657
1658 int _InputArray::dims(int i) const
1659 {
1660 int k = kind();
1661
1662 if( k == MAT )
1663 {
1664 CV_Assert( i < 0 );
1665 return ((const Mat*)obj)->dims;
1666 }
1667
1668 if( k == EXPR )
1669 {
1670 CV_Assert( i < 0 );
1671 return ((const MatExpr*)obj)->a.dims;
1672 }
1673
1674 if( k == UMAT )
1675 {
1676 CV_Assert( i < 0 );
1677 return ((const UMat*)obj)->dims;
1678 }
1679
1680 if( k == MATX )
1681 {
1682 CV_Assert( i < 0 );
1683 return 2;
1684 }
1685
1686 if( k == STD_VECTOR || k == STD_BOOL_VECTOR )
1687 {
1688 CV_Assert( i < 0 );
1689 return 2;
1690 }
1691
1692 if( k == NONE )
1693 return 0;
1694
1695 if( k == STD_VECTOR_VECTOR )
1696 {
1697 const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
1698 if( i < 0 )
1699 return 1;
1700 CV_Assert( i < (int)vv.size() );
1701 return 2;
1702 }
1703
1704 if( k == STD_VECTOR_MAT )
1705 {
1706 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1707 if( i < 0 )
1708 return 1;
1709 CV_Assert( i < (int)vv.size() );
1710
1711 return vv[i].dims;
1712 }
1713
1714 if( k == STD_VECTOR_UMAT )
1715 {
1716 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1717 if( i < 0 )
1718 return 1;
1719 CV_Assert( i < (int)vv.size() );
1720
1721 return vv[i].dims;
1722 }
1723
1724 if( k == OPENGL_BUFFER )
1725 {
1726 CV_Assert( i < 0 );
1727 return 2;
1728 }
1729
1730 if( k == CUDA_GPU_MAT )
1731 {
1732 CV_Assert( i < 0 );
1733 return 2;
1734 }
1735
1736 if( k == CUDA_HOST_MEM )
1737 {
1738 CV_Assert( i < 0 );
1739 return 2;
1740 }
1741
1742 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1743 return 0;
1744 }
1745
1746 size_t _InputArray::total(int i) const
1747 {
1748 int k = kind();
1749
1750 if( k == MAT )
1751 {
1752 CV_Assert( i < 0 );
1753 return ((const Mat*)obj)->total();
1754 }
1755
1756 if( k == UMAT )
1757 {
1758 CV_Assert( i < 0 );
1759 return ((const UMat*)obj)->total();
1760 }
1761
1762 if( k == STD_VECTOR_MAT )
1763 {
1764 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1765 if( i < 0 )
1766 return vv.size();
1767
1768 CV_Assert( i < (int)vv.size() );
1769 return vv[i].total();
1770 }
1771
1772 if( k == STD_VECTOR_UMAT )
1773 {
1774 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1775 if( i < 0 )
1776 return vv.size();
1777
1778 CV_Assert( i < (int)vv.size() );
1779 return vv[i].total();
1780 }
1781
1782 return size(i).area();
1783 }
1784
1785 int _InputArray::type(int i) const
1786 {
1787 int k = kind();
1788
1789 if( k == MAT )
1790 return ((const Mat*)obj)->type();
1791
1792 if( k == UMAT )
1793 return ((const UMat*)obj)->type();
1794
1795 if( k == EXPR )
1796 return ((const MatExpr*)obj)->type();
1797
1798 if( k == MATX || k == STD_VECTOR || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
1799 return CV_MAT_TYPE(flags);
1800
1801 if( k == NONE )
1802 return -1;
1803
1804 if( k == STD_VECTOR_UMAT )
1805 {
1806 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1807 if( vv.empty() )
1808 {
1809 CV_Assert((flags & FIXED_TYPE) != 0);
1810 return CV_MAT_TYPE(flags);
1811 }
1812 CV_Assert( i < (int)vv.size() );
1813 return vv[i >= 0 ? i : 0].type();
1814 }
1815
1816 if( k == STD_VECTOR_MAT )
1817 {
1818 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1819 if( vv.empty() )
1820 {
1821 CV_Assert((flags & FIXED_TYPE) != 0);
1822 return CV_MAT_TYPE(flags);
1823 }
1824 CV_Assert( i < (int)vv.size() );
1825 return vv[i >= 0 ? i : 0].type();
1826 }
1827
1828 if( k == OPENGL_BUFFER )
1829 return ((const ogl::Buffer*)obj)->type();
1830
1831 if( k == CUDA_GPU_MAT )
1832 return ((const cuda::GpuMat*)obj)->type();
1833
1834 if( k == CUDA_HOST_MEM )
1835 return ((const cuda::HostMem*)obj)->type();
1836
1837 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1838 return 0;
1839 }
1840
1841 int _InputArray::depth(int i) const
1842 {
1843 return CV_MAT_DEPTH(type(i));
1844 }
1845
1846 int _InputArray::channels(int i) const
1847 {
1848 return CV_MAT_CN(type(i));
1849 }
1850
1851 bool _InputArray::empty() const
1852 {
1853 int k = kind();
1854
1855 if( k == MAT )
1856 return ((const Mat*)obj)->empty();
1857
1858 if( k == UMAT )
1859 return ((const UMat*)obj)->empty();
1860
1861 if( k == EXPR )
1862 return false;
1863
1864 if( k == MATX )
1865 return false;
1866
1867 if( k == STD_VECTOR )
1868 {
1869 const std::vector<uchar>& v = *(const std::vector<uchar>*)obj;
1870 return v.empty();
1871 }
1872
1873 if( k == STD_BOOL_VECTOR )
1874 {
1875 const std::vector<bool>& v = *(const std::vector<bool>*)obj;
1876 return v.empty();
1877 }
1878
1879 if( k == NONE )
1880 return true;
1881
1882 if( k == STD_VECTOR_VECTOR )
1883 {
1884 const std::vector<std::vector<uchar> >& vv = *(const std::vector<std::vector<uchar> >*)obj;
1885 return vv.empty();
1886 }
1887
1888 if( k == STD_VECTOR_MAT )
1889 {
1890 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1891 return vv.empty();
1892 }
1893
1894 if( k == STD_VECTOR_UMAT )
1895 {
1896 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1897 return vv.empty();
1898 }
1899
1900 if( k == OPENGL_BUFFER )
1901 return ((const ogl::Buffer*)obj)->empty();
1902
1903 if( k == CUDA_GPU_MAT )
1904 return ((const cuda::GpuMat*)obj)->empty();
1905
1906 if( k == CUDA_HOST_MEM )
1907 return ((const cuda::HostMem*)obj)->empty();
1908
1909 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
1910 return true;
1911 }
1912
1913 bool _InputArray::isContinuous(int i) const
1914 {
1915 int k = kind();
1916
1917 if( k == MAT )
1918 return i < 0 ? ((const Mat*)obj)->isContinuous() : true;
1919
1920 if( k == UMAT )
1921 return i < 0 ? ((const UMat*)obj)->isContinuous() : true;
1922
1923 if( k == EXPR || k == MATX || k == STD_VECTOR ||
1924 k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
1925 return true;
1926
1927 if( k == STD_VECTOR_MAT )
1928 {
1929 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1930 CV_Assert((size_t)i < vv.size());
1931 return vv[i].isContinuous();
1932 }
1933
1934 if( k == STD_VECTOR_UMAT )
1935 {
1936 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1937 CV_Assert((size_t)i < vv.size());
1938 return vv[i].isContinuous();
1939 }
1940
1941 CV_Error(CV_StsNotImplemented, "Unknown/unsupported array type");
1942 return false;
1943 }
1944
1945 bool _InputArray::isSubmatrix(int i) const
1946 {
1947 int k = kind();
1948
1949 if( k == MAT )
1950 return i < 0 ? ((const Mat*)obj)->isSubmatrix() : false;
1951
1952 if( k == UMAT )
1953 return i < 0 ? ((const UMat*)obj)->isSubmatrix() : false;
1954
1955 if( k == EXPR || k == MATX || k == STD_VECTOR ||
1956 k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
1957 return false;
1958
1959 if( k == STD_VECTOR_MAT )
1960 {
1961 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
1962 CV_Assert((size_t)i < vv.size());
1963 return vv[i].isSubmatrix();
1964 }
1965
1966 if( k == STD_VECTOR_UMAT )
1967 {
1968 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
1969 CV_Assert((size_t)i < vv.size());
1970 return vv[i].isSubmatrix();
1971 }
1972
1973 CV_Error(CV_StsNotImplemented, "");
1974 return false;
1975 }
1976
1977 size_t _InputArray::offset(int i) const
1978 {
1979 int k = kind();
1980
1981 if( k == MAT )
1982 {
1983 CV_Assert( i < 0 );
1984 const Mat * const m = ((const Mat*)obj);
1985 return (size_t)(m->ptr() - m->datastart);
1986 }
1987
1988 if( k == UMAT )
1989 {
1990 CV_Assert( i < 0 );
1991 return ((const UMat*)obj)->offset;
1992 }
1993
1994 if( k == EXPR || k == MATX || k == STD_VECTOR ||
1995 k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
1996 return 0;
1997
1998 if( k == STD_VECTOR_MAT )
1999 {
2000 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
2001 if( i < 0 )
2002 return 1;
2003 CV_Assert( i < (int)vv.size() );
2004
2005 return (size_t)(vv[i].ptr() - vv[i].datastart);
2006 }
2007
2008 if( k == STD_VECTOR_UMAT )
2009 {
2010 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
2011 CV_Assert((size_t)i < vv.size());
2012 return vv[i].offset;
2013 }
2014
2015 if( k == CUDA_GPU_MAT )
2016 {
2017 CV_Assert( i < 0 );
2018 const cuda::GpuMat * const m = ((const cuda::GpuMat*)obj);
2019 return (size_t)(m->data - m->datastart);
2020 }
2021
2022 CV_Error(Error::StsNotImplemented, "");
2023 return 0;
2024 }
2025
2026 size_t _InputArray::step(int i) const
2027 {
2028 int k = kind();
2029
2030 if( k == MAT )
2031 {
2032 CV_Assert( i < 0 );
2033 return ((const Mat*)obj)->step;
2034 }
2035
2036 if( k == UMAT )
2037 {
2038 CV_Assert( i < 0 );
2039 return ((const UMat*)obj)->step;
2040 }
2041
2042 if( k == EXPR || k == MATX || k == STD_VECTOR ||
2043 k == NONE || k == STD_VECTOR_VECTOR || k == STD_BOOL_VECTOR )
2044 return 0;
2045
2046 if( k == STD_VECTOR_MAT )
2047 {
2048 const std::vector<Mat>& vv = *(const std::vector<Mat>*)obj;
2049 if( i < 0 )
2050 return 1;
2051 CV_Assert( i < (int)vv.size() );
2052 return vv[i].step;
2053 }
2054
2055 if( k == STD_VECTOR_UMAT )
2056 {
2057 const std::vector<UMat>& vv = *(const std::vector<UMat>*)obj;
2058 CV_Assert((size_t)i < vv.size());
2059 return vv[i].step;
2060 }
2061
2062 if( k == CUDA_GPU_MAT )
2063 {
2064 CV_Assert( i < 0 );
2065 return ((const cuda::GpuMat*)obj)->step;
2066 }
2067
2068 CV_Error(Error::StsNotImplemented, "");
2069 return 0;
2070 }
2071
2072 void _InputArray::copyTo(const _OutputArray& arr) const
2073 {
2074 int k = kind();
2075
2076 if( k == NONE )
2077 arr.release();
2078 else if( k == MAT || k == MATX || k == STD_VECTOR || k == STD_BOOL_VECTOR )
2079 {
2080 Mat m = getMat();
2081 m.copyTo(arr);
2082 }
2083 else if( k == EXPR )
2084 {
2085 const MatExpr& e = *((MatExpr*)obj);
2086 if( arr.kind() == MAT )
2087 arr.getMatRef() = e;
2088 else
2089 Mat(e).copyTo(arr);
2090 }
2091 else if( k == UMAT )
2092 ((UMat*)obj)->copyTo(arr);
2093 else
2094 CV_Error(Error::StsNotImplemented, "");
2095 }
2096
2097 void _InputArray::copyTo(const _OutputArray& arr, const _InputArray & mask) const
2098 {
2099 int k = kind();
2100
2101 if( k == NONE )
2102 arr.release();
2103 else if( k == MAT || k == MATX || k == STD_VECTOR || k == STD_BOOL_VECTOR )
2104 {
2105 Mat m = getMat();
2106 m.copyTo(arr, mask);
2107 }
2108 else if( k == UMAT )
2109 ((UMat*)obj)->copyTo(arr, mask);
2110 else
2111 CV_Error(Error::StsNotImplemented, "");
2112 }
2113
2114 bool _OutputArray::fixedSize() const
2115 {
2116 return (flags & FIXED_SIZE) == FIXED_SIZE;
2117 }
2118
2119 bool _OutputArray::fixedType() const
2120 {
2121 return (flags & FIXED_TYPE) == FIXED_TYPE;
2122 }
2123
2124 void _OutputArray::create(Size _sz, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
2125 {
2126 int k = kind();
2127 if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2128 {
2129 CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == _sz);
2130 CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
2131 ((Mat*)obj)->create(_sz, mtype);
2132 return;
2133 }
2134 if( k == UMAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2135 {
2136 CV_Assert(!fixedSize() || ((UMat*)obj)->size.operator()() == _sz);
2137 CV_Assert(!fixedType() || ((UMat*)obj)->type() == mtype);
2138 ((UMat*)obj)->create(_sz, mtype);
2139 return;
2140 }
2141 if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2142 {
2143 CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == _sz);
2144 CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
2145 ((cuda::GpuMat*)obj)->create(_sz, mtype);
2146 return;
2147 }
2148 if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2149 {
2150 CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == _sz);
2151 CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
2152 ((ogl::Buffer*)obj)->create(_sz, mtype);
2153 return;
2154 }
2155 if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2156 {
2157 CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == _sz);
2158 CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
2159 ((cuda::HostMem*)obj)->create(_sz, mtype);
2160 return;
2161 }
2162 int sizes[] = {_sz.height, _sz.width};
2163 create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
2164 }
2165
2166 void _OutputArray::create(int _rows, int _cols, int mtype, int i, bool allowTransposed, int fixedDepthMask) const
2167 {
2168 int k = kind();
2169 if( k == MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2170 {
2171 CV_Assert(!fixedSize() || ((Mat*)obj)->size.operator()() == Size(_cols, _rows));
2172 CV_Assert(!fixedType() || ((Mat*)obj)->type() == mtype);
2173 ((Mat*)obj)->create(_rows, _cols, mtype);
2174 return;
2175 }
2176 if( k == UMAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2177 {
2178 CV_Assert(!fixedSize() || ((UMat*)obj)->size.operator()() == Size(_cols, _rows));
2179 CV_Assert(!fixedType() || ((UMat*)obj)->type() == mtype);
2180 ((UMat*)obj)->create(_rows, _cols, mtype);
2181 return;
2182 }
2183 if( k == CUDA_GPU_MAT && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2184 {
2185 CV_Assert(!fixedSize() || ((cuda::GpuMat*)obj)->size() == Size(_cols, _rows));
2186 CV_Assert(!fixedType() || ((cuda::GpuMat*)obj)->type() == mtype);
2187 ((cuda::GpuMat*)obj)->create(_rows, _cols, mtype);
2188 return;
2189 }
2190 if( k == OPENGL_BUFFER && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2191 {
2192 CV_Assert(!fixedSize() || ((ogl::Buffer*)obj)->size() == Size(_cols, _rows));
2193 CV_Assert(!fixedType() || ((ogl::Buffer*)obj)->type() == mtype);
2194 ((ogl::Buffer*)obj)->create(_rows, _cols, mtype);
2195 return;
2196 }
2197 if( k == CUDA_HOST_MEM && i < 0 && !allowTransposed && fixedDepthMask == 0 )
2198 {
2199 CV_Assert(!fixedSize() || ((cuda::HostMem*)obj)->size() == Size(_cols, _rows));
2200 CV_Assert(!fixedType() || ((cuda::HostMem*)obj)->type() == mtype);
2201 ((cuda::HostMem*)obj)->create(_rows, _cols, mtype);
2202 return;
2203 }
2204 int sizes[] = {_rows, _cols};
2205 create(2, sizes, mtype, i, allowTransposed, fixedDepthMask);
2206 }
2207
2208 void _OutputArray::create(int d, const int* sizes, int mtype, int i,
2209 bool allowTransposed, int fixedDepthMask) const
2210 {
2211 int k = kind();
2212 mtype = CV_MAT_TYPE(mtype);
2213
2214 if( k == MAT )
2215 {
2216 CV_Assert( i < 0 );
2217 Mat& m = *(Mat*)obj;
2218 if( allowTransposed )
2219 {
2220 if( !m.isContinuous() )
2221 {
2222 CV_Assert(!fixedType() && !fixedSize());
2223 m.release();
2224 }
2225
2226 if( d == 2 && m.dims == 2 && m.data &&
2227 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
2228 return;
2229 }
2230
2231 if(fixedType())
2232 {
2233 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
2234 mtype = m.type();
2235 else
2236 CV_Assert(CV_MAT_TYPE(mtype) == m.type());
2237 }
2238 if(fixedSize())
2239 {
2240 CV_Assert(m.dims == d);
2241 for(int j = 0; j < d; ++j)
2242 CV_Assert(m.size[j] == sizes[j]);
2243 }
2244 m.create(d, sizes, mtype);
2245 return;
2246 }
2247
2248 if( k == UMAT )
2249 {
2250 CV_Assert( i < 0 );
2251 UMat& m = *(UMat*)obj;
2252 if( allowTransposed )
2253 {
2254 if( !m.isContinuous() )
2255 {
2256 CV_Assert(!fixedType() && !fixedSize());
2257 m.release();
2258 }
2259
2260 if( d == 2 && m.dims == 2 && !m.empty() &&
2261 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
2262 return;
2263 }
2264
2265 if(fixedType())
2266 {
2267 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
2268 mtype = m.type();
2269 else
2270 CV_Assert(CV_MAT_TYPE(mtype) == m.type());
2271 }
2272 if(fixedSize())
2273 {
2274 CV_Assert(m.dims == d);
2275 for(int j = 0; j < d; ++j)
2276 CV_Assert(m.size[j] == sizes[j]);
2277 }
2278 m.create(d, sizes, mtype);
2279 return;
2280 }
2281
2282 if( k == MATX )
2283 {
2284 CV_Assert( i < 0 );
2285 int type0 = CV_MAT_TYPE(flags);
2286 CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == 1 && ((1 << type0) & fixedDepthMask) != 0) );
2287 CV_Assert( d == 2 && ((sizes[0] == sz.height && sizes[1] == sz.width) ||
2288 (allowTransposed && sizes[0] == sz.width && sizes[1] == sz.height)));
2289 return;
2290 }
2291
2292 if( k == STD_VECTOR || k == STD_VECTOR_VECTOR )
2293 {
2294 CV_Assert( d == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
2295 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0;
2296 std::vector<uchar>* v = (std::vector<uchar>*)obj;
2297
2298 if( k == STD_VECTOR_VECTOR )
2299 {
2300 std::vector<std::vector<uchar> >& vv = *(std::vector<std::vector<uchar> >*)obj;
2301 if( i < 0 )
2302 {
2303 CV_Assert(!fixedSize() || len == vv.size());
2304 vv.resize(len);
2305 return;
2306 }
2307 CV_Assert( i < (int)vv.size() );
2308 v = &vv[i];
2309 }
2310 else
2311 CV_Assert( i < 0 );
2312
2313 int type0 = CV_MAT_TYPE(flags);
2314 CV_Assert( mtype == type0 || (CV_MAT_CN(mtype) == CV_MAT_CN(type0) && ((1 << type0) & fixedDepthMask) != 0) );
2315
2316 int esz = CV_ELEM_SIZE(type0);
2317 CV_Assert(!fixedSize() || len == ((std::vector<uchar>*)v)->size() / esz);
2318 switch( esz )
2319 {
2320 case 1:
2321 ((std::vector<uchar>*)v)->resize(len);
2322 break;
2323 case 2:
2324 ((std::vector<Vec2b>*)v)->resize(len);
2325 break;
2326 case 3:
2327 ((std::vector<Vec3b>*)v)->resize(len);
2328 break;
2329 case 4:
2330 ((std::vector<int>*)v)->resize(len);
2331 break;
2332 case 6:
2333 ((std::vector<Vec3s>*)v)->resize(len);
2334 break;
2335 case 8:
2336 ((std::vector<Vec2i>*)v)->resize(len);
2337 break;
2338 case 12:
2339 ((std::vector<Vec3i>*)v)->resize(len);
2340 break;
2341 case 16:
2342 ((std::vector<Vec4i>*)v)->resize(len);
2343 break;
2344 case 24:
2345 ((std::vector<Vec6i>*)v)->resize(len);
2346 break;
2347 case 32:
2348 ((std::vector<Vec8i>*)v)->resize(len);
2349 break;
2350 case 36:
2351 ((std::vector<Vec<int, 9> >*)v)->resize(len);
2352 break;
2353 case 48:
2354 ((std::vector<Vec<int, 12> >*)v)->resize(len);
2355 break;
2356 case 64:
2357 ((std::vector<Vec<int, 16> >*)v)->resize(len);
2358 break;
2359 case 128:
2360 ((std::vector<Vec<int, 32> >*)v)->resize(len);
2361 break;
2362 case 256:
2363 ((std::vector<Vec<int, 64> >*)v)->resize(len);
2364 break;
2365 case 512:
2366 ((std::vector<Vec<int, 128> >*)v)->resize(len);
2367 break;
2368 default:
2369 CV_Error_(CV_StsBadArg, ("Vectors with element size %d are not supported. Please modify OutputArray::create()\n", esz));
2370 }
2371 return;
2372 }
2373
2374 if( k == NONE )
2375 {
2376 CV_Error(CV_StsNullPtr, "create() called for the missing output array" );
2377 return;
2378 }
2379
2380 if( k == STD_VECTOR_MAT )
2381 {
2382 std::vector<Mat>& v = *(std::vector<Mat>*)obj;
2383
2384 if( i < 0 )
2385 {
2386 CV_Assert( d == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
2387 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();
2388
2389 CV_Assert(!fixedSize() || len == len0);
2390 v.resize(len);
2391 if( fixedType() )
2392 {
2393 int _type = CV_MAT_TYPE(flags);
2394 for( size_t j = len0; j < len; j++ )
2395 {
2396 if( v[j].type() == _type )
2397 continue;
2398 CV_Assert( v[j].empty() );
2399 v[j].flags = (v[j].flags & ~CV_MAT_TYPE_MASK) | _type;
2400 }
2401 }
2402 return;
2403 }
2404
2405 CV_Assert( i < (int)v.size() );
2406 Mat& m = v[i];
2407
2408 if( allowTransposed )
2409 {
2410 if( !m.isContinuous() )
2411 {
2412 CV_Assert(!fixedType() && !fixedSize());
2413 m.release();
2414 }
2415
2416 if( d == 2 && m.dims == 2 && m.data &&
2417 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
2418 return;
2419 }
2420
2421 if(fixedType())
2422 {
2423 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
2424 mtype = m.type();
2425 else
2426 CV_Assert(CV_MAT_TYPE(mtype) == m.type());
2427 }
2428 if(fixedSize())
2429 {
2430 CV_Assert(m.dims == d);
2431 for(int j = 0; j < d; ++j)
2432 CV_Assert(m.size[j] == sizes[j]);
2433 }
2434
2435 m.create(d, sizes, mtype);
2436 return;
2437 }
2438
2439 if( k == STD_VECTOR_UMAT )
2440 {
2441 std::vector<UMat>& v = *(std::vector<UMat>*)obj;
2442
2443 if( i < 0 )
2444 {
2445 CV_Assert( d == 2 && (sizes[0] == 1 || sizes[1] == 1 || sizes[0]*sizes[1] == 0) );
2446 size_t len = sizes[0]*sizes[1] > 0 ? sizes[0] + sizes[1] - 1 : 0, len0 = v.size();
2447
2448 CV_Assert(!fixedSize() || len == len0);
2449 v.resize(len);
2450 if( fixedType() )
2451 {
2452 int _type = CV_MAT_TYPE(flags);
2453 for( size_t j = len0; j < len; j++ )
2454 {
2455 if( v[j].type() == _type )
2456 continue;
2457 CV_Assert( v[j].empty() );
2458 v[j].flags = (v[j].flags & ~CV_MAT_TYPE_MASK) | _type;
2459 }
2460 }
2461 return;
2462 }
2463
2464 CV_Assert( i < (int)v.size() );
2465 UMat& m = v[i];
2466
2467 if( allowTransposed )
2468 {
2469 if( !m.isContinuous() )
2470 {
2471 CV_Assert(!fixedType() && !fixedSize());
2472 m.release();
2473 }
2474
2475 if( d == 2 && m.dims == 2 && m.u &&
2476 m.type() == mtype && m.rows == sizes[1] && m.cols == sizes[0] )
2477 return;
2478 }
2479
2480 if(fixedType())
2481 {
2482 if(CV_MAT_CN(mtype) == m.channels() && ((1 << CV_MAT_TYPE(flags)) & fixedDepthMask) != 0 )
2483 mtype = m.type();
2484 else
2485 CV_Assert(CV_MAT_TYPE(mtype) == m.type());
2486 }
2487 if(fixedSize())
2488 {
2489 CV_Assert(m.dims == d);
2490 for(int j = 0; j < d; ++j)
2491 CV_Assert(m.size[j] == sizes[j]);
2492 }
2493
2494 m.create(d, sizes, mtype);
2495 return;
2496 }
2497
2498 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
2499 }
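/* Illustrative usage sketch (not part of the original implementation). The create()
   overloads above are what allow a function to allocate its output without knowing
   whether the caller passed a Mat, a UMat, a std::vector, etc. A hypothetical function
   written against _OutputArray might look like:

       static void fillOnes(cv::OutputArray dst, cv::Size size)
       {
           dst.create(size, CV_8UC1);                 // dispatches to the wrapped container
           dst.getMat().setTo(cv::Scalar::all(1));
       }

       cv::Mat m;
       fillOnes(m, cv::Size(4, 4));                   // m becomes a 4x4 CV_8UC1 of ones
*/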
2500
2501 void _OutputArray::createSameSize(const _InputArray& arr, int mtype) const
2502 {
2503 int arrsz[CV_MAX_DIM], d = arr.sizend(arrsz);
2504 create(d, arrsz, mtype);
2505 }
2506
2507 void _OutputArray::release() const
2508 {
2509 CV_Assert(!fixedSize());
2510
2511 int k = kind();
2512
2513 if( k == MAT )
2514 {
2515 ((Mat*)obj)->release();
2516 return;
2517 }
2518
2519 if( k == UMAT )
2520 {
2521 ((UMat*)obj)->release();
2522 return;
2523 }
2524
2525 if( k == CUDA_GPU_MAT )
2526 {
2527 ((cuda::GpuMat*)obj)->release();
2528 return;
2529 }
2530
2531 if( k == CUDA_HOST_MEM )
2532 {
2533 ((cuda::HostMem*)obj)->release();
2534 return;
2535 }
2536
2537 if( k == OPENGL_BUFFER )
2538 {
2539 ((ogl::Buffer*)obj)->release();
2540 return;
2541 }
2542
2543 if( k == NONE )
2544 return;
2545
2546 if( k == STD_VECTOR )
2547 {
2548 create(Size(), CV_MAT_TYPE(flags));
2549 return;
2550 }
2551
2552 if( k == STD_VECTOR_VECTOR )
2553 {
2554 ((std::vector<std::vector<uchar> >*)obj)->clear();
2555 return;
2556 }
2557
2558 if( k == STD_VECTOR_MAT )
2559 {
2560 ((std::vector<Mat>*)obj)->clear();
2561 return;
2562 }
2563
2564 if( k == STD_VECTOR_UMAT )
2565 {
2566 ((std::vector<UMat>*)obj)->clear();
2567 return;
2568 }
2569
2570 CV_Error(Error::StsNotImplemented, "Unknown/unsupported array type");
2571 }
2572
2573 void _OutputArray::clear() const
2574 {
2575 int k = kind();
2576
2577 if( k == MAT )
2578 {
2579 CV_Assert(!fixedSize());
2580 ((Mat*)obj)->resize(0);
2581 return;
2582 }
2583
2584 release();
2585 }
2586
2587 bool _OutputArray::needed() const
2588 {
2589 return kind() != NONE;
2590 }
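/* Illustrative note (not part of the original implementation): needed() lets a function
   skip computing an optional output when the caller passed cv::noArray(). The output name
   below is hypothetical:

       if( _jacobian.needed() )
           _jacobian.create(3, 9, CV_64F);   // only allocate/compute when requested
*/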
2591
2592 Mat& _OutputArray::getMatRef(int i) const
2593 {
2594 int k = kind();
2595 if( i < 0 )
2596 {
2597 CV_Assert( k == MAT );
2598 return *(Mat*)obj;
2599 }
2600 else
2601 {
2602 CV_Assert( k == STD_VECTOR_MAT );
2603 std::vector<Mat>& v = *(std::vector<Mat>*)obj;
2604 CV_Assert( i < (int)v.size() );
2605 return v[i];
2606 }
2607 }
2608
2609 UMat& _OutputArray::getUMatRef(int i) const
2610 {
2611 int k = kind();
2612 if( i < 0 )
2613 {
2614 CV_Assert( k == UMAT );
2615 return *(UMat*)obj;
2616 }
2617 else
2618 {
2619 CV_Assert( k == STD_VECTOR_UMAT );
2620 std::vector<UMat>& v = *(std::vector<UMat>*)obj;
2621 CV_Assert( i < (int)v.size() );
2622 return v[i];
2623 }
2624 }
2625
2626 cuda::GpuMat& _OutputArray::getGpuMatRef() const
2627 {
2628 int k = kind();
2629 CV_Assert( k == CUDA_GPU_MAT );
2630 return *(cuda::GpuMat*)obj;
2631 }
2632
2633 ogl::Buffer& _OutputArray::getOGlBufferRef() const
2634 {
2635 int k = kind();
2636 CV_Assert( k == OPENGL_BUFFER );
2637 return *(ogl::Buffer*)obj;
2638 }
2639
2640 cuda::HostMem& _OutputArray::getHostMemRef() const
2641 {
2642 int k = kind();
2643 CV_Assert( k == CUDA_HOST_MEM );
2644 return *(cuda::HostMem*)obj;
2645 }
2646
2647 void _OutputArray::setTo(const _InputArray& arr, const _InputArray & mask) const
2648 {
2649 int k = kind();
2650
2651 if( k == NONE )
2652 ;
2653 else if( k == MAT || k == MATX || k == STD_VECTOR )
2654 {
2655 Mat m = getMat();
2656 m.setTo(arr, mask);
2657 }
2658 else if( k == UMAT )
2659 ((UMat*)obj)->setTo(arr, mask);
2660 else if( k == CUDA_GPU_MAT )
2661 {
2662 Mat value = arr.getMat();
2663 CV_Assert( checkScalar(value, type(), arr.kind(), _InputArray::CUDA_GPU_MAT) );
2664 ((cuda::GpuMat*)obj)->setTo(Scalar(Vec<double, 4>(value.ptr<double>())), mask);
2665 }
2666 else
2667 CV_Error(Error::StsNotImplemented, "");
2668 }
2669
2670
2671 void _OutputArray::assign(const UMat& u) const
2672 {
2673 int k = kind();
2674 if (k == UMAT)
2675 {
2676 *(UMat*)obj = u;
2677 }
2678 else if (k == MAT)
2679 {
2680 u.copyTo(*(Mat*)obj); // TODO check u.getMat()
2681 }
2682 else if (k == MATX)
2683 {
2684 u.copyTo(getMat()); // TODO check u.getMat()
2685 }
2686 else
2687 {
2688 CV_Error(Error::StsNotImplemented, "");
2689 }
2690 }
2691
2692
2693 void _OutputArray::assign(const Mat& m) const
2694 {
2695 int k = kind();
2696 if (k == UMAT)
2697 {
2698 m.copyTo(*(UMat*)obj); // TODO check m.getUMat()
2699 }
2700 else if (k == MAT)
2701 {
2702 *(Mat*)obj = m;
2703 }
2704 else if (k == MATX)
2705 {
2706 m.copyTo(getMat());
2707 }
2708 else
2709 {
2710 CV_Error(Error::StsNotImplemented, "");
2711 }
2712 }
2713
2714
2715 static _InputOutputArray _none;
2716 InputOutputArray noArray() { return _none; }
2717
2718 }
2719
2720 /*************************************************************************************************\
2721 Matrix Operations
2722 \*************************************************************************************************/
2723
2724 void cv::hconcat(const Mat* src, size_t nsrc, OutputArray _dst)
2725 {
2726 if( nsrc == 0 || !src )
2727 {
2728 _dst.release();
2729 return;
2730 }
2731
2732 int totalCols = 0, cols = 0;
2733 size_t i;
2734 for( i = 0; i < nsrc; i++ )
2735 {
2736 CV_Assert( src[i].dims <= 2 &&
2737 src[i].rows == src[0].rows &&
2738 src[i].type() == src[0].type());
2739 totalCols += src[i].cols;
2740 }
2741 _dst.create( src[0].rows, totalCols, src[0].type());
2742 Mat dst = _dst.getMat();
2743 for( i = 0; i < nsrc; i++ )
2744 {
2745 Mat dpart = dst(Rect(cols, 0, src[i].cols, src[i].rows));
2746 src[i].copyTo(dpart);
2747 cols += src[i].cols;
2748 }
2749 }
2750
2751 void cv::hconcat(InputArray src1, InputArray src2, OutputArray dst)
2752 {
2753 Mat src[] = {src1.getMat(), src2.getMat()};
2754 hconcat(src, 2, dst);
2755 }
2756
2757 void cv::hconcat(InputArray _src, OutputArray dst)
2758 {
2759 std::vector<Mat> src;
2760 _src.getMatVector(src);
2761 hconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
2762 }
2763
2764 void cv::vconcat(const Mat* src, size_t nsrc, OutputArray _dst)
2765 {
2766 if( nsrc == 0 || !src )
2767 {
2768 _dst.release();
2769 return;
2770 }
2771
2772 int totalRows = 0, rows = 0;
2773 size_t i;
2774 for( i = 0; i < nsrc; i++ )
2775 {
2776 CV_Assert(src[i].dims <= 2 &&
2777 src[i].cols == src[0].cols &&
2778 src[i].type() == src[0].type());
2779 totalRows += src[i].rows;
2780 }
2781 _dst.create( totalRows, src[0].cols, src[0].type());
2782 Mat dst = _dst.getMat();
2783 for( i = 0; i < nsrc; i++ )
2784 {
2785 Mat dpart(dst, Rect(0, rows, src[i].cols, src[i].rows));
2786 src[i].copyTo(dpart);
2787 rows += src[i].rows;
2788 }
2789 }
2790
2791 void cv::vconcat(InputArray src1, InputArray src2, OutputArray dst)
2792 {
2793 Mat src[] = {src1.getMat(), src2.getMat()};
2794 vconcat(src, 2, dst);
2795 }
2796
2797 void cv::vconcat(InputArray _src, OutputArray dst)
2798 {
2799 std::vector<Mat> src;
2800 _src.getMatVector(src);
2801 vconcat(!src.empty() ? &src[0] : 0, src.size(), dst);
2802 }
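/* Illustrative usage sketch (not part of the original implementation). hconcat() requires
   all inputs to share the row count and type, vconcat() the column count and type:

       cv::Mat a = cv::Mat::ones (2, 3, CV_8UC1);   // 2x3
       cv::Mat b = cv::Mat::zeros(2, 2, CV_8UC1);   // 2x2, same number of rows
       cv::Mat h;
       cv::hconcat(a, b, h);                        // h is 2x5

       cv::Mat c = cv::Mat::zeros(1, 3, CV_8UC1);   // 1x3, same number of columns as a
       cv::Mat v;
       cv::vconcat(a, c, v);                        // v is 3x3
*/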
2803
2804 //////////////////////////////////////// set identity ////////////////////////////////////////////
2805
2806 #ifdef HAVE_OPENCL
2807
2808 namespace cv {
2809
2810 static bool ocl_setIdentity( InputOutputArray _m, const Scalar& s )
2811 {
2812 int type = _m.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), kercn = cn, rowsPerWI = 1;
2813 int sctype = CV_MAKE_TYPE(depth, cn == 3 ? 4 : cn);
2814 if (ocl::Device::getDefault().isIntel())
2815 {
2816 rowsPerWI = 4;
2817 if (cn == 1)
2818 {
2819 kercn = std::min(ocl::predictOptimalVectorWidth(_m), 4);
2820 if (kercn != 4)
2821 kercn = 1;
2822 }
2823 }
2824
2825 ocl::Kernel k("setIdentity", ocl::core::set_identity_oclsrc,
2826 format("-D T=%s -D T1=%s -D cn=%d -D ST=%s -D kercn=%d -D rowsPerWI=%d",
2827 ocl::memopTypeToStr(CV_MAKE_TYPE(depth, kercn)),
2828 ocl::memopTypeToStr(depth), cn,
2829 ocl::memopTypeToStr(sctype),
2830 kercn, rowsPerWI));
2831 if (k.empty())
2832 return false;
2833
2834 UMat m = _m.getUMat();
2835 k.args(ocl::KernelArg::WriteOnly(m, cn, kercn),
2836 ocl::KernelArg::Constant(Mat(1, 1, sctype, s)));
2837
2838 size_t globalsize[2] = { m.cols * cn / kercn, (m.rows + rowsPerWI - 1) / rowsPerWI };
2839 return k.run(2, globalsize, NULL, false);
2840 }
2841
2842 }
2843
2844 #endif
2845
2846 void cv::setIdentity( InputOutputArray _m, const Scalar& s )
2847 {
2848 CV_Assert( _m.dims() <= 2 );
2849
2850 CV_OCL_RUN(_m.isUMat(),
2851 ocl_setIdentity(_m, s))
2852
2853 Mat m = _m.getMat();
2854 int i, j, rows = m.rows, cols = m.cols, type = m.type();
2855
2856 if( type == CV_32FC1 )
2857 {
2858 float* data = m.ptr<float>();
2859 float val = (float)s[0];
2860 size_t step = m.step/sizeof(data[0]);
2861
2862 for( i = 0; i < rows; i++, data += step )
2863 {
2864 for( j = 0; j < cols; j++ )
2865 data[j] = 0;
2866 if( i < cols )
2867 data[i] = val;
2868 }
2869 }
2870 else if( type == CV_64FC1 )
2871 {
2872 double* data = m.ptr<double>();
2873 double val = s[0];
2874 size_t step = m.step/sizeof(data[0]);
2875
2876 for( i = 0; i < rows; i++, data += step )
2877 {
2878 for( j = 0; j < cols; j++ )
2879 data[j] = j == i ? val : 0;
2880 }
2881 }
2882 else
2883 {
2884 m = Scalar(0);
2885 m.diag() = s;
2886 }
2887 }
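/* Illustrative usage sketch (not part of the original implementation). setIdentity()
   writes a scaled identity into an already-allocated (not necessarily square) matrix:

       cv::Mat m(3, 4, CV_32FC1);
       cv::setIdentity(m, cv::Scalar(5));   // m(i,i) == 5, all other elements == 0
*/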
2888
2889 //////////////////////////////////////////// trace ///////////////////////////////////////////
2890
2891 cv::Scalar cv::trace( InputArray _m )
2892 {
2893 Mat m = _m.getMat();
2894 CV_Assert( m.dims <= 2 );
2895 int i, type = m.type();
2896 int nm = std::min(m.rows, m.cols);
2897
2898 if( type == CV_32FC1 )
2899 {
2900 const float* ptr = m.ptr<float>();
2901 size_t step = m.step/sizeof(ptr[0]) + 1;
2902 double _s = 0;
2903 for( i = 0; i < nm; i++ )
2904 _s += ptr[i*step];
2905 return _s;
2906 }
2907
2908 if( type == CV_64FC1 )
2909 {
2910 const double* ptr = m.ptr<double>();
2911 size_t step = m.step/sizeof(ptr[0]) + 1;
2912 double _s = 0;
2913 for( i = 0; i < nm; i++ )
2914 _s += ptr[i*step];
2915 return _s;
2916 }
2917
2918 return cv::sum(m.diag());
2919 }
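/* Illustrative usage sketch (not part of the original implementation). trace() returns the
   sum of the main diagonal as a Scalar:

       cv::Mat m = (cv::Mat_<double>(2, 2) << 1, 2,
                                              3, 4);
       cv::Scalar t = cv::trace(m);         // t[0] == 5.0
*/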
2920
2921 ////////////////////////////////////// transpose /////////////////////////////////////////
2922
2923 namespace cv
2924 {
2925
2926 template<typename T> static void
2927 transpose_( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz )
2928 {
2929 int i=0, j, m = sz.width, n = sz.height;
2930
2931 #if CV_ENABLE_UNROLLED
2932 for(; i <= m - 4; i += 4 )
2933 {
2934 T* d0 = (T*)(dst + dstep*i);
2935 T* d1 = (T*)(dst + dstep*(i+1));
2936 T* d2 = (T*)(dst + dstep*(i+2));
2937 T* d3 = (T*)(dst + dstep*(i+3));
2938
2939 for( j = 0; j <= n - 4; j += 4 )
2940 {
2941 const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
2942 const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
2943 const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
2944 const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
2945
2946 d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
2947 d1[j] = s0[1]; d1[j+1] = s1[1]; d1[j+2] = s2[1]; d1[j+3] = s3[1];
2948 d2[j] = s0[2]; d2[j+1] = s1[2]; d2[j+2] = s2[2]; d2[j+3] = s3[2];
2949 d3[j] = s0[3]; d3[j+1] = s1[3]; d3[j+2] = s2[3]; d3[j+3] = s3[3];
2950 }
2951
2952 for( ; j < n; j++ )
2953 {
2954 const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
2955 d0[j] = s0[0]; d1[j] = s0[1]; d2[j] = s0[2]; d3[j] = s0[3];
2956 }
2957 }
2958 #endif
2959 for( ; i < m; i++ )
2960 {
2961 T* d0 = (T*)(dst + dstep*i);
2962 j = 0;
2963 #if CV_ENABLE_UNROLLED
2964 for(; j <= n - 4; j += 4 )
2965 {
2966 const T* s0 = (const T*)(src + i*sizeof(T) + sstep*j);
2967 const T* s1 = (const T*)(src + i*sizeof(T) + sstep*(j+1));
2968 const T* s2 = (const T*)(src + i*sizeof(T) + sstep*(j+2));
2969 const T* s3 = (const T*)(src + i*sizeof(T) + sstep*(j+3));
2970
2971 d0[j] = s0[0]; d0[j+1] = s1[0]; d0[j+2] = s2[0]; d0[j+3] = s3[0];
2972 }
2973 #endif
2974 for( ; j < n; j++ )
2975 {
2976 const T* s0 = (const T*)(src + i*sizeof(T) + j*sstep);
2977 d0[j] = s0[0];
2978 }
2979 }
2980 }
2981
2982 template<typename T> static void
2983 transposeI_( uchar* data, size_t step, int n )
2984 {
2985 int i, j;
2986 for( i = 0; i < n; i++ )
2987 {
2988 T* row = (T*)(data + step*i);
2989 uchar* data1 = data + i*sizeof(T);
2990 for( j = i+1; j < n; j++ )
2991 std::swap( row[j], *(T*)(data1 + step*j) );
2992 }
2993 }
2994
2995 typedef void (*TransposeFunc)( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz );
2996 typedef void (*TransposeInplaceFunc)( uchar* data, size_t step, int n );
2997
2998 #define DEF_TRANSPOSE_FUNC(suffix, type) \
2999 static void transpose_##suffix( const uchar* src, size_t sstep, uchar* dst, size_t dstep, Size sz ) \
3000 { transpose_<type>(src, sstep, dst, dstep, sz); } \
3001 \
3002 static void transposeI_##suffix( uchar* data, size_t step, int n ) \
3003 { transposeI_<type>(data, step, n); }
3004
3005 DEF_TRANSPOSE_FUNC(8u, uchar)
3006 DEF_TRANSPOSE_FUNC(16u, ushort)
3007 DEF_TRANSPOSE_FUNC(8uC3, Vec3b)
3008 DEF_TRANSPOSE_FUNC(32s, int)
3009 DEF_TRANSPOSE_FUNC(16uC3, Vec3s)
3010 DEF_TRANSPOSE_FUNC(32sC2, Vec2i)
3011 DEF_TRANSPOSE_FUNC(32sC3, Vec3i)
3012 DEF_TRANSPOSE_FUNC(32sC4, Vec4i)
3013 DEF_TRANSPOSE_FUNC(32sC6, Vec6i)
3014 DEF_TRANSPOSE_FUNC(32sC8, Vec8i)
3015
3016 static TransposeFunc transposeTab[] =
3017 {
3018 0, transpose_8u, transpose_16u, transpose_8uC3, transpose_32s, 0, transpose_16uC3, 0,
3019 transpose_32sC2, 0, 0, 0, transpose_32sC3, 0, 0, 0, transpose_32sC4,
3020 0, 0, 0, 0, 0, 0, 0, transpose_32sC6, 0, 0, 0, 0, 0, 0, 0, transpose_32sC8
3021 };
3022
3023 static TransposeInplaceFunc transposeInplaceTab[] =
3024 {
3025 0, transposeI_8u, transposeI_16u, transposeI_8uC3, transposeI_32s, 0, transposeI_16uC3, 0,
3026 transposeI_32sC2, 0, 0, 0, transposeI_32sC3, 0, 0, 0, transposeI_32sC4,
3027 0, 0, 0, 0, 0, 0, 0, transposeI_32sC6, 0, 0, 0, 0, 0, 0, 0, transposeI_32sC8
3028 };
3029
3030 #ifdef HAVE_OPENCL
3031
3032 static inline int divUp(int a, int b)
3033 {
3034 return (a + b - 1) / b;
3035 }
3036
3037 static bool ocl_transpose( InputArray _src, OutputArray _dst )
3038 {
3039 const ocl::Device & dev = ocl::Device::getDefault();
3040 const int TILE_DIM = 32, BLOCK_ROWS = 8;
3041 int type = _src.type(), cn = CV_MAT_CN(type), depth = CV_MAT_DEPTH(type),
3042 rowsPerWI = dev.isIntel() ? 4 : 1;
3043
3044 UMat src = _src.getUMat();
3045 _dst.create(src.cols, src.rows, type);
3046 UMat dst = _dst.getUMat();
3047
3048 String kernelName("transpose");
3049 bool inplace = dst.u == src.u;
3050
3051 if (inplace)
3052 {
3053 CV_Assert(dst.cols == dst.rows);
3054 kernelName += "_inplace";
3055 }
3056 else
3057 {
3058 // check required local memory size
3059 size_t required_local_memory = (size_t) TILE_DIM*(TILE_DIM+1)*CV_ELEM_SIZE(type);
3060 if (required_local_memory > ocl::Device::getDefault().localMemSize())
3061 return false;
3062 }
3063
3064 ocl::Kernel k(kernelName.c_str(), ocl::core::transpose_oclsrc,
3065 format("-D T=%s -D T1=%s -D cn=%d -D TILE_DIM=%d -D BLOCK_ROWS=%d -D rowsPerWI=%d%s",
3066 ocl::memopTypeToStr(type), ocl::memopTypeToStr(depth),
3067 cn, TILE_DIM, BLOCK_ROWS, rowsPerWI, inplace ? " -D INPLACE" : ""));
3068 if (k.empty())
3069 return false;
3070
3071 if (inplace)
3072 k.args(ocl::KernelArg::ReadWriteNoSize(dst), dst.rows);
3073 else
3074 k.args(ocl::KernelArg::ReadOnly(src),
3075 ocl::KernelArg::WriteOnlyNoSize(dst));
3076
3077 size_t localsize[2] = { TILE_DIM, BLOCK_ROWS };
3078 size_t globalsize[2] = { src.cols, inplace ? (src.rows + rowsPerWI - 1) / rowsPerWI : (divUp(src.rows, TILE_DIM) * BLOCK_ROWS) };
3079
3080 if (inplace && dev.isIntel())
3081 {
3082 localsize[0] = 16;
3083 localsize[1] = dev.maxWorkGroupSize() / localsize[0];
3084 }
3085
3086 return k.run(2, globalsize, localsize, false);
3087 }
3088
3089 #endif
3090
3091 }
3092
3093 void cv::transpose( InputArray _src, OutputArray _dst )
3094 {
3095 int type = _src.type(), esz = CV_ELEM_SIZE(type);
3096 CV_Assert( _src.dims() <= 2 && esz <= 32 );
3097
3098 CV_OCL_RUN(_dst.isUMat(),
3099 ocl_transpose(_src, _dst))
3100
3101 Mat src = _src.getMat();
3102 if( src.empty() )
3103 {
3104 _dst.release();
3105 return;
3106 }
3107
3108 _dst.create(src.cols, src.rows, src.type());
3109 Mat dst = _dst.getMat();
3110
3111 // handle the case of single-column/single-row matrices, stored in STL vectors.
3112 if( src.rows != dst.cols || src.cols != dst.rows )
3113 {
3114 CV_Assert( src.size() == dst.size() && (src.cols == 1 || src.rows == 1) );
3115 src.copyTo(dst);
3116 return;
3117 }
3118
3119 #if defined HAVE_IPP
3120 CV_IPP_CHECK()
3121 {
3122 typedef IppStatus (CV_STDCALL * ippiTranspose)(const void * pSrc, int srcStep, void * pDst, int dstStep, IppiSize roiSize);
3123 typedef IppStatus (CV_STDCALL * ippiTransposeI)(const void * pSrcDst, int srcDstStep, IppiSize roiSize);
3124 ippiTranspose ippFunc = 0;
3125 ippiTransposeI ippFuncI = 0;
3126
3127 if (dst.data == src.data && dst.cols == dst.rows)
3128 {
3129 CV_SUPPRESS_DEPRECATED_START
3130 ippFuncI =
3131 type == CV_8UC1 ? (ippiTransposeI)ippiTranspose_8u_C1IR :
3132 type == CV_8UC3 ? (ippiTransposeI)ippiTranspose_8u_C3IR :
3133 type == CV_8UC4 ? (ippiTransposeI)ippiTranspose_8u_C4IR :
3134 type == CV_16UC1 ? (ippiTransposeI)ippiTranspose_16u_C1IR :
3135 type == CV_16UC3 ? (ippiTransposeI)ippiTranspose_16u_C3IR :
3136 type == CV_16UC4 ? (ippiTransposeI)ippiTranspose_16u_C4IR :
3137 type == CV_16SC1 ? (ippiTransposeI)ippiTranspose_16s_C1IR :
3138 type == CV_16SC3 ? (ippiTransposeI)ippiTranspose_16s_C3IR :
3139 type == CV_16SC4 ? (ippiTransposeI)ippiTranspose_16s_C4IR :
3140 type == CV_32SC1 ? (ippiTransposeI)ippiTranspose_32s_C1IR :
3141 type == CV_32SC3 ? (ippiTransposeI)ippiTranspose_32s_C3IR :
3142 type == CV_32SC4 ? (ippiTransposeI)ippiTranspose_32s_C4IR :
3143 type == CV_32FC1 ? (ippiTransposeI)ippiTranspose_32f_C1IR :
3144 type == CV_32FC3 ? (ippiTransposeI)ippiTranspose_32f_C3IR :
3145 type == CV_32FC4 ? (ippiTransposeI)ippiTranspose_32f_C4IR : 0;
3146 CV_SUPPRESS_DEPRECATED_END
3147 }
3148 else
3149 {
3150 ippFunc =
3151 type == CV_8UC1 ? (ippiTranspose)ippiTranspose_8u_C1R :
3152 type == CV_8UC3 ? (ippiTranspose)ippiTranspose_8u_C3R :
3153 type == CV_8UC4 ? (ippiTranspose)ippiTranspose_8u_C4R :
3154 type == CV_16UC1 ? (ippiTranspose)ippiTranspose_16u_C1R :
3155 type == CV_16UC3 ? (ippiTranspose)ippiTranspose_16u_C3R :
3156 type == CV_16UC4 ? (ippiTranspose)ippiTranspose_16u_C4R :
3157 type == CV_16SC1 ? (ippiTranspose)ippiTranspose_16s_C1R :
3158 type == CV_16SC3 ? (ippiTranspose)ippiTranspose_16s_C3R :
3159 type == CV_16SC4 ? (ippiTranspose)ippiTranspose_16s_C4R :
3160 type == CV_32SC1 ? (ippiTranspose)ippiTranspose_32s_C1R :
3161 type == CV_32SC3 ? (ippiTranspose)ippiTranspose_32s_C3R :
3162 type == CV_32SC4 ? (ippiTranspose)ippiTranspose_32s_C4R :
3163 type == CV_32FC1 ? (ippiTranspose)ippiTranspose_32f_C1R :
3164 type == CV_32FC3 ? (ippiTranspose)ippiTranspose_32f_C3R :
3165 type == CV_32FC4 ? (ippiTranspose)ippiTranspose_32f_C4R : 0;
3166 }
3167
3168 IppiSize roiSize = { src.cols, src.rows };
3169 if (ippFunc != 0)
3170 {
3171 if (ippFunc(src.ptr(), (int)src.step, dst.ptr(), (int)dst.step, roiSize) >= 0)
3172 {
3173 CV_IMPL_ADD(CV_IMPL_IPP);
3174 return;
3175 }
3176 setIppErrorStatus();
3177 }
3178 else if (ippFuncI != 0)
3179 {
3180 if (ippFuncI(dst.ptr(), (int)dst.step, roiSize) >= 0)
3181 {
3182 CV_IMPL_ADD(CV_IMPL_IPP);
3183 return;
3184 }
3185 setIppErrorStatus();
3186 }
3187 }
3188 #endif
3189
3190 if( dst.data == src.data )
3191 {
3192 TransposeInplaceFunc func = transposeInplaceTab[esz];
3193 CV_Assert( func != 0 );
3194 CV_Assert( dst.cols == dst.rows );
3195 func( dst.ptr(), dst.step, dst.rows );
3196 }
3197 else
3198 {
3199 TransposeFunc func = transposeTab[esz];
3200 CV_Assert( func != 0 );
3201 func( src.ptr(), src.step, dst.ptr(), dst.step, src.size() );
3202 }
3203 }
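/* Illustrative usage sketch (not part of the original implementation). transpose()
   allocates the destination itself; passing the same matrix as source and destination
   takes the in-place branch above and requires a square matrix:

       cv::Mat a = (cv::Mat_<int>(2, 3) << 1, 2, 3,
                                           4, 5, 6);
       cv::Mat at;
       cv::transpose(a, at);                // at is 3x2: [1 4; 2 5; 3 6]
*/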
3204
3205
3206 ////////////////////////////////////// completeSymm /////////////////////////////////////////
3207
3208 void cv::completeSymm( InputOutputArray _m, bool LtoR )
3209 {
3210 Mat m = _m.getMat();
3211 size_t step = m.step, esz = m.elemSize();
3212 CV_Assert( m.dims <= 2 && m.rows == m.cols );
3213
3214 int rows = m.rows;
3215 int j0 = 0, j1 = rows;
3216
3217 uchar* data = m.ptr();
3218 for( int i = 0; i < rows; i++ )
3219 {
3220 if( !LtoR ) j1 = i; else j0 = i+1;
3221 for( int j = j0; j < j1; j++ )
3222 memcpy(data + (i*step + j*esz), data + (j*step + i*esz), esz);
3223 }
3224 }
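/* Illustrative usage sketch (not part of the original implementation). completeSymm()
   mirrors one triangle of a square matrix onto the other, in place:

       cv::Mat m = (cv::Mat_<float>(3, 3) << 1, 2, 3,
                                             0, 4, 5,
                                             0, 0, 6);
       cv::completeSymm(m, false);          // false: copy the upper triangle to the lower
*/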
3225
3226
3227 cv::Mat cv::Mat::cross(InputArray _m) const
3228 {
3229 Mat m = _m.getMat();
3230 int tp = type(), d = CV_MAT_DEPTH(tp);
3231 CV_Assert( dims <= 2 && m.dims <= 2 && size() == m.size() && tp == m.type() &&
3232 ((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1)));
3233 Mat result(rows, cols, tp);
3234
3235 if( d == CV_32F )
3236 {
3237 const float *a = (const float*)data, *b = (const float*)m.data;
3238 float* c = (float*)result.data;
3239 size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
3240 size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
3241
3242 c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
3243 c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
3244 c[2] = a[0] * b[ldb] - a[lda] * b[0];
3245 }
3246 else if( d == CV_64F )
3247 {
3248 const double *a = (const double*)data, *b = (const double*)m.data;
3249 double* c = (double*)result.data;
3250 size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
3251 size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;
3252
3253 c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
3254 c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
3255 c[2] = a[0] * b[ldb] - a[lda] * b[0];
3256 }
3257
3258 return result;
3259 }
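/* Illustrative usage sketch (not part of the original implementation). cross() is defined
   only for 3-element floating-point vectors:

       cv::Mat x = (cv::Mat_<float>(3, 1) << 1, 0, 0);
       cv::Mat y = (cv::Mat_<float>(3, 1) << 0, 1, 0);
       cv::Mat z = x.cross(y);              // (0, 0, 1)
*/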
3260
3261
3262 ////////////////////////////////////////// reduce ////////////////////////////////////////////
3263
3264 namespace cv
3265 {
3266
3267 template<typename T, typename ST, class Op> static void
3268 reduceR_( const Mat& srcmat, Mat& dstmat )
3269 {
3270 typedef typename Op::rtype WT;
3271 Size size = srcmat.size();
3272 size.width *= srcmat.channels();
3273 AutoBuffer<WT> buffer(size.width);
3274 WT* buf = buffer;
3275 ST* dst = dstmat.ptr<ST>();
3276 const T* src = srcmat.ptr<T>();
3277 size_t srcstep = srcmat.step/sizeof(src[0]);
3278 int i;
3279 Op op;
3280
3281 for( i = 0; i < size.width; i++ )
3282 buf[i] = src[i];
3283
3284 for( ; --size.height; )
3285 {
3286 src += srcstep;
3287 i = 0;
3288 #if CV_ENABLE_UNROLLED
3289 for(; i <= size.width - 4; i += 4 )
3290 {
3291 WT s0, s1;
3292 s0 = op(buf[i], (WT)src[i]);
3293 s1 = op(buf[i+1], (WT)src[i+1]);
3294 buf[i] = s0; buf[i+1] = s1;
3295
3296 s0 = op(buf[i+2], (WT)src[i+2]);
3297 s1 = op(buf[i+3], (WT)src[i+3]);
3298 buf[i+2] = s0; buf[i+3] = s1;
3299 }
3300 #endif
3301 for( ; i < size.width; i++ )
3302 buf[i] = op(buf[i], (WT)src[i]);
3303 }
3304
3305 for( i = 0; i < size.width; i++ )
3306 dst[i] = (ST)buf[i];
3307 }
3308
3309
3310 template<typename T, typename ST, class Op> static void
3311 reduceC_( const Mat& srcmat, Mat& dstmat )
3312 {
3313 typedef typename Op::rtype WT;
3314 Size size = srcmat.size();
3315 int i, k, cn = srcmat.channels();
3316 size.width *= cn;
3317 Op op;
3318
3319 for( int y = 0; y < size.height; y++ )
3320 {
3321 const T* src = srcmat.ptr<T>(y);
3322 ST* dst = dstmat.ptr<ST>(y);
3323 if( size.width == cn )
3324 for( k = 0; k < cn; k++ )
3325 dst[k] = src[k];
3326 else
3327 {
3328 for( k = 0; k < cn; k++ )
3329 {
3330 WT a0 = src[k], a1 = src[k+cn];
3331 for( i = 2*cn; i <= size.width - 4*cn; i += 4*cn )
3332 {
3333 a0 = op(a0, (WT)src[i+k]);
3334 a1 = op(a1, (WT)src[i+k+cn]);
3335 a0 = op(a0, (WT)src[i+k+cn*2]);
3336 a1 = op(a1, (WT)src[i+k+cn*3]);
3337 }
3338
3339 for( ; i < size.width; i += cn )
3340 {
3341 a0 = op(a0, (WT)src[i+k]);
3342 }
3343 a0 = op(a0, a1);
3344 dst[k] = (ST)a0;
3345 }
3346 }
3347 }
3348 }
3349
3350 typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
3351
3352 }
3353
3354 #define reduceSumR8u32s reduceR_<uchar, int, OpAdd<int> >
3355 #define reduceSumR8u32f reduceR_<uchar, float, OpAdd<int> >
3356 #define reduceSumR8u64f reduceR_<uchar, double,OpAdd<int> >
3357 #define reduceSumR16u32f reduceR_<ushort,float, OpAdd<float> >
3358 #define reduceSumR16u64f reduceR_<ushort,double,OpAdd<double> >
3359 #define reduceSumR16s32f reduceR_<short, float, OpAdd<float> >
3360 #define reduceSumR16s64f reduceR_<short, double,OpAdd<double> >
3361 #define reduceSumR32f32f reduceR_<float, float, OpAdd<float> >
3362 #define reduceSumR32f64f reduceR_<float, double,OpAdd<double> >
3363 #define reduceSumR64f64f reduceR_<double,double,OpAdd<double> >
3364
3365 #define reduceMaxR8u reduceR_<uchar, uchar, OpMax<uchar> >
3366 #define reduceMaxR16u reduceR_<ushort,ushort,OpMax<ushort> >
3367 #define reduceMaxR16s reduceR_<short, short, OpMax<short> >
3368 #define reduceMaxR32f reduceR_<float, float, OpMax<float> >
3369 #define reduceMaxR64f reduceR_<double,double,OpMax<double> >
3370
3371 #define reduceMinR8u reduceR_<uchar, uchar, OpMin<uchar> >
3372 #define reduceMinR16u reduceR_<ushort,ushort,OpMin<ushort> >
3373 #define reduceMinR16s reduceR_<short, short, OpMin<short> >
3374 #define reduceMinR32f reduceR_<float, float, OpMin<float> >
3375 #define reduceMinR64f reduceR_<double,double,OpMin<double> >
3376
3377 #if IPP_VERSION_X100 > 0
3378
3379 static inline void reduceSumC_8u16u16s32f_64f(const cv::Mat& srcmat, cv::Mat& dstmat)
3380 {
3381 cv::Size size = srcmat.size();
3382 IppiSize roisize = { size.width, 1 };
3383 int sstep = (int)srcmat.step, stype = srcmat.type(),
3384 sdepth = CV_MAT_DEPTH(stype), ddepth = dstmat.depth();
3385
3386 typedef IppStatus (CV_STDCALL * ippiSum)(const void * pSrc, int srcStep, IppiSize roiSize, Ipp64f* pSum);
3387 typedef IppStatus (CV_STDCALL * ippiSumHint)(const void * pSrc, int srcStep, IppiSize roiSize, Ipp64f* pSum, IppHintAlgorithm hint);
3388 ippiSum ippFunc = 0;
3389 ippiSumHint ippFuncHint = 0;
3390 cv::ReduceFunc func = 0;
3391
3392 if (ddepth == CV_64F)
3393 {
3394 ippFunc =
3395 stype == CV_8UC1 ? (ippiSum)ippiSum_8u_C1R :
3396 stype == CV_8UC3 ? (ippiSum)ippiSum_8u_C3R :
3397 stype == CV_8UC4 ? (ippiSum)ippiSum_8u_C4R :
3398 stype == CV_16UC1 ? (ippiSum)ippiSum_16u_C1R :
3399 stype == CV_16UC3 ? (ippiSum)ippiSum_16u_C3R :
3400 stype == CV_16UC4 ? (ippiSum)ippiSum_16u_C4R :
3401 stype == CV_16SC1 ? (ippiSum)ippiSum_16s_C1R :
3402 stype == CV_16SC3 ? (ippiSum)ippiSum_16s_C3R :
3403 stype == CV_16SC4 ? (ippiSum)ippiSum_16s_C4R : 0;
3404 ippFuncHint =
3405 stype == CV_32FC1 ? (ippiSumHint)ippiSum_32f_C1R :
3406 stype == CV_32FC3 ? (ippiSumHint)ippiSum_32f_C3R :
3407 stype == CV_32FC4 ? (ippiSumHint)ippiSum_32f_C4R : 0;
3408 func =
3409 sdepth == CV_8U ? (cv::ReduceFunc)cv::reduceC_<uchar, double, cv::OpAdd<double> > :
3410 sdepth == CV_16U ? (cv::ReduceFunc)cv::reduceC_<ushort, double, cv::OpAdd<double> > :
3411 sdepth == CV_16S ? (cv::ReduceFunc)cv::reduceC_<short, double, cv::OpAdd<double> > :
3412 sdepth == CV_32F ? (cv::ReduceFunc)cv::reduceC_<float, double, cv::OpAdd<double> > : 0;
3413 }
3414 CV_Assert(!(ippFunc && ippFuncHint) && func);
3415
3416 CV_IPP_CHECK()
3417 {
3418 if (ippFunc)
3419 {
3420 for (int y = 0; y < size.height; ++y)
3421 if (ippFunc(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y)) < 0)
3422 {
3423 setIppErrorStatus();
3424 cv::Mat dstroi = dstmat.rowRange(y, y + 1);
3425 func(srcmat.rowRange(y, y + 1), dstroi);
3426 }
3427 CV_IMPL_ADD(CV_IMPL_IPP);
3428 return;
3429 }
3430 else if (ippFuncHint)
3431 {
3432 for (int y = 0; y < size.height; ++y)
3433 if (ippFuncHint(srcmat.ptr(y), sstep, roisize, dstmat.ptr<Ipp64f>(y), ippAlgHintAccurate) < 0)
3434 {
3435 setIppErrorStatus();
3436 cv::Mat dstroi = dstmat.rowRange(y, y + 1);
3437 func(srcmat.rowRange(y, y + 1), dstroi);
3438 }
3439 CV_IMPL_ADD(CV_IMPL_IPP);
3440 return;
3441 }
3442 }
3443
3444 func(srcmat, dstmat);
3445 }
3446
3447 #endif
3448
3449 #define reduceSumC8u32s reduceC_<uchar, int, OpAdd<int> >
3450 #define reduceSumC8u32f reduceC_<uchar, float, OpAdd<int> >
3451 #define reduceSumC16u32f reduceC_<ushort,float, OpAdd<float> >
3452 #define reduceSumC16s32f reduceC_<short, float, OpAdd<float> >
3453 #define reduceSumC32f32f reduceC_<float, float, OpAdd<float> >
3454 #define reduceSumC64f64f reduceC_<double,double,OpAdd<double> >
3455
3456 #if IPP_VERSION_X100 > 0
3457 #define reduceSumC8u64f reduceSumC_8u16u16s32f_64f
3458 #define reduceSumC16u64f reduceSumC_8u16u16s32f_64f
3459 #define reduceSumC16s64f reduceSumC_8u16u16s32f_64f
3460 #define reduceSumC32f64f reduceSumC_8u16u16s32f_64f
3461 #else
3462 #define reduceSumC8u64f reduceC_<uchar, double,OpAdd<int> >
3463 #define reduceSumC16u64f reduceC_<ushort,double,OpAdd<double> >
3464 #define reduceSumC16s64f reduceC_<short, double,OpAdd<double> >
3465 #define reduceSumC32f64f reduceC_<float, double,OpAdd<double> >
3466 #endif
3467
3468 #if IPP_VERSION_X100 > 0
3469 #define REDUCE_OP(favor, optype, type1, type2) \
3470 static inline void reduce##optype##C##favor(const cv::Mat& srcmat, cv::Mat& dstmat) \
3471 { \
3472 typedef Ipp##favor IppType; \
3473 cv::Size size = srcmat.size(); \
3474 IppiSize roisize = ippiSize(size.width, 1);\
3475 int sstep = (int)srcmat.step; \
3476 \
3477 if (CV_IPP_CHECK_COND && (srcmat.channels() == 1)) \
3478 { \
3479 for (int y = 0; y < size.height; ++y) \
3480 if (ippi##optype##_##favor##_C1R(srcmat.ptr<IppType>(y), sstep, roisize, dstmat.ptr<IppType>(y)) < 0) \
3481 { \
3482 setIppErrorStatus(); \
3483 cv::Mat dstroi = dstmat.rowRange(y, y + 1); \
3484 cv::reduceC_ < type1, type2, cv::Op##optype < type2 > >(srcmat.rowRange(y, y + 1), dstroi); \
3485 } \
3486 else \
3487 { \
3488 CV_IMPL_ADD(CV_IMPL_IPP);\
3489 } \
3490 return; \
3491 } \
3492 cv::reduceC_ < type1, type2, cv::Op##optype < type2 > >(srcmat, dstmat); \
3493 }
3494 #endif
3495
3496 #if IPP_VERSION_X100 > 0
3497 REDUCE_OP(8u, Max, uchar, uchar)
3498 REDUCE_OP(16u, Max, ushort, ushort)
3499 REDUCE_OP(16s, Max, short, short)
3500 REDUCE_OP(32f, Max, float, float)
3501 #else
3502 #define reduceMaxC8u reduceC_<uchar, uchar, OpMax<uchar> >
3503 #define reduceMaxC16u reduceC_<ushort,ushort,OpMax<ushort> >
3504 #define reduceMaxC16s reduceC_<short, short, OpMax<short> >
3505 #define reduceMaxC32f reduceC_<float, float, OpMax<float> >
3506 #endif
3507 #define reduceMaxC64f reduceC_<double,double,OpMax<double> >
3508
3509 #if IPP_VERSION_X100 > 0
3510 REDUCE_OP(8u, Min, uchar, uchar)
3511 REDUCE_OP(16u, Min, ushort, ushort)
3512 REDUCE_OP(16s, Min, short, short)
3513 REDUCE_OP(32f, Min, float, float)
3514 #else
3515 #define reduceMinC8u reduceC_<uchar, uchar, OpMin<uchar> >
3516 #define reduceMinC16u reduceC_<ushort,ushort,OpMin<ushort> >
3517 #define reduceMinC16s reduceC_<short, short, OpMin<short> >
3518 #define reduceMinC32f reduceC_<float, float, OpMin<float> >
3519 #endif
3520 #define reduceMinC64f reduceC_<double,double,OpMin<double> >
3521
3522 #ifdef HAVE_OPENCL
3523
3524 namespace cv {
3525
3526 static bool ocl_reduce(InputArray _src, OutputArray _dst,
3527 int dim, int op, int op0, int stype, int dtype)
3528 {
3529 const int min_opt_cols = 128, buf_cols = 32;
3530 int sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype),
3531 ddepth = CV_MAT_DEPTH(dtype), ddepth0 = ddepth;
3532 const ocl::Device &defDev = ocl::Device::getDefault();
3533 bool doubleSupport = defDev.doubleFPConfig() > 0;
3534
3535 size_t wgs = defDev.maxWorkGroupSize();
3536 bool useOptimized = 1 == dim && _src.cols() > min_opt_cols && (wgs >= buf_cols);
3537
3538 if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
3539 return false;
3540
3541 if (op == CV_REDUCE_AVG)
3542 {
3543 if (sdepth < CV_32S && ddepth < CV_32S)
3544 ddepth = CV_32S;
3545 }
3546
3547 const char * const ops[4] = { "OCL_CV_REDUCE_SUM", "OCL_CV_REDUCE_AVG",
3548 "OCL_CV_REDUCE_MAX", "OCL_CV_REDUCE_MIN" };
3549 int wdepth = std::max(ddepth, CV_32F);
3550 if (useOptimized)
3551 {
3552 size_t tileHeight = (size_t)(wgs / buf_cols);
3553 if (defDev.isIntel())
3554 {
3555 static const size_t maxItemInGroupCount = 16;
3556 tileHeight = min(tileHeight, defDev.localMemSize() / buf_cols / CV_ELEM_SIZE(CV_MAKETYPE(wdepth, cn)) / maxItemInGroupCount);
3557 }
3558 char cvt[3][40];
3559 cv::String build_opt = format("-D OP_REDUCE_PRE -D BUF_COLS=%d -D TILE_HEIGHT=%d -D %s -D dim=1"
3560 " -D cn=%d -D ddepth=%d"
3561 " -D srcT=%s -D bufT=%s -D dstT=%s"
3562 " -D convertToWT=%s -D convertToBufT=%s -D convertToDT=%s%s",
3563 buf_cols, tileHeight, ops[op], cn, ddepth,
3564 ocl::typeToStr(sdepth),
3565 ocl::typeToStr(ddepth),
3566 ocl::typeToStr(ddepth0),
3567 ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]),
3568 ocl::convertTypeStr(sdepth, ddepth, 1, cvt[1]),
3569 ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[2]),
3570 doubleSupport ? " -D DOUBLE_SUPPORT" : "");
3571 ocl::Kernel k("reduce_horz_opt", ocl::core::reduce2_oclsrc, build_opt);
3572 if (k.empty())
3573 return false;
3574 UMat src = _src.getUMat();
3575 Size dsize(1, src.rows);
3576 _dst.create(dsize, dtype);
3577 UMat dst = _dst.getUMat();
3578
3579 if (op0 == CV_REDUCE_AVG)
3580 k.args(ocl::KernelArg::ReadOnly(src),
3581 ocl::KernelArg::WriteOnlyNoSize(dst), 1.0f / src.cols);
3582 else
3583 k.args(ocl::KernelArg::ReadOnly(src),
3584 ocl::KernelArg::WriteOnlyNoSize(dst));
3585
3586 size_t localSize[2] = { buf_cols, tileHeight};
3587 size_t globalSize[2] = { buf_cols, src.rows };
3588 return k.run(2, globalSize, localSize, false);
3589 }
3590 else
3591 {
3592 char cvt[2][40];
3593 cv::String build_opt = format("-D %s -D dim=%d -D cn=%d -D ddepth=%d"
3594 " -D srcT=%s -D dstT=%s -D dstT0=%s -D convertToWT=%s"
3595 " -D convertToDT=%s -D convertToDT0=%s%s",
3596 ops[op], dim, cn, ddepth, ocl::typeToStr(useOptimized ? ddepth : sdepth),
3597 ocl::typeToStr(ddepth), ocl::typeToStr(ddepth0),
3598 ocl::convertTypeStr(ddepth, wdepth, 1, cvt[0]),
3599 ocl::convertTypeStr(sdepth, ddepth, 1, cvt[0]),
3600 ocl::convertTypeStr(wdepth, ddepth0, 1, cvt[1]),
3601 doubleSupport ? " -D DOUBLE_SUPPORT" : "");
3602
3603 ocl::Kernel k("reduce", ocl::core::reduce2_oclsrc, build_opt);
3604 if (k.empty())
3605 return false;
3606
3607 UMat src = _src.getUMat();
3608 Size dsize(dim == 0 ? src.cols : 1, dim == 0 ? 1 : src.rows);
3609 _dst.create(dsize, dtype);
3610 UMat dst = _dst.getUMat();
3611
3612 ocl::KernelArg srcarg = ocl::KernelArg::ReadOnly(src),
3613 temparg = ocl::KernelArg::WriteOnlyNoSize(dst);
3614
3615 if (op0 == CV_REDUCE_AVG)
3616 k.args(srcarg, temparg, 1.0f / (dim == 0 ? src.rows : src.cols));
3617 else
3618 k.args(srcarg, temparg);
3619
3620 size_t globalsize = std::max(dsize.width, dsize.height);
3621 return k.run(1, &globalsize, NULL, false);
3622 }
3623 }
3624
3625 }
3626
3627 #endif
3628
3629 void cv::reduce(InputArray _src, OutputArray _dst, int dim, int op, int dtype)
3630 {
3631 CV_Assert( _src.dims() <= 2 );
3632 int op0 = op;
3633 int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
3634 if( dtype < 0 )
3635 dtype = _dst.fixedType() ? _dst.type() : stype;
3636 dtype = CV_MAKETYPE(dtype >= 0 ? dtype : stype, cn);
3637 int ddepth = CV_MAT_DEPTH(dtype);
3638
3639 CV_Assert( cn == CV_MAT_CN(dtype) );
3640 CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
3641 op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
3642
3643 CV_OCL_RUN(_dst.isUMat(),
3644 ocl_reduce(_src, _dst, dim, op, op0, stype, dtype))
3645
3646 Mat src = _src.getMat();
3647 _dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1, dtype);
3648 Mat dst = _dst.getMat(), temp = dst;
3649
3650 if( op == CV_REDUCE_AVG )
3651 {
3652 op = CV_REDUCE_SUM;
3653 if( sdepth < CV_32S && ddepth < CV_32S )
3654 {
3655 temp.create(dst.rows, dst.cols, CV_32SC(cn));
3656 ddepth = CV_32S;
3657 }
3658 }
3659
3660 ReduceFunc func = 0;
3661 if( dim == 0 )
3662 {
3663 if( op == CV_REDUCE_SUM )
3664 {
3665 if(sdepth == CV_8U && ddepth == CV_32S)
3666 func = GET_OPTIMIZED(reduceSumR8u32s);
3667 else if(sdepth == CV_8U && ddepth == CV_32F)
3668 func = reduceSumR8u32f;
3669 else if(sdepth == CV_8U && ddepth == CV_64F)
3670 func = reduceSumR8u64f;
3671 else if(sdepth == CV_16U && ddepth == CV_32F)
3672 func = reduceSumR16u32f;
3673 else if(sdepth == CV_16U && ddepth == CV_64F)
3674 func = reduceSumR16u64f;
3675 else if(sdepth == CV_16S && ddepth == CV_32F)
3676 func = reduceSumR16s32f;
3677 else if(sdepth == CV_16S && ddepth == CV_64F)
3678 func = reduceSumR16s64f;
3679 else if(sdepth == CV_32F && ddepth == CV_32F)
3680 func = GET_OPTIMIZED(reduceSumR32f32f);
3681 else if(sdepth == CV_32F && ddepth == CV_64F)
3682 func = reduceSumR32f64f;
3683 else if(sdepth == CV_64F && ddepth == CV_64F)
3684 func = reduceSumR64f64f;
3685 }
3686 else if(op == CV_REDUCE_MAX)
3687 {
3688 if(sdepth == CV_8U && ddepth == CV_8U)
3689 func = GET_OPTIMIZED(reduceMaxR8u);
3690 else if(sdepth == CV_16U && ddepth == CV_16U)
3691 func = reduceMaxR16u;
3692 else if(sdepth == CV_16S && ddepth == CV_16S)
3693 func = reduceMaxR16s;
3694 else if(sdepth == CV_32F && ddepth == CV_32F)
3695 func = GET_OPTIMIZED(reduceMaxR32f);
3696 else if(sdepth == CV_64F && ddepth == CV_64F)
3697 func = reduceMaxR64f;
3698 }
3699 else if(op == CV_REDUCE_MIN)
3700 {
3701 if(sdepth == CV_8U && ddepth == CV_8U)
3702 func = GET_OPTIMIZED(reduceMinR8u);
3703 else if(sdepth == CV_16U && ddepth == CV_16U)
3704 func = reduceMinR16u;
3705 else if(sdepth == CV_16S && ddepth == CV_16S)
3706 func = reduceMinR16s;
3707 else if(sdepth == CV_32F && ddepth == CV_32F)
3708 func = GET_OPTIMIZED(reduceMinR32f);
3709 else if(sdepth == CV_64F && ddepth == CV_64F)
3710 func = reduceMinR64f;
3711 }
3712 }
3713 else
3714 {
3715 if(op == CV_REDUCE_SUM)
3716 {
3717 if(sdepth == CV_8U && ddepth == CV_32S)
3718 func = GET_OPTIMIZED(reduceSumC8u32s);
3719 else if(sdepth == CV_8U && ddepth == CV_32F)
3720 func = reduceSumC8u32f;
3721 else if(sdepth == CV_8U && ddepth == CV_64F)
3722 func = reduceSumC8u64f;
3723 else if(sdepth == CV_16U && ddepth == CV_32F)
3724 func = reduceSumC16u32f;
3725 else if(sdepth == CV_16U && ddepth == CV_64F)
3726 func = reduceSumC16u64f;
3727 else if(sdepth == CV_16S && ddepth == CV_32F)
3728 func = reduceSumC16s32f;
3729 else if(sdepth == CV_16S && ddepth == CV_64F)
3730 func = reduceSumC16s64f;
3731 else if(sdepth == CV_32F && ddepth == CV_32F)
3732 func = GET_OPTIMIZED(reduceSumC32f32f);
3733 else if(sdepth == CV_32F && ddepth == CV_64F)
3734 func = reduceSumC32f64f;
3735 else if(sdepth == CV_64F && ddepth == CV_64F)
3736 func = reduceSumC64f64f;
3737 }
3738 else if(op == CV_REDUCE_MAX)
3739 {
3740 if(sdepth == CV_8U && ddepth == CV_8U)
3741 func = GET_OPTIMIZED(reduceMaxC8u);
3742 else if(sdepth == CV_16U && ddepth == CV_16U)
3743 func = reduceMaxC16u;
3744 else if(sdepth == CV_16S && ddepth == CV_16S)
3745 func = reduceMaxC16s;
3746 else if(sdepth == CV_32F && ddepth == CV_32F)
3747 func = GET_OPTIMIZED(reduceMaxC32f);
3748 else if(sdepth == CV_64F && ddepth == CV_64F)
3749 func = reduceMaxC64f;
3750 }
3751 else if(op == CV_REDUCE_MIN)
3752 {
3753 if(sdepth == CV_8U && ddepth == CV_8U)
3754 func = GET_OPTIMIZED(reduceMinC8u);
3755 else if(sdepth == CV_16U && ddepth == CV_16U)
3756 func = reduceMinC16u;
3757 else if(sdepth == CV_16S && ddepth == CV_16S)
3758 func = reduceMinC16s;
3759 else if(sdepth == CV_32F && ddepth == CV_32F)
3760 func = GET_OPTIMIZED(reduceMinC32f);
3761 else if(sdepth == CV_64F && ddepth == CV_64F)
3762 func = reduceMinC64f;
3763 }
3764 }
3765
3766 if( !func )
3767 CV_Error( CV_StsUnsupportedFormat,
3768 "Unsupported combination of input and output array formats" );
3769
3770 func( src, temp );
3771
3772 if( op0 == CV_REDUCE_AVG )
3773 temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
3774 }
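/* Illustrative usage sketch (not part of the original implementation). reduce() collapses
   a matrix along one dimension: dim=0 produces a single row, dim=1 a single column:

       cv::Mat m = (cv::Mat_<uchar>(2, 3) << 1, 2, 3,
                                             4, 5, 6);
       cv::Mat colSums, rowMeans;
       cv::reduce(m, colSums,  0, CV_REDUCE_SUM, CV_32S);   // 1x3: [5, 7, 9]
       cv::reduce(m, rowMeans, 1, CV_REDUCE_AVG, CV_32F);   // 2x1: [2, 5]
*/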
3775
3776
3777 //////////////////////////////////////// sort ///////////////////////////////////////////
3778
3779 namespace cv
3780 {
3781
3782 #if IPP_VERSION_X100 > 0
3783 #define USE_IPP_SORT
3784
3785 typedef IppStatus (CV_STDCALL * IppSortFunc)(void *, int);
3786 typedef IppSortFunc IppFlipFunc;
3787
3788 static IppSortFunc getSortFunc(int depth, bool sortDescending)
3789 {
3790 if (!sortDescending)
3791 return depth == CV_8U ? (IppSortFunc)ippsSortAscend_8u_I :
3792 /*depth == CV_16U ? (IppSortFunc)ippsSortAscend_16u_I :
3793 depth == CV_16S ? (IppSortFunc)ippsSortAscend_16s_I :
3794 depth == CV_32S ? (IppSortFunc)ippsSortAscend_32s_I :
3795 depth == CV_32F ? (IppSortFunc)ippsSortAscend_32f_I :
3796 depth == CV_64F ? (IppSortFunc)ippsSortAscend_64f_I :*/ 0;
3797 else
3798 return depth == CV_8U ? (IppSortFunc)ippsSortDescend_8u_I :
3799 /*depth == CV_16U ? (IppSortFunc)ippsSortDescend_16u_I :
3800 depth == CV_16S ? (IppSortFunc)ippsSortDescend_16s_I :
3801 depth == CV_32S ? (IppSortFunc)ippsSortDescend_32s_I :
3802 depth == CV_32F ? (IppSortFunc)ippsSortDescend_32f_I :
3803 depth == CV_64F ? (IppSortFunc)ippsSortDescend_64f_I :*/ 0;
3804 }
3805
3806 static IppFlipFunc getFlipFunc(int depth)
3807 {
3808 CV_SUPPRESS_DEPRECATED_START
3809 return
3810 depth == CV_8U || depth == CV_8S ? (IppFlipFunc)ippsFlip_8u_I :
3811 depth == CV_16U || depth == CV_16S ? (IppFlipFunc)ippsFlip_16u_I :
3812 depth == CV_32S || depth == CV_32F ? (IppFlipFunc)ippsFlip_32f_I :
3813 depth == CV_64F ? (IppFlipFunc)ippsFlip_64f_I : 0;
3814 CV_SUPPRESS_DEPRECATED_END
3815 }
3816
3817
3818 #endif
3819
3820 template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
3821 {
3822 AutoBuffer<T> buf;
3823 T* bptr;
3824 int i, j, n, len;
3825 bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
3826 bool inplace = src.data == dst.data;
3827 bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
3828
3829 if( sortRows )
3830 n = src.rows, len = src.cols;
3831 else
3832 {
3833 n = src.cols, len = src.rows;
3834 buf.allocate(len);
3835 }
3836 bptr = (T*)buf;
3837
3838 #ifdef USE_IPP_SORT
3839 int depth = src.depth();
3840 IppSortFunc ippSortFunc = 0;
3841 IppFlipFunc ippFlipFunc = 0;
3842 CV_IPP_CHECK()
3843 {
3844 ippSortFunc = getSortFunc(depth, sortDescending);
3845 ippFlipFunc = getFlipFunc(depth);
3846 }
3847 #endif
3848
3849 for( i = 0; i < n; i++ )
3850 {
3851 T* ptr = bptr;
3852 if( sortRows )
3853 {
3854 T* dptr = dst.ptr<T>(i);
3855 if( !inplace )
3856 {
3857 const T* sptr = src.ptr<T>(i);
3858 memcpy(dptr, sptr, sizeof(T) * len);
3859 }
3860 ptr = dptr;
3861 }
3862 else
3863 {
3864 for( j = 0; j < len; j++ )
3865 ptr[j] = src.ptr<T>(j)[i];
3866 }
3867
3868 #ifdef USE_IPP_SORT
3869 if (!ippSortFunc || ippSortFunc(ptr, len) < 0)
3870 #endif
3871 {
3872 #ifdef USE_IPP_SORT
3873 if (depth == CV_8U)
3874 setIppErrorStatus();
3875 #endif
3876 std::sort( ptr, ptr + len );
3877 if( sortDescending )
3878 {
3879 #ifdef USE_IPP_SORT
3880 if (!ippFlipFunc || ippFlipFunc(ptr, len) < 0)
3881 #endif
3882 {
3883 #ifdef USE_IPP_SORT
3884 setIppErrorStatus();
3885 #endif
3886 for( j = 0; j < len/2; j++ )
3887 std::swap(ptr[j], ptr[len-1-j]);
3888 }
3889 #ifdef USE_IPP_SORT
3890 else
3891 {
3892 CV_IMPL_ADD(CV_IMPL_IPP);
3893 }
3894 #endif
3895 }
3896 }
3897 #ifdef USE_IPP_SORT
3898 else
3899 {
3900 CV_IMPL_ADD(CV_IMPL_IPP);
3901 }
3902 #endif
3903
3904 if( !sortRows )
3905 for( j = 0; j < len; j++ )
3906 dst.ptr<T>(j)[i] = ptr[j];
3907 }
3908 }
3909
3910 template<typename _Tp> class LessThanIdx
3911 {
3912 public:
3913 LessThanIdx( const _Tp* _arr ) : arr(_arr) {}
3914 bool operator()(int a, int b) const { return arr[a] < arr[b]; }
3915 const _Tp* arr;
3916 };
3917
3918 #if defined USE_IPP_SORT && 0
3919
3920 typedef IppStatus (CV_STDCALL *IppSortIndexFunc)(void *, int *, int);
3921
3922 static IppSortIndexFunc getSortIndexFunc(int depth, bool sortDescending)
3923 {
3924 if (!sortDescending)
3925 return depth == CV_8U ? (IppSortIndexFunc)ippsSortIndexAscend_8u_I :
3926 depth == CV_16U ? (IppSortIndexFunc)ippsSortIndexAscend_16u_I :
3927 depth == CV_16S ? (IppSortIndexFunc)ippsSortIndexAscend_16s_I :
3928 depth == CV_32S ? (IppSortIndexFunc)ippsSortIndexAscend_32s_I :
3929 depth == CV_32F ? (IppSortIndexFunc)ippsSortIndexAscend_32f_I :
3930 depth == CV_64F ? (IppSortIndexFunc)ippsSortIndexAscend_64f_I : 0;
3931 else
3932 return depth == CV_8U ? (IppSortIndexFunc)ippsSortIndexDescend_8u_I :
3933 depth == CV_16U ? (IppSortIndexFunc)ippsSortIndexDescend_16u_I :
3934 depth == CV_16S ? (IppSortIndexFunc)ippsSortIndexDescend_16s_I :
3935 depth == CV_32S ? (IppSortIndexFunc)ippsSortIndexDescend_32s_I :
3936 depth == CV_32F ? (IppSortIndexFunc)ippsSortIndexDescend_32f_I :
3937 depth == CV_64F ? (IppSortIndexFunc)ippsSortIndexDescend_64f_I : 0;
3938 }
3939
3940 #endif
3941
3942 template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
3943 {
3944 AutoBuffer<T> buf;
3945 AutoBuffer<int> ibuf;
3946 T* bptr;
3947 int* _iptr;
3948 int i, j, n, len;
3949 bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
3950 bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;
3951
3952 CV_Assert( src.data != dst.data );
3953
3954 if( sortRows )
3955 n = src.rows, len = src.cols;
3956 else
3957 {
3958 n = src.cols, len = src.rows;
3959 buf.allocate(len);
3960 ibuf.allocate(len);
3961 }
3962 bptr = (T*)buf;
3963 _iptr = (int*)ibuf;
3964
3965 #if defined USE_IPP_SORT && 0
3966 int depth = src.depth();
3967 IppSortIndexFunc ippFunc = 0;
3968 IppFlipFunc ippFlipFunc = 0;
3969 CV_IPP_CHECK()
3970 {
3971 ippFunc = getSortIndexFunc(depth, sortDescending);
3972 ippFlipFunc = getFlipFunc(depth);
3973 }
3974 #endif
3975
3976 for( i = 0; i < n; i++ )
3977 {
3978 T* ptr = bptr;
3979 int* iptr = _iptr;
3980
3981 if( sortRows )
3982 {
3983 ptr = (T*)(src.data + src.step*i);
3984 iptr = dst.ptr<int>(i);
3985 }
3986 else
3987 {
3988 for( j = 0; j < len; j++ )
3989 ptr[j] = src.ptr<T>(j)[i];
3990 }
3991 for( j = 0; j < len; j++ )
3992 iptr[j] = j;
3993
3994 #if defined USE_IPP_SORT && 0
3995 if (sortRows || !ippFunc || ippFunc(ptr, iptr, len) < 0)
3996 #endif
3997 {
3998 #if defined USE_IPP_SORT && 0
3999 setIppErrorStatus();
4000 #endif
4001 std::sort( iptr, iptr + len, LessThanIdx<T>(ptr) );
4002 if( sortDescending )
4003 {
4004 #if defined USE_IPP_SORT && 0
4005 if (!ippFlipFunc || ippFlipFunc(iptr, len) < 0)
4006 #endif
4007 {
4008 #if defined USE_IPP_SORT && 0
4009 setIppErrorStatus();
4010 #endif
4011 for( j = 0; j < len/2; j++ )
4012 std::swap(iptr[j], iptr[len-1-j]);
4013 }
4014 #if defined USE_IPP_SORT && 0
4015 else
4016 {
4017 CV_IMPL_ADD(CV_IMPL_IPP);
4018 }
4019 #endif
4020 }
4021 }
4022 #if defined USE_IPP_SORT && 0
4023 else
4024 {
4025 CV_IMPL_ADD(CV_IMPL_IPP);
4026 }
4027 #endif
4028
4029 if( !sortRows )
4030 for( j = 0; j < len; j++ )
4031 dst.ptr<int>(j)[i] = iptr[j];
4032 }
4033 }
4034
4035 typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);
4036
4037 }
4038
4039 void cv::sort( InputArray _src, OutputArray _dst, int flags )
4040 {
4041 static SortFunc tab[] =
4042 {
4043 sort_<uchar>, sort_<schar>, sort_<ushort>, sort_<short>,
4044 sort_<int>, sort_<float>, sort_<double>, 0
4045 };
4046 Mat src = _src.getMat();
4047 SortFunc func = tab[src.depth()];
4048 CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
4049 _dst.create( src.size(), src.type() );
4050 Mat dst = _dst.getMat();
4051 func( src, dst, flags );
4052 }
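// A minimal usage sketch (not part of the library; variable names are
// illustrative): sorting every row of a single-channel matrix into a
// separate output.
//
//   cv::Mat vals = (cv::Mat_<float>(2, 3) << 3, 1, 2,
//                                            6, 4, 5);
//   cv::Mat sorted;
//   cv::sort(vals, sorted, cv::SORT_EVERY_ROW | cv::SORT_ASCENDING);
//   // sorted now holds [1 2 3; 4 5 6]; vals is left untouched.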
4053
4054 void cv::sortIdx( InputArray _src, OutputArray _dst, int flags )
4055 {
4056 static SortFunc tab[] =
4057 {
4058 sortIdx_<uchar>, sortIdx_<schar>, sortIdx_<ushort>, sortIdx_<short>,
4059 sortIdx_<int>, sortIdx_<float>, sortIdx_<double>, 0
4060 };
4061 Mat src = _src.getMat();
4062 SortFunc func = tab[src.depth()];
4063 CV_Assert( src.dims <= 2 && src.channels() == 1 && func != 0 );
4064
4065 Mat dst = _dst.getMat();
4066 if( dst.data == src.data )
4067 _dst.release();
4068 _dst.create( src.size(), CV_32S );
4069 dst = _dst.getMat();
4070 func( src, dst, flags );
4071 }
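// A minimal usage sketch (illustrative only): sortIdx produces a CV_32S index
// table instead of reordering the values, so the original positions of the
// sorted elements can be recovered.
//
//   cv::Mat vals = (cv::Mat_<float>(1, 3) << 3, 1, 2);
//   cv::Mat order;
//   cv::sortIdx(vals, order, cv::SORT_EVERY_ROW | cv::SORT_ASCENDING);
//   // order == [1, 2, 0]: vals.at<float>(0, order.at<int>(0, k)) is the
//   // k-th smallest element of the row.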
4072
4073
4074 CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value )
4075 {
4076 cv::Mat m = cv::cvarrToMat(arr);
4077 cv::setIdentity(m, value);
4078 }
4079
4080
4081 CV_IMPL CvScalar cvTrace( const CvArr* arr )
4082 {
4083 return cv::trace(cv::cvarrToMat(arr));
4084 }
4085
4086
4087 CV_IMPL void cvTranspose( const CvArr* srcarr, CvArr* dstarr )
4088 {
4089 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
4090
4091 CV_Assert( src.rows == dst.cols && src.cols == dst.rows && src.type() == dst.type() );
4092 transpose( src, dst );
4093 }
4094
4095
4096 CV_IMPL void cvCompleteSymm( CvMat* matrix, int LtoR )
4097 {
4098 cv::Mat m = cv::cvarrToMat(matrix);
4099 cv::completeSymm( m, LtoR != 0 );
4100 }
4101
4102
4103 CV_IMPL void cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr )
4104 {
4105 cv::Mat srcA = cv::cvarrToMat(srcAarr), dst = cv::cvarrToMat(dstarr);
4106
4107 CV_Assert( srcA.size() == dst.size() && srcA.type() == dst.type() );
4108 srcA.cross(cv::cvarrToMat(srcBarr)).copyTo(dst);
4109 }
4110
4111
4112 CV_IMPL void
4113 cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
4114 {
4115 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
4116
4117 if( dim < 0 )
4118 dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
4119
4120 if( dim > 1 )
4121 CV_Error( CV_StsOutOfRange, "The reduced dimensionality index is out of range" );
4122
4123 if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
4124 (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
4125 CV_Error( CV_StsBadSize, "The output array size is incorrect" );
4126
4127 if( src.channels() != dst.channels() )
4128 CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );
4129
4130 cv::reduce(src, dst, dim, op, dst.type());
4131 }
4132
4133
4134 CV_IMPL CvArr*
4135 cvRange( CvArr* arr, double start, double end )
4136 {
4137 int ok = 0;
4138
4139 CvMat stub, *mat = (CvMat*)arr;
4140 double delta;
4141 int type, step;
4142 double val = start;
4143 int i, j;
4144 int rows, cols;
4145
4146 if( !CV_IS_MAT(mat) )
4147 mat = cvGetMat( mat, &stub);
4148
4149 rows = mat->rows;
4150 cols = mat->cols;
4151 type = CV_MAT_TYPE(mat->type);
4152 delta = (end-start)/(rows*cols);
4153
4154 if( CV_IS_MAT_CONT(mat->type) )
4155 {
4156 cols *= rows;
4157 rows = 1;
4158 step = 1;
4159 }
4160 else
4161 step = mat->step / CV_ELEM_SIZE(type);
4162
4163 if( type == CV_32SC1 )
4164 {
4165 int* idata = mat->data.i;
4166 int ival = cvRound(val), idelta = cvRound(delta);
4167
4168 if( fabs(val - ival) < DBL_EPSILON &&
4169 fabs(delta - idelta) < DBL_EPSILON )
4170 {
4171 for( i = 0; i < rows; i++, idata += step )
4172 for( j = 0; j < cols; j++, ival += idelta )
4173 idata[j] = ival;
4174 }
4175 else
4176 {
4177 for( i = 0; i < rows; i++, idata += step )
4178 for( j = 0; j < cols; j++, val += delta )
4179 idata[j] = cvRound(val);
4180 }
4181 }
4182 else if( type == CV_32FC1 )
4183 {
4184 float* fdata = mat->data.fl;
4185 for( i = 0; i < rows; i++, fdata += step )
4186 for( j = 0; j < cols; j++, val += delta )
4187 fdata[j] = (float)val;
4188 }
4189 else
4190 CV_Error( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" );
4191
4192 ok = 1;
4193 return ok ? arr : 0;
4194 }
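// Worked example (hypothetical values): for a continuous 1x5 CV_32FC1 matrix,
// cvRange(mat, 0, 5) computes delta = (5 - 0)/(1*5) = 1 and writes the values
// 0, 1, 2, 3, 4 -- i.e. rows*cols samples of the half-open interval [start, end).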
4195
4196
4197 CV_IMPL void
4198 cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
4199 {
4200 cv::Mat src = cv::cvarrToMat(_src);
4201
4202 if( _idx )
4203 {
4204 cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
4205 CV_Assert( src.size() == idx.size() && idx.type() == CV_32S && src.data != idx.data );
4206 cv::sortIdx( src, idx, flags );
4207 CV_Assert( idx0.data == idx.data );
4208 }
4209
4210 if( _dst )
4211 {
4212 cv::Mat dst0 = cv::cvarrToMat(_dst), dst = dst0;
4213 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
4214 cv::sort( src, dst, flags );
4215 CV_Assert( dst0.data == dst.data );
4216 }
4217 }
4218
4219
4220 CV_IMPL int
4221 cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels,
4222 CvTermCriteria termcrit, int attempts, CvRNG*,
4223 int flags, CvArr* _centers, double* _compactness )
4224 {
4225 cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers;
4226 if( _centers )
4227 {
4228 centers = cv::cvarrToMat(_centers);
4229
4230 centers = centers.reshape(1);
4231 data = data.reshape(1);
4232
4233 CV_Assert( !centers.empty() );
4234 CV_Assert( centers.rows == cluster_count );
4235 CV_Assert( centers.cols == data.cols );
4236 CV_Assert( centers.depth() == data.depth() );
4237 }
4238 CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
4239 (labels.cols == 1 || labels.rows == 1) &&
4240 labels.cols + labels.rows - 1 == data.rows );
4241
4242 double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
4243 flags, _centers ? cv::_OutputArray(centers) : cv::_OutputArray() );
4244 if( _compactness )
4245 *_compactness = compactness;
4246 return 1;
4247 }
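// Hedged sketch of the C++ call that this wrapper forwards to (sample data
// and names are illustrative, not taken from the library):
//
//   cv::Mat samples(100, 2, CV_32F);          // one 2-D point per row
//   cv::randu(samples, cv::Scalar(0), cv::Scalar(1));
//   cv::Mat labels, centers;
//   double compactness = cv::kmeans(samples, 3, labels,
//       cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
//       3, cv::KMEANS_PP_CENTERS, centers);
//   // labels: 100x1 CV_32S cluster index per sample; centers: 3x2 CV_32F.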
4248
4249 ///////////////////////////// n-dimensional matrices ////////////////////////////
4250
4251 namespace cv
4252 {
4253
4254 Mat Mat::reshape(int _cn, int _newndims, const int* _newsz) const
4255 {
4256 if(_newndims == dims)
4257 {
4258 if(_newsz == 0)
4259 return reshape(_cn);
4260 if(_newndims == 2)
4261 return reshape(_cn, _newsz[0]);
4262 }
4263
4264 CV_Error(CV_StsNotImplemented, "");
4265 // TBD
4266 return Mat();
4267 }
4268
4269 NAryMatIterator::NAryMatIterator()
4270 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
4271 {
4272 }
4273
4274 NAryMatIterator::NAryMatIterator(const Mat** _arrays, Mat* _planes, int _narrays)
4275 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
4276 {
4277 init(_arrays, _planes, 0, _narrays);
4278 }
4279
4280 NAryMatIterator::NAryMatIterator(const Mat** _arrays, uchar** _ptrs, int _narrays)
4281 : arrays(0), planes(0), ptrs(0), narrays(0), nplanes(0), size(0), iterdepth(0), idx(0)
4282 {
4283 init(_arrays, 0, _ptrs, _narrays);
4284 }
4285
4286 void NAryMatIterator::init(const Mat** _arrays, Mat* _planes, uchar** _ptrs, int _narrays)
4287 {
4288 CV_Assert( _arrays && (_ptrs || _planes) );
4289 int i, j, d1=0, i0 = -1, d = -1;
4290
4291 arrays = _arrays;
4292 ptrs = _ptrs;
4293 planes = _planes;
4294 narrays = _narrays;
4295 nplanes = 0;
4296 size = 0;
4297
4298 if( narrays < 0 )
4299 {
4300 for( i = 0; _arrays[i] != 0; i++ )
4301 ;
4302 narrays = i;
4303 CV_Assert(narrays <= 1000);
4304 }
4305
4306 iterdepth = 0;
4307
4308 for( i = 0; i < narrays; i++ )
4309 {
4310 CV_Assert(arrays[i] != 0);
4311 const Mat& A = *arrays[i];
4312 if( ptrs )
4313 ptrs[i] = A.data;
4314
4315 if( !A.data )
4316 continue;
4317
4318 if( i0 < 0 )
4319 {
4320 i0 = i;
4321 d = A.dims;
4322
4323 // find the first dimension whose size is different from 1;
4324 // in all of the arrays, the first "d1" steps do not affect the continuity
4325 for( d1 = 0; d1 < d; d1++ )
4326 if( A.size[d1] > 1 )
4327 break;
4328 }
4329 else
4330 CV_Assert( A.size == arrays[i0]->size );
4331
4332 if( !A.isContinuous() )
4333 {
4334 CV_Assert( A.step[d-1] == A.elemSize() );
4335 for( j = d-1; j > d1; j-- )
4336 if( A.step[j]*A.size[j] < A.step[j-1] )
4337 break;
4338 iterdepth = std::max(iterdepth, j);
4339 }
4340 }
4341
4342 if( i0 >= 0 )
4343 {
4344 size = arrays[i0]->size[d-1];
4345 for( j = d-1; j > iterdepth; j-- )
4346 {
4347 int64 total1 = (int64)size*arrays[i0]->size[j-1];
4348 if( total1 != (int)total1 )
4349 break;
4350 size = (int)total1;
4351 }
4352
4353 iterdepth = j;
4354 if( iterdepth == d1 )
4355 iterdepth = 0;
4356
4357 nplanes = 1;
4358 for( j = iterdepth-1; j >= 0; j-- )
4359 nplanes *= arrays[i0]->size[j];
4360 }
4361 else
4362 iterdepth = 0;
4363
4364 idx = 0;
4365
4366 if( !planes )
4367 return;
4368
4369 for( i = 0; i < narrays; i++ )
4370 {
4371 CV_Assert(arrays[i] != 0);
4372 const Mat& A = *arrays[i];
4373
4374 if( !A.data )
4375 {
4376 planes[i] = Mat();
4377 continue;
4378 }
4379
4380 planes[i] = Mat(1, (int)size, A.type(), A.data);
4381 }
4382 }
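// Hedged usage sketch (illustrative): NAryMatIterator exposes arbitrary-dimensional,
// possibly non-continuous matrices as a sequence of 1 x size continuous planes,
// so per-element code only needs to loop over planes.
//
//   const cv::Mat* arrays[] = { &A, 0 };      // 0-terminated list, A may be n-D
//   cv::Mat planes[1];
//   cv::NAryMatIterator it(arrays, planes);
//   double total = 0;
//   for( size_t p = 0; p < it.nplanes; p++, ++it )
//       total += cv::sum(planes[0])[0];       // accumulate over each plane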
4383
4384
4385 NAryMatIterator& NAryMatIterator::operator ++()
4386 {
4387 if( idx >= nplanes-1 )
4388 return *this;
4389 ++idx;
4390
4391 if( iterdepth == 1 )
4392 {
4393 if( ptrs )
4394 {
4395 for( int i = 0; i < narrays; i++ )
4396 {
4397 if( !ptrs[i] )
4398 continue;
4399 ptrs[i] = arrays[i]->data + arrays[i]->step[0]*idx;
4400 }
4401 }
4402 if( planes )
4403 {
4404 for( int i = 0; i < narrays; i++ )
4405 {
4406 if( !planes[i].data )
4407 continue;
4408 planes[i].data = arrays[i]->data + arrays[i]->step[0]*idx;
4409 }
4410 }
4411 }
4412 else
4413 {
4414 for( int i = 0; i < narrays; i++ )
4415 {
4416 const Mat& A = *arrays[i];
4417 if( !A.data )
4418 continue;
4419 int _idx = (int)idx;
4420 uchar* data = A.data;
4421 for( int j = iterdepth-1; j >= 0 && _idx > 0; j-- )
4422 {
4423 int szi = A.size[j], t = _idx/szi;
4424 data += (_idx - t * szi)*A.step[j];
4425 _idx = t;
4426 }
4427 if( ptrs )
4428 ptrs[i] = data;
4429 if( planes )
4430 planes[i].data = data;
4431 }
4432 }
4433
4434 return *this;
4435 }
4436
4437 NAryMatIterator NAryMatIterator::operator ++(int)
4438 {
4439 NAryMatIterator it = *this;
4440 ++*this;
4441 return it;
4442 }
4443
4444 ///////////////////////////////////////////////////////////////////////////
4445 // MatConstIterator //
4446 ///////////////////////////////////////////////////////////////////////////
4447
4448 Point MatConstIterator::pos() const
4449 {
4450 if( !m )
4451 return Point();
4452 CV_DbgAssert(m->dims <= 2);
4453
4454 ptrdiff_t ofs = ptr - m->ptr();
4455 int y = (int)(ofs/m->step[0]);
4456 return Point((int)((ofs - y*m->step[0])/elemSize), y);
4457 }
4458
4459 void MatConstIterator::pos(int* _idx) const
4460 {
4461 CV_Assert(m != 0 && _idx);
4462 ptrdiff_t ofs = ptr - m->ptr();
4463 for( int i = 0; i < m->dims; i++ )
4464 {
4465 size_t s = m->step[i], v = ofs/s;
4466 ofs -= v*s;
4467 _idx[i] = (int)v;
4468 }
4469 }
4470
4471 ptrdiff_t MatConstIterator::lpos() const
4472 {
4473 if(!m)
4474 return 0;
4475 if( m->isContinuous() )
4476 return (ptr - sliceStart)/elemSize;
4477 ptrdiff_t ofs = ptr - m->ptr();
4478 int i, d = m->dims;
4479 if( d == 2 )
4480 {
4481 ptrdiff_t y = ofs/m->step[0];
4482 return y*m->cols + (ofs - y*m->step[0])/elemSize;
4483 }
4484 ptrdiff_t result = 0;
4485 for( i = 0; i < d; i++ )
4486 {
4487 size_t s = m->step[i], v = ofs/s;
4488 ofs -= v*s;
4489 result = result*m->size[i] + v;
4490 }
4491 return result;
4492 }
4493
4494 void MatConstIterator::seek(ptrdiff_t ofs, bool relative)
4495 {
4496 if( m->isContinuous() )
4497 {
4498 ptr = (relative ? ptr : sliceStart) + ofs*elemSize;
4499 if( ptr < sliceStart )
4500 ptr = sliceStart;
4501 else if( ptr > sliceEnd )
4502 ptr = sliceEnd;
4503 return;
4504 }
4505
4506 int d = m->dims;
4507 if( d == 2 )
4508 {
4509 ptrdiff_t ofs0, y;
4510 if( relative )
4511 {
4512 ofs0 = ptr - m->ptr();
4513 y = ofs0/m->step[0];
4514 ofs += y*m->cols + (ofs0 - y*m->step[0])/elemSize;
4515 }
4516 y = ofs/m->cols;
4517 int y1 = std::min(std::max((int)y, 0), m->rows-1);
4518 sliceStart = m->ptr(y1);
4519 sliceEnd = sliceStart + m->cols*elemSize;
4520 ptr = y < 0 ? sliceStart : y >= m->rows ? sliceEnd :
4521 sliceStart + (ofs - y*m->cols)*elemSize;
4522 return;
4523 }
4524
4525 if( relative )
4526 ofs += lpos();
4527
4528 if( ofs < 0 )
4529 ofs = 0;
4530
4531 int szi = m->size[d-1];
4532 ptrdiff_t t = ofs/szi;
4533 int v = (int)(ofs - t*szi);
4534 ofs = t;
4535 ptr = m->ptr() + v*elemSize;
4536 sliceStart = m->ptr();
4537
4538 for( int i = d-2; i >= 0; i-- )
4539 {
4540 szi = m->size[i];
4541 t = ofs/szi;
4542 v = (int)(ofs - t*szi);
4543 ofs = t;
4544 sliceStart += v*m->step[i];
4545 }
4546
4547 sliceEnd = sliceStart + m->size[d-1]*elemSize;
4548 if( ofs > 0 )
4549 ptr = sliceEnd;
4550 else
4551 ptr = sliceStart + (ptr - m->ptr());
4552 }
4553
4554 void MatConstIterator::seek(const int* _idx, bool relative)
4555 {
4556 int i, d = m->dims;
4557 ptrdiff_t ofs = 0;
4558 if( !_idx )
4559 ;
4560 else if( d == 2 )
4561 ofs = _idx[0]*m->size[1] + _idx[1];
4562 else
4563 {
4564 for( i = 0; i < d; i++ )
4565 ofs = ofs*m->size[i] + _idx[i];
4566 }
4567 seek(ofs, relative);
4568 }
4569
4570 //////////////////////////////// SparseMat ////////////////////////////////
4571
4572 template<typename T1, typename T2> void
4573 convertData_(const void* _from, void* _to, int cn)
4574 {
4575 const T1* from = (const T1*)_from;
4576 T2* to = (T2*)_to;
4577 if( cn == 1 )
4578 *to = saturate_cast<T2>(*from);
4579 else
4580 for( int i = 0; i < cn; i++ )
4581 to[i] = saturate_cast<T2>(from[i]);
4582 }
4583
4584 template<typename T1, typename T2> void
4585 convertScaleData_(const void* _from, void* _to, int cn, double alpha, double beta)
4586 {
4587 const T1* from = (const T1*)_from;
4588 T2* to = (T2*)_to;
4589 if( cn == 1 )
4590 *to = saturate_cast<T2>(*from*alpha + beta);
4591 else
4592 for( int i = 0; i < cn; i++ )
4593 to[i] = saturate_cast<T2>(from[i]*alpha + beta);
4594 }
4595
4596 typedef void (*ConvertData)(const void* from, void* to, int cn);
4597 typedef void (*ConvertScaleData)(const void* from, void* to, int cn, double alpha, double beta);
4598
4599 static ConvertData getConvertElem(int fromType, int toType)
4600 {
4601 static ConvertData tab[][8] =
4602 {{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
4603 convertData_<uchar, ushort>, convertData_<uchar, short>,
4604 convertData_<uchar, int>, convertData_<uchar, float>,
4605 convertData_<uchar, double>, 0 },
4606
4607 { convertData_<schar, uchar>, convertData_<schar, schar>,
4608 convertData_<schar, ushort>, convertData_<schar, short>,
4609 convertData_<schar, int>, convertData_<schar, float>,
4610 convertData_<schar, double>, 0 },
4611
4612 { convertData_<ushort, uchar>, convertData_<ushort, schar>,
4613 convertData_<ushort, ushort>, convertData_<ushort, short>,
4614 convertData_<ushort, int>, convertData_<ushort, float>,
4615 convertData_<ushort, double>, 0 },
4616
4617 { convertData_<short, uchar>, convertData_<short, schar>,
4618 convertData_<short, ushort>, convertData_<short, short>,
4619 convertData_<short, int>, convertData_<short, float>,
4620 convertData_<short, double>, 0 },
4621
4622 { convertData_<int, uchar>, convertData_<int, schar>,
4623 convertData_<int, ushort>, convertData_<int, short>,
4624 convertData_<int, int>, convertData_<int, float>,
4625 convertData_<int, double>, 0 },
4626
4627 { convertData_<float, uchar>, convertData_<float, schar>,
4628 convertData_<float, ushort>, convertData_<float, short>,
4629 convertData_<float, int>, convertData_<float, float>,
4630 convertData_<float, double>, 0 },
4631
4632 { convertData_<double, uchar>, convertData_<double, schar>,
4633 convertData_<double, ushort>, convertData_<double, short>,
4634 convertData_<double, int>, convertData_<double, float>,
4635 convertData_<double, double>, 0 },
4636
4637 { 0, 0, 0, 0, 0, 0, 0, 0 }};
4638
4639 ConvertData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
4640 CV_Assert( func != 0 );
4641 return func;
4642 }
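// Note (descriptive, added): the table is indexed as tab[source depth][destination depth],
// so e.g. getConvertElem(CV_8U, CV_32F) resolves to convertData_<uchar, float>;
// only the depths select the function, the channel count is passed separately as "cn".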
4643
4644 static ConvertScaleData getConvertScaleElem(int fromType, int toType)
4645 {
4646 static ConvertScaleData tab[][8] =
4647 {{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
4648 convertScaleData_<uchar, ushort>, convertScaleData_<uchar, short>,
4649 convertScaleData_<uchar, int>, convertScaleData_<uchar, float>,
4650 convertScaleData_<uchar, double>, 0 },
4651
4652 { convertScaleData_<schar, uchar>, convertScaleData_<schar, schar>,
4653 convertScaleData_<schar, ushort>, convertScaleData_<schar, short>,
4654 convertScaleData_<schar, int>, convertScaleData_<schar, float>,
4655 convertScaleData_<schar, double>, 0 },
4656
4657 { convertScaleData_<ushort, uchar>, convertScaleData_<ushort, schar>,
4658 convertScaleData_<ushort, ushort>, convertScaleData_<ushort, short>,
4659 convertScaleData_<ushort, int>, convertScaleData_<ushort, float>,
4660 convertScaleData_<ushort, double>, 0 },
4661
4662 { convertScaleData_<short, uchar>, convertScaleData_<short, schar>,
4663 convertScaleData_<short, ushort>, convertScaleData_<short, short>,
4664 convertScaleData_<short, int>, convertScaleData_<short, float>,
4665 convertScaleData_<short, double>, 0 },
4666
4667 { convertScaleData_<int, uchar>, convertScaleData_<int, schar>,
4668 convertScaleData_<int, ushort>, convertScaleData_<int, short>,
4669 convertScaleData_<int, int>, convertScaleData_<int, float>,
4670 convertScaleData_<int, double>, 0 },
4671
4672 { convertScaleData_<float, uchar>, convertScaleData_<float, schar>,
4673 convertScaleData_<float, ushort>, convertScaleData_<float, short>,
4674 convertScaleData_<float, int>, convertScaleData_<float, float>,
4675 convertScaleData_<float, double>, 0 },
4676
4677 { convertScaleData_<double, uchar>, convertScaleData_<double, schar>,
4678 convertScaleData_<double, ushort>, convertScaleData_<double, short>,
4679 convertScaleData_<double, int>, convertScaleData_<double, float>,
4680 convertScaleData_<double, double>, 0 },
4681
4682 { 0, 0, 0, 0, 0, 0, 0, 0 }};
4683
4684 ConvertScaleData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
4685 CV_Assert( func != 0 );
4686 return func;
4687 }
4688
4689 enum { HASH_SIZE0 = 8 };
4690
4691 static inline void copyElem(const uchar* from, uchar* to, size_t elemSize)
4692 {
4693 size_t i;
4694 for( i = 0; i + sizeof(int) <= elemSize; i += sizeof(int) )
4695 *(int*)(to + i) = *(const int*)(from + i);
4696 for( ; i < elemSize; i++ )
4697 to[i] = from[i];
4698 }
4699
4700 static inline bool isZeroElem(const uchar* data, size_t elemSize)
4701 {
4702 size_t i;
4703 for( i = 0; i + sizeof(int) <= elemSize; i += sizeof(int) )
4704 if( *(int*)(data + i) != 0 )
4705 return false;
4706 for( ; i < elemSize; i++ )
4707 if( data[i] != 0 )
4708 return false;
4709 return true;
4710 }
4711
4712 SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type )
4713 {
4714 refcount = 1;
4715
4716 dims = _dims;
4717 valueOffset = (int)alignSize(sizeof(SparseMat::Node) - MAX_DIM*sizeof(int) +
4718 dims*sizeof(int), CV_ELEM_SIZE1(_type));
4719 nodeSize = alignSize(valueOffset +
4720 CV_ELEM_SIZE(_type), (int)sizeof(size_t));
4721
4722 int i;
4723 for( i = 0; i < dims; i++ )
4724 size[i] = _sizes[i];
4725 for( ; i < CV_MAX_DIM; i++ )
4726 size[i] = 0;
4727 clear();
4728 }
4729
4730 void SparseMat::Hdr::clear()
4731 {
4732 hashtab.clear();
4733 hashtab.resize(HASH_SIZE0);
4734 pool.clear();
4735 pool.resize(nodeSize);
4736 nodeCount = freeList = 0;
4737 }
4738
4739
4740 SparseMat::SparseMat(const Mat& m)
4741 : flags(MAGIC_VAL), hdr(0)
4742 {
4743 create( m.dims, m.size, m.type() );
4744
4745 int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
4746 size_t esz = m.elemSize();
4747 const uchar* dptr = m.ptr();
4748
4749 for(;;)
4750 {
4751 for( i = 0; i < lastSize; i++, dptr += esz )
4752 {
4753 if( isZeroElem(dptr, esz) )
4754 continue;
4755 idx[d-1] = i;
4756 uchar* to = newNode(idx, hash(idx));
4757 copyElem( dptr, to, esz );
4758 }
4759
4760 for( i = d - 2; i >= 0; i-- )
4761 {
4762 dptr += m.step[i] - m.size[i+1]*m.step[i+1];
4763 if( ++idx[i] < m.size[i] )
4764 break;
4765 idx[i] = 0;
4766 }
4767 if( i < 0 )
4768 break;
4769 }
4770 }
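// A minimal usage sketch (illustrative): only the non-zero elements of the dense
// source survive this conversion, and copyTo(Mat&) below restores a dense copy.
//
//   cv::Mat dense = cv::Mat::zeros(1000, 1000, CV_32F);
//   dense.at<float>(3, 7) = 1.f;
//   cv::SparseMat sparse(dense);              // stores a single node
//   cv::Mat restored;
//   sparse.copyTo(restored);                  // dense again, zeros re-filled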
4771
4772 void SparseMat::create(int d, const int* _sizes, int _type)
4773 {
4774 int i;
4775 CV_Assert( _sizes && 0 < d && d <= CV_MAX_DIM );
4776 for( i = 0; i < d; i++ )
4777 CV_Assert( _sizes[i] > 0 );
4778 _type = CV_MAT_TYPE(_type);
4779 if( hdr && _type == type() && hdr->dims == d && hdr->refcount == 1 )
4780 {
4781 for( i = 0; i < d; i++ )
4782 if( _sizes[i] != hdr->size[i] )
4783 break;
4784 if( i == d )
4785 {
4786 clear();
4787 return;
4788 }
4789 }
4790 release();
4791 flags = MAGIC_VAL | _type;
4792 hdr = new Hdr(d, _sizes, _type);
4793 }
4794
4795 void SparseMat::copyTo( SparseMat& m ) const
4796 {
4797 if( hdr == m.hdr )
4798 return;
4799 if( !hdr )
4800 {
4801 m.release();
4802 return;
4803 }
4804 m.create( hdr->dims, hdr->size, type() );
4805 SparseMatConstIterator from = begin();
4806 size_t i, N = nzcount(), esz = elemSize();
4807
4808 for( i = 0; i < N; i++, ++from )
4809 {
4810 const Node* n = from.node();
4811 uchar* to = m.newNode(n->idx, n->hashval);
4812 copyElem( from.ptr, to, esz );
4813 }
4814 }
4815
4816 void SparseMat::copyTo( Mat& m ) const
4817 {
4818 CV_Assert( hdr );
4819 int ndims = dims();
4820 m.create( ndims, hdr->size, type() );
4821 m = Scalar(0);
4822
4823 SparseMatConstIterator from = begin();
4824 size_t i, N = nzcount(), esz = elemSize();
4825
4826 for( i = 0; i < N; i++, ++from )
4827 {
4828 const Node* n = from.node();
4829 copyElem( from.ptr, (ndims > 1 ? m.ptr(n->idx) : m.ptr(n->idx[0])), esz);
4830 }
4831 }
4832
4833
4834 void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
4835 {
4836 int cn = channels();
4837 if( rtype < 0 )
4838 rtype = type();
4839 rtype = CV_MAKETYPE(rtype, cn);
4840 if( hdr == m.hdr && rtype != type() )
4841 {
4842 SparseMat temp;
4843 convertTo(temp, rtype, alpha);
4844 m = temp;
4845 return;
4846 }
4847
4848 CV_Assert(hdr != 0);
4849 if( hdr != m.hdr )
4850 m.create( hdr->dims, hdr->size, rtype );
4851
4852 SparseMatConstIterator from = begin();
4853 size_t i, N = nzcount();
4854
4855 if( alpha == 1 )
4856 {
4857 ConvertData cvtfunc = getConvertElem(type(), rtype);
4858 for( i = 0; i < N; i++, ++from )
4859 {
4860 const Node* n = from.node();
4861 uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
4862 cvtfunc( from.ptr, to, cn );
4863 }
4864 }
4865 else
4866 {
4867 ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
4868 for( i = 0; i < N; i++, ++from )
4869 {
4870 const Node* n = from.node();
4871 uchar* to = hdr == m.hdr ? from.ptr : m.newNode(n->idx, n->hashval);
4872 cvtfunc( from.ptr, to, cn, alpha, 0 );
4873 }
4874 }
4875 }
4876
4877
4878 void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const
4879 {
4880 int cn = channels();
4881 if( rtype < 0 )
4882 rtype = type();
4883 rtype = CV_MAKETYPE(rtype, cn);
4884
4885 CV_Assert( hdr );
4886 m.create( dims(), hdr->size, rtype );
4887 m = Scalar(beta);
4888
4889 SparseMatConstIterator from = begin();
4890 size_t i, N = nzcount();
4891
4892 if( alpha == 1 && beta == 0 )
4893 {
4894 ConvertData cvtfunc = getConvertElem(type(), rtype);
4895 for( i = 0; i < N; i++, ++from )
4896 {
4897 const Node* n = from.node();
4898 uchar* to = m.ptr(n->idx);
4899 cvtfunc( from.ptr, to, cn );
4900 }
4901 }
4902 else
4903 {
4904 ConvertScaleData cvtfunc = getConvertScaleElem(type(), rtype);
4905 for( i = 0; i < N; i++, ++from )
4906 {
4907 const Node* n = from.node();
4908 uchar* to = m.ptr(n->idx);
4909 cvtfunc( from.ptr, to, cn, alpha, beta );
4910 }
4911 }
4912 }
4913
4914 void SparseMat::clear()
4915 {
4916 if( hdr )
4917 hdr->clear();
4918 }
4919
4920 uchar* SparseMat::ptr(int i0, bool createMissing, size_t* hashval)
4921 {
4922 CV_Assert( hdr && hdr->dims == 1 );
4923 size_t h = hashval ? *hashval : hash(i0);
4924 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
4925 uchar* pool = &hdr->pool[0];
4926 while( nidx != 0 )
4927 {
4928 Node* elem = (Node*)(pool + nidx);
4929 if( elem->hashval == h && elem->idx[0] == i0 )
4930 return &value<uchar>(elem);
4931 nidx = elem->next;
4932 }
4933
4934 if( createMissing )
4935 {
4936 int idx[] = { i0 };
4937 return newNode( idx, h );
4938 }
4939 return 0;
4940 }
4941
4942 uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
4943 {
4944 CV_Assert( hdr && hdr->dims == 2 );
4945 size_t h = hashval ? *hashval : hash(i0, i1);
4946 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
4947 uchar* pool = &hdr->pool[0];
4948 while( nidx != 0 )
4949 {
4950 Node* elem = (Node*)(pool + nidx);
4951 if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
4952 return &value<uchar>(elem);
4953 nidx = elem->next;
4954 }
4955
4956 if( createMissing )
4957 {
4958 int idx[] = { i0, i1 };
4959 return newNode( idx, h );
4960 }
4961 return 0;
4962 }
4963
4964 uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval)
4965 {
4966 CV_Assert( hdr && hdr->dims == 3 );
4967 size_t h = hashval ? *hashval : hash(i0, i1, i2);
4968 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
4969 uchar* pool = &hdr->pool[0];
4970 while( nidx != 0 )
4971 {
4972 Node* elem = (Node*)(pool + nidx);
4973 if( elem->hashval == h && elem->idx[0] == i0 &&
4974 elem->idx[1] == i1 && elem->idx[2] == i2 )
4975 return &value<uchar>(elem);
4976 nidx = elem->next;
4977 }
4978
4979 if( createMissing )
4980 {
4981 int idx[] = { i0, i1, i2 };
4982 return newNode( idx, h );
4983 }
4984 return 0;
4985 }
4986
4987 uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval)
4988 {
4989 CV_Assert( hdr );
4990 int i, d = hdr->dims;
4991 size_t h = hashval ? *hashval : hash(idx);
4992 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
4993 uchar* pool = &hdr->pool[0];
4994 while( nidx != 0 )
4995 {
4996 Node* elem = (Node*)(pool + nidx);
4997 if( elem->hashval == h )
4998 {
4999 for( i = 0; i < d; i++ )
5000 if( elem->idx[i] != idx[i] )
5001 break;
5002 if( i == d )
5003 return &value<uchar>(elem);
5004 }
5005 nidx = elem->next;
5006 }
5007
5008 return createMissing ? newNode(idx, h) : 0;
5009 }
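// Hedged usage sketch (illustrative): these ptr() overloads back the public
// ref<T>() / value<T>() / find<T>() accessors; ref() passes createMissing=true
// and inserts a zero-initialized node, while value() never inserts.
//
//   int sz[] = { 10, 10, 10 };
//   cv::SparseMat sm(3, sz, CV_32F);
//   sm.ref<float>(1, 2, 3) = 5.f;             // creates the node
//   float a = sm.value<float>(1, 2, 3);       // 5.f
//   float b = sm.value<float>(4, 4, 4);       // 0.f, nothing inserted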
5010
5011 void SparseMat::erase(int i0, int i1, size_t* hashval)
5012 {
5013 CV_Assert( hdr && hdr->dims == 2 );
5014 size_t h = hashval ? *hashval : hash(i0, i1);
5015 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
5016 uchar* pool = &hdr->pool[0];
5017 while( nidx != 0 )
5018 {
5019 Node* elem = (Node*)(pool + nidx);
5020 if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
5021 break;
5022 previdx = nidx;
5023 nidx = elem->next;
5024 }
5025
5026 if( nidx )
5027 removeNode(hidx, nidx, previdx);
5028 }
5029
5030 void SparseMat::erase(int i0, int i1, int i2, size_t* hashval)
5031 {
5032 CV_Assert( hdr && hdr->dims == 3 );
5033 size_t h = hashval ? *hashval : hash(i0, i1, i2);
5034 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
5035 uchar* pool = &hdr->pool[0];
5036 while( nidx != 0 )
5037 {
5038 Node* elem = (Node*)(pool + nidx);
5039 if( elem->hashval == h && elem->idx[0] == i0 &&
5040 elem->idx[1] == i1 && elem->idx[2] == i2 )
5041 break;
5042 previdx = nidx;
5043 nidx = elem->next;
5044 }
5045
5046 if( nidx )
5047 removeNode(hidx, nidx, previdx);
5048 }
5049
5050 void SparseMat::erase(const int* idx, size_t* hashval)
5051 {
5052 CV_Assert( hdr );
5053 int i, d = hdr->dims;
5054 size_t h = hashval ? *hashval : hash(idx);
5055 size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
5056 uchar* pool = &hdr->pool[0];
5057 while( nidx != 0 )
5058 {
5059 Node* elem = (Node*)(pool + nidx);
5060 if( elem->hashval == h )
5061 {
5062 for( i = 0; i < d; i++ )
5063 if( elem->idx[i] != idx[i] )
5064 break;
5065 if( i == d )
5066 break;
5067 }
5068 previdx = nidx;
5069 nidx = elem->next;
5070 }
5071
5072 if( nidx )
5073 removeNode(hidx, nidx, previdx);
5074 }
5075
5076 void SparseMat::resizeHashTab(size_t newsize)
5077 {
5078 newsize = std::max(newsize, (size_t)8);
5079 if((newsize & (newsize-1)) != 0)
5080 newsize = (size_t)1 << cvCeil(std::log((double)newsize)/CV_LOG2);
5081
5082 size_t i, hsize = hdr->hashtab.size();
5083 std::vector<size_t> _newh(newsize);
5084 size_t* newh = &_newh[0];
5085 for( i = 0; i < newsize; i++ )
5086 newh[i] = 0;
5087 uchar* pool = &hdr->pool[0];
5088 for( i = 0; i < hsize; i++ )
5089 {
5090 size_t nidx = hdr->hashtab[i];
5091 while( nidx )
5092 {
5093 Node* elem = (Node*)(pool + nidx);
5094 size_t next = elem->next;
5095 size_t newhidx = elem->hashval & (newsize - 1);
5096 elem->next = newh[newhidx];
5097 newh[newhidx] = nidx;
5098 nidx = next;
5099 }
5100 }
5101 hdr->hashtab = _newh;
5102 }
5103
5104 uchar* SparseMat::newNode(const int* idx, size_t hashval)
5105 {
5106 const int HASH_MAX_FILL_FACTOR=3;
5107 assert(hdr);
5108 size_t hsize = hdr->hashtab.size();
5109 if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR )
5110 {
5111 resizeHashTab(std::max(hsize*2, (size_t)8));
5112 hsize = hdr->hashtab.size();
5113 }
5114
5115 if( !hdr->freeList )
5116 {
5117 size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
5118 newpsize = std::max(psize*3/2, 8*nsz);
5119 newpsize = (newpsize/nsz)*nsz;
5120 hdr->pool.resize(newpsize);
5121 uchar* pool = &hdr->pool[0];
5122 hdr->freeList = std::max(psize, nsz);
5123 for( i = hdr->freeList; i < newpsize - nsz; i += nsz )
5124 ((Node*)(pool + i))->next = i + nsz;
5125 ((Node*)(pool + i))->next = 0;
5126 }
5127 size_t nidx = hdr->freeList;
5128 Node* elem = (Node*)&hdr->pool[nidx];
5129 hdr->freeList = elem->next;
5130 elem->hashval = hashval;
5131 size_t hidx = hashval & (hsize - 1);
5132 elem->next = hdr->hashtab[hidx];
5133 hdr->hashtab[hidx] = nidx;
5134
5135 int i, d = hdr->dims;
5136 for( i = 0; i < d; i++ )
5137 elem->idx[i] = idx[i];
5138 size_t esz = elemSize();
5139 uchar* p = &value<uchar>(elem);
5140 if( esz == sizeof(float) )
5141 *((float*)p) = 0.f;
5142 else if( esz == sizeof(double) )
5143 *((double*)p) = 0.;
5144 else
5145 memset(p, 0, esz);
5146
5147 return p;
5148 }
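// Note (descriptive, added): hdr->hashtab.size() is kept a power of two (see
// resizeHashTab), so "hashval & (hsize - 1)" above is equivalent to
// "hashval % hsize"; nodes live in hdr->pool and are chained through Node::next,
// with pool offset 0 reserved as the "null" link.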
5149
5150
5151 void SparseMat::removeNode(size_t hidx, size_t nidx, size_t previdx)
5152 {
5153 Node* n = node(nidx);
5154 if( previdx )
5155 {
5156 Node* prev = node(previdx);
5157 prev->next = n->next;
5158 }
5159 else
5160 hdr->hashtab[hidx] = n->next;
5161 n->next = hdr->freeList;
5162 hdr->freeList = nidx;
5163 --hdr->nodeCount;
5164 }
5165
5166
5167 SparseMatConstIterator::SparseMatConstIterator(const SparseMat* _m)
5168 : m((SparseMat*)_m), hashidx(0), ptr(0)
5169 {
5170 if(!_m || !_m->hdr)
5171 return;
5172 SparseMat::Hdr& hdr = *m->hdr;
5173 const std::vector<size_t>& htab = hdr.hashtab;
5174 size_t i, hsize = htab.size();
5175 for( i = 0; i < hsize; i++ )
5176 {
5177 size_t nidx = htab[i];
5178 if( nidx )
5179 {
5180 hashidx = i;
5181 ptr = &hdr.pool[nidx] + hdr.valueOffset;
5182 return;
5183 }
5184 }
5185 }
5186
5187 SparseMatConstIterator& SparseMatConstIterator::operator ++()
5188 {
5189 if( !ptr || !m || !m->hdr )
5190 return *this;
5191 SparseMat::Hdr& hdr = *m->hdr;
5192 size_t next = ((const SparseMat::Node*)(ptr - hdr.valueOffset))->next;
5193 if( next )
5194 {
5195 ptr = &hdr.pool[next] + hdr.valueOffset;
5196 return *this;
5197 }
5198 size_t i = hashidx + 1, sz = hdr.hashtab.size();
5199 for( ; i < sz; i++ )
5200 {
5201 size_t nidx = hdr.hashtab[i];
5202 if( nidx )
5203 {
5204 hashidx = i;
5205 ptr = &hdr.pool[nidx] + hdr.valueOffset;
5206 return *this;
5207 }
5208 }
5209 hashidx = sz;
5210 ptr = 0;
5211 return *this;
5212 }
5213
5214
5215 double norm( const SparseMat& src, int normType )
5216 {
5217 SparseMatConstIterator it = src.begin();
5218
5219 size_t i, N = src.nzcount();
5220 normType &= NORM_TYPE_MASK;
5221 int type = src.type();
5222 double result = 0;
5223
5224 CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );
5225
5226 if( type == CV_32F )
5227 {
5228 if( normType == NORM_INF )
5229 for( i = 0; i < N; i++, ++it )
5230 result = std::max(result, std::abs((double)it.value<float>()));
5231 else if( normType == NORM_L1 )
5232 for( i = 0; i < N; i++, ++it )
5233 result += std::abs(it.value<float>());
5234 else
5235 for( i = 0; i < N; i++, ++it )
5236 {
5237 double v = it.value<float>();
5238 result += v*v;
5239 }
5240 }
5241 else if( type == CV_64F )
5242 {
5243 if( normType == NORM_INF )
5244 for( i = 0; i < N; i++, ++it )
5245 result = std::max(result, std::abs(it.value<double>()));
5246 else if( normType == NORM_L1 )
5247 for( i = 0; i < N; i++, ++it )
5248 result += std::abs(it.value<double>());
5249 else
5250 for( i = 0; i < N; i++, ++it )
5251 {
5252 double v = it.value<double>();
5253 result += v*v;
5254 }
5255 }
5256 else
5257 CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
5258
5259 if( normType == NORM_L2 )
5260 result = std::sqrt(result);
5261 return result;
5262 }
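// A minimal usage sketch (illustrative): the norm is computed over the stored
// (non-zero) elements only.
//
//   int sz[] = { 5, 5 };
//   cv::SparseMat sm(2, sz, CV_64F);
//   sm.ref<double>(0, 0) = 3.0;
//   sm.ref<double>(4, 4) = -4.0;
//   double linf = cv::norm(sm, cv::NORM_INF); // 4.0
//   double l1   = cv::norm(sm, cv::NORM_L1);  // 7.0
//   double l2   = cv::norm(sm, cv::NORM_L2);  // 5.0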
5263
5264 void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
5265 {
5266 SparseMatConstIterator it = src.begin();
5267 size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
5268 int type = src.type();
5269 const int *minidx = 0, *maxidx = 0;
5270
5271 if( type == CV_32F )
5272 {
5273 float minval = FLT_MAX, maxval = -FLT_MAX;
5274 for( i = 0; i < N; i++, ++it )
5275 {
5276 float v = it.value<float>();
5277 if( v < minval )
5278 {
5279 minval = v;
5280 minidx = it.node()->idx;
5281 }
5282 if( v > maxval )
5283 {
5284 maxval = v;
5285 maxidx = it.node()->idx;
5286 }
5287 }
5288 if( _minval )
5289 *_minval = minval;
5290 if( _maxval )
5291 *_maxval = maxval;
5292 }
5293 else if( type == CV_64F )
5294 {
5295 double minval = DBL_MAX, maxval = -DBL_MAX;
5296 for( i = 0; i < N; i++, ++it )
5297 {
5298 double v = it.value<double>();
5299 if( v < minval )
5300 {
5301 minval = v;
5302 minidx = it.node()->idx;
5303 }
5304 if( v > maxval )
5305 {
5306 maxval = v;
5307 maxidx = it.node()->idx;
5308 }
5309 }
5310 if( _minval )
5311 *_minval = minval;
5312 if( _maxval )
5313 *_maxval = maxval;
5314 }
5315 else
5316 CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );
5317
5318 if( _minidx )
5319 for( i = 0; i < d; i++ )
5320 _minidx[i] = minidx[i];
5321 if( _maxidx )
5322 for( i = 0; i < d; i++ )
5323 _maxidx[i] = maxidx[i];
5324 }
5325
5326
5327 void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
5328 {
5329 double scale = 1;
5330 if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
5331 {
5332 scale = norm( src, norm_type );
5333 scale = scale > DBL_EPSILON ? a/scale : 0.;
5334 }
5335 else
5336 CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );
5337
5338 src.convertTo( dst, -1, scale );
5339 }
5340
5341 ////////////////////// RotatedRect //////////////////////
5342
5343 RotatedRect::RotatedRect(const Point2f& _point1, const Point2f& _point2, const Point2f& _point3)
5344 {
5345 Point2f _center = 0.5f * (_point1 + _point3);
5346 Vec2f vecs[2];
5347 vecs[0] = Vec2f(_point1 - _point2);
5348 vecs[1] = Vec2f(_point2 - _point3);
5349 // check that given sides are perpendicular
5350 CV_Assert( abs(vecs[0].dot(vecs[1])) / (norm(vecs[0]) * norm(vecs[1])) <= FLT_EPSILON );
5351
5352 // wd_i stores which side vector (0: _point1-_point2, 1: _point2-_point3) gives the width;
5353 // one of the two will definitely have a slope within -1 to 1
5354 int wd_i = 0;
5355 if( abs(vecs[1][1]) < abs(vecs[1][0]) ) wd_i = 1;
5356 int ht_i = (wd_i + 1) % 2;
5357
5358 float _angle = atan(vecs[wd_i][1] / vecs[wd_i][0]) * 180.0f / (float) CV_PI;
5359 float _width = (float) norm(vecs[wd_i]);
5360 float _height = (float) norm(vecs[ht_i]);
5361
5362 center = _center;
5363 size = Size2f(_width, _height);
5364 angle = _angle;
5365 }
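// A minimal usage sketch (illustrative): the three points must be consecutive
// corners of the rectangle, so the two sides they span are perpendicular.
//
//   cv::RotatedRect rr(cv::Point2f(0, 0), cv::Point2f(4, 0), cv::Point2f(4, 2));
//   // rr.center == (2, 1), rr.size == (4 x 2), rr.angle == 0 degrees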
5366
5367 void RotatedRect::points(Point2f pt[]) const
5368 {
5369 double _angle = angle*CV_PI/180.;
5370 float b = (float)cos(_angle)*0.5f;
5371 float a = (float)sin(_angle)*0.5f;
5372
5373 pt[0].x = center.x - a*size.height - b*size.width;
5374 pt[0].y = center.y + b*size.height - a*size.width;
5375 pt[1].x = center.x + a*size.height - b*size.width;
5376 pt[1].y = center.y - b*size.height - a*size.width;
5377 pt[2].x = 2*center.x - pt[0].x;
5378 pt[2].y = 2*center.y - pt[0].y;
5379 pt[3].x = 2*center.x - pt[1].x;
5380 pt[3].y = 2*center.y - pt[1].y;
5381 }
5382
5383 Rect RotatedRect::boundingRect() const
5384 {
5385 Point2f pt[4];
5386 points(pt);
5387 Rect r(cvFloor(std::min(std::min(std::min(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
5388 cvFloor(std::min(std::min(std::min(pt[0].y, pt[1].y), pt[2].y), pt[3].y)),
5389 cvCeil(std::max(std::max(std::max(pt[0].x, pt[1].x), pt[2].x), pt[3].x)),
5390 cvCeil(std::max(std::max(std::max(pt[0].y, pt[1].y), pt[2].y), pt[3].y)));
5391 r.width -= r.x - 1;
5392 r.height -= r.y - 1;
5393 return r;
5394 }
5395
5396 }
5397
5398 // glue
5399
5400 CvMatND::CvMatND(const cv::Mat& m)
5401 {
5402 cvInitMatNDHeader(this, m.dims, m.size, m.type(), m.data );
5403 int i, d = m.dims;
5404 for( i = 0; i < d; i++ )
5405 dim[i].step = (int)m.step[i];
5406 type |= m.flags & cv::Mat::CONTINUOUS_FLAG;
5407 }
5408
5409 _IplImage::_IplImage(const cv::Mat& m)
5410 {
5411 CV_Assert( m.dims <= 2 );
5412 cvInitImageHeader(this, m.size(), cvIplDepth(m.flags), m.channels());
5413 cvSetData(this, m.data, (int)m.step[0]);
5414 }
5415
5416 CvSparseMat* cvCreateSparseMat(const cv::SparseMat& sm)
5417 {
5418 if( !sm.hdr )
5419 return 0;
5420
5421 CvSparseMat* m = cvCreateSparseMat(sm.hdr->dims, sm.hdr->size, sm.type());
5422
5423 cv::SparseMatConstIterator from = sm.begin();
5424 size_t i, N = sm.nzcount(), esz = sm.elemSize();
5425
5426 for( i = 0; i < N; i++, ++from )
5427 {
5428 const cv::SparseMat::Node* n = from.node();
5429 uchar* to = cvPtrND(m, n->idx, 0, -2, 0);
5430 cv::copyElem(from.ptr, to, esz);
5431 }
5432 return m;
5433 }
5434
5435 void CvSparseMat::copyToSparseMat(cv::SparseMat& m) const
5436 {
5437 m.create( dims, &size[0], type );
5438
5439 CvSparseMatIterator it;
5440 CvSparseNode* n = cvInitSparseMatIterator(this, &it);
5441 size_t esz = m.elemSize();
5442
5443 for( ; n != 0; n = cvGetNextSparseNode(&it) )
5444 {
5445 const int* idx = CV_NODE_IDX(this, n);
5446 uchar* to = m.newNode(idx, m.hash(idx));
5447 cv::copyElem((const uchar*)CV_NODE_VAL(this, n), to, esz);
5448 }
5449 }
5450
5451
5452 /* End of file. */
5453