1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Copyright (C) 2014-2015, Itseez Inc., all rights reserved.
16 // Third party copyrights are property of their respective owners.
17 //
18 // Redistribution and use in source and binary forms, with or without modification,
19 // are permitted provided that the following conditions are met:
20 //
21 //   * Redistributions of source code must retain the above copyright notice,
22 // this list of conditions and the following disclaimer.
23 //
24 //   * Redistributions in binary form must reproduce the above copyright notice,
25 // this list of conditions and the following disclaimer in the documentation
26 // and/or other materials provided with the distribution.
27 //
28 // * The name of the copyright holders may not be used to endorse or promote products
29 // derived from this software without specific prior written permission.
30 //
31 // This software is provided by the copyright holders and contributors "as is" and
32 // any express or implied warranties, including, but not limited to, the implied
33 // warranties of merchantability and fitness for a particular purpose are disclaimed.
34 // In no event shall the Intel Corporation or contributors be liable for any direct,
35 // indirect, incidental, special, exemplary, or consequential damages
36 // (including, but not limited to, procurement of substitute goods or services;
37 // loss of use, data, or profits; or business interruption) however caused
38 // and on any theory of liability, whether in contract, strict liability,
39 // or tort (including negligence or otherwise) arising in any way out of
40 // the use of this software, even if advised of the possibility of such damage.
41 //
42 //M*/
43
44 #include "precomp.hpp"
45 #include "opencl_kernels_imgproc.hpp"
46
47 /*
48 * This file includes code contributed by Simon Perreault
49 * (the function medianBlur_8u_O1)
50 *
51 * Constant-time median filtering -- http://nomis80.org/ctmf.html
52 * Copyright (C) 2006 Simon Perreault
53 *
54 * Contact:
55 * Laboratoire de vision et systemes numeriques
56 * Pavillon Adrien-Pouliot
57 * Universite Laval
58 * Sainte-Foy, Quebec, Canada
59 * G1K 7P4
60 *
61 * perreaul@gel.ulaval.ca
62 */
63
64 namespace cv
65 {
66
67 /****************************************************************************************\
68 Box Filter
69 \****************************************************************************************/
70
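// RowSum computes, for every horizontal position, the sum of `ksize` consecutive
// source pixels per channel. Only the first window is summed explicitly; each
// following window is obtained in O(1) by adding the pixel that enters the window
// and subtracting the one that leaves it: s += S[i + ksize*cn] - S[i].
// Illustrative example: with ksize = 3 and cn = 1, the (border-padded) row
// 1 2 3 4 5 6 7 produces the running sums 6, 9, 12, 15, 18.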
71 template<typename T, typename ST>
72 struct RowSum :
73 public BaseRowFilter
74 {
75     RowSum( int _ksize, int _anchor ) :
76 BaseRowFilter()
77 {
78 ksize = _ksize;
79 anchor = _anchor;
80 }
81
82     virtual void operator()(const uchar* src, uchar* dst, int width, int cn)
83 {
84 const T* S = (const T*)src;
85 ST* D = (ST*)dst;
86 int i = 0, k, ksz_cn = ksize*cn;
87
88 width = (width - 1)*cn;
89 for( k = 0; k < cn; k++, S++, D++ )
90 {
91 ST s = 0;
92 for( i = 0; i < ksz_cn; i += cn )
93 s += S[i];
94 D[0] = s;
95 for( i = 0; i < width; i += cn )
96 {
97 s += S[i + ksz_cn] - S[i];
98 D[i+cn] = s;
99 }
100 }
101 }
102 };
103
104
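// ColumnSum is the vertical counterpart of RowSum: it keeps one running sum per
// column in the `sum` buffer and slides a window of `ksize` rows down the image.
// The first ksize-1 rows are accumulated once (tracked by sumCount, see reset());
// afterwards each output row is produced by adding the incoming row (Sp) and
// subtracting the row that drops out of the window (Sm), optionally multiplied by
// `scale` (1/(ksize.width*ksize.height) for a normalized box filter).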
105 template<typename ST, typename T>
106 struct ColumnSum :
107 public BaseColumnFilter
108 {
109     ColumnSum( int _ksize, int _anchor, double _scale ) :
110 BaseColumnFilter()
111 {
112 ksize = _ksize;
113 anchor = _anchor;
114 scale = _scale;
115 sumCount = 0;
116 }
117
118     virtual void reset() { sumCount = 0; }
119
120     virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
121 {
122 int i;
123 ST* SUM;
124 bool haveScale = scale != 1;
125 double _scale = scale;
126
127 if( width != (int)sum.size() )
128 {
129 sum.resize(width);
130 sumCount = 0;
131 }
132
133 SUM = &sum[0];
134 if( sumCount == 0 )
135 {
136 memset((void*)SUM, 0, width*sizeof(ST));
137
138 for( ; sumCount < ksize - 1; sumCount++, src++ )
139 {
140 const ST* Sp = (const ST*)src[0];
141 for( i = 0; i <= width - 2; i += 2 )
142 {
143 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
144 SUM[i] = s0; SUM[i+1] = s1;
145 }
146
147 for( ; i < width; i++ )
148 SUM[i] += Sp[i];
149 }
150 }
151 else
152 {
153 CV_Assert( sumCount == ksize-1 );
154 src += ksize-1;
155 }
156
157 for( ; count--; src++ )
158 {
159 const ST* Sp = (const ST*)src[0];
160 const ST* Sm = (const ST*)src[1-ksize];
161 T* D = (T*)dst;
162 if( haveScale )
163 {
164 for( i = 0; i <= width - 2; i += 2 )
165 {
166 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
167 D[i] = saturate_cast<T>(s0*_scale);
168 D[i+1] = saturate_cast<T>(s1*_scale);
169 s0 -= Sm[i]; s1 -= Sm[i+1];
170 SUM[i] = s0; SUM[i+1] = s1;
171 }
172
173 for( ; i < width; i++ )
174 {
175 ST s0 = SUM[i] + Sp[i];
176 D[i] = saturate_cast<T>(s0*_scale);
177 SUM[i] = s0 - Sm[i];
178 }
179 }
180 else
181 {
182 for( i = 0; i <= width - 2; i += 2 )
183 {
184 ST s0 = SUM[i] + Sp[i], s1 = SUM[i+1] + Sp[i+1];
185 D[i] = saturate_cast<T>(s0);
186 D[i+1] = saturate_cast<T>(s1);
187 s0 -= Sm[i]; s1 -= Sm[i+1];
188 SUM[i] = s0; SUM[i+1] = s1;
189 }
190
191 for( ; i < width; i++ )
192 {
193 ST s0 = SUM[i] + Sp[i];
194 D[i] = saturate_cast<T>(s0);
195 SUM[i] = s0 - Sm[i];
196 }
197 }
198 dst += dststep;
199 }
200 }
201
202 double scale;
203 int sumCount;
204 std::vector<ST> sum;
205 };
206
207
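// The specializations below implement the same column sum for the common
// int -> {uchar, short, ushort, int, float} destinations and add SSE2/NEON code
// paths. Where scaling is needed, the 32-bit sums are converted to float,
// multiplied by the scale, rounded, and packed to the destination type with
// saturation.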
208 template<>
209 struct ColumnSum<int, uchar> :
210 public BaseColumnFilter
211 {
212     ColumnSum( int _ksize, int _anchor, double _scale ) :
213 BaseColumnFilter()
214 {
215 ksize = _ksize;
216 anchor = _anchor;
217 scale = _scale;
218 sumCount = 0;
219 }
220
221     virtual void reset() { sumCount = 0; }
222
223     virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
224 {
225 int i;
226 int* SUM;
227 bool haveScale = scale != 1;
228 double _scale = scale;
229
230 #if CV_SSE2
231 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
232 #endif
233
234 if( width != (int)sum.size() )
235 {
236 sum.resize(width);
237 sumCount = 0;
238 }
239
240 SUM = &sum[0];
241 if( sumCount == 0 )
242 {
243 memset((void*)SUM, 0, width*sizeof(int));
244 for( ; sumCount < ksize - 1; sumCount++, src++ )
245 {
246 const int* Sp = (const int*)src[0];
247 i = 0;
248 #if CV_SSE2
249 if(haveSSE2)
250 {
251 for( ; i <= width-4; i+=4 )
252 {
253 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
254 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
255 _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
256 }
257 }
258 #elif CV_NEON
259 for( ; i <= width - 4; i+=4 )
260 vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
261 #endif
262 for( ; i < width; i++ )
263 SUM[i] += Sp[i];
264 }
265 }
266 else
267 {
268 CV_Assert( sumCount == ksize-1 );
269 src += ksize-1;
270 }
271
272 for( ; count--; src++ )
273 {
274 const int* Sp = (const int*)src[0];
275 const int* Sm = (const int*)src[1-ksize];
276 uchar* D = (uchar*)dst;
277 if( haveScale )
278 {
279 i = 0;
280 #if CV_SSE2
281 if(haveSSE2)
282 {
283 const __m128 scale4 = _mm_set1_ps((float)_scale);
284 for( ; i <= width-8; i+=8 )
285 {
286 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
287 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
288
289 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
290 _mm_loadu_si128((const __m128i*)(Sp+i)));
291 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
292 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
293
294 __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
295 __m128i _s0T1 = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s01)));
296
297 _s0T = _mm_packs_epi32(_s0T, _s0T1);
298
299 _mm_storel_epi64((__m128i*)(D+i), _mm_packus_epi16(_s0T, _s0T));
300
301 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
302 _mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
303 }
304 }
305 #elif CV_NEON
306 float32x4_t v_scale = vdupq_n_f32((float)_scale);
307 for( ; i <= width-8; i+=8 )
308 {
309 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
310 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
311
312 uint32x4_t v_s0d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
313 uint32x4_t v_s01d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
314
315 uint16x8_t v_dst = vcombine_u16(vqmovn_u32(v_s0d), vqmovn_u32(v_s01d));
316 vst1_u8(D + i, vqmovn_u16(v_dst));
317
318 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
319 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
320 }
321 #endif
322 for( ; i < width; i++ )
323 {
324 int s0 = SUM[i] + Sp[i];
325 D[i] = saturate_cast<uchar>(s0*_scale);
326 SUM[i] = s0 - Sm[i];
327 }
328 }
329 else
330 {
331 i = 0;
332 #if CV_SSE2
333 if(haveSSE2)
334 {
335 for( ; i <= width-8; i+=8 )
336 {
337 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
338 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
339
340 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
341 _mm_loadu_si128((const __m128i*)(Sp+i)));
342 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
343 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
344
345 __m128i _s0T = _mm_packs_epi32(_s0, _s01);
346
347 _mm_storel_epi64((__m128i*)(D+i), _mm_packus_epi16(_s0T, _s0T));
348
349 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
350 _mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
351 }
352 }
353 #elif CV_NEON
354 for( ; i <= width-8; i+=8 )
355 {
356 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
357 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
358
359 uint16x8_t v_dst = vcombine_u16(vqmovun_s32(v_s0), vqmovun_s32(v_s01));
360 vst1_u8(D + i, vqmovn_u16(v_dst));
361
362 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
363 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
364 }
365 #endif
366
367 for( ; i < width; i++ )
368 {
369 int s0 = SUM[i] + Sp[i];
370 D[i] = saturate_cast<uchar>(s0);
371 SUM[i] = s0 - Sm[i];
372 }
373 }
374 dst += dststep;
375 }
376 }
377
378 double scale;
379 int sumCount;
380 std::vector<int> sum;
381 };
382
383 template<>
384 struct ColumnSum<int, short> :
385 public BaseColumnFilter
386 {
387     ColumnSum( int _ksize, int _anchor, double _scale ) :
388 BaseColumnFilter()
389 {
390 ksize = _ksize;
391 anchor = _anchor;
392 scale = _scale;
393 sumCount = 0;
394 }
395
396     virtual void reset() { sumCount = 0; }
397
398     virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
399 {
400 int i;
401 int* SUM;
402 bool haveScale = scale != 1;
403 double _scale = scale;
404
405 #if CV_SSE2
406 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
407 #endif
408
409 if( width != (int)sum.size() )
410 {
411 sum.resize(width);
412 sumCount = 0;
413 }
414 SUM = &sum[0];
415 if( sumCount == 0 )
416 {
417 memset((void*)SUM, 0, width*sizeof(int));
418 for( ; sumCount < ksize - 1; sumCount++, src++ )
419 {
420 const int* Sp = (const int*)src[0];
421 i = 0;
422 #if CV_SSE2
423 if(haveSSE2)
424 {
425 for( ; i <= width-4; i+=4 )
426 {
427 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
428 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
429 _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
430 }
431 }
432 #elif CV_NEON
433 for( ; i <= width - 4; i+=4 )
434 vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
435 #endif
436 for( ; i < width; i++ )
437 SUM[i] += Sp[i];
438 }
439 }
440 else
441 {
442 CV_Assert( sumCount == ksize-1 );
443 src += ksize-1;
444 }
445
446 for( ; count--; src++ )
447 {
448 const int* Sp = (const int*)src[0];
449 const int* Sm = (const int*)src[1-ksize];
450 short* D = (short*)dst;
451 if( haveScale )
452 {
453 i = 0;
454 #if CV_SSE2
455 if(haveSSE2)
456 {
457 const __m128 scale4 = _mm_set1_ps((float)_scale);
458 for( ; i <= width-8; i+=8 )
459 {
460 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
461 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
462
463 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
464 _mm_loadu_si128((const __m128i*)(Sp+i)));
465 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
466 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
467
468 __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
469 __m128i _s0T1 = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s01)));
470
471 _mm_storeu_si128((__m128i*)(D+i), _mm_packs_epi32(_s0T, _s0T1));
472
473 _mm_storeu_si128((__m128i*)(SUM+i),_mm_sub_epi32(_s0,_sm));
474 _mm_storeu_si128((__m128i*)(SUM+i+4), _mm_sub_epi32(_s01,_sm1));
475 }
476 }
477 #elif CV_NEON
478 float32x4_t v_scale = vdupq_n_f32((float)_scale);
479 for( ; i <= width-8; i+=8 )
480 {
481 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
482 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
483
484 int32x4_t v_s0d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
485 int32x4_t v_s01d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
486 vst1q_s16(D + i, vcombine_s16(vqmovn_s32(v_s0d), vqmovn_s32(v_s01d)));
487
488 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
489 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
490 }
491 #endif
492 for( ; i < width; i++ )
493 {
494 int s0 = SUM[i] + Sp[i];
495 D[i] = saturate_cast<short>(s0*_scale);
496 SUM[i] = s0 - Sm[i];
497 }
498 }
499 else
500 {
501 i = 0;
502 #if CV_SSE2
503 if(haveSSE2)
504 {
505 for( ; i <= width-8; i+=8 )
506 {
507
508 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
509 __m128i _sm1 = _mm_loadu_si128((const __m128i*)(Sm+i+4));
510
511 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
512 _mm_loadu_si128((const __m128i*)(Sp+i)));
513 __m128i _s01 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i+4)),
514 _mm_loadu_si128((const __m128i*)(Sp+i+4)));
515
516 _mm_storeu_si128((__m128i*)(D+i), _mm_packs_epi32(_s0, _s01));
517
518 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
519 _mm_storeu_si128((__m128i*)(SUM+i+4),_mm_sub_epi32(_s01,_sm1));
520 }
521 }
522 #elif CV_NEON
523 for( ; i <= width-8; i+=8 )
524 {
525 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
526 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
527
528 vst1q_s16(D + i, vcombine_s16(vqmovn_s32(v_s0), vqmovn_s32(v_s01)));
529
530 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
531 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
532 }
533 #endif
534
535 for( ; i < width; i++ )
536 {
537 int s0 = SUM[i] + Sp[i];
538 D[i] = saturate_cast<short>(s0);
539 SUM[i] = s0 - Sm[i];
540 }
541 }
542 dst += dststep;
543 }
544 }
545
546 double scale;
547 int sumCount;
548 std::vector<int> sum;
549 };
550
551
552 template<>
553 struct ColumnSum<int, ushort> :
554 public BaseColumnFilter
555 {
556     ColumnSum( int _ksize, int _anchor, double _scale ) :
557 BaseColumnFilter()
558 {
559 ksize = _ksize;
560 anchor = _anchor;
561 scale = _scale;
562 sumCount = 0;
563 }
564
565     virtual void reset() { sumCount = 0; }
566
567     virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
568 {
569 int i;
570 int* SUM;
571 bool haveScale = scale != 1;
572 double _scale = scale;
573 #if CV_SSE2
574 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
575 #endif
576
577 if( width != (int)sum.size() )
578 {
579 sum.resize(width);
580 sumCount = 0;
581 }
582 SUM = &sum[0];
583 if( sumCount == 0 )
584 {
585 memset((void*)SUM, 0, width*sizeof(int));
586 for( ; sumCount < ksize - 1; sumCount++, src++ )
587 {
588 const int* Sp = (const int*)src[0];
589 i = 0;
590 #if CV_SSE2
591 if(haveSSE2)
592 {
593 for( ; i < width-4; i+=4 )
594 {
595 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
596 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
597 _mm_storeu_si128((__m128i*)(SUM+i), _mm_add_epi32(_sum, _sp));
598 }
599 }
600 #elif CV_NEON
601 for( ; i <= width - 4; i+=4 )
602 vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
603 #endif
604 for( ; i < width; i++ )
605 SUM[i] += Sp[i];
606 }
607 }
608 else
609 {
610 CV_Assert( sumCount == ksize-1 );
611 src += ksize-1;
612 }
613
614 for( ; count--; src++ )
615 {
616 const int* Sp = (const int*)src[0];
617 const int* Sm = (const int*)src[1-ksize];
618 ushort* D = (ushort*)dst;
619 if( haveScale )
620 {
621 i = 0;
622 #if CV_SSE2
623 if(haveSSE2)
624 {
625 const __m128 scale4 = _mm_set1_ps((float)_scale);
626 const __m128i delta0 = _mm_set1_epi32(0x8000);
627 const __m128i delta1 = _mm_set1_epi32(0x80008000);
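// _mm_packs_epi32 saturates to *signed* 16-bit lanes, so the sums are biased by
// -0x8000 (delta0) before packing and the bias is added back per 16-bit lane
// (delta1 = 0x80008000) afterwards, which emulates unsigned 16-bit saturation.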
628
629 for( ; i < width-4; i+=4)
630 {
631 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
632 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
633 _mm_loadu_si128((const __m128i*)(Sp+i)));
634
635 __m128i _res = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
636
637 _res = _mm_sub_epi32(_res, delta0);
638 _res = _mm_add_epi16(_mm_packs_epi32(_res, _res), delta1);
639
640 _mm_storel_epi64((__m128i*)(D+i), _res);
641 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
642 }
643 }
644 #elif CV_NEON
645 float32x4_t v_scale = vdupq_n_f32((float)_scale);
646 for( ; i <= width-8; i+=8 )
647 {
648 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
649 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
650
651 uint32x4_t v_s0d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
652 uint32x4_t v_s01d = cv_vrndq_u32_f32(vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
653 vst1q_u16(D + i, vcombine_u16(vqmovn_u32(v_s0d), vqmovn_u32(v_s01d)));
654
655 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
656 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
657 }
658 #endif
659 for( ; i < width; i++ )
660 {
661 int s0 = SUM[i] + Sp[i];
662 D[i] = saturate_cast<ushort>(s0*_scale);
663 SUM[i] = s0 - Sm[i];
664 }
665 }
666 else
667 {
668 i = 0;
669 #if CV_SSE2
670 if(haveSSE2)
671 {
672 const __m128i delta0 = _mm_set1_epi32(0x8000);
673 const __m128i delta1 = _mm_set1_epi32(0x80008000);
674
675 for( ; i < width-4; i+=4 )
676 {
677 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
678 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
679 _mm_loadu_si128((const __m128i*)(Sp+i)));
680
681 __m128i _res = _mm_sub_epi32(_s0, delta0);
682 _res = _mm_add_epi16(_mm_packs_epi32(_res, _res), delta1);
683
684 _mm_storel_epi64((__m128i*)(D+i), _res);
685 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
686 }
687 }
688 #elif CV_NEON
689 for( ; i <= width-8; i+=8 )
690 {
691 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
692 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
693
694 vst1q_u16(D + i, vcombine_u16(vqmovun_s32(v_s0), vqmovun_s32(v_s01)));
695
696 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
697 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
698 }
699 #endif
700
701 for( ; i < width; i++ )
702 {
703 int s0 = SUM[i] + Sp[i];
704 D[i] = saturate_cast<ushort>(s0);
705 SUM[i] = s0 - Sm[i];
706 }
707 }
708 dst += dststep;
709 }
710 }
711
712 double scale;
713 int sumCount;
714 std::vector<int> sum;
715 };
716
717 template<>
718 struct ColumnSum<int, int> :
719 public BaseColumnFilter
720 {
721     ColumnSum( int _ksize, int _anchor, double _scale ) :
722 BaseColumnFilter()
723 {
724 ksize = _ksize;
725 anchor = _anchor;
726 scale = _scale;
727 sumCount = 0;
728 }
729
730     virtual void reset() { sumCount = 0; }
731
732     virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
733 {
734 int i;
735 int* SUM;
736 bool haveScale = scale != 1;
737 double _scale = scale;
738
739 #if CV_SSE2
740 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
741 #endif
742
743 if( width != (int)sum.size() )
744 {
745 sum.resize(width);
746 sumCount = 0;
747 }
748 SUM = &sum[0];
749 if( sumCount == 0 )
750 {
751 memset((void*)SUM, 0, width*sizeof(int));
752 for( ; sumCount < ksize - 1; sumCount++, src++ )
753 {
754 const int* Sp = (const int*)src[0];
755 i = 0;
756 #if CV_SSE2
757 if(haveSSE2)
758 {
759 for( ; i <= width-4; i+=4 )
760 {
761 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
762 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
763 _mm_storeu_si128((__m128i*)(SUM+i),_mm_add_epi32(_sum, _sp));
764 }
765 }
766 #elif CV_NEON
767 for( ; i <= width - 4; i+=4 )
768 vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
769 #endif
770 for( ; i < width; i++ )
771 SUM[i] += Sp[i];
772 }
773 }
774 else
775 {
776 CV_Assert( sumCount == ksize-1 );
777 src += ksize-1;
778 }
779
780 for( ; count--; src++ )
781 {
782 const int* Sp = (const int*)src[0];
783 const int* Sm = (const int*)src[1-ksize];
784 int* D = (int*)dst;
785 if( haveScale )
786 {
787 i = 0;
788 #if CV_SSE2
789 if(haveSSE2)
790 {
791 const __m128 scale4 = _mm_set1_ps((float)_scale);
792 for( ; i <= width-4; i+=4 )
793 {
794 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
795
796 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
797 _mm_loadu_si128((const __m128i*)(Sp+i)));
798
799 __m128i _s0T = _mm_cvtps_epi32(_mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
800
801 _mm_storeu_si128((__m128i*)(D+i), _s0T);
802 _mm_storeu_si128((__m128i*)(SUM+i),_mm_sub_epi32(_s0,_sm));
803 }
804 }
805 #elif CV_NEON
806 float32x4_t v_scale = vdupq_n_f32((float)_scale);
807 for( ; i <= width-4; i+=4 )
808 {
809 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
810
811 int32x4_t v_s0d = cv_vrndq_s32_f32(vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
812 vst1q_s32(D + i, v_s0d);
813
814 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
815 }
816 #endif
817 for( ; i < width; i++ )
818 {
819 int s0 = SUM[i] + Sp[i];
820 D[i] = saturate_cast<int>(s0*_scale);
821 SUM[i] = s0 - Sm[i];
822 }
823 }
824 else
825 {
826 i = 0;
827 #if CV_SSE2
828 if(haveSSE2)
829 {
830 for( ; i <= width-4; i+=4 )
831 {
832 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
833 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
834 _mm_loadu_si128((const __m128i*)(Sp+i)));
835
836 _mm_storeu_si128((__m128i*)(D+i), _s0);
837 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
838 }
839 }
840 #elif CV_NEON
841 for( ; i <= width-4; i+=4 )
842 {
843 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
844
845 vst1q_s32(D + i, v_s0);
846 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
847 }
848 #endif
849
850 for( ; i < width; i++ )
851 {
852 int s0 = SUM[i] + Sp[i];
853 D[i] = s0;
854 SUM[i] = s0 - Sm[i];
855 }
856 }
857 dst += dststep;
858 }
859 }
860
861 double scale;
862 int sumCount;
863 std::vector<int> sum;
864 };
865
866
867 template<>
868 struct ColumnSum<int, float> :
869 public BaseColumnFilter
870 {
871     ColumnSum( int _ksize, int _anchor, double _scale ) :
872 BaseColumnFilter()
873 {
874 ksize = _ksize;
875 anchor = _anchor;
876 scale = _scale;
877 sumCount = 0;
878 }
879
880     virtual void reset() { sumCount = 0; }
881
882     virtual void operator()(const uchar** src, uchar* dst, int dststep, int count, int width)
883 {
884 int i;
885 int* SUM;
886 bool haveScale = scale != 1;
887 double _scale = scale;
888
889 #if CV_SSE2
890 bool haveSSE2 = checkHardwareSupport(CV_CPU_SSE2);
891 #endif
892
893 if( width != (int)sum.size() )
894 {
895 sum.resize(width);
896 sumCount = 0;
897 }
898
899 SUM = &sum[0];
900 if( sumCount == 0 )
901 {
902 memset((void *)SUM, 0, sizeof(int) * width);
903
904 for( ; sumCount < ksize - 1; sumCount++, src++ )
905 {
906 const int* Sp = (const int*)src[0];
907 i = 0;
908
909 #if CV_SSE2
910 if(haveSSE2)
911 {
912 for( ; i < width-4; i+=4 )
913 {
914 __m128i _sum = _mm_loadu_si128((const __m128i*)(SUM+i));
915 __m128i _sp = _mm_loadu_si128((const __m128i*)(Sp+i));
916 _mm_storeu_si128((__m128i*)(SUM+i), _mm_add_epi32(_sum, _sp));
917 }
918 }
919 #elif CV_NEON
920 for( ; i <= width - 4; i+=4 )
921 vst1q_s32(SUM + i, vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i)));
922 #endif
923
924 for( ; i < width; i++ )
925 SUM[i] += Sp[i];
926 }
927 }
928 else
929 {
930 CV_Assert( sumCount == ksize-1 );
931 src += ksize-1;
932 }
933
934 for( ; count--; src++ )
935 {
936 const int * Sp = (const int*)src[0];
937 const int * Sm = (const int*)src[1-ksize];
938 float* D = (float*)dst;
939 if( haveScale )
940 {
941 i = 0;
942
943 #if CV_SSE2
944 if(haveSSE2)
945 {
946 const __m128 scale4 = _mm_set1_ps((float)_scale);
947
948 for( ; i < width-4; i+=4)
949 {
950 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
951 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
952 _mm_loadu_si128((const __m128i*)(Sp+i)));
953
954 _mm_storeu_ps(D+i, _mm_mul_ps(scale4, _mm_cvtepi32_ps(_s0)));
955 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
956 }
957 }
958 #elif CV_NEON
959 float32x4_t v_scale = vdupq_n_f32((float)_scale);
960 for( ; i <= width-8; i+=8 )
961 {
962 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
963 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
964
965 vst1q_f32(D + i, vmulq_f32(vcvtq_f32_s32(v_s0), v_scale));
966 vst1q_f32(D + i + 4, vmulq_f32(vcvtq_f32_s32(v_s01), v_scale));
967
968 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
969 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
970 }
971 #endif
972
973 for( ; i < width; i++ )
974 {
975 int s0 = SUM[i] + Sp[i];
976 D[i] = (float)(s0*_scale);
977 SUM[i] = s0 - Sm[i];
978 }
979 }
980 else
981 {
982 i = 0;
983
984 #if CV_SSE2
985 if(haveSSE2)
986 {
987 for( ; i < width-4; i+=4)
988 {
989 __m128i _sm = _mm_loadu_si128((const __m128i*)(Sm+i));
990 __m128i _s0 = _mm_add_epi32(_mm_loadu_si128((const __m128i*)(SUM+i)),
991 _mm_loadu_si128((const __m128i*)(Sp+i)));
992
993 _mm_storeu_ps(D+i, _mm_cvtepi32_ps(_s0));
994 _mm_storeu_si128((__m128i*)(SUM+i), _mm_sub_epi32(_s0,_sm));
995 }
996 }
997 #elif CV_NEON
998 for( ; i <= width-8; i+=8 )
999 {
1000 int32x4_t v_s0 = vaddq_s32(vld1q_s32(SUM + i), vld1q_s32(Sp + i));
1001 int32x4_t v_s01 = vaddq_s32(vld1q_s32(SUM + i + 4), vld1q_s32(Sp + i + 4));
1002
1003 vst1q_f32(D + i, vcvtq_f32_s32(v_s0));
1004 vst1q_f32(D + i + 4, vcvtq_f32_s32(v_s01));
1005
1006 vst1q_s32(SUM + i, vsubq_s32(v_s0, vld1q_s32(Sm + i)));
1007 vst1q_s32(SUM + i + 4, vsubq_s32(v_s01, vld1q_s32(Sm + i + 4)));
1008 }
1009 #endif
1010
1011 for( ; i < width; i++ )
1012 {
1013 int s0 = SUM[i] + Sp[i];
1014 D[i] = (float)(s0);
1015 SUM[i] = s0 - Sm[i];
1016 }
1017 }
1018 dst += dststep;
1019 }
1020 }
1021
1022 double scale;
1023 int sumCount;
1024 std::vector<int> sum;
1025 };
1026
1027 #ifdef HAVE_OPENCL
1028
1029 #define DIVUP(total, grain) ((total + grain - 1) / (grain))
1030 #define ROUNDUP(sz, n) ((sz) + (n) - 1 - (((sz) + (n) - 1) % (n)))
1031
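// OpenCL path: DIVUP rounds an integer division up (e.g. DIVUP(10, 4) == 3) and
// ROUNDUP rounds a value up to a multiple of n (e.g. ROUNDUP(10, 4) == 12).
// ocl_boxFilter chooses between two kernels: on Intel non-CPU devices with small
// kernel sizes the specialized "filterSmall" kernel processes several pixels per
// work item; otherwise the generic "boxFilter" kernel is built and the block size
// is reduced until it fits the device's work-group size limit.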
1032 static bool ocl_boxFilter( InputArray _src, OutputArray _dst, int ddepth,
1033 Size ksize, Point anchor, int borderType, bool normalize, bool sqr = false )
1034 {
1035 const ocl::Device & dev = ocl::Device::getDefault();
1036 int type = _src.type(), sdepth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type), esz = CV_ELEM_SIZE(type);
1037 bool doubleSupport = dev.doubleFPConfig() > 0;
1038
1039 if (ddepth < 0)
1040 ddepth = sdepth;
1041
1042 if (cn > 4 || (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F)) ||
1043 _src.offset() % esz != 0 || _src.step() % esz != 0)
1044 return false;
1045
1046 if (anchor.x < 0)
1047 anchor.x = ksize.width / 2;
1048 if (anchor.y < 0)
1049 anchor.y = ksize.height / 2;
1050
1051 int computeUnits = ocl::Device::getDefault().maxComputeUnits();
1052 float alpha = 1.0f / (ksize.height * ksize.width);
1053 Size size = _src.size(), wholeSize;
1054 bool isolated = (borderType & BORDER_ISOLATED) != 0;
1055 borderType &= ~BORDER_ISOLATED;
1056 int wdepth = std::max(CV_32F, std::max(ddepth, sdepth)),
1057 wtype = CV_MAKE_TYPE(wdepth, cn), dtype = CV_MAKE_TYPE(ddepth, cn);
1058
1059 const char * const borderMap[] = { "BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT", 0, "BORDER_REFLECT_101" };
1060 size_t globalsize[2] = { size.width, size.height };
1061 size_t localsize_general[2] = { 0, 1 }, * localsize = NULL;
1062
1063 UMat src = _src.getUMat();
1064 if (!isolated)
1065 {
1066 Point ofs;
1067 src.locateROI(wholeSize, ofs);
1068 }
1069
1070 int h = isolated ? size.height : wholeSize.height;
1071 int w = isolated ? size.width : wholeSize.width;
1072
1073 size_t maxWorkItemSizes[32];
1074 ocl::Device::getDefault().maxWorkItemSizes(maxWorkItemSizes);
1075 int tryWorkItems = (int)maxWorkItemSizes[0];
1076
1077 ocl::Kernel kernel;
1078
1079 if (dev.isIntel() && !(dev.type() & ocl::Device::TYPE_CPU) &&
1080 ((ksize.width < 5 && ksize.height < 5 && esz <= 4) ||
1081 (ksize.width == 5 && ksize.height == 5 && cn == 1)))
1082 {
1083 if (w < ksize.width || h < ksize.height)
1084 return false;
1085
1086 // Figure out what vector size to use for loading the pixels.
1087 int pxLoadNumPixels = cn != 1 || size.width % 4 ? 1 : 4;
1088 int pxLoadVecSize = cn * pxLoadNumPixels;
1089
1090 // Figure out how many pixels per work item to compute in X and Y
1091 // directions. Too many and we run out of registers.
1092 int pxPerWorkItemX = 1, pxPerWorkItemY = 1;
1093 if (cn <= 2 && ksize.width <= 4 && ksize.height <= 4)
1094 {
1095 pxPerWorkItemX = size.width % 8 ? size.width % 4 ? size.width % 2 ? 1 : 2 : 4 : 8;
1096 pxPerWorkItemY = size.height % 2 ? 1 : 2;
1097 }
1098 else if (cn < 4 || (ksize.width <= 4 && ksize.height <= 4))
1099 {
1100 pxPerWorkItemX = size.width % 2 ? 1 : 2;
1101 pxPerWorkItemY = size.height % 2 ? 1 : 2;
1102 }
1103 globalsize[0] = size.width / pxPerWorkItemX;
1104 globalsize[1] = size.height / pxPerWorkItemY;
1105
1106 // Need some padding in the private array for pixels
1107 int privDataWidth = ROUNDUP(pxPerWorkItemX + ksize.width - 1, pxLoadNumPixels);
1108
1109 // Make the global size a nice round number so the runtime can pick
1110 // from reasonable choices for the workgroup size
1111 const int wgRound = 256;
1112 globalsize[0] = ROUNDUP(globalsize[0], wgRound);
1113
1114 char build_options[1024], cvt[2][40];
1115 sprintf(build_options, "-D cn=%d "
1116 "-D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d "
1117 "-D PX_LOAD_VEC_SIZE=%d -D PX_LOAD_NUM_PX=%d "
1118 "-D PX_PER_WI_X=%d -D PX_PER_WI_Y=%d -D PRIV_DATA_WIDTH=%d -D %s -D %s "
1119 "-D PX_LOAD_X_ITERATIONS=%d -D PX_LOAD_Y_ITERATIONS=%d "
1120 "-D srcT=%s -D srcT1=%s -D dstT=%s -D dstT1=%s -D WT=%s -D WT1=%s "
1121 "-D convertToWT=%s -D convertToDstT=%s%s%s -D PX_LOAD_FLOAT_VEC_CONV=convert_%s -D OP_BOX_FILTER",
1122 cn, anchor.x, anchor.y, ksize.width, ksize.height,
1123 pxLoadVecSize, pxLoadNumPixels,
1124 pxPerWorkItemX, pxPerWorkItemY, privDataWidth, borderMap[borderType],
1125 isolated ? "BORDER_ISOLATED" : "NO_BORDER_ISOLATED",
1126 privDataWidth / pxLoadNumPixels, pxPerWorkItemY + ksize.height - 1,
1127 ocl::typeToStr(type), ocl::typeToStr(sdepth), ocl::typeToStr(dtype),
1128 ocl::typeToStr(ddepth), ocl::typeToStr(wtype), ocl::typeToStr(wdepth),
1129 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[0]),
1130 ocl::convertTypeStr(wdepth, ddepth, cn, cvt[1]),
1131 normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
1132 ocl::typeToStr(CV_MAKE_TYPE(wdepth, pxLoadVecSize)) //PX_LOAD_FLOAT_VEC_CONV
1133 );
1134
1135
1136 if (!kernel.create("filterSmall", cv::ocl::imgproc::filterSmall_oclsrc, build_options))
1137 return false;
1138 }
1139 else
1140 {
1141 localsize = localsize_general;
1142 for ( ; ; )
1143 {
1144 int BLOCK_SIZE_X = tryWorkItems, BLOCK_SIZE_Y = std::min(ksize.height * 10, size.height);
1145
1146 while (BLOCK_SIZE_X > 32 && BLOCK_SIZE_X >= ksize.width * 2 && BLOCK_SIZE_X > size.width * 2)
1147 BLOCK_SIZE_X /= 2;
1148 while (BLOCK_SIZE_Y < BLOCK_SIZE_X / 8 && BLOCK_SIZE_Y * computeUnits * 32 < size.height)
1149 BLOCK_SIZE_Y *= 2;
1150
1151 if (ksize.width > BLOCK_SIZE_X || w < ksize.width || h < ksize.height)
1152 return false;
1153
1154 char cvt[2][50];
1155 String opts = format("-D LOCAL_SIZE_X=%d -D BLOCK_SIZE_Y=%d -D ST=%s -D DT=%s -D WT=%s -D convertToDT=%s -D convertToWT=%s"
1156 " -D ANCHOR_X=%d -D ANCHOR_Y=%d -D KERNEL_SIZE_X=%d -D KERNEL_SIZE_Y=%d -D %s%s%s%s%s"
1157 " -D ST1=%s -D DT1=%s -D cn=%d",
1158 BLOCK_SIZE_X, BLOCK_SIZE_Y, ocl::typeToStr(type), ocl::typeToStr(CV_MAKE_TYPE(ddepth, cn)),
1159 ocl::typeToStr(CV_MAKE_TYPE(wdepth, cn)),
1160 ocl::convertTypeStr(wdepth, ddepth, cn, cvt[0]),
1161 ocl::convertTypeStr(sdepth, wdepth, cn, cvt[1]),
1162 anchor.x, anchor.y, ksize.width, ksize.height, borderMap[borderType],
1163 isolated ? " -D BORDER_ISOLATED" : "", doubleSupport ? " -D DOUBLE_SUPPORT" : "",
1164 normalize ? " -D NORMALIZE" : "", sqr ? " -D SQR" : "",
1165 ocl::typeToStr(sdepth), ocl::typeToStr(ddepth), cn);
1166
1167 localsize[0] = BLOCK_SIZE_X;
1168 globalsize[0] = DIVUP(size.width, BLOCK_SIZE_X - (ksize.width - 1)) * BLOCK_SIZE_X;
1169 globalsize[1] = DIVUP(size.height, BLOCK_SIZE_Y);
1170
1171 kernel.create("boxFilter", cv::ocl::imgproc::boxFilter_oclsrc, opts);
1172 if (kernel.empty())
1173 return false;
1174
1175 size_t kernelWorkGroupSize = kernel.workGroupSize();
1176 if (localsize[0] <= kernelWorkGroupSize)
1177 break;
1178 if (BLOCK_SIZE_X < (int)kernelWorkGroupSize)
1179 return false;
1180
1181 tryWorkItems = (int)kernelWorkGroupSize;
1182 }
1183 }
1184
1185 _dst.create(size, CV_MAKETYPE(ddepth, cn));
1186 UMat dst = _dst.getUMat();
1187
1188 int idxArg = kernel.set(0, ocl::KernelArg::PtrReadOnly(src));
1189 idxArg = kernel.set(idxArg, (int)src.step);
1190 int srcOffsetX = (int)((src.offset % src.step) / src.elemSize());
1191 int srcOffsetY = (int)(src.offset / src.step);
1192 int srcEndX = isolated ? srcOffsetX + size.width : wholeSize.width;
1193 int srcEndY = isolated ? srcOffsetY + size.height : wholeSize.height;
1194 idxArg = kernel.set(idxArg, srcOffsetX);
1195 idxArg = kernel.set(idxArg, srcOffsetY);
1196 idxArg = kernel.set(idxArg, srcEndX);
1197 idxArg = kernel.set(idxArg, srcEndY);
1198 idxArg = kernel.set(idxArg, ocl::KernelArg::WriteOnly(dst));
1199 if (normalize)
1200 idxArg = kernel.set(idxArg, (float)alpha);
1201
1202 return kernel.run(2, globalsize, localsize, false);
1203 }
1204
1205 #undef ROUNDUP
1206
1207 #endif
1208
1209 }
1210
1211
1212 cv::Ptr<cv::BaseRowFilter> cv::getRowSumFilter(int srcType, int sumType, int ksize, int anchor)
1213 {
1214 int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
1215 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
1216
1217 if( anchor < 0 )
1218 anchor = ksize/2;
1219
1220 if( sdepth == CV_8U && ddepth == CV_32S )
1221 return makePtr<RowSum<uchar, int> >(ksize, anchor);
1222 if( sdepth == CV_8U && ddepth == CV_64F )
1223 return makePtr<RowSum<uchar, double> >(ksize, anchor);
1224 if( sdepth == CV_16U && ddepth == CV_32S )
1225 return makePtr<RowSum<ushort, int> >(ksize, anchor);
1226 if( sdepth == CV_16U && ddepth == CV_64F )
1227 return makePtr<RowSum<ushort, double> >(ksize, anchor);
1228 if( sdepth == CV_16S && ddepth == CV_32S )
1229 return makePtr<RowSum<short, int> >(ksize, anchor);
1230 if( sdepth == CV_32S && ddepth == CV_32S )
1231 return makePtr<RowSum<int, int> >(ksize, anchor);
1232 if( sdepth == CV_16S && ddepth == CV_64F )
1233 return makePtr<RowSum<short, double> >(ksize, anchor);
1234 if( sdepth == CV_32F && ddepth == CV_64F )
1235 return makePtr<RowSum<float, double> >(ksize, anchor);
1236 if( sdepth == CV_64F && ddepth == CV_64F )
1237 return makePtr<RowSum<double, double> >(ksize, anchor);
1238
1239 CV_Error_( CV_StsNotImplemented,
1240 ("Unsupported combination of source format (=%d), and buffer format (=%d)",
1241 srcType, sumType));
1242
1243 return Ptr<BaseRowFilter>();
1244 }
1245
1246
1247 cv::Ptr<cv::BaseColumnFilter> cv::getColumnSumFilter(int sumType, int dstType, int ksize,
1248 int anchor, double scale)
1249 {
1250 int sdepth = CV_MAT_DEPTH(sumType), ddepth = CV_MAT_DEPTH(dstType);
1251 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(dstType) );
1252
1253 if( anchor < 0 )
1254 anchor = ksize/2;
1255
1256 if( ddepth == CV_8U && sdepth == CV_32S )
1257 return makePtr<ColumnSum<int, uchar> >(ksize, anchor, scale);
1258 if( ddepth == CV_8U && sdepth == CV_64F )
1259 return makePtr<ColumnSum<double, uchar> >(ksize, anchor, scale);
1260 if( ddepth == CV_16U && sdepth == CV_32S )
1261 return makePtr<ColumnSum<int, ushort> >(ksize, anchor, scale);
1262 if( ddepth == CV_16U && sdepth == CV_64F )
1263 return makePtr<ColumnSum<double, ushort> >(ksize, anchor, scale);
1264 if( ddepth == CV_16S && sdepth == CV_32S )
1265 return makePtr<ColumnSum<int, short> >(ksize, anchor, scale);
1266 if( ddepth == CV_16S && sdepth == CV_64F )
1267 return makePtr<ColumnSum<double, short> >(ksize, anchor, scale);
1268 if( ddepth == CV_32S && sdepth == CV_32S )
1269 return makePtr<ColumnSum<int, int> >(ksize, anchor, scale);
1270 if( ddepth == CV_32F && sdepth == CV_32S )
1271 return makePtr<ColumnSum<int, float> >(ksize, anchor, scale);
1272 if( ddepth == CV_32F && sdepth == CV_64F )
1273 return makePtr<ColumnSum<double, float> >(ksize, anchor, scale);
1274 if( ddepth == CV_64F && sdepth == CV_32S )
1275 return makePtr<ColumnSum<int, double> >(ksize, anchor, scale);
1276 if( ddepth == CV_64F && sdepth == CV_64F )
1277 return makePtr<ColumnSum<double, double> >(ksize, anchor, scale);
1278
1279 CV_Error_( CV_StsNotImplemented,
1280 ("Unsupported combination of sum format (=%d), and destination format (=%d)",
1281 sumType, dstType));
1282
1283 return Ptr<BaseColumnFilter>();
1284 }
1285
1286
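// createBoxFilter picks the intermediate sum type: 32-bit integer sums are used
// for integer inputs when the filter is unnormalized or when the kernel area is
// small enough that the sum cannot overflow (for CV_8U up to 1<<23 pixels, since
// 255 * 2^23 < 2^31); otherwise the sums are kept in CV_64F.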
1287 cv::Ptr<cv::FilterEngine> cv::createBoxFilter( int srcType, int dstType, Size ksize,
1288 Point anchor, bool normalize, int borderType )
1289 {
1290 int sdepth = CV_MAT_DEPTH(srcType);
1291 int cn = CV_MAT_CN(srcType), sumType = CV_64F;
1292 if( sdepth <= CV_32S && (!normalize ||
1293 ksize.width*ksize.height <= (sdepth == CV_8U ? (1<<23) :
1294 sdepth == CV_16U ? (1 << 15) : (1 << 16))) )
1295 sumType = CV_32S;
1296 sumType = CV_MAKETYPE( sumType, cn );
1297
1298 Ptr<BaseRowFilter> rowFilter = getRowSumFilter(srcType, sumType, ksize.width, anchor.x );
1299 Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
1300 dstType, ksize.height, anchor.y, normalize ? 1./(ksize.width*ksize.height) : 1);
1301
1302 return makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
1303 srcType, dstType, sumType, borderType );
1304 }
1305
1306
1307 void cv::boxFilter( InputArray _src, OutputArray _dst, int ddepth,
1308 Size ksize, Point anchor,
1309 bool normalize, int borderType )
1310 {
1311 CV_OCL_RUN(_dst.isUMat(), ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize))
1312
1313 Mat src = _src.getMat();
1314 int stype = src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
1315 if( ddepth < 0 )
1316 ddepth = sdepth;
1317 _dst.create( src.size(), CV_MAKETYPE(ddepth, cn) );
1318 Mat dst = _dst.getMat();
1319 if( borderType != BORDER_CONSTANT && normalize && (borderType & BORDER_ISOLATED) != 0 )
1320 {
1321 if( src.rows == 1 )
1322 ksize.height = 1;
1323 if( src.cols == 1 )
1324 ksize.width = 1;
1325 }
1326 #ifdef HAVE_TEGRA_OPTIMIZATION
1327 if ( tegra::useTegra() && tegra::box(src, dst, ksize, anchor, normalize, borderType) )
1328 return;
1329 #endif
1330
1331 #if defined(HAVE_IPP)
1332 CV_IPP_CHECK()
1333 {
1334 int ippBorderType = borderType & ~BORDER_ISOLATED;
1335 Point ocvAnchor, ippAnchor;
1336 ocvAnchor.x = anchor.x < 0 ? ksize.width / 2 : anchor.x;
1337 ocvAnchor.y = anchor.y < 0 ? ksize.height / 2 : anchor.y;
1338 ippAnchor.x = ksize.width / 2 - (ksize.width % 2 == 0 ? 1 : 0);
1339 ippAnchor.y = ksize.height / 2 - (ksize.height % 2 == 0 ? 1 : 0);
1340
1341 if (normalize && !src.isSubmatrix() && ddepth == sdepth &&
1342 (/*ippBorderType == BORDER_REPLICATE ||*/ /* returns ippStsStepErr: Step value is not valid */
1343 ippBorderType == BORDER_CONSTANT) && ocvAnchor == ippAnchor &&
1344 dst.cols != ksize.width && dst.rows != ksize.height) // returns ippStsMaskSizeErr: mask has an illegal value
1345 {
1346 Ipp32s bufSize = 0;
1347 IppiSize roiSize = { dst.cols, dst.rows }, maskSize = { ksize.width, ksize.height };
1348
1349 #define IPP_FILTER_BOX_BORDER(ippType, ippDataType, flavor) \
1350 do \
1351 { \
1352 if (ippiFilterBoxBorderGetBufferSize(roiSize, maskSize, ippDataType, cn, &bufSize) >= 0) \
1353 { \
1354 Ipp8u * buffer = ippsMalloc_8u(bufSize); \
1355 ippType borderValue[4] = { 0, 0, 0, 0 }; \
1356 ippBorderType = ippBorderType == BORDER_CONSTANT ? ippBorderConst : ippBorderRepl; \
1357 IppStatus status = ippiFilterBoxBorder_##flavor(src.ptr<ippType>(), (int)src.step, dst.ptr<ippType>(), \
1358 (int)dst.step, roiSize, maskSize, \
1359 (IppiBorderType)ippBorderType, borderValue, buffer); \
1360 ippsFree(buffer); \
1361 if (status >= 0) \
1362 { \
1363 CV_IMPL_ADD(CV_IMPL_IPP); \
1364 return; \
1365 } \
1366 } \
1367 setIppErrorStatus(); \
1368 } while ((void)0, 0)
1369
1370 if (stype == CV_8UC1)
1371 IPP_FILTER_BOX_BORDER(Ipp8u, ipp8u, 8u_C1R);
1372 else if (stype == CV_8UC3)
1373 IPP_FILTER_BOX_BORDER(Ipp8u, ipp8u, 8u_C3R);
1374 else if (stype == CV_8UC4)
1375 IPP_FILTER_BOX_BORDER(Ipp8u, ipp8u, 8u_C4R);
1376
1377 // Oct 2014: performance with BORDER_CONSTANT
1378 //else if (stype == CV_16UC1)
1379 // IPP_FILTER_BOX_BORDER(Ipp16u, ipp16u, 16u_C1R);
1380 else if (stype == CV_16UC3)
1381 IPP_FILTER_BOX_BORDER(Ipp16u, ipp16u, 16u_C3R);
1382 else if (stype == CV_16UC4)
1383 IPP_FILTER_BOX_BORDER(Ipp16u, ipp16u, 16u_C4R);
1384
1385 // Oct 2014: performance with BORDER_CONSTANT
1386 //else if (stype == CV_16SC1)
1387 // IPP_FILTER_BOX_BORDER(Ipp16s, ipp16s, 16s_C1R);
1388 else if (stype == CV_16SC3)
1389 IPP_FILTER_BOX_BORDER(Ipp16s, ipp16s, 16s_C3R);
1390 else if (stype == CV_16SC4)
1391 IPP_FILTER_BOX_BORDER(Ipp16s, ipp16s, 16s_C4R);
1392
1393 else if (stype == CV_32FC1)
1394 IPP_FILTER_BOX_BORDER(Ipp32f, ipp32f, 32f_C1R);
1395 else if (stype == CV_32FC3)
1396 IPP_FILTER_BOX_BORDER(Ipp32f, ipp32f, 32f_C3R);
1397 else if (stype == CV_32FC4)
1398 IPP_FILTER_BOX_BORDER(Ipp32f, ipp32f, 32f_C4R);
1399 }
1400 #undef IPP_FILTER_BOX_BORDER
1401 }
1402 #endif
1403
1404 Ptr<FilterEngine> f = createBoxFilter( src.type(), dst.type(),
1405 ksize, anchor, normalize, borderType );
1406 f->apply( src, dst );
1407 }
1408
1409 void cv::blur( InputArray src, OutputArray dst,
1410 Size ksize, Point anchor, int borderType )
1411 {
1412 boxFilter( src, dst, -1, ksize, anchor, true, borderType );
1413 }
1414
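// Usage sketch (illustrative, not part of the library): blur() is boxFilter()
// with ddepth = -1 and normalize = true, so with the default anchor and border
// type
//
//     cv::Mat src = cv::imread("image.png"), dst;
//     cv::blur(src, dst, cv::Size(5, 5));
//
// is equivalent to
//     cv::boxFilter(src, dst, -1, cv::Size(5, 5), cv::Point(-1,-1), true, cv::BORDER_DEFAULT);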
1415
1416 /****************************************************************************************\
1417 Squared Box Filter
1418 \****************************************************************************************/
1419
1420 namespace cv
1421 {
1422
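// SqrRowSum mirrors RowSum but accumulates the sum of squared pixel values; it is
// the row filter used by sqrBoxFilter(). Combined with an ordinary box filter it
// yields a local variance estimate: Var = E[X^2] - (E[X])^2.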
1423 template<typename T, typename ST>
1424 struct SqrRowSum :
1425 public BaseRowFilter
1426 {
1427     SqrRowSum( int _ksize, int _anchor ) :
1428 BaseRowFilter()
1429 {
1430 ksize = _ksize;
1431 anchor = _anchor;
1432 }
1433
1434     virtual void operator()(const uchar* src, uchar* dst, int width, int cn)
1435 {
1436 const T* S = (const T*)src;
1437 ST* D = (ST*)dst;
1438 int i = 0, k, ksz_cn = ksize*cn;
1439
1440 width = (width - 1)*cn;
1441 for( k = 0; k < cn; k++, S++, D++ )
1442 {
1443 ST s = 0;
1444 for( i = 0; i < ksz_cn; i += cn )
1445 {
1446 ST val = (ST)S[i];
1447 s += val*val;
1448 }
1449 D[0] = s;
1450 for( i = 0; i < width; i += cn )
1451 {
1452 ST val0 = (ST)S[i], val1 = (ST)S[i + ksz_cn];
1453 s += val1*val1 - val0*val0;
1454 D[i+cn] = s;
1455 }
1456 }
1457 }
1458 };
1459
1460 static Ptr<BaseRowFilter> getSqrRowSumFilter(int srcType, int sumType, int ksize, int anchor)
1461 {
1462 int sdepth = CV_MAT_DEPTH(srcType), ddepth = CV_MAT_DEPTH(sumType);
1463 CV_Assert( CV_MAT_CN(sumType) == CV_MAT_CN(srcType) );
1464
1465 if( anchor < 0 )
1466 anchor = ksize/2;
1467
1468 if( sdepth == CV_8U && ddepth == CV_32S )
1469 return makePtr<SqrRowSum<uchar, int> >(ksize, anchor);
1470 if( sdepth == CV_8U && ddepth == CV_64F )
1471 return makePtr<SqrRowSum<uchar, double> >(ksize, anchor);
1472 if( sdepth == CV_16U && ddepth == CV_64F )
1473 return makePtr<SqrRowSum<ushort, double> >(ksize, anchor);
1474 if( sdepth == CV_16S && ddepth == CV_64F )
1475 return makePtr<SqrRowSum<short, double> >(ksize, anchor);
1476 if( sdepth == CV_32F && ddepth == CV_64F )
1477 return makePtr<SqrRowSum<float, double> >(ksize, anchor);
1478 if( sdepth == CV_64F && ddepth == CV_64F )
1479 return makePtr<SqrRowSum<double, double> >(ksize, anchor);
1480
1481 CV_Error_( CV_StsNotImplemented,
1482 ("Unsupported combination of source format (=%d), and buffer format (=%d)",
1483 srcType, sumType));
1484
1485 return Ptr<BaseRowFilter>();
1486 }
1487
1488 }
1489
1490 void cv::sqrBoxFilter( InputArray _src, OutputArray _dst, int ddepth,
1491 Size ksize, Point anchor,
1492 bool normalize, int borderType )
1493 {
1494 int srcType = _src.type(), sdepth = CV_MAT_DEPTH(srcType), cn = CV_MAT_CN(srcType);
1495 Size size = _src.size();
1496
1497 if( ddepth < 0 )
1498 ddepth = sdepth < CV_32F ? CV_32F : CV_64F;
1499
1500 if( borderType != BORDER_CONSTANT && normalize )
1501 {
1502 if( size.height == 1 )
1503 ksize.height = 1;
1504 if( size.width == 1 )
1505 ksize.width = 1;
1506 }
1507
1508 CV_OCL_RUN(_dst.isUMat() && _src.dims() <= 2,
1509 ocl_boxFilter(_src, _dst, ddepth, ksize, anchor, borderType, normalize, true))
1510
1511 int sumDepth = CV_64F;
1512 if( sdepth == CV_8U )
1513 sumDepth = CV_32S;
1514 int sumType = CV_MAKETYPE( sumDepth, cn ), dstType = CV_MAKETYPE(ddepth, cn);
1515
1516 Mat src = _src.getMat();
1517 _dst.create( size, dstType );
1518 Mat dst = _dst.getMat();
1519
1520 Ptr<BaseRowFilter> rowFilter = getSqrRowSumFilter(srcType, sumType, ksize.width, anchor.x );
1521 Ptr<BaseColumnFilter> columnFilter = getColumnSumFilter(sumType,
1522 dstType, ksize.height, anchor.y,
1523 normalize ? 1./(ksize.width*ksize.height) : 1);
1524
1525 Ptr<FilterEngine> f = makePtr<FilterEngine>(Ptr<BaseFilter>(), rowFilter, columnFilter,
1526 srcType, dstType, sumType, borderType );
1527 f->apply( src, dst );
1528 }
1529
1530
1531 /****************************************************************************************\
1532 Gaussian Blur
1533 \****************************************************************************************/
1534
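// getGaussianKernel returns an n x 1 kernel whose entries are proportional to
// exp(-x^2 / (2*sigma^2)) with x = i - (n-1)/2, normalized to sum to 1. For small
// odd sizes (n <= 7) with a non-positive sigma, a precomputed table is used;
// otherwise a non-positive sigma is replaced by sigma = 0.3*((n-1)*0.5 - 1) + 0.8.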
1535 cv::Mat cv::getGaussianKernel( int n, double sigma, int ktype )
1536 {
1537 const int SMALL_GAUSSIAN_SIZE = 7;
1538 static const float small_gaussian_tab[][SMALL_GAUSSIAN_SIZE] =
1539 {
1540 {1.f},
1541 {0.25f, 0.5f, 0.25f},
1542 {0.0625f, 0.25f, 0.375f, 0.25f, 0.0625f},
1543 {0.03125f, 0.109375f, 0.21875f, 0.28125f, 0.21875f, 0.109375f, 0.03125f}
1544 };
1545
1546 const float* fixed_kernel = n % 2 == 1 && n <= SMALL_GAUSSIAN_SIZE && sigma <= 0 ?
1547 small_gaussian_tab[n>>1] : 0;
1548
1549 CV_Assert( ktype == CV_32F || ktype == CV_64F );
1550 Mat kernel(n, 1, ktype);
1551 float* cf = kernel.ptr<float>();
1552 double* cd = kernel.ptr<double>();
1553
1554 double sigmaX = sigma > 0 ? sigma : ((n-1)*0.5 - 1)*0.3 + 0.8;
1555 double scale2X = -0.5/(sigmaX*sigmaX);
1556 double sum = 0;
1557
1558 int i;
1559 for( i = 0; i < n; i++ )
1560 {
1561 double x = i - (n-1)*0.5;
1562 double t = fixed_kernel ? (double)fixed_kernel[i] : std::exp(scale2X*x*x);
1563 if( ktype == CV_32F )
1564 {
1565 cf[i] = (float)t;
1566 sum += cf[i];
1567 }
1568 else
1569 {
1570 cd[i] = t;
1571 sum += cd[i];
1572 }
1573 }
1574
1575 sum = 1./sum;
1576 for( i = 0; i < n; i++ )
1577 {
1578 if( ktype == CV_32F )
1579 cf[i] = (float)(cf[i]*sum);
1580 else
1581 cd[i] *= sum;
1582 }
1583
1584 return kernel;
1585 }
1586
1587 namespace cv {
1588
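// createGaussianKernels derives the kernel size from sigma when it is not given:
// ksize = cvRound(sigma*(depth == CV_8U ? 3 : 4)*2 + 1) | 1. For example,
// sigma1 = 1.5 on a CV_8U image yields ksize.width = 11.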
1589 static void createGaussianKernels( Mat & kx, Mat & ky, int type, Size ksize,
1590 double sigma1, double sigma2 )
1591 {
1592 int depth = CV_MAT_DEPTH(type);
1593 if( sigma2 <= 0 )
1594 sigma2 = sigma1;
1595
1596 // automatic detection of kernel size from sigma
1597 if( ksize.width <= 0 && sigma1 > 0 )
1598 ksize.width = cvRound(sigma1*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
1599 if( ksize.height <= 0 && sigma2 > 0 )
1600 ksize.height = cvRound(sigma2*(depth == CV_8U ? 3 : 4)*2 + 1)|1;
1601
1602 CV_Assert( ksize.width > 0 && ksize.width % 2 == 1 &&
1603 ksize.height > 0 && ksize.height % 2 == 1 );
1604
1605 sigma1 = std::max( sigma1, 0. );
1606 sigma2 = std::max( sigma2, 0. );
1607
1608 kx = getGaussianKernel( ksize.width, sigma1, std::max(depth, CV_32F) );
1609 if( ksize.height == ksize.width && std::abs(sigma1 - sigma2) < DBL_EPSILON )
1610 ky = kx;
1611 else
1612 ky = getGaussianKernel( ksize.height, sigma2, std::max(depth, CV_32F) );
1613 }
1614
1615 }
1616
1617 cv::Ptr<cv::FilterEngine> cv::createGaussianFilter( int type, Size ksize,
1618 double sigma1, double sigma2,
1619 int borderType )
1620 {
1621 Mat kx, ky;
1622 createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
1623
1624 return createSeparableLinearFilter( type, type, kx, ky, Point(-1,-1), 0, borderType );
1625 }
1626
1627
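// Usage sketch (illustrative, not part of the library; assumes src and dst are
// cv::Mat): a 5x5 Gaussian with sigma derived from the kernel size:
//
//     cv::GaussianBlur(src, dst, cv::Size(5, 5), 0);
//
// The function builds the two 1-D kernels via createGaussianKernels() and then
// delegates to sepFilter2D().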
1628 void cv::GaussianBlur( InputArray _src, OutputArray _dst, Size ksize,
1629 double sigma1, double sigma2,
1630 int borderType )
1631 {
1632 int type = _src.type();
1633 Size size = _src.size();
1634 _dst.create( size, type );
1635
1636 if( borderType != BORDER_CONSTANT && (borderType & BORDER_ISOLATED) != 0 )
1637 {
1638 if( size.height == 1 )
1639 ksize.height = 1;
1640 if( size.width == 1 )
1641 ksize.width = 1;
1642 }
1643
1644 if( ksize.width == 1 && ksize.height == 1 )
1645 {
1646 _src.copyTo(_dst);
1647 return;
1648 }
1649
1650 #ifdef HAVE_TEGRA_OPTIMIZATION
1651 Mat src = _src.getMat();
1652 Mat dst = _dst.getMat();
1653 if(sigma1 == 0 && sigma2 == 0 && tegra::useTegra() && tegra::gaussian(src, dst, ksize, borderType))
1654 return;
1655 #endif
1656
1657 #if IPP_VERSION_X100 >= 801 && 0 // these functions are slower in IPP 8.1
1658 CV_IPP_CHECK()
1659 {
1660 int depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
1661
1662 if ((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && (cn == 1 || cn == 3) &&
1663 sigma1 == sigma2 && ksize.width == ksize.height && sigma1 != 0.0 )
1664 {
1665 IppiBorderType ippBorder = ippiGetBorderType(borderType);
1666 if (ippBorderConst == ippBorder || ippBorderRepl == ippBorder)
1667 {
1668 Mat src = _src.getMat(), dst = _dst.getMat();
1669 IppiSize roiSize = { src.cols, src.rows };
1670 IppDataType dataType = ippiGetDataType(depth);
1671 Ipp32s specSize = 0, bufferSize = 0;
1672
1673 if (ippiFilterGaussianGetBufferSize(roiSize, (Ipp32u)ksize.width, dataType, cn, &specSize, &bufferSize) >= 0)
1674 {
1675 IppFilterGaussianSpec * pSpec = (IppFilterGaussianSpec *)ippMalloc(specSize);
1676 Ipp8u * pBuffer = (Ipp8u*)ippMalloc(bufferSize);
1677
1678 if (ippiFilterGaussianInit(roiSize, (Ipp32u)ksize.width, (Ipp32f)sigma1, ippBorder, dataType, 1, pSpec, pBuffer) >= 0)
1679 {
1680 #define IPP_FILTER_GAUSS(ippfavor, ippcn) \
1681 do \
1682 { \
1683 typedef Ipp##ippfavor ippType; \
1684 ippType borderValues[] = { 0, 0, 0 }; \
1685 IppStatus status = ippcn == 1 ? \
1686 ippiFilterGaussianBorder_##ippfavor##_C1R(src.ptr<ippType>(), (int)src.step, \
1687 dst.ptr<ippType>(), (int)dst.step, roiSize, borderValues[0], pSpec, pBuffer) : \
1688 ippiFilterGaussianBorder_##ippfavor##_C3R(src.ptr<ippType>(), (int)src.step, \
1689 dst.ptr<ippType>(), (int)dst.step, roiSize, borderValues, pSpec, pBuffer); \
1690 ippFree(pBuffer); \
1691 ippFree(pSpec); \
1692 if (status >= 0) \
1693 { \
1694 CV_IMPL_ADD(CV_IMPL_IPP); \
1695 return; \
1696 } \
1697 } while ((void)0, 0)
1698
1699 if (type == CV_8UC1)
1700 IPP_FILTER_GAUSS(8u, 1);
1701 else if (type == CV_8UC3)
1702 IPP_FILTER_GAUSS(8u, 3);
1703 else if (type == CV_16UC1)
1704 IPP_FILTER_GAUSS(16u, 1);
1705 else if (type == CV_16UC3)
1706 IPP_FILTER_GAUSS(16u, 3);
1707 else if (type == CV_16SC1)
1708 IPP_FILTER_GAUSS(16s, 1);
1709 else if (type == CV_16SC3)
1710 IPP_FILTER_GAUSS(16s, 3);
1711 else if (type == CV_32FC1)
1712 IPP_FILTER_GAUSS(32f, 1);
1713 else if (type == CV_32FC3)
1714 IPP_FILTER_GAUSS(32f, 3);
1715 #undef IPP_FILTER_GAUSS
1716 }
1717 }
1718 setIppErrorStatus();
1719 }
1720 }
1721 }
1722 #endif
1723
1724 Mat kx, ky;
1725 createGaussianKernels(kx, ky, type, ksize, sigma1, sigma2);
1726 sepFilter2D(_src, _dst, CV_MAT_DEPTH(type), kx, ky, Point(-1,-1), 0, borderType );
1727 }
1728
1729 /****************************************************************************************\
1730 Median Filter
1731 \****************************************************************************************/
1732
1733 namespace cv
1734 {
1735 typedef ushort HT;
1736
1737 /**
1738 * This structure represents a two-tier histogram. The first tier (known as the
1739 * "coarse" level) is 4 bit wide and the second tier (known as the "fine" level)
1740 * is 8 bit wide. Pixels inserted in the fine level also get inserted into the
1741 * coarse bucket designated by the 4 MSBs of the fine bucket value.
1742 *
1743  * The structure is aligned on 16 bytes, which is a prerequisite for SIMD
1744 * instructions. Each bucket is 16 bit wide, which means that extra care must be
1745 * taken to prevent overflow.
1746 */
1747 typedef struct
1748 {
1749 HT coarse[16];
1750 HT fine[16][16];
1751 } Histogram;
1752
1753
1754 #if CV_SSE2
1755 #define MEDIAN_HAVE_SIMD 1
1756
1757 static inline void histogram_add_simd( const HT x[16], HT y[16] )
1758 {
1759 const __m128i* rx = (const __m128i*)x;
1760 __m128i* ry = (__m128i*)y;
1761 __m128i r0 = _mm_add_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0));
1762 __m128i r1 = _mm_add_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1));
1763 _mm_store_si128(ry+0, r0);
1764 _mm_store_si128(ry+1, r1);
1765 }
1766
1767 static inline void histogram_sub_simd( const HT x[16], HT y[16] )
1768 {
1769 const __m128i* rx = (const __m128i*)x;
1770 __m128i* ry = (__m128i*)y;
1771 __m128i r0 = _mm_sub_epi16(_mm_load_si128(ry+0),_mm_load_si128(rx+0));
1772 __m128i r1 = _mm_sub_epi16(_mm_load_si128(ry+1),_mm_load_si128(rx+1));
1773 _mm_store_si128(ry+0, r0);
1774 _mm_store_si128(ry+1, r1);
1775 }
1776
1777 #elif CV_NEON
1778 #define MEDIAN_HAVE_SIMD 1
1779
1780 static inline void histogram_add_simd( const HT x[16], HT y[16] )
1781 {
1782 vst1q_u16(y, vaddq_u16(vld1q_u16(x), vld1q_u16(y)));
1783 vst1q_u16(y + 8, vaddq_u16(vld1q_u16(x + 8), vld1q_u16(y + 8)));
1784 }
1785
1786 static inline void histogram_sub_simd( const HT x[16], HT y[16] )
1787 {
1788 vst1q_u16(y, vsubq_u16(vld1q_u16(y), vld1q_u16(x)));
1789 vst1q_u16(y + 8, vsubq_u16(vld1q_u16(y + 8), vld1q_u16(x + 8)));
1790 }
1791
1792 #else
1793 #define MEDIAN_HAVE_SIMD 0
1794 #endif
1795
1796
1797 static inline void histogram_add( const HT x[16], HT y[16] )
1798 {
1799 int i;
1800 for( i = 0; i < 16; ++i )
1801 y[i] = (HT)(y[i] + x[i]);
1802 }
1803
1804 static inline void histogram_sub( const HT x[16], HT y[16] )
1805 {
1806 int i;
1807 for( i = 0; i < 16; ++i )
1808 y[i] = (HT)(y[i] - x[i]);
1809 }
1810
1811 static inline void histogram_muladd( int a, const HT x[16],
1812 HT y[16] )
1813 {
1814 for( int i = 0; i < 16; ++i )
1815 y[i] = (HT)(y[i] + a * x[i]);
1816 }
1817
1818 static void
1819 medianBlur_8u_O1( const Mat& _src, Mat& _dst, int ksize )
1820 {
1821 /**
1822  * HOP is short for Histogram OPeration. This macro applies the operation \a op to
1823  * histogram \a h for pixel value \a x, updating both the coarse and fine levels.
1824 */
1825 #define HOP(h,x,op) \
1826 h.coarse[x>>4] op, \
1827 *((HT*)h.fine + x) op
1828
1829 #define COP(c,j,x,op) \
1830 h_coarse[ 16*(n*c+j) + (x>>4) ] op, \
1831 h_fine[ 16 * (n*(16*c+(x>>4)) + j) + (x & 0xF) ] op
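/*
 * For example, COP(c, j, v, ++) expands (up to parenthesisation) to
 *
 *     h_coarse[ 16*(n*c+j) + (v>>4) ]++,
 *     h_fine[ 16*(n*(16*c+(v>>4)) + j) + (v & 0xF) ]++;
 *
 * i.e. it bumps both tiers of the column histogram for column j of channel c.
 * HOP does the same for a single Histogram instance.
 */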
1832
1833 int cn = _dst.channels(), m = _dst.rows, r = (ksize-1)/2;
1834 size_t sstep = _src.step, dstep = _dst.step;
1835 Histogram CV_DECL_ALIGNED(16) H[4];
1836 HT CV_DECL_ALIGNED(16) luc[4][16];
1837
1838 int STRIPE_SIZE = std::min( _dst.cols, 512/cn );
1839
1840 std::vector<HT> _h_coarse(1 * 16 * (STRIPE_SIZE + 2*r) * cn + 16);
1841 std::vector<HT> _h_fine(16 * 16 * (STRIPE_SIZE + 2*r) * cn + 16);
1842 HT* h_coarse = alignPtr(&_h_coarse[0], 16);
1843 HT* h_fine = alignPtr(&_h_fine[0], 16);
1844 #if MEDIAN_HAVE_SIMD
1845 volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON);
1846 #endif
1847
1848 for( int x = 0; x < _dst.cols; x += STRIPE_SIZE )
1849 {
1850 int i, j, k, c, n = std::min(_dst.cols - x, STRIPE_SIZE) + r*2;
1851 const uchar* src = _src.ptr() + x*cn;
1852 uchar* dst = _dst.ptr() + (x - r)*cn;
1853
1854 memset( h_coarse, 0, 16*n*cn*sizeof(h_coarse[0]) );
1855 memset( h_fine, 0, 16*16*n*cn*sizeof(h_fine[0]) );
1856
1857 // First row initialization
1858 for( c = 0; c < cn; c++ )
1859 {
1860 for( j = 0; j < n; j++ )
1861 COP( c, j, src[cn*j+c], += (cv::HT)(r+2) );
1862
1863 for( i = 1; i < r; i++ )
1864 {
1865 const uchar* p = src + sstep*std::min(i, m-1);
1866 for ( j = 0; j < n; j++ )
1867 COP( c, j, p[cn*j+c], ++ );
1868 }
1869 }
1870
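        // Sliding-window update: p0 is the row leaving the window, p1 the row
        // entering it, so every column histogram is maintained in O(1) per
        // pixel. In the median searches below, t = 2*r*r + 2*r is the
        // zero-based median rank: the window holds (2*r+1)^2 pixels and
        // ((2*r+1)^2 - 1)/2 = 2*r*r + 2*r, so the median is the first value
        // whose cumulative count exceeds t.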
1871 for( i = 0; i < m; i++ )
1872 {
1873 const uchar* p0 = src + sstep * std::max( 0, i-r-1 );
1874 const uchar* p1 = src + sstep * std::min( m-1, i+r );
1875
1876 memset( H, 0, cn*sizeof(H[0]) );
1877 memset( luc, 0, cn*sizeof(luc[0]) );
1878 for( c = 0; c < cn; c++ )
1879 {
1880 // Update column histograms for the entire row.
1881 for( j = 0; j < n; j++ )
1882 {
1883 COP( c, j, p0[j*cn + c], -- );
1884 COP( c, j, p1[j*cn + c], ++ );
1885 }
1886
1887 // First column initialization
1888 for( k = 0; k < 16; ++k )
1889 histogram_muladd( 2*r+1, &h_fine[16*n*(16*c+k)], &H[c].fine[k][0] );
1890
1891 #if MEDIAN_HAVE_SIMD
1892 if( useSIMD )
1893 {
1894 for( j = 0; j < 2*r; ++j )
1895 histogram_add_simd( &h_coarse[16*(n*c+j)], H[c].coarse );
1896
1897 for( j = r; j < n-r; j++ )
1898 {
1899 int t = 2*r*r + 2*r, b, sum = 0;
1900 HT* segment;
1901
1902 histogram_add_simd( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
1903
1904 // Find median at coarse level
1905 for ( k = 0; k < 16 ; ++k )
1906 {
1907 sum += H[c].coarse[k];
1908 if ( sum > t )
1909 {
1910 sum -= H[c].coarse[k];
1911 break;
1912 }
1913 }
1914 assert( k < 16 );
1915
1916 /* Update corresponding histogram segment */
1917 if ( luc[c][k] <= j-r )
1918 {
1919 memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
1920 for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
1921 histogram_add_simd( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
1922
1923 if ( luc[c][k] < j+r+1 )
1924 {
1925 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
1926 luc[c][k] = (HT)(j+r+1);
1927 }
1928 }
1929 else
1930 {
1931 for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
1932 {
1933 histogram_sub_simd( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] );
1934 histogram_add_simd( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
1935 }
1936 }
1937
1938 histogram_sub_simd( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
1939
1940 /* Find median in segment */
1941 segment = H[c].fine[k];
1942 for ( b = 0; b < 16 ; b++ )
1943 {
1944 sum += segment[b];
1945 if ( sum > t )
1946 {
1947 dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
1948 break;
1949 }
1950 }
1951 assert( b < 16 );
1952 }
1953 }
1954 else
1955 #endif
1956 {
1957 for( j = 0; j < 2*r; ++j )
1958 histogram_add( &h_coarse[16*(n*c+j)], H[c].coarse );
1959
1960 for( j = r; j < n-r; j++ )
1961 {
1962 int t = 2*r*r + 2*r, b, sum = 0;
1963 HT* segment;
1964
1965 histogram_add( &h_coarse[16*(n*c + std::min(j+r,n-1))], H[c].coarse );
1966
1967 // Find median at coarse level
1968 for ( k = 0; k < 16 ; ++k )
1969 {
1970 sum += H[c].coarse[k];
1971 if ( sum > t )
1972 {
1973 sum -= H[c].coarse[k];
1974 break;
1975 }
1976 }
1977 assert( k < 16 );
1978
1979 /* Update corresponding histogram segment */
1980 if ( luc[c][k] <= j-r )
1981 {
1982 memset( &H[c].fine[k], 0, 16 * sizeof(HT) );
1983 for ( luc[c][k] = cv::HT(j-r); luc[c][k] < MIN(j+r+1,n); ++luc[c][k] )
1984 histogram_add( &h_fine[16*(n*(16*c+k)+luc[c][k])], H[c].fine[k] );
1985
1986 if ( luc[c][k] < j+r+1 )
1987 {
1988 histogram_muladd( j+r+1 - n, &h_fine[16*(n*(16*c+k)+(n-1))], &H[c].fine[k][0] );
1989 luc[c][k] = (HT)(j+r+1);
1990 }
1991 }
1992 else
1993 {
1994 for ( ; luc[c][k] < j+r+1; ++luc[c][k] )
1995 {
1996 histogram_sub( &h_fine[16*(n*(16*c+k)+MAX(luc[c][k]-2*r-1,0))], H[c].fine[k] );
1997 histogram_add( &h_fine[16*(n*(16*c+k)+MIN(luc[c][k],n-1))], H[c].fine[k] );
1998 }
1999 }
2000
2001 histogram_sub( &h_coarse[16*(n*c+MAX(j-r,0))], H[c].coarse );
2002
2003 /* Find median in segment */
2004 segment = H[c].fine[k];
2005 for ( b = 0; b < 16 ; b++ )
2006 {
2007 sum += segment[b];
2008 if ( sum > t )
2009 {
2010 dst[dstep*i+cn*j+c] = (uchar)(16*k + b);
2011 break;
2012 }
2013 }
2014 assert( b < 16 );
2015 }
2016 }
2017 }
2018 }
2019 }
2020
2021 #undef HOP
2022 #undef COP
2023 }
2024
2025 static void
2026 medianBlur_8u_Om( const Mat& _src, Mat& _dst, int m )
2027 {
2028 #define N 16
2029 int zone0[4][N];
2030 int zone1[4][N*N];
2031 int x, y;
2032 int n2 = m*m/2;
2033 Size size = _dst.size();
2034 const uchar* src = _src.ptr();
2035 uchar* dst = _dst.ptr();
2036 int src_step = (int)_src.step, dst_step = (int)_dst.step;
2037 int cn = _src.channels();
2038 const uchar* src_max = src + size.height*src_step;
2039
2040 #define UPDATE_ACC01( pix, cn, op ) \
2041 { \
2042 int p = (pix); \
2043 zone1[cn][p] op; \
2044 zone0[cn][p >> 4] op; \
2045 }
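// UPDATE_ACC01 is the O(m) counterpart of COP above; a sketch of what
// UPDATE_ACC01(v, c, ++) performs:
//
//     zone1[c][v]++;        // fine level: 256 buckets
//     zone0[c][v >> 4]++;   // coarse level: 16 buckets of 16 values each
//
// so each per-pixel median search needs one coarse pass plus a single
// 16-entry fine segment.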
2046
2047 //CV_Assert( size.height >= nx && size.width >= nx );
2048 for( x = 0; x < size.width; x++, src += cn, dst += cn )
2049 {
2050 uchar* dst_cur = dst;
2051 const uchar* src_top = src;
2052 const uchar* src_bottom = src;
2053 int k, c;
2054 int src_step1 = src_step, dst_step1 = dst_step;
2055
2056 if( x % 2 != 0 )
2057 {
2058 src_bottom = src_top += src_step*(size.height-1);
2059 dst_cur += dst_step*(size.height-1);
2060 src_step1 = -src_step1;
2061 dst_step1 = -dst_step1;
2062 }
2063
2064 // init accumulator
2065 memset( zone0, 0, sizeof(zone0[0])*cn );
2066 memset( zone1, 0, sizeof(zone1[0])*cn );
2067
2068 for( y = 0; y <= m/2; y++ )
2069 {
2070 for( c = 0; c < cn; c++ )
2071 {
2072 if( y > 0 )
2073 {
2074 for( k = 0; k < m*cn; k += cn )
2075 UPDATE_ACC01( src_bottom[k+c], c, ++ );
2076 }
2077 else
2078 {
2079 for( k = 0; k < m*cn; k += cn )
2080 UPDATE_ACC01( src_bottom[k+c], c, += m/2+1 );
2081 }
2082 }
2083
2084 if( (src_step1 > 0 && y < size.height-1) ||
2085 (src_step1 < 0 && size.height-y-1 > 0) )
2086 src_bottom += src_step1;
2087 }
2088
2089 for( y = 0; y < size.height; y++, dst_cur += dst_step1 )
2090 {
2091 // find median
2092 for( c = 0; c < cn; c++ )
2093 {
2094 int s = 0;
2095 for( k = 0; ; k++ )
2096 {
2097 int t = s + zone0[c][k];
2098 if( t > n2 ) break;
2099 s = t;
2100 }
2101
2102 for( k *= N; ;k++ )
2103 {
2104 s += zone1[c][k];
2105 if( s > n2 ) break;
2106 }
2107
2108 dst_cur[c] = (uchar)k;
2109 }
2110
2111 if( y+1 == size.height )
2112 break;
2113
2114 if( cn == 1 )
2115 {
2116 for( k = 0; k < m; k++ )
2117 {
2118 int p = src_top[k];
2119 int q = src_bottom[k];
2120 zone1[0][p]--;
2121 zone0[0][p>>4]--;
2122 zone1[0][q]++;
2123 zone0[0][q>>4]++;
2124 }
2125 }
2126 else if( cn == 3 )
2127 {
2128 for( k = 0; k < m*3; k += 3 )
2129 {
2130 UPDATE_ACC01( src_top[k], 0, -- );
2131 UPDATE_ACC01( src_top[k+1], 1, -- );
2132 UPDATE_ACC01( src_top[k+2], 2, -- );
2133
2134 UPDATE_ACC01( src_bottom[k], 0, ++ );
2135 UPDATE_ACC01( src_bottom[k+1], 1, ++ );
2136 UPDATE_ACC01( src_bottom[k+2], 2, ++ );
2137 }
2138 }
2139 else
2140 {
2141 assert( cn == 4 );
2142 for( k = 0; k < m*4; k += 4 )
2143 {
2144 UPDATE_ACC01( src_top[k], 0, -- );
2145 UPDATE_ACC01( src_top[k+1], 1, -- );
2146 UPDATE_ACC01( src_top[k+2], 2, -- );
2147 UPDATE_ACC01( src_top[k+3], 3, -- );
2148
2149 UPDATE_ACC01( src_bottom[k], 0, ++ );
2150 UPDATE_ACC01( src_bottom[k+1], 1, ++ );
2151 UPDATE_ACC01( src_bottom[k+2], 2, ++ );
2152 UPDATE_ACC01( src_bottom[k+3], 3, ++ );
2153 }
2154 }
2155
2156 if( (src_step1 > 0 && src_bottom + src_step1 < src_max) ||
2157 (src_step1 < 0 && src_bottom + src_step1 >= src) )
2158 src_bottom += src_step1;
2159
2160 if( y >= m/2 )
2161 src_top += src_step1;
2162 }
2163 }
2164 #undef N
2165 #undef UPDATE_ACC01
2166 }
2167
2168
2169 struct MinMax8u
2170 {
2171 typedef uchar value_type;
2172 typedef int arg_type;
2173 enum { SIZE = 1 };
2174     arg_type load(const uchar* ptr) { return *ptr; }
2175     void store(uchar* ptr, arg_type val) { *ptr = (uchar)val; }
2176     void operator()(arg_type& a, arg_type& b) const
2177 {
2178 int t = CV_FAST_CAST_8U(a - b);
2179 b += t; a -= t;
2180 }
2181 };
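// The compare-exchange above is branchless: CV_FAST_CAST_8U(a - b) saturates
// the difference to [0, 255], so t == 0 when a <= b and t == a - b otherwise;
// "b += t; a -= t" then leaves (a, b) == (min, max). E.g. a = 200, b = 50
// gives t = 150, hence (a, b) = (50, 200); a = 50, b = 200 gives t = 0 and
// leaves the pair unchanged.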
2182
2183 struct MinMax16u
2184 {
2185 typedef ushort value_type;
2186 typedef int arg_type;
2187 enum { SIZE = 1 };
2188     arg_type load(const ushort* ptr) { return *ptr; }
2189     void store(ushort* ptr, arg_type val) { *ptr = (ushort)val; }
2190     void operator()(arg_type& a, arg_type& b) const
2191 {
2192 arg_type t = a;
2193 a = std::min(a, b);
2194 b = std::max(b, t);
2195 }
2196 };
2197
2198 struct MinMax16s
2199 {
2200 typedef short value_type;
2201 typedef int arg_type;
2202 enum { SIZE = 1 };
2203     arg_type load(const short* ptr) { return *ptr; }
2204     void store(short* ptr, arg_type val) { *ptr = (short)val; }
2205     void operator()(arg_type& a, arg_type& b) const
2206 {
2207 arg_type t = a;
2208 a = std::min(a, b);
2209 b = std::max(b, t);
2210 }
2211 };
2212
2213 struct MinMax32f
2214 {
2215 typedef float value_type;
2216 typedef float arg_type;
2217 enum { SIZE = 1 };
2218     arg_type load(const float* ptr) { return *ptr; }
2219     void store(float* ptr, arg_type val) { *ptr = val; }
2220     void operator()(arg_type& a, arg_type& b) const
2221 {
2222 arg_type t = a;
2223 a = std::min(a, b);
2224 b = std::max(b, t);
2225 }
2226 };
2227
2228 #if CV_SSE2
2229
2230 struct MinMaxVec8u
2231 {
2232 typedef uchar value_type;
2233 typedef __m128i arg_type;
2234 enum { SIZE = 16 };
2235     arg_type load(const uchar* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
2236     void store(uchar* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
2237     void operator()(arg_type& a, arg_type& b) const
2238 {
2239 arg_type t = a;
2240 a = _mm_min_epu8(a, b);
2241 b = _mm_max_epu8(b, t);
2242 }
2243 };
2244
2245
2246 struct MinMaxVec16u
2247 {
2248 typedef ushort value_type;
2249 typedef __m128i arg_type;
2250 enum { SIZE = 8 };
2251     arg_type load(const ushort* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
2252     void store(ushort* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
2253     void operator()(arg_type& a, arg_type& b) const
2254 {
2255 arg_type t = _mm_subs_epu16(a, b);
2256 a = _mm_subs_epu16(a, t);
2257 b = _mm_adds_epu16(b, t);
2258 }
2259 };
2260
2261
2262 struct MinMaxVec16s
2263 {
2264 typedef short value_type;
2265 typedef __m128i arg_type;
2266 enum { SIZE = 8 };
2267     arg_type load(const short* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
2268     void store(short* ptr, arg_type val) { _mm_storeu_si128((__m128i*)ptr, val); }
2269     void operator()(arg_type& a, arg_type& b) const
2270 {
2271 arg_type t = a;
2272 a = _mm_min_epi16(a, b);
2273 b = _mm_max_epi16(b, t);
2274 }
2275 };
2276
2277
2278 struct MinMaxVec32f
2279 {
2280 typedef float value_type;
2281 typedef __m128 arg_type;
2282 enum { SIZE = 4 };
2283     arg_type load(const float* ptr) { return _mm_loadu_ps(ptr); }
2284     void store(float* ptr, arg_type val) { _mm_storeu_ps(ptr, val); }
2285     void operator()(arg_type& a, arg_type& b) const
2286 {
2287 arg_type t = a;
2288 a = _mm_min_ps(a, b);
2289 b = _mm_max_ps(b, t);
2290 }
2291 };
2292
2293 #elif CV_NEON
2294
2295 struct MinMaxVec8u
2296 {
2297 typedef uchar value_type;
2298 typedef uint8x16_t arg_type;
2299 enum { SIZE = 16 };
2300     arg_type load(const uchar* ptr) { return vld1q_u8(ptr); }
2301     void store(uchar* ptr, arg_type val) { vst1q_u8(ptr, val); }
2302     void operator()(arg_type& a, arg_type& b) const
2303 {
2304 arg_type t = a;
2305 a = vminq_u8(a, b);
2306 b = vmaxq_u8(b, t);
2307 }
2308 };
2309
2310
2311 struct MinMaxVec16u
2312 {
2313 typedef ushort value_type;
2314 typedef uint16x8_t arg_type;
2315 enum { SIZE = 8 };
2316     arg_type load(const ushort* ptr) { return vld1q_u16(ptr); }
2317     void store(ushort* ptr, arg_type val) { vst1q_u16(ptr, val); }
2318     void operator()(arg_type& a, arg_type& b) const
2319 {
2320 arg_type t = a;
2321 a = vminq_u16(a, b);
2322 b = vmaxq_u16(b, t);
2323 }
2324 };
2325
2326
2327 struct MinMaxVec16s
2328 {
2329 typedef short value_type;
2330 typedef int16x8_t arg_type;
2331 enum { SIZE = 8 };
2332     arg_type load(const short* ptr) { return vld1q_s16(ptr); }
2333     void store(short* ptr, arg_type val) { vst1q_s16(ptr, val); }
2334     void operator()(arg_type& a, arg_type& b) const
2335 {
2336 arg_type t = a;
2337 a = vminq_s16(a, b);
2338 b = vmaxq_s16(b, t);
2339 }
2340 };
2341
2342
2343 struct MinMaxVec32f
2344 {
2345 typedef float value_type;
2346 typedef float32x4_t arg_type;
2347 enum { SIZE = 4 };
2348     arg_type load(const float* ptr) { return vld1q_f32(ptr); }
2349     void store(float* ptr, arg_type val) { vst1q_f32(ptr, val); }
2350     void operator()(arg_type& a, arg_type& b) const
2351 {
2352 arg_type t = a;
2353 a = vminq_f32(a, b);
2354 b = vmaxq_f32(b, t);
2355 }
2356 };
2357
2358
2359 #else
2360
2361 typedef MinMax8u MinMaxVec8u;
2362 typedef MinMax16u MinMaxVec16u;
2363 typedef MinMax16s MinMaxVec16s;
2364 typedef MinMax32f MinMaxVec32f;
2365
2366 #endif
2367
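// medianBlur_SortNet computes the median with a fixed sorting network instead
// of histograms: each op(pi, pj) is a compare-exchange (min into the first
// argument, max into the second), and the network is wired so that the median
// lands in a known cell. The 3-element case used below illustrates the idea:
//
//     op(p0, p1); op(p1, p2); op(p0, p1);   // median is now in p1
//
// Exactly the same sequence runs on SIMD registers via VecOp, yielding 4-16
// medians per iteration with no data-dependent branches.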
2368 template<class Op, class VecOp>
2369 static void
2370 medianBlur_SortNet( const Mat& _src, Mat& _dst, int m )
2371 {
2372 typedef typename Op::value_type T;
2373 typedef typename Op::arg_type WT;
2374 typedef typename VecOp::arg_type VT;
2375
2376 const T* src = _src.ptr<T>();
2377 T* dst = _dst.ptr<T>();
2378 int sstep = (int)(_src.step/sizeof(T));
2379 int dstep = (int)(_dst.step/sizeof(T));
2380 Size size = _dst.size();
2381 int i, j, k, cn = _src.channels();
2382 Op op;
2383 VecOp vop;
2384 volatile bool useSIMD = checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON);
2385
2386 if( m == 3 )
2387 {
2388 if( size.width == 1 || size.height == 1 )
2389 {
2390 int len = size.width + size.height - 1;
2391 int sdelta = size.height == 1 ? cn : sstep;
2392 int sdelta0 = size.height == 1 ? 0 : sstep - cn;
2393 int ddelta = size.height == 1 ? cn : dstep;
2394
2395 for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
2396 for( j = 0; j < cn; j++, src++ )
2397 {
2398 WT p0 = src[i > 0 ? -sdelta : 0];
2399 WT p1 = src[0];
2400 WT p2 = src[i < len - 1 ? sdelta : 0];
2401
2402 op(p0, p1); op(p1, p2); op(p0, p1);
2403 dst[j] = (T)p1;
2404 }
2405 return;
2406 }
2407
2408 size.width *= cn;
2409 for( i = 0; i < size.height; i++, dst += dstep )
2410 {
2411 const T* row0 = src + std::max(i - 1, 0)*sstep;
2412 const T* row1 = src + i*sstep;
2413 const T* row2 = src + std::min(i + 1, size.height-1)*sstep;
2414 int limit = useSIMD ? cn : size.width;
2415
2416 for(j = 0;; )
2417 {
2418 for( ; j < limit; j++ )
2419 {
2420 int j0 = j >= cn ? j - cn : j;
2421 int j2 = j < size.width - cn ? j + cn : j;
2422 WT p0 = row0[j0], p1 = row0[j], p2 = row0[j2];
2423 WT p3 = row1[j0], p4 = row1[j], p5 = row1[j2];
2424 WT p6 = row2[j0], p7 = row2[j], p8 = row2[j2];
2425
2426 op(p1, p2); op(p4, p5); op(p7, p8); op(p0, p1);
2427 op(p3, p4); op(p6, p7); op(p1, p2); op(p4, p5);
2428 op(p7, p8); op(p0, p3); op(p5, p8); op(p4, p7);
2429 op(p3, p6); op(p1, p4); op(p2, p5); op(p4, p7);
2430 op(p4, p2); op(p6, p4); op(p4, p2);
2431 dst[j] = (T)p4;
2432 }
2433
2434 if( limit == size.width )
2435 break;
2436
2437 for( ; j <= size.width - VecOp::SIZE - cn; j += VecOp::SIZE )
2438 {
2439 VT p0 = vop.load(row0+j-cn), p1 = vop.load(row0+j), p2 = vop.load(row0+j+cn);
2440 VT p3 = vop.load(row1+j-cn), p4 = vop.load(row1+j), p5 = vop.load(row1+j+cn);
2441 VT p6 = vop.load(row2+j-cn), p7 = vop.load(row2+j), p8 = vop.load(row2+j+cn);
2442
2443 vop(p1, p2); vop(p4, p5); vop(p7, p8); vop(p0, p1);
2444 vop(p3, p4); vop(p6, p7); vop(p1, p2); vop(p4, p5);
2445 vop(p7, p8); vop(p0, p3); vop(p5, p8); vop(p4, p7);
2446 vop(p3, p6); vop(p1, p4); vop(p2, p5); vop(p4, p7);
2447 vop(p4, p2); vop(p6, p4); vop(p4, p2);
2448 vop.store(dst+j, p4);
2449 }
2450
2451 limit = size.width;
2452 }
2453 }
2454 }
2455 else if( m == 5 )
2456 {
2457 if( size.width == 1 || size.height == 1 )
2458 {
2459 int len = size.width + size.height - 1;
2460 int sdelta = size.height == 1 ? cn : sstep;
2461 int sdelta0 = size.height == 1 ? 0 : sstep - cn;
2462 int ddelta = size.height == 1 ? cn : dstep;
2463
2464 for( i = 0; i < len; i++, src += sdelta0, dst += ddelta )
2465 for( j = 0; j < cn; j++, src++ )
2466 {
2467 int i1 = i > 0 ? -sdelta : 0;
2468 int i0 = i > 1 ? -sdelta*2 : i1;
2469 int i3 = i < len-1 ? sdelta : 0;
2470 int i4 = i < len-2 ? sdelta*2 : i3;
2471 WT p0 = src[i0], p1 = src[i1], p2 = src[0], p3 = src[i3], p4 = src[i4];
2472
2473 op(p0, p1); op(p3, p4); op(p2, p3); op(p3, p4); op(p0, p2);
2474 op(p2, p4); op(p1, p3); op(p1, p2);
2475 dst[j] = (T)p2;
2476 }
2477 return;
2478 }
2479
2480 size.width *= cn;
2481 for( i = 0; i < size.height; i++, dst += dstep )
2482 {
2483 const T* row[5];
2484 row[0] = src + std::max(i - 2, 0)*sstep;
2485 row[1] = src + std::max(i - 1, 0)*sstep;
2486 row[2] = src + i*sstep;
2487 row[3] = src + std::min(i + 1, size.height-1)*sstep;
2488 row[4] = src + std::min(i + 2, size.height-1)*sstep;
2489 int limit = useSIMD ? cn*2 : size.width;
2490
2491 for(j = 0;; )
2492 {
2493 for( ; j < limit; j++ )
2494 {
2495 WT p[25];
2496 int j1 = j >= cn ? j - cn : j;
2497 int j0 = j >= cn*2 ? j - cn*2 : j1;
2498 int j3 = j < size.width - cn ? j + cn : j;
2499 int j4 = j < size.width - cn*2 ? j + cn*2 : j3;
2500 for( k = 0; k < 5; k++ )
2501 {
2502 const T* rowk = row[k];
2503 p[k*5] = rowk[j0]; p[k*5+1] = rowk[j1];
2504 p[k*5+2] = rowk[j]; p[k*5+3] = rowk[j3];
2505 p[k*5+4] = rowk[j4];
2506 }
2507
2508 op(p[1], p[2]); op(p[0], p[1]); op(p[1], p[2]); op(p[4], p[5]); op(p[3], p[4]);
2509 op(p[4], p[5]); op(p[0], p[3]); op(p[2], p[5]); op(p[2], p[3]); op(p[1], p[4]);
2510 op(p[1], p[2]); op(p[3], p[4]); op(p[7], p[8]); op(p[6], p[7]); op(p[7], p[8]);
2511 op(p[10], p[11]); op(p[9], p[10]); op(p[10], p[11]); op(p[6], p[9]); op(p[8], p[11]);
2512 op(p[8], p[9]); op(p[7], p[10]); op(p[7], p[8]); op(p[9], p[10]); op(p[0], p[6]);
2513 op(p[4], p[10]); op(p[4], p[6]); op(p[2], p[8]); op(p[2], p[4]); op(p[6], p[8]);
2514 op(p[1], p[7]); op(p[5], p[11]); op(p[5], p[7]); op(p[3], p[9]); op(p[3], p[5]);
2515 op(p[7], p[9]); op(p[1], p[2]); op(p[3], p[4]); op(p[5], p[6]); op(p[7], p[8]);
2516 op(p[9], p[10]); op(p[13], p[14]); op(p[12], p[13]); op(p[13], p[14]); op(p[16], p[17]);
2517 op(p[15], p[16]); op(p[16], p[17]); op(p[12], p[15]); op(p[14], p[17]); op(p[14], p[15]);
2518 op(p[13], p[16]); op(p[13], p[14]); op(p[15], p[16]); op(p[19], p[20]); op(p[18], p[19]);
2519 op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[21], p[23]); op(p[22], p[24]);
2520 op(p[22], p[23]); op(p[18], p[21]); op(p[20], p[23]); op(p[20], p[21]); op(p[19], p[22]);
2521 op(p[22], p[24]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[12], p[18]);
2522 op(p[16], p[22]); op(p[16], p[18]); op(p[14], p[20]); op(p[20], p[24]); op(p[14], p[16]);
2523 op(p[18], p[20]); op(p[22], p[24]); op(p[13], p[19]); op(p[17], p[23]); op(p[17], p[19]);
2524 op(p[15], p[21]); op(p[15], p[17]); op(p[19], p[21]); op(p[13], p[14]); op(p[15], p[16]);
2525 op(p[17], p[18]); op(p[19], p[20]); op(p[21], p[22]); op(p[23], p[24]); op(p[0], p[12]);
2526 op(p[8], p[20]); op(p[8], p[12]); op(p[4], p[16]); op(p[16], p[24]); op(p[12], p[16]);
2527 op(p[2], p[14]); op(p[10], p[22]); op(p[10], p[14]); op(p[6], p[18]); op(p[6], p[10]);
2528 op(p[10], p[12]); op(p[1], p[13]); op(p[9], p[21]); op(p[9], p[13]); op(p[5], p[17]);
2529 op(p[13], p[17]); op(p[3], p[15]); op(p[11], p[23]); op(p[11], p[15]); op(p[7], p[19]);
2530 op(p[7], p[11]); op(p[11], p[13]); op(p[11], p[12]);
2531 dst[j] = (T)p[12];
2532 }
2533
2534 if( limit == size.width )
2535 break;
2536
2537 for( ; j <= size.width - VecOp::SIZE - cn*2; j += VecOp::SIZE )
2538 {
2539 VT p[25];
2540 for( k = 0; k < 5; k++ )
2541 {
2542 const T* rowk = row[k];
2543 p[k*5] = vop.load(rowk+j-cn*2); p[k*5+1] = vop.load(rowk+j-cn);
2544 p[k*5+2] = vop.load(rowk+j); p[k*5+3] = vop.load(rowk+j+cn);
2545 p[k*5+4] = vop.load(rowk+j+cn*2);
2546 }
2547
2548 vop(p[1], p[2]); vop(p[0], p[1]); vop(p[1], p[2]); vop(p[4], p[5]); vop(p[3], p[4]);
2549 vop(p[4], p[5]); vop(p[0], p[3]); vop(p[2], p[5]); vop(p[2], p[3]); vop(p[1], p[4]);
2550 vop(p[1], p[2]); vop(p[3], p[4]); vop(p[7], p[8]); vop(p[6], p[7]); vop(p[7], p[8]);
2551 vop(p[10], p[11]); vop(p[9], p[10]); vop(p[10], p[11]); vop(p[6], p[9]); vop(p[8], p[11]);
2552 vop(p[8], p[9]); vop(p[7], p[10]); vop(p[7], p[8]); vop(p[9], p[10]); vop(p[0], p[6]);
2553 vop(p[4], p[10]); vop(p[4], p[6]); vop(p[2], p[8]); vop(p[2], p[4]); vop(p[6], p[8]);
2554 vop(p[1], p[7]); vop(p[5], p[11]); vop(p[5], p[7]); vop(p[3], p[9]); vop(p[3], p[5]);
2555 vop(p[7], p[9]); vop(p[1], p[2]); vop(p[3], p[4]); vop(p[5], p[6]); vop(p[7], p[8]);
2556 vop(p[9], p[10]); vop(p[13], p[14]); vop(p[12], p[13]); vop(p[13], p[14]); vop(p[16], p[17]);
2557 vop(p[15], p[16]); vop(p[16], p[17]); vop(p[12], p[15]); vop(p[14], p[17]); vop(p[14], p[15]);
2558 vop(p[13], p[16]); vop(p[13], p[14]); vop(p[15], p[16]); vop(p[19], p[20]); vop(p[18], p[19]);
2559 vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[21], p[23]); vop(p[22], p[24]);
2560 vop(p[22], p[23]); vop(p[18], p[21]); vop(p[20], p[23]); vop(p[20], p[21]); vop(p[19], p[22]);
2561 vop(p[22], p[24]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[12], p[18]);
2562 vop(p[16], p[22]); vop(p[16], p[18]); vop(p[14], p[20]); vop(p[20], p[24]); vop(p[14], p[16]);
2563 vop(p[18], p[20]); vop(p[22], p[24]); vop(p[13], p[19]); vop(p[17], p[23]); vop(p[17], p[19]);
2564 vop(p[15], p[21]); vop(p[15], p[17]); vop(p[19], p[21]); vop(p[13], p[14]); vop(p[15], p[16]);
2565 vop(p[17], p[18]); vop(p[19], p[20]); vop(p[21], p[22]); vop(p[23], p[24]); vop(p[0], p[12]);
2566 vop(p[8], p[20]); vop(p[8], p[12]); vop(p[4], p[16]); vop(p[16], p[24]); vop(p[12], p[16]);
2567 vop(p[2], p[14]); vop(p[10], p[22]); vop(p[10], p[14]); vop(p[6], p[18]); vop(p[6], p[10]);
2568 vop(p[10], p[12]); vop(p[1], p[13]); vop(p[9], p[21]); vop(p[9], p[13]); vop(p[5], p[17]);
2569 vop(p[13], p[17]); vop(p[3], p[15]); vop(p[11], p[23]); vop(p[11], p[15]); vop(p[7], p[19]);
2570 vop(p[7], p[11]); vop(p[11], p[13]); vop(p[11], p[12]);
2571 vop.store(dst+j, p[12]);
2572 }
2573
2574 limit = size.width;
2575 }
2576 }
2577 }
2578 }
2579
2580 #ifdef HAVE_OPENCL
2581
2582 static bool ocl_medianFilter(InputArray _src, OutputArray _dst, int m)
2583 {
2584 size_t localsize[2] = { 16, 16 };
2585 size_t globalsize[2];
2586 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
2587
2588 if ( !((depth == CV_8U || depth == CV_16U || depth == CV_16S || depth == CV_32F) && cn <= 4 && (m == 3 || m == 5)) )
2589 return false;
2590
2591 Size imgSize = _src.size();
2592 bool useOptimized = (1 == cn) &&
2593 (size_t)imgSize.width >= localsize[0] * 8 &&
2594 (size_t)imgSize.height >= localsize[1] * 8 &&
2595 imgSize.width % 4 == 0 &&
2596 imgSize.height % 4 == 0 &&
2597 (ocl::Device::getDefault().isIntel());
2598
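    // The optimized "_u" kernel flavor launches one work-item per 4x4 output
    // tile (both global dimensions are divided by 4 below), hence the
    // restriction to single-channel images whose width and height are
    // multiples of 4 and comfortably exceed the 16x16 work-group size.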
2599     cv::String kname = format( useOptimized ? "medianFilter%d_u" : "medianFilter%d", m);
2600 cv::String kdefs = useOptimized ?
2601 format("-D T=%s -D T1=%s -D T4=%s%d -D cn=%d -D USE_4OPT", ocl::typeToStr(type),
2602 ocl::typeToStr(depth), ocl::typeToStr(depth), cn*4, cn)
2603 :
2604         format("-D T=%s -D T1=%s -D cn=%d", ocl::typeToStr(type), ocl::typeToStr(depth), cn);
2605
2606 ocl::Kernel k(kname.c_str(), ocl::imgproc::medianFilter_oclsrc, kdefs.c_str() );
2607
2608 if (k.empty())
2609 return false;
2610
2611 UMat src = _src.getUMat();
2612 _dst.create(src.size(), type);
2613 UMat dst = _dst.getUMat();
2614
2615 k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst));
2616
2617 if( useOptimized )
2618 {
2619 globalsize[0] = DIVUP(src.cols / 4, localsize[0]) * localsize[0];
2620 globalsize[1] = DIVUP(src.rows / 4, localsize[1]) * localsize[1];
2621 }
2622 else
2623 {
2624 globalsize[0] = (src.cols + localsize[0] + 2) / localsize[0] * localsize[0];
2625 globalsize[1] = (src.rows + localsize[1] - 1) / localsize[1] * localsize[1];
2626 }
2627
2628 return k.run(2, globalsize, localsize, false);
2629 }
2630
2631 #endif
2632
2633 }
2634
2635 void cv::medianBlur( InputArray _src0, OutputArray _dst, int ksize )
2636 {
2637 CV_Assert( (ksize % 2 == 1) && (_src0.dims() <= 2 ));
2638
2639 if( ksize <= 1 )
2640 {
2641 _src0.copyTo(_dst);
2642 return;
2643 }
2644
2645 CV_OCL_RUN(_dst.isUMat(),
2646 ocl_medianFilter(_src0,_dst, ksize))
2647
2648 Mat src0 = _src0.getMat();
2649 _dst.create( src0.size(), src0.type() );
2650 Mat dst = _dst.getMat();
2651
2652 #if IPP_VERSION_X100 >= 801
2653 CV_IPP_CHECK()
2654 {
2655 #define IPP_FILTER_MEDIAN_BORDER(ippType, ippDataType, flavor) \
2656 do \
2657 { \
2658 if (ippiFilterMedianBorderGetBufferSize(dstRoiSize, maskSize, \
2659 ippDataType, CV_MAT_CN(type), &bufSize) >= 0) \
2660 { \
2661 Ipp8u * buffer = ippsMalloc_8u(bufSize); \
2662 IppStatus status = ippiFilterMedianBorder_##flavor(src.ptr<ippType>(), (int)src.step, \
2663 dst.ptr<ippType>(), (int)dst.step, dstRoiSize, maskSize, \
2664 ippBorderRepl, (ippType)0, buffer); \
2665 ippsFree(buffer); \
2666 if (status >= 0) \
2667 { \
2668 CV_IMPL_ADD(CV_IMPL_IPP); \
2669 return; \
2670 } \
2671 } \
2672 setIppErrorStatus(); \
2673 } \
2674 while ((void)0, 0)
2675
2676 if( ksize <= 5 )
2677 {
2678 Ipp32s bufSize;
2679 IppiSize dstRoiSize = ippiSize(dst.cols, dst.rows), maskSize = ippiSize(ksize, ksize);
2680 Mat src;
2681 if( dst.data != src0.data )
2682 src = src0;
2683 else
2684 src0.copyTo(src);
2685
2686 int type = src0.type();
2687 if (type == CV_8UC1)
2688 IPP_FILTER_MEDIAN_BORDER(Ipp8u, ipp8u, 8u_C1R);
2689 else if (type == CV_16UC1)
2690 IPP_FILTER_MEDIAN_BORDER(Ipp16u, ipp16u, 16u_C1R);
2691 else if (type == CV_16SC1)
2692 IPP_FILTER_MEDIAN_BORDER(Ipp16s, ipp16s, 16s_C1R);
2693 else if (type == CV_32FC1)
2694 IPP_FILTER_MEDIAN_BORDER(Ipp32f, ipp32f, 32f_C1R);
2695 }
2696 #undef IPP_FILTER_MEDIAN_BORDER
2697 }
2698 #endif
2699
2700 #ifdef HAVE_TEGRA_OPTIMIZATION
2701 if (tegra::useTegra() && tegra::medianBlur(src0, dst, ksize))
2702 return;
2703 #endif
2704
2705 bool useSortNet = ksize == 3 || (ksize == 5
2706 #if !(CV_SSE2 || CV_NEON)
2707 && src0.depth() > CV_8U
2708 #endif
2709 );
2710
2711 Mat src;
2712 if( useSortNet )
2713 {
2714 if( dst.data != src0.data )
2715 src = src0;
2716 else
2717 src0.copyTo(src);
2718
2719 if( src.depth() == CV_8U )
2720 medianBlur_SortNet<MinMax8u, MinMaxVec8u>( src, dst, ksize );
2721 else if( src.depth() == CV_16U )
2722 medianBlur_SortNet<MinMax16u, MinMaxVec16u>( src, dst, ksize );
2723 else if( src.depth() == CV_16S )
2724 medianBlur_SortNet<MinMax16s, MinMaxVec16s>( src, dst, ksize );
2725 else if( src.depth() == CV_32F )
2726 medianBlur_SortNet<MinMax32f, MinMaxVec32f>( src, dst, ksize );
2727 else
2728 CV_Error(CV_StsUnsupportedFormat, "");
2729
2730 return;
2731 }
2732 else
2733 {
2734 cv::copyMakeBorder( src0, src, 0, 0, ksize/2, ksize/2, BORDER_REPLICATE );
2735
2736 int cn = src0.channels();
2737 CV_Assert( src.depth() == CV_8U && (cn == 1 || cn == 3 || cn == 4) );
2738
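        // Heuristic dispatch between the two 8-bit implementations: the
        // constant-time algorithm (medianBlur_8u_O1) has a higher fixed cost
        // per pixel, so the O(m) histogram update (medianBlur_8u_Om) wins for
        // small apertures. The cutoff grows for small images and grows further
        // without SIMD, since the constant-time code profits most from it;
        // e.g. with SSE2/NEON and an image under 1 MP, O(m) handles ksize up
        // to 3 + 12 = 15.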
2739 double img_size_mp = (double)(src0.total())/(1 << 20);
2740 if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)*
2741 (MEDIAN_HAVE_SIMD && (checkHardwareSupport(CV_CPU_SSE2) || checkHardwareSupport(CV_CPU_NEON)) ? 1 : 3))
2742 medianBlur_8u_Om( src, dst, ksize );
2743 else
2744 medianBlur_8u_O1( src, dst, ksize );
2745 }
2746 }
2747
2748 /****************************************************************************************\
2749 Bilateral Filtering
2750 \****************************************************************************************/
2751
2752 namespace cv
2753 {
2754
2755 class BilateralFilter_8u_Invoker :
2756 public ParallelLoopBody
2757 {
2758 public:
2759     BilateralFilter_8u_Invoker(Mat& _dest, const Mat& _temp, int _radius, int _maxk,
2760 int* _space_ofs, float *_space_weight, float *_color_weight) :
2761 temp(&_temp), dest(&_dest), radius(_radius),
2762 maxk(_maxk), space_ofs(_space_ofs), space_weight(_space_weight), color_weight(_color_weight)
2763 {
2764 }
2765
2766     virtual void operator() (const Range& range) const
2767 {
2768 int i, j, cn = dest->channels(), k;
2769 Size size = dest->size();
2770 #if CV_SSE3
2771 int CV_DECL_ALIGNED(16) buf[4];
2772 float CV_DECL_ALIGNED(16) bufSum[4];
2773 static const unsigned int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
2774 bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
2775 #endif
2776
2777 for( i = range.start; i < range.end; i++ )
2778 {
2779 const uchar* sptr = temp->ptr(i+radius) + radius*cn;
2780 uchar* dptr = dest->ptr(i);
2781
2782 if( cn == 1 )
2783 {
2784 for( j = 0; j < size.width; j++ )
2785 {
2786 float sum = 0, wsum = 0;
2787 int val0 = sptr[j];
2788 k = 0;
2789 #if CV_SSE3
2790 if( haveSSE3 )
2791 {
2792 __m128 _val0 = _mm_set1_ps(static_cast<float>(val0));
2793 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
2794
2795 for( ; k <= maxk - 4; k += 4 )
2796 {
2797 __m128 _valF = _mm_set_ps(sptr[j + space_ofs[k+3]], sptr[j + space_ofs[k+2]],
2798 sptr[j + space_ofs[k+1]], sptr[j + space_ofs[k]]);
2799
2800 __m128 _val = _mm_andnot_ps(_signMask, _mm_sub_ps(_valF, _val0));
2801 _mm_store_si128((__m128i*)buf, _mm_cvtps_epi32(_val));
2802
2803 __m128 _cw = _mm_set_ps(color_weight[buf[3]],color_weight[buf[2]],
2804 color_weight[buf[1]],color_weight[buf[0]]);
2805 __m128 _sw = _mm_loadu_ps(space_weight+k);
2806 __m128 _w = _mm_mul_ps(_cw, _sw);
2807 _cw = _mm_mul_ps(_w, _valF);
2808
2809 _sw = _mm_hadd_ps(_w, _cw);
2810 _sw = _mm_hadd_ps(_sw, _sw);
2811 _mm_storel_pi((__m64*)bufSum, _sw);
2812
2813 sum += bufSum[1];
2814 wsum += bufSum[0];
2815 }
2816 }
2817 #endif
2818 for( ; k < maxk; k++ )
2819 {
2820 int val = sptr[j + space_ofs[k]];
2821 float w = space_weight[k]*color_weight[std::abs(val - val0)];
2822 sum += val*w;
2823 wsum += w;
2824 }
2825 // overflow is not possible here => there is no need to use cv::saturate_cast
2826 dptr[j] = (uchar)cvRound(sum/wsum);
2827 }
2828 }
2829 else
2830 {
2831 assert( cn == 3 );
2832 for( j = 0; j < size.width*3; j += 3 )
2833 {
2834 float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
2835 int b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
2836 k = 0;
2837 #if CV_SSE3
2838 if( haveSSE3 )
2839 {
2840 const __m128i izero = _mm_setzero_si128();
2841 const __m128 _b0 = _mm_set1_ps(static_cast<float>(b0));
2842 const __m128 _g0 = _mm_set1_ps(static_cast<float>(g0));
2843 const __m128 _r0 = _mm_set1_ps(static_cast<float>(r0));
2844 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
2845
2846 for( ; k <= maxk - 4; k += 4 )
2847 {
2848 const int* const sptr_k0 = reinterpret_cast<const int*>(sptr + j + space_ofs[k]);
2849 const int* const sptr_k1 = reinterpret_cast<const int*>(sptr + j + space_ofs[k+1]);
2850 const int* const sptr_k2 = reinterpret_cast<const int*>(sptr + j + space_ofs[k+2]);
2851 const int* const sptr_k3 = reinterpret_cast<const int*>(sptr + j + space_ofs[k+3]);
2852
2853 __m128 _b = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k0[0]), izero), izero));
2854 __m128 _g = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k1[0]), izero), izero));
2855 __m128 _r = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k2[0]), izero), izero));
2856 __m128 _z = _mm_cvtepi32_ps(_mm_unpacklo_epi16(_mm_unpacklo_epi8(_mm_cvtsi32_si128(sptr_k3[0]), izero), izero));
2857
2858 _MM_TRANSPOSE4_PS(_b, _g, _r, _z);
2859
2860 __m128 bt = _mm_andnot_ps(_signMask, _mm_sub_ps(_b,_b0));
2861 __m128 gt = _mm_andnot_ps(_signMask, _mm_sub_ps(_g,_g0));
2862 __m128 rt = _mm_andnot_ps(_signMask, _mm_sub_ps(_r,_r0));
2863
2864 bt =_mm_add_ps(rt, _mm_add_ps(bt, gt));
2865 _mm_store_si128((__m128i*)buf, _mm_cvtps_epi32(bt));
2866
2867 __m128 _w = _mm_set_ps(color_weight[buf[3]],color_weight[buf[2]],
2868 color_weight[buf[1]],color_weight[buf[0]]);
2869 __m128 _sw = _mm_loadu_ps(space_weight+k);
2870
2871 _w = _mm_mul_ps(_w,_sw);
2872 _b = _mm_mul_ps(_b, _w);
2873 _g = _mm_mul_ps(_g, _w);
2874 _r = _mm_mul_ps(_r, _w);
2875
2876 _w = _mm_hadd_ps(_w, _b);
2877 _g = _mm_hadd_ps(_g, _r);
2878
2879 _w = _mm_hadd_ps(_w, _g);
2880 _mm_store_ps(bufSum, _w);
2881
2882 wsum += bufSum[0];
2883 sum_b += bufSum[1];
2884 sum_g += bufSum[2];
2885 sum_r += bufSum[3];
2886 }
2887 }
2888 #endif
2889
2890 for( ; k < maxk; k++ )
2891 {
2892 const uchar* sptr_k = sptr + j + space_ofs[k];
2893 int b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
2894 float w = space_weight[k]*color_weight[std::abs(b - b0) +
2895 std::abs(g - g0) + std::abs(r - r0)];
2896 sum_b += b*w; sum_g += g*w; sum_r += r*w;
2897 wsum += w;
2898 }
2899 wsum = 1.f/wsum;
2900 b0 = cvRound(sum_b*wsum);
2901 g0 = cvRound(sum_g*wsum);
2902 r0 = cvRound(sum_r*wsum);
2903 dptr[j] = (uchar)b0; dptr[j+1] = (uchar)g0; dptr[j+2] = (uchar)r0;
2904 }
2905 }
2906 }
2907 }
2908
2909 private:
2910 const Mat *temp;
2911 Mat *dest;
2912 int radius, maxk, *space_ofs;
2913 float *space_weight, *color_weight;
2914 };
2915
2916 #if defined (HAVE_IPP) && !defined(HAVE_IPP_ICV_ONLY) && 0
2917 class IPPBilateralFilter_8u_Invoker :
2918 public ParallelLoopBody
2919 {
2920 public:
2921     IPPBilateralFilter_8u_Invoker(Mat &_src, Mat &_dst, double _sigma_color, double _sigma_space, int _radius, bool *_ok) :
2922 ParallelLoopBody(), src(_src), dst(_dst), sigma_color(_sigma_color), sigma_space(_sigma_space), radius(_radius), ok(_ok)
2923 {
2924 *ok = true;
2925 }
2926
2927     virtual void operator() (const Range& range) const
2928 {
2929 int d = radius * 2 + 1;
2930 IppiSize kernel = {d, d};
2931 IppiSize roi={dst.cols, range.end - range.start};
2932 int bufsize=0;
2933 if (0 > ippiFilterBilateralGetBufSize_8u_C1R( ippiFilterBilateralGauss, roi, kernel, &bufsize))
2934 {
2935 *ok = false;
2936 return;
2937 }
2938 AutoBuffer<uchar> buf(bufsize);
2939 IppiFilterBilateralSpec *pSpec = (IppiFilterBilateralSpec *)alignPtr(&buf[0], 32);
2940 if (0 > ippiFilterBilateralInit_8u_C1R( ippiFilterBilateralGauss, kernel, (Ipp32f)sigma_color, (Ipp32f)sigma_space, 1, pSpec ))
2941 {
2942 *ok = false;
2943 return;
2944 }
2945 if (0 > ippiFilterBilateral_8u_C1R( src.ptr<uchar>(range.start) + radius * ((int)src.step[0] + 1), (int)src.step[0], dst.ptr<uchar>(range.start), (int)dst.step[0], roi, kernel, pSpec ))
2946 *ok = false;
2947 else
2948 {
2949 CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
2950 }
2951 }
2952 private:
2953 Mat &src;
2954 Mat &dst;
2955 double sigma_color;
2956 double sigma_space;
2957 int radius;
2958 bool *ok;
2959 const IPPBilateralFilter_8u_Invoker& operator= (const IPPBilateralFilter_8u_Invoker&);
2960 };
2961 #endif
2962
2963 #ifdef HAVE_OPENCL
2964
2965 static bool ocl_bilateralFilter_8u(InputArray _src, OutputArray _dst, int d,
2966 double sigma_color, double sigma_space,
2967 int borderType)
2968 {
2969 #ifdef ANDROID
2970 if (ocl::Device::getDefault().isNVidia())
2971 return false;
2972 #endif
2973
2974 int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);
2975 int i, j, maxk, radius;
2976
2977 if (depth != CV_8U || cn > 4)
2978 return false;
2979
2980 if (sigma_color <= 0)
2981 sigma_color = 1;
2982 if (sigma_space <= 0)
2983 sigma_space = 1;
2984
2985 double gauss_color_coeff = -0.5 / (sigma_color * sigma_color);
2986 double gauss_space_coeff = -0.5 / (sigma_space * sigma_space);
2987
2988 if ( d <= 0 )
2989 radius = cvRound(sigma_space * 1.5);
2990 else
2991 radius = d / 2;
2992 radius = MAX(radius, 1);
2993 d = radius * 2 + 1;
2994
2995 UMat src = _src.getUMat(), dst = _dst.getUMat(), temp;
2996 if (src.u == dst.u)
2997 return false;
2998
2999 copyMakeBorder(src, temp, radius, radius, radius, radius, borderType);
3000 std::vector<float> _space_weight(d * d);
3001 std::vector<int> _space_ofs(d * d);
3002 float * const space_weight = &_space_weight[0];
3003 int * const space_ofs = &_space_ofs[0];
3004
3005 // initialize space-related bilateral filter coefficients
3006 for( i = -radius, maxk = 0; i <= radius; i++ )
3007 for( j = -radius; j <= radius; j++ )
3008 {
3009 double r = std::sqrt((double)i * i + (double)j * j);
3010 if ( r > radius )
3011 continue;
3012 space_weight[maxk] = (float)std::exp(r * r * gauss_space_coeff);
3013 space_ofs[maxk++] = (int)(i * temp.step + j * cn);
3014 }
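    // space_ofs flattens each (i, j) kernel offset into a single byte offset
    // into the bordered image (i rows of temp.step bytes plus j*cn channels),
    // so the kernel reaches any neighbour with one addition per tap.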
3015
3016 char cvt[3][40];
3017 String cnstr = cn > 1 ? format("%d", cn) : "";
3018 String kernelName("bilateral");
3019 size_t sizeDiv = 1;
3020 if ((ocl::Device::getDefault().isIntel()) &&
3021 (ocl::Device::getDefault().type() == ocl::Device::TYPE_GPU))
3022 {
3023         // Intel GPU
3024         if (dst.cols % 4 == 0 && cn == 1) // single-channel images whose width is a multiple of 4
3025 {
3026 kernelName = "bilateral_float4";
3027 sizeDiv = 4;
3028 }
3029 }
3030 ocl::Kernel k(kernelName.c_str(), ocl::imgproc::bilateral_oclsrc,
3031 format("-D radius=%d -D maxk=%d -D cn=%d -D int_t=%s -D uint_t=uint%s -D convert_int_t=%s"
3032 " -D uchar_t=%s -D float_t=%s -D convert_float_t=%s -D convert_uchar_t=%s -D gauss_color_coeff=%f",
3033 radius, maxk, cn, ocl::typeToStr(CV_32SC(cn)), cnstr.c_str(),
3034 ocl::convertTypeStr(CV_8U, CV_32S, cn, cvt[0]),
3035 ocl::typeToStr(type), ocl::typeToStr(CV_32FC(cn)),
3036 ocl::convertTypeStr(CV_32S, CV_32F, cn, cvt[1]),
3037 ocl::convertTypeStr(CV_32F, CV_8U, cn, cvt[2]), gauss_color_coeff));
3038 if (k.empty())
3039 return false;
3040
3041 Mat mspace_weight(1, d * d, CV_32FC1, space_weight);
3042 Mat mspace_ofs(1, d * d, CV_32SC1, space_ofs);
3043 UMat ucolor_weight, uspace_weight, uspace_ofs;
3044
3045 mspace_weight.copyTo(uspace_weight);
3046 mspace_ofs.copyTo(uspace_ofs);
3047
3048 k.args(ocl::KernelArg::ReadOnlyNoSize(temp), ocl::KernelArg::WriteOnly(dst),
3049 ocl::KernelArg::PtrReadOnly(uspace_weight),
3050 ocl::KernelArg::PtrReadOnly(uspace_ofs));
3051
3052 size_t globalsize[2] = { dst.cols / sizeDiv, dst.rows };
3053 return k.run(2, globalsize, NULL, false);
3054 }
3055
3056 #endif
3057 static void
3058 bilateralFilter_8u( const Mat& src, Mat& dst, int d,
3059 double sigma_color, double sigma_space,
3060 int borderType )
3061 {
3062 int cn = src.channels();
3063 int i, j, maxk, radius;
3064 Size size = src.size();
3065
3066 CV_Assert( (src.type() == CV_8UC1 || src.type() == CV_8UC3) && src.data != dst.data );
3067
3068 if( sigma_color <= 0 )
3069 sigma_color = 1;
3070 if( sigma_space <= 0 )
3071 sigma_space = 1;
3072
3073 double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
3074 double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
3075
3076 if( d <= 0 )
3077 radius = cvRound(sigma_space*1.5);
3078 else
3079 radius = d/2;
3080 radius = MAX(radius, 1);
3081 d = radius*2 + 1;
3082
3083 Mat temp;
3084 copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
3085
3086 #if defined HAVE_IPP && (IPP_VERSION_MAJOR >= 7) && 0
3087 CV_IPP_CHECK()
3088 {
3089 if( cn == 1 )
3090 {
3091 bool ok;
3092 IPPBilateralFilter_8u_Invoker body(temp, dst, sigma_color * sigma_color, sigma_space * sigma_space, radius, &ok );
3093 parallel_for_(Range(0, dst.rows), body, dst.total()/(double)(1<<16));
3094 if( ok )
3095 {
3096 CV_IMPL_ADD(CV_IMPL_IPP|CV_IMPL_MT);
3097 return;
3098 }
3099 setIppErrorStatus();
3100 }
3101 }
3102 #endif
3103
3104 std::vector<float> _color_weight(cn*256);
3105 std::vector<float> _space_weight(d*d);
3106 std::vector<int> _space_ofs(d*d);
3107 float* color_weight = &_color_weight[0];
3108 float* space_weight = &_space_weight[0];
3109 int* space_ofs = &_space_ofs[0];
3110
3111 // initialize color-related bilateral filter coefficients
3112
3113 for( i = 0; i < 256*cn; i++ )
3114 color_weight[i] = (float)std::exp(i*i*gauss_color_coeff);
3115
3116 // initialize space-related bilateral filter coefficients
3117 for( i = -radius, maxk = 0; i <= radius; i++ )
3118 {
3119 j = -radius;
3120
3121 for( ; j <= radius; j++ )
3122 {
3123 double r = std::sqrt((double)i*i + (double)j*j);
3124 if( r > radius )
3125 continue;
3126 space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
3127 space_ofs[maxk++] = (int)(i*temp.step + j*cn);
3128 }
3129 }
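    // Together the two tables implement the usual bilateral weight
    //
    //     w = exp(-dc*dc / (2*sigma_color^2)) * exp(-r*r / (2*sigma_space^2))
    //
    // with one simplification: dc is the sum of absolute channel differences
    // (an L1 approximation of the color distance), looked up in color_weight,
    // while space_weight is precomputed once per kernel offset.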
3130
3131 BilateralFilter_8u_Invoker body(dst, temp, radius, maxk, space_ofs, space_weight, color_weight);
3132 parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
3133 }
3134
3135
3136 class BilateralFilter_32f_Invoker :
3137 public ParallelLoopBody
3138 {
3139 public:
3140
3141     BilateralFilter_32f_Invoker(int _cn, int _radius, int _maxk, int *_space_ofs,
3142 const Mat& _temp, Mat& _dest, float _scale_index, float *_space_weight, float *_expLUT) :
3143 cn(_cn), radius(_radius), maxk(_maxk), space_ofs(_space_ofs),
3144 temp(&_temp), dest(&_dest), scale_index(_scale_index), space_weight(_space_weight), expLUT(_expLUT)
3145 {
3146 }
3147
3148     virtual void operator() (const Range& range) const
3149 {
3150 int i, j, k;
3151 Size size = dest->size();
3152 #if CV_SSE3
3153 int CV_DECL_ALIGNED(16) idxBuf[4];
3154 float CV_DECL_ALIGNED(16) bufSum32[4];
3155 static const unsigned int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
3156 bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3);
3157 #endif
3158
3159 for( i = range.start; i < range.end; i++ )
3160 {
3161 const float* sptr = temp->ptr<float>(i+radius) + radius*cn;
3162 float* dptr = dest->ptr<float>(i);
3163
3164 if( cn == 1 )
3165 {
3166 for( j = 0; j < size.width; j++ )
3167 {
3168 float sum = 0, wsum = 0;
3169 float val0 = sptr[j];
3170 k = 0;
3171 #if CV_SSE3
3172 if( haveSSE3 )
3173 {
3174 __m128 psum = _mm_setzero_ps();
3175 const __m128 _val0 = _mm_set1_ps(sptr[j]);
3176 const __m128 _scale_index = _mm_set1_ps(scale_index);
3177 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
3178
3179 for( ; k <= maxk - 4 ; k += 4 )
3180 {
3181 __m128 _sw = _mm_loadu_ps(space_weight + k);
3182 __m128 _val = _mm_set_ps(sptr[j + space_ofs[k+3]], sptr[j + space_ofs[k+2]],
3183 sptr[j + space_ofs[k+1]], sptr[j + space_ofs[k]]);
3184 __m128 _alpha = _mm_mul_ps(_mm_andnot_ps( _signMask, _mm_sub_ps(_val,_val0)), _scale_index);
3185
3186 __m128i _idx = _mm_cvtps_epi32(_alpha);
3187 _mm_store_si128((__m128i*)idxBuf, _idx);
3188 _alpha = _mm_sub_ps(_alpha, _mm_cvtepi32_ps(_idx));
3189
3190 __m128 _explut = _mm_set_ps(expLUT[idxBuf[3]], expLUT[idxBuf[2]],
3191 expLUT[idxBuf[1]], expLUT[idxBuf[0]]);
3192 __m128 _explut1 = _mm_set_ps(expLUT[idxBuf[3]+1], expLUT[idxBuf[2]+1],
3193 expLUT[idxBuf[1]+1], expLUT[idxBuf[0]+1]);
3194
3195 __m128 _w = _mm_mul_ps(_sw, _mm_add_ps(_explut, _mm_mul_ps(_alpha, _mm_sub_ps(_explut1, _explut))));
3196 _val = _mm_mul_ps(_w, _val);
3197
3198 _sw = _mm_hadd_ps(_w, _val);
3199 _sw = _mm_hadd_ps(_sw, _sw);
3200 psum = _mm_add_ps(_sw, psum);
3201 }
3202 _mm_storel_pi((__m64*)bufSum32, psum);
3203
3204 sum = bufSum32[1];
3205 wsum = bufSum32[0];
3206 }
3207 #endif
3208
3209 for( ; k < maxk; k++ )
3210 {
3211 float val = sptr[j + space_ofs[k]];
3212 float alpha = (float)(std::abs(val - val0)*scale_index);
3213 int idx = cvFloor(alpha);
3214 alpha -= idx;
3215 float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
3216 sum += val*w;
3217 wsum += w;
3218 }
3219 dptr[j] = (float)(sum/wsum);
3220 }
3221 }
3222 else
3223 {
3224 CV_Assert( cn == 3 );
3225 for( j = 0; j < size.width*3; j += 3 )
3226 {
3227 float sum_b = 0, sum_g = 0, sum_r = 0, wsum = 0;
3228 float b0 = sptr[j], g0 = sptr[j+1], r0 = sptr[j+2];
3229 k = 0;
3230 #if CV_SSE3
3231 if( haveSSE3 )
3232 {
3233 __m128 sum = _mm_setzero_ps();
3234 const __m128 _b0 = _mm_set1_ps(b0);
3235 const __m128 _g0 = _mm_set1_ps(g0);
3236 const __m128 _r0 = _mm_set1_ps(r0);
3237 const __m128 _scale_index = _mm_set1_ps(scale_index);
3238 const __m128 _signMask = _mm_load_ps((const float*)bufSignMask);
3239
3240 for( ; k <= maxk-4; k += 4 )
3241 {
3242 __m128 _sw = _mm_loadu_ps(space_weight + k);
3243
3244 const float* const sptr_k0 = sptr + j + space_ofs[k];
3245 const float* const sptr_k1 = sptr + j + space_ofs[k+1];
3246 const float* const sptr_k2 = sptr + j + space_ofs[k+2];
3247 const float* const sptr_k3 = sptr + j + space_ofs[k+3];
3248
3249 __m128 _b = _mm_loadu_ps(sptr_k0);
3250 __m128 _g = _mm_loadu_ps(sptr_k1);
3251 __m128 _r = _mm_loadu_ps(sptr_k2);
3252 __m128 _z = _mm_loadu_ps(sptr_k3);
3253 _MM_TRANSPOSE4_PS(_b, _g, _r, _z);
3254
3255 __m128 _bt = _mm_andnot_ps(_signMask,_mm_sub_ps(_b,_b0));
3256 __m128 _gt = _mm_andnot_ps(_signMask,_mm_sub_ps(_g,_g0));
3257 __m128 _rt = _mm_andnot_ps(_signMask,_mm_sub_ps(_r,_r0));
3258
3259 __m128 _alpha = _mm_mul_ps(_scale_index, _mm_add_ps(_rt,_mm_add_ps(_bt, _gt)));
3260
3261 __m128i _idx = _mm_cvtps_epi32(_alpha);
3262 _mm_store_si128((__m128i*)idxBuf, _idx);
3263 _alpha = _mm_sub_ps(_alpha, _mm_cvtepi32_ps(_idx));
3264
3265 __m128 _explut = _mm_set_ps(expLUT[idxBuf[3]], expLUT[idxBuf[2]], expLUT[idxBuf[1]], expLUT[idxBuf[0]]);
3266 __m128 _explut1 = _mm_set_ps(expLUT[idxBuf[3]+1], expLUT[idxBuf[2]+1], expLUT[idxBuf[1]+1], expLUT[idxBuf[0]+1]);
3267
3268 __m128 _w = _mm_mul_ps(_sw, _mm_add_ps(_explut, _mm_mul_ps(_alpha, _mm_sub_ps(_explut1, _explut))));
3269
3270 _b = _mm_mul_ps(_b, _w);
3271 _g = _mm_mul_ps(_g, _w);
3272 _r = _mm_mul_ps(_r, _w);
3273
3274 _w = _mm_hadd_ps(_w, _b);
3275 _g = _mm_hadd_ps(_g, _r);
3276
3277 _w = _mm_hadd_ps(_w, _g);
3278 sum = _mm_add_ps(sum, _w);
3279 }
3280 _mm_store_ps(bufSum32, sum);
3281 wsum = bufSum32[0];
3282 sum_b = bufSum32[1];
3283 sum_g = bufSum32[2];
3284 sum_r = bufSum32[3];
3285 }
3286 #endif
3287
3288 for(; k < maxk; k++ )
3289 {
3290 const float* sptr_k = sptr + j + space_ofs[k];
3291 float b = sptr_k[0], g = sptr_k[1], r = sptr_k[2];
3292 float alpha = (float)((std::abs(b - b0) +
3293 std::abs(g - g0) + std::abs(r - r0))*scale_index);
3294 int idx = cvFloor(alpha);
3295 alpha -= idx;
3296 float w = space_weight[k]*(expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
3297 sum_b += b*w; sum_g += g*w; sum_r += r*w;
3298 wsum += w;
3299 }
3300 wsum = 1.f/wsum;
3301 b0 = sum_b*wsum;
3302 g0 = sum_g*wsum;
3303 r0 = sum_r*wsum;
3304 dptr[j] = b0; dptr[j+1] = g0; dptr[j+2] = r0;
3305 }
3306 }
3307 }
3308 }
3309
3310 private:
3311 int cn, radius, maxk, *space_ofs;
3312 const Mat* temp;
3313 Mat *dest;
3314 float scale_index, *space_weight, *expLUT;
3315 };
3316
3317
3318 static void
3319 bilateralFilter_32f( const Mat& src, Mat& dst, int d,
3320 double sigma_color, double sigma_space,
3321 int borderType )
3322 {
3323 int cn = src.channels();
3324 int i, j, maxk, radius;
3325 double minValSrc=-1, maxValSrc=1;
3326 const int kExpNumBinsPerChannel = 1 << 12;
3327 int kExpNumBins = 0;
3328 float lastExpVal = 1.f;
3329 float len, scale_index;
3330 Size size = src.size();
3331
3332 CV_Assert( (src.type() == CV_32FC1 || src.type() == CV_32FC3) && src.data != dst.data );
3333
3334 if( sigma_color <= 0 )
3335 sigma_color = 1;
3336 if( sigma_space <= 0 )
3337 sigma_space = 1;
3338
3339 double gauss_color_coeff = -0.5/(sigma_color*sigma_color);
3340 double gauss_space_coeff = -0.5/(sigma_space*sigma_space);
3341
3342 if( d <= 0 )
3343 radius = cvRound(sigma_space*1.5);
3344 else
3345 radius = d/2;
3346 radius = MAX(radius, 1);
3347 d = radius*2 + 1;
3348 // compute the min/max range for the input image (even if multichannel)
3349
3350 minMaxLoc( src.reshape(1), &minValSrc, &maxValSrc );
3351 if(std::abs(minValSrc - maxValSrc) < FLT_EPSILON)
3352 {
3353 src.copyTo(dst);
3354 return;
3355 }
3356
3357 // temporary copy of the image with borders for easy processing
3358 Mat temp;
3359 copyMakeBorder( src, temp, radius, radius, radius, radius, borderType );
3360 const double insteadNaNValue = -5. * sigma_color;
3361     patchNaNs( temp, insteadNaNValue ); // this replacement of NaNs makes the assumption that depth values are nonnegative
3362     // TODO: make insteadNaNValue available in the outside function interface to control the cases breaking the assumption
3363 // allocate lookup tables
3364 std::vector<float> _space_weight(d*d);
3365 std::vector<int> _space_ofs(d*d);
3366 float* space_weight = &_space_weight[0];
3367 int* space_ofs = &_space_ofs[0];
3368
3369 // assign a length which is slightly more than needed
3370 len = (float)(maxValSrc - minValSrc) * cn;
3371 kExpNumBins = kExpNumBinsPerChannel * cn;
3372 std::vector<float> _expLUT(kExpNumBins+2);
3373 float* expLUT = &_expLUT[0];
3374
3375 scale_index = kExpNumBins/len;
3376
3377 // initialize the exp LUT
3378 for( i = 0; i < kExpNumBins+2; i++ )
3379 {
3380 if( lastExpVal > 0.f )
3381 {
3382 double val = i / scale_index;
3383 expLUT[i] = (float)std::exp(val * val * gauss_color_coeff);
3384 lastExpVal = expLUT[i];
3385 }
3386 else
3387 expLUT[i] = 0.f;
3388 }
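    // The 32f path cannot afford a dense per-value color table, so it samples
    // exp() into expLUT and interpolates linearly at lookup time; this is the
    // lookup the inner loops below perform:
    //
    //     float alpha = std::abs(val - val0) * scale_index;
    //     int idx = cvFloor(alpha);
    //     alpha -= idx;                                  // fractional part
    //     float w = space_weight[k] *
    //               (expLUT[idx] + alpha*(expLUT[idx+1] - expLUT[idx]));
    //
    // The kExpNumBins+2 entries guarantee expLUT[idx+1] is always in range.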
3389
3390 // initialize space-related bilateral filter coefficients
3391 for( i = -radius, maxk = 0; i <= radius; i++ )
3392 for( j = -radius; j <= radius; j++ )
3393 {
3394 double r = std::sqrt((double)i*i + (double)j*j);
3395 if( r > radius )
3396 continue;
3397 space_weight[maxk] = (float)std::exp(r*r*gauss_space_coeff);
3398 space_ofs[maxk++] = (int)(i*(temp.step/sizeof(float)) + j*cn);
3399 }
3400
3401     // run the filter body in parallel over horizontal stripes of rows
3402
3403 BilateralFilter_32f_Invoker body(cn, radius, maxk, space_ofs, temp, dst, scale_index, space_weight, expLUT);
3404 parallel_for_(Range(0, size.height), body, dst.total()/(double)(1<<16));
3405 }
3406
3407 }
3408
3409 void cv::bilateralFilter( InputArray _src, OutputArray _dst, int d,
3410 double sigmaColor, double sigmaSpace,
3411 int borderType )
3412 {
3413 _dst.create( _src.size(), _src.type() );
3414
3415 CV_OCL_RUN(_src.dims() <= 2 && _dst.isUMat(),
3416 ocl_bilateralFilter_8u(_src, _dst, d, sigmaColor, sigmaSpace, borderType))
3417
3418 Mat src = _src.getMat(), dst = _dst.getMat();
3419
3420 if( src.depth() == CV_8U )
3421 bilateralFilter_8u( src, dst, d, sigmaColor, sigmaSpace, borderType );
3422 else if( src.depth() == CV_32F )
3423 bilateralFilter_32f( src, dst, d, sigmaColor, sigmaSpace, borderType );
3424 else
3425 CV_Error( CV_StsUnsupportedFormat,
3426 "Bilateral filtering is only implemented for 8u and 32f images" );
3427 }
3428
3429 //////////////////////////////////////////////////////////////////////////////////////////
3430
3431 CV_IMPL void
3432 cvSmooth( const void* srcarr, void* dstarr, int smooth_type,
3433 int param1, int param2, double param3, double param4 )
3434 {
3435 cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0;
3436
3437 CV_Assert( dst.size() == src.size() &&
3438 (smooth_type == CV_BLUR_NO_SCALE || dst.type() == src.type()) );
3439
3440 if( param2 <= 0 )
3441 param2 = param1;
3442
3443 if( smooth_type == CV_BLUR || smooth_type == CV_BLUR_NO_SCALE )
3444 cv::boxFilter( src, dst, dst.depth(), cv::Size(param1, param2), cv::Point(-1,-1),
3445 smooth_type == CV_BLUR, cv::BORDER_REPLICATE );
3446 else if( smooth_type == CV_GAUSSIAN )
3447 cv::GaussianBlur( src, dst, cv::Size(param1, param2), param3, param4, cv::BORDER_REPLICATE );
3448 else if( smooth_type == CV_MEDIAN )
3449 cv::medianBlur( src, dst, param1 );
3450 else
3451 cv::bilateralFilter( src, dst, param1, param3, param4, cv::BORDER_REPLICATE );
3452
3453 if( dst.data != dst0.data )
3454 CV_Error( CV_StsUnmatchedFormats, "The destination image does not have the proper type" );
3455 }
3456
3457 /* End of file. */
3458