/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#if !defined CUDA_DISABLER

#include <thrust/device_ptr.h>
#include <thrust/sort.h>

#include "opencv2/core/cuda/common.hpp"
#include "opencv2/core/cuda/emulation.hpp"
#include "opencv2/core/cuda/dynamic_smem.hpp"

namespace cv { namespace cuda { namespace device
{
    namespace hough_lines
    {
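        // Device-global counter for the number of candidate lines written by
        // linesGetResult; reset from the host (cudaMemset) before each launch.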
        __device__ int g_counter;

        ////////////////////////////////////////////////////////////////////////
        // linesAccum
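        //
        // Each block handles a single theta value (n = blockIdx.x). Every thread
        // strides over the list of edge points, each packed as (y << 16) | x,
        // computes the rho bin r = round((x*cos(theta) + y*sin(theta)) / rho) and
        // shifts it by (numrho - 1) / 2 so that negative rho values map to valid
        // indices. Votes go into row n + 1, column r + 1 of the accumulator; the
        // one-cell border lets linesGetResult read all four neighbours of a cell
        // without bounds checks.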
60 
        __global__ void linesAccumGlobal(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
        {
            const int n = blockIdx.x;
            const float ang = n * theta;

            float sinVal;
            float cosVal;
            sincosf(ang, &sinVal, &cosVal);
            sinVal *= irho;
            cosVal *= irho;

            const int shift = (numrho - 1) / 2;

            int* accumRow = accum.ptr(n + 1);
            for (int i = threadIdx.x; i < count; i += blockDim.x)
            {
                const unsigned int val = list[i];

                const int x = (val & 0xFFFF);
                const int y = (val >> 16) & 0xFFFF;

                int r = __float2int_rn(x * cosVal + y * sinVal);
                r += shift;

                ::atomicAdd(accumRow + r + 1, 1);
            }
        }

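        // Same voting scheme as linesAccumGlobal, but the votes for the current
        // theta are first accumulated in a shared-memory histogram and only then
        // copied out to the accumulator row, keeping global-memory atomics out of
        // the inner loop.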
        __global__ void linesAccumShared(const unsigned int* list, const int count, PtrStepi accum, const float irho, const float theta, const int numrho)
        {
            int* smem = DynamicSharedMem<int>();

            for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
                smem[i] = 0;

            __syncthreads();

            const int n = blockIdx.x;
            const float ang = n * theta;

            float sinVal;
            float cosVal;
            sincosf(ang, &sinVal, &cosVal);
            sinVal *= irho;
            cosVal *= irho;

            const int shift = (numrho - 1) / 2;

            for (int i = threadIdx.x; i < count; i += blockDim.x)
            {
                const unsigned int val = list[i];

                const int x = (val & 0xFFFF);
                const int y = (val >> 16) & 0xFFFF;

                int r = __float2int_rn(x * cosVal + y * sinVal);
                r += shift;

                Emulation::smem::atomicAdd(&smem[r + 1], 1);
            }

            __syncthreads();

            int* accumRow = accum.ptr(n + 1);
            for (int i = threadIdx.x; i < numrho + 1; i += blockDim.x)
                accumRow[i] = smem[i];
        }

        void linesAccum_gpu(const unsigned int* list, int count, PtrStepSzi accum, float rho, float theta, size_t sharedMemPerBlock, bool has20)
        {
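            // One block per theta bin; has20 presumably indicates a compute
            // capability >= 2.0 device, which supports 1024-thread blocks.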
            const dim3 block(has20 ? 1024 : 512);
            const dim3 grid(accum.rows - 2);

            size_t smemSize = (accum.cols - 1) * sizeof(int);

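            // Use the shared-memory kernel only if the per-theta rho histogram fits
            // into shared memory with some slack (~1000 bytes kept free); otherwise
            // fall back to the global-memory variant.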
            if (smemSize < sharedMemPerBlock - 1000)
                linesAccumShared<<<grid, block, smemSize>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);
            else
                linesAccumGlobal<<<grid, block>>>(list, count, accum, 1.0f / rho, theta, accum.cols - 2);

            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );
        }

        ////////////////////////////////////////////////////////////////////////
        // linesGetResult

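        // One thread per accumulator cell. A cell is accepted if its vote count
        // exceeds the threshold and it is a local maximum with respect to its four
        // neighbours (strict '>' on one side, '>=' on the other to break ties on
        // plateaus). Accepted cells are converted to (rho, theta) pairs and appended
        // to out/votes through the atomic counter g_counter; candidates beyond
        // maxSize are counted but not stored.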
        __global__ void linesGetResult(const PtrStepSzi accum, float2* out, int* votes, const int maxSize, const float rho, const float theta, const int threshold, const int numrho)
        {
            const int r = blockIdx.x * blockDim.x + threadIdx.x;
            const int n = blockIdx.y * blockDim.y + threadIdx.y;

            if (r >= accum.cols - 2 || n >= accum.rows - 2)
                return;

            const int curVotes = accum(n + 1, r + 1);

            if (curVotes > threshold &&
                curVotes >  accum(n + 1, r) &&
                curVotes >= accum(n + 1, r + 2) &&
                curVotes >  accum(n, r + 1) &&
                curVotes >= accum(n + 2, r + 1))
            {
                const float radius = (r - (numrho - 1) * 0.5f) * rho;
                const float angle = n * theta;

                const int ind = ::atomicAdd(&g_counter, 1);
                if (ind < maxSize)
                {
                    out[ind] = make_float2(radius, angle);
                    votes[ind] = curVotes;
                }
            }
        }

        int linesGetResult_gpu(PtrStepSzi accum, float2* out, int* votes, int maxSize, float rho, float theta, int threshold, bool doSort)
        {
            void* counterPtr;
            cudaSafeCall( cudaGetSymbolAddress(&counterPtr, g_counter) );

            cudaSafeCall( cudaMemset(counterPtr, 0, sizeof(int)) );

            const dim3 block(32, 8);
            const dim3 grid(divUp(accum.cols - 2, block.x), divUp(accum.rows - 2, block.y));

            cudaSafeCall( cudaFuncSetCacheConfig(linesGetResult, cudaFuncCachePreferL1) );

            linesGetResult<<<grid, block>>>(accum, out, votes, maxSize, rho, theta, threshold, accum.cols - 2);
            cudaSafeCall( cudaGetLastError() );

            cudaSafeCall( cudaDeviceSynchronize() );

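            // g_counter can exceed maxSize when more candidates were found than the
            // output buffers can hold, so clamp the reported count.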
            int totalCount;
            cudaSafeCall( cudaMemcpy(&totalCount, counterPtr, sizeof(int), cudaMemcpyDeviceToHost) );

            totalCount = ::min(totalCount, maxSize);

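            // Optionally sort the detected lines by descending vote count so the
            // strongest lines come first.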
            if (doSort && totalCount > 0)
            {
                thrust::device_ptr<float2> outPtr(out);
                thrust::device_ptr<int> votesPtr(votes);
                thrust::sort_by_key(votesPtr, votesPtr + totalCount, outPtr, thrust::greater<int>());
            }

            return totalCount;
        }
    }
}}}


#endif /* CUDA_DISABLER */