
//=============================================================================
//
// KAZE.cpp
// Author: Pablo F. Alcantarilla
// Institution: University d'Auvergne
// Address: Clermont Ferrand, France
// Date: 21/01/2012
// Email: pablofdezalc@gmail.com
//
// KAZE Features Copyright 2012, Pablo F. Alcantarilla
// All Rights Reserved
// See LICENSE for the license information
//=============================================================================

/**
 * @file KAZEFeatures.cpp
 * @brief Main class for detecting and describing features in a nonlinear
 * scale space
 * @date Jan 21, 2012
 * @author Pablo F. Alcantarilla
 */
#include "../precomp.hpp"
#include "KAZEFeatures.h"
#include "utils.h"

namespace cv
{

// Namespaces
using namespace std;

/* ************************************************************************* */
/**
 * @brief KAZE constructor with input options
 * @param options KAZE configuration options
 * @note The constructor allocates memory for the nonlinear scale space
 */
KAZEFeatures::KAZEFeatures(KAZEOptions& options)
        : options_(options)
{
    ncycles_ = 0;
    reordering_ = true;

    // Now allocate memory for the evolution
    Allocate_Memory_Evolution();
}

/* ************************************************************************* */
/**
 * @brief This method allocates the memory for the nonlinear diffusion evolution
 */
void KAZEFeatures::Allocate_Memory_Evolution(void) {

    // Allocate the dimension of the matrices for the evolution
    for (int i = 0; i <= options_.omax - 1; i++)
    {
        for (int j = 0; j <= options_.nsublevels - 1; j++)
        {
            TEvolution aux;
            aux.Lx = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Ly = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Lxx = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Lxy = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Lyy = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Lt = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Lsmooth = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.Ldet = Mat::zeros(options_.img_height, options_.img_width, CV_32F);
            aux.esigma = options_.soffset*pow((float)2.0f, (float)(j) / (float)(options_.nsublevels)+i);
            aux.etime = 0.5f*(aux.esigma*aux.esigma);
            aux.sigma_size = fRound(aux.esigma);
            aux.octave = i;
            aux.sublevel = j;
            evolution_.push_back(aux);
        }
    }

    // Allocate memory for the FED number of cycles and time steps
    for (size_t i = 1; i < evolution_.size(); i++)
    {
        int naux = 0;
        vector<float> tau;
        float ttime = 0.0;
        ttime = evolution_[i].etime - evolution_[i - 1].etime;
        naux = fed_tau_by_process_time(ttime, 1, 0.25f, reordering_, tau);
        nsteps_.push_back(naux);
        tsteps_.push_back(tau);
        ncycles_++;
    }
}
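
/* ************************************************************************* */
/**
 * @note Illustrative sketch (not part of the library): each evolution level above
 * uses esigma = soffset * 2^(octave + sublevel/nsublevels) and, since diffusion
 * time relates to Gaussian scale by t = sigma^2 / 2, etime = 0.5 * esigma^2.
 * The standalone snippet below just prints that schedule; the soffset, omax and
 * nsublevels values are assumed defaults for illustration only.
 * @code
 * #include <cstdio>
 * #include <cmath>
 *
 * int main() {
 *     const float soffset = 1.6f;          // assumed base scale
 *     const int omax = 4, nsublevels = 4;  // assumed pyramid layout
 *     for (int i = 0; i < omax; i++) {
 *         for (int j = 0; j < nsublevels; j++) {
 *             float esigma = soffset * std::pow(2.0f, i + (float)j / nsublevels);
 *             float etime = 0.5f * esigma * esigma;
 *             std::printf("octave %d sublevel %d: sigma = %.3f, t = %.3f\n", i, j, esigma, etime);
 *         }
 *     }
 *     return 0;
 * }
 * @endcode
 */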

/* ************************************************************************* */
/**
 * @brief This method creates the nonlinear scale space for a given image
 * @param img Input image for which the nonlinear scale space needs to be created
 * @return 0 if the nonlinear scale space was created successfully. -1 otherwise
 */
int KAZEFeatures::Create_Nonlinear_Scale_Space(const Mat &img)
{
    CV_Assert(evolution_.size() > 0);

    // Copy the original image to the first level of the evolution
    img.copyTo(evolution_[0].Lt);
    gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lt, 0, 0, options_.soffset);
    gaussian_2D_convolution(evolution_[0].Lt, evolution_[0].Lsmooth, 0, 0, options_.sderivatives);

    // Firstly compute the kcontrast factor
    Compute_KContrast(evolution_[0].Lt, options_.kcontrast_percentille);

    // Allocate memory for the flow and step images
    Mat Lflow = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);
    Mat Lstep = Mat::zeros(evolution_[0].Lt.rows, evolution_[0].Lt.cols, CV_32F);

    // Now generate the rest of evolution levels
    for (size_t i = 1; i < evolution_.size(); i++)
    {
        evolution_[i - 1].Lt.copyTo(evolution_[i].Lt);
        gaussian_2D_convolution(evolution_[i - 1].Lt, evolution_[i].Lsmooth, 0, 0, options_.sderivatives);

        // Compute the Gaussian derivatives Lx and Ly
        Scharr(evolution_[i].Lsmooth, evolution_[i].Lx, CV_32F, 1, 0, 1, 0, BORDER_DEFAULT);
        Scharr(evolution_[i].Lsmooth, evolution_[i].Ly, CV_32F, 0, 1, 1, 0, BORDER_DEFAULT);

        // Compute the conductivity equation
        if (options_.diffusivity == KAZE::DIFF_PM_G1)
            pm_g1(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
        else if (options_.diffusivity == KAZE::DIFF_PM_G2)
            pm_g2(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);
        else if (options_.diffusivity == KAZE::DIFF_WEICKERT)
            weickert_diffusivity(evolution_[i].Lx, evolution_[i].Ly, Lflow, options_.kcontrast);

        // Perform FED n inner steps
        for (int j = 0; j < nsteps_[i - 1]; j++)
            nld_step_scalar(evolution_[i].Lt, Lflow, Lstep, tsteps_[i - 1][j]);
    }

    return 0;
}
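
/* ************************************************************************* */
/**
 * @note Simplified sketch of what one FED inner step does, i.e. an explicit update
 * L <- L + tau * div(c * grad(L)) where c is the conductivity image (Lflow above).
 * This is only an illustration that ignores the image borders; the real work is
 * done by nld_step_scalar, and the helper name simple_nld_step below is hypothetical.
 * @code
 * #include <opencv2/core.hpp>
 *
 * static void simple_nld_step(cv::Mat& Lt, const cv::Mat& c, float tau)
 * {
 *     cv::Mat Lstep = cv::Mat::zeros(Lt.size(), CV_32F);
 *     for (int y = 1; y < Lt.rows - 1; y++) {
 *         for (int x = 1; x < Lt.cols - 1; x++) {
 *             // Flux differences along x and y, averaging the conductivity between neighbours
 *             float xpos = (c.at<float>(y, x) + c.at<float>(y, x + 1))*(Lt.at<float>(y, x + 1) - Lt.at<float>(y, x));
 *             float xneg = (c.at<float>(y, x - 1) + c.at<float>(y, x))*(Lt.at<float>(y, x) - Lt.at<float>(y, x - 1));
 *             float ypos = (c.at<float>(y, x) + c.at<float>(y + 1, x))*(Lt.at<float>(y + 1, x) - Lt.at<float>(y, x));
 *             float yneg = (c.at<float>(y - 1, x) + c.at<float>(y, x))*(Lt.at<float>(y, x) - Lt.at<float>(y - 1, x));
 *             Lstep.at<float>(y, x) = 0.5f*tau*(xpos - xneg + ypos - yneg);
 *         }
 *     }
 *     Lt += Lstep;
 * }
 * @endcode
 */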

/* ************************************************************************* */
/**
 * @brief This method computes the k contrast factor
 * @param img Input image
 * @param kpercentile Percentile of the gradient histogram
 */
void KAZEFeatures::Compute_KContrast(const Mat &img, const float &kpercentile)
{
    options_.kcontrast = compute_k_percentile(img, kpercentile, options_.sderivatives, options_.kcontrast_bins, 0, 0);
}
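
/* ************************************************************************* */
/**
 * @note Rough sketch of the idea behind the contrast factor: build a histogram of
 * gradient magnitudes and take the value below which kpercentile of the non-zero
 * gradients fall. The real implementation is compute_k_percentile (which also
 * smooths the image and uses Scharr derivatives); the helper below, including its
 * name k_percentile_sketch, is illustrative only.
 * @code
 * #include <opencv2/core.hpp>
 * #include <vector>
 * #include <algorithm>
 *
 * static float k_percentile_sketch(const cv::Mat& Lx, const cv::Mat& Ly, float perc, int nbins)
 * {
 *     cv::Mat mag;
 *     cv::magnitude(Lx, Ly, mag);          // |grad L| per pixel
 *     double hmax = 0.0;
 *     cv::minMaxLoc(mag, 0, &hmax);
 *
 *     std::vector<int> hist(nbins, 0);
 *     int npoints = 0;
 *     for (int y = 0; y < mag.rows; y++) {
 *         for (int x = 0; x < mag.cols; x++) {
 *             float m = mag.at<float>(y, x);
 *             if (m > 0.0f) {
 *                 hist[std::min((int)(nbins*m/hmax), nbins - 1)]++;
 *                 npoints++;
 *             }
 *         }
 *     }
 *
 *     int threshold = (int)(npoints*perc), acc = 0, b = 0;
 *     while (b < nbins && acc < threshold)
 *         acc += hist[b++];
 *     return (float)(hmax*b/nbins);
 * }
 * @endcode
 */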

/* ************************************************************************* */
/**
 * @brief This method computes the feature detector response for the nonlinear scale space
 * @note We use the Hessian determinant as feature detector
 */
void KAZEFeatures::Compute_Detector_Response(void)
{
    float lxx = 0.0, lxy = 0.0, lyy = 0.0;

    // Firstly compute the multiscale derivatives
    Compute_Multiscale_Derivatives();

    for (size_t i = 0; i < evolution_.size(); i++)
    {
        for (int ix = 0; ix < options_.img_height; ix++)
        {
            for (int jx = 0; jx < options_.img_width; jx++)
            {
                lxx = *(evolution_[i].Lxx.ptr<float>(ix)+jx);
                lxy = *(evolution_[i].Lxy.ptr<float>(ix)+jx);
                lyy = *(evolution_[i].Lyy.ptr<float>(ix)+jx);
                *(evolution_[i].Ldet.ptr<float>(ix)+jx) = (lxx*lyy - lxy*lxy);
            }
        }
    }
}

/* ************************************************************************* */
/**
 * @brief This method selects interesting keypoints through the nonlinear scale space
 * @param kpts Vector of keypoints
 */
void KAZEFeatures::Feature_Detection(std::vector<KeyPoint>& kpts)
{
    kpts.clear();
    Compute_Detector_Response();
    Determinant_Hessian(kpts);
    Do_Subpixel_Refinement(kpts);
}

/* ************************************************************************* */
class MultiscaleDerivativesKAZEInvoker : public ParallelLoopBody
{
public:
    explicit MultiscaleDerivativesKAZEInvoker(std::vector<TEvolution>& ev) : evolution_(&ev)
    {
    }

    void operator()(const Range& range) const
    {
        std::vector<TEvolution>& evolution = *evolution_;
        for (int i = range.start; i < range.end; i++)
        {
            compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Lx, 1, 0, evolution[i].sigma_size);
            compute_scharr_derivatives(evolution[i].Lsmooth, evolution[i].Ly, 0, 1, evolution[i].sigma_size);
            compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxx, 1, 0, evolution[i].sigma_size);
            compute_scharr_derivatives(evolution[i].Ly, evolution[i].Lyy, 0, 1, evolution[i].sigma_size);
            compute_scharr_derivatives(evolution[i].Lx, evolution[i].Lxy, 0, 1, evolution[i].sigma_size);

            evolution[i].Lx = evolution[i].Lx*((evolution[i].sigma_size));
            evolution[i].Ly = evolution[i].Ly*((evolution[i].sigma_size));
            evolution[i].Lxx = evolution[i].Lxx*((evolution[i].sigma_size)*(evolution[i].sigma_size));
            evolution[i].Lxy = evolution[i].Lxy*((evolution[i].sigma_size)*(evolution[i].sigma_size));
            evolution[i].Lyy = evolution[i].Lyy*((evolution[i].sigma_size)*(evolution[i].sigma_size));
        }
    }

private:
    std::vector<TEvolution>*  evolution_;
};

/* ************************************************************************* */
/**
 * @brief This method computes the multiscale derivatives for the nonlinear scale space
 */
void KAZEFeatures::Compute_Multiscale_Derivatives(void)
{
    parallel_for_(Range(0, (int)evolution_.size()),
                  MultiscaleDerivativesKAZEInvoker(evolution_));
}
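
/* ************************************************************************* */
/**
 * @note The multiplications by sigma_size above implement the usual scale
 * normalization of derivatives, so that responses are comparable across levels:
 * \f[ L_x \leftarrow \sigma L_x, \quad L_{xx} \leftarrow \sigma^2 L_{xx}, \quad
 *     \det(\mathcal{H}) = \sigma^4 \left( L_{xx} L_{yy} - L_{xy}^2 \right). \f]
 * (The \f$\sigma^4\f$ factor simply follows from normalizing each second-order
 * derivative by \f$\sigma^2\f$ before the determinant is taken.)
 */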


/* ************************************************************************* */
class FindExtremumKAZEInvoker : public ParallelLoopBody
{
public:
    explicit FindExtremumKAZEInvoker(std::vector<TEvolution>& ev, std::vector<std::vector<KeyPoint> >& kpts_par,
                                     const KAZEOptions& options) : evolution_(&ev), kpts_par_(&kpts_par), options_(options)
    {
    }

    void operator()(const Range& range) const
    {
        std::vector<TEvolution>& evolution = *evolution_;
        std::vector<std::vector<KeyPoint> >& kpts_par = *kpts_par_;
        for (int i = range.start; i < range.end; i++)
        {
            float value = 0.0;
            bool is_extremum = false;

            for (int ix = 1; ix < options_.img_height - 1; ix++)
            {
                for (int jx = 1; jx < options_.img_width - 1; jx++)
                {
                    is_extremum = false;
                    value = *(evolution[i].Ldet.ptr<float>(ix)+jx);

                    // Filter the points with the detector threshold
                    if (value > options_.dthreshold)
                    {
                        if (value >= *(evolution[i].Ldet.ptr<float>(ix)+jx - 1))
                        {
                            // First check on the same scale
                            if (check_maximum_neighbourhood(evolution[i].Ldet, 1, value, ix, jx, 1))
                            {
                                // Now check on the lower scale
                                if (check_maximum_neighbourhood(evolution[i - 1].Ldet, 1, value, ix, jx, 0))
                                {
                                    // Now check on the upper scale
                                    if (check_maximum_neighbourhood(evolution[i + 1].Ldet, 1, value, ix, jx, 0))
                                        is_extremum = true;
                                }
                            }
                        }
                    }

                    // Add the point of interest!!
                    if (is_extremum)
                    {
                        KeyPoint point;
                        point.pt.x = (float)jx;
                        point.pt.y = (float)ix;
                        point.response = fabs(value);
                        point.size = evolution[i].esigma;
                        point.octave = (int)evolution[i].octave;
                        point.class_id = i;

                        // We use the angle field for the sublevel value
                        // Then, we will replace this angle field with the main orientation
                        point.angle = static_cast<float>(evolution[i].sublevel);
                        kpts_par[i - 1].push_back(point);
                    }
                }
            }
        }
    }

private:
    std::vector<TEvolution>*  evolution_;
    std::vector<std::vector<KeyPoint> >* kpts_par_;
    KAZEOptions options_;
};

/* ************************************************************************* */
/**
 * @brief This method performs the detection of keypoints by using the normalized
 * score of the Hessian determinant through the nonlinear scale space
 * @param kpts Vector of keypoints
 * @note We compute features for each of the nonlinear scale space levels in a different processing thread
 */
void KAZEFeatures::Determinant_Hessian(std::vector<KeyPoint>& kpts)
{
    int level = 0;
    float dist = 0.0, smax = 3.0;
    int npoints = 0, id_repeated = 0;
    int left_x = 0, right_x = 0, up_y = 0, down_y = 0;
    bool is_extremum = false, is_repeated = false, is_out = false;

    // Delete the memory of the vector of keypoints vectors
    // In case we use the same kaze object for multiple images
    for (size_t i = 0; i < kpts_par_.size(); i++) {
        vector<KeyPoint>().swap(kpts_par_[i]);
    }
    kpts_par_.clear();
    vector<KeyPoint> aux;

    // Allocate memory for the vector of vectors
    for (size_t i = 1; i < evolution_.size() - 1; i++) {
        kpts_par_.push_back(aux);
    }

    parallel_for_(Range(1, (int)evolution_.size()-1),
                  FindExtremumKAZEInvoker(evolution_, kpts_par_, options_));

    // Now fill the vector of keypoints!!!
    for (int i = 0; i < (int)kpts_par_.size(); i++)
    {
        for (int j = 0; j < (int)kpts_par_[i].size(); j++)
        {
            level = i + 1;
            is_extremum = true;
            is_repeated = false;
            is_out = false;

            // Check in case we have the same point as maxima in previous evolution levels
            for (int ik = 0; ik < (int)kpts.size(); ik++) {
                if (kpts[ik].class_id == level || kpts[ik].class_id == level + 1 || kpts[ik].class_id == level - 1) {
                    dist = pow(kpts_par_[i][j].pt.x - kpts[ik].pt.x, 2) + pow(kpts_par_[i][j].pt.y - kpts[ik].pt.y, 2);

                    if (dist < evolution_[level].sigma_size*evolution_[level].sigma_size) {
                        if (kpts_par_[i][j].response > kpts[ik].response) {
                            id_repeated = ik;
                            is_repeated = true;
                        }
                        else {
                            is_extremum = false;
                        }

                        break;
                    }
                }
            }
            if (is_extremum == true) {
                // Check that the point is under the image limits for the descriptor computation
                left_x = fRound(kpts_par_[i][j].pt.x - smax*kpts_par_[i][j].size);
                right_x = fRound(kpts_par_[i][j].pt.x + smax*kpts_par_[i][j].size);
                up_y = fRound(kpts_par_[i][j].pt.y - smax*kpts_par_[i][j].size);
                down_y = fRound(kpts_par_[i][j].pt.y + smax*kpts_par_[i][j].size);

                if (left_x < 0 || right_x >= evolution_[level].Ldet.cols ||
                    up_y < 0 || down_y >= evolution_[level].Ldet.rows) {
                    is_out = true;
                }

                if (is_out == false) {
                    if (is_repeated == false) {
                        kpts.push_back(kpts_par_[i][j]);
                        npoints++;
                    }
                    else {
                        kpts[id_repeated] = kpts_par_[i][j];
                    }
                }
            }
        }
    }
}

/* ************************************************************************* */
/**
 * @brief This method performs subpixel refinement of the detected keypoints
 * @param kpts Vector of detected keypoints
 */
void KAZEFeatures::Do_Subpixel_Refinement(std::vector<KeyPoint> &kpts) {

    int step = 1;
    int x = 0, y = 0;
    float Dx = 0.0, Dy = 0.0, Ds = 0.0, dsc = 0.0;
    float Dxx = 0.0, Dyy = 0.0, Dss = 0.0, Dxy = 0.0, Dxs = 0.0, Dys = 0.0;
    Mat A = Mat::zeros(3, 3, CV_32F);
    Mat b = Mat::zeros(3, 1, CV_32F);
    Mat dst = Mat::zeros(3, 1, CV_32F);

    vector<KeyPoint> kpts_(kpts);

    for (size_t i = 0; i < kpts_.size(); i++) {

        x = static_cast<int>(kpts_[i].pt.x);
        y = static_cast<int>(kpts_[i].pt.y);

        // Compute the gradient
        Dx = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step)
            - *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step));
        Dy = (1.0f / (2.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x)
            - *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x));
        Ds = 0.5f*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x)
            - *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x));

        // Compute the Hessian
        Dxx = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x + step)
            + *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x - step)
            - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)));

        Dyy = (1.0f / (step*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x)
            + *(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x)
            - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x)));

        Dss = *(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x)
            + *(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x)
            - 2.0f*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y)+x));

        Dxy = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x + step)
            + (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x - step)))
            - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y - step) + x + step)
            + (*(evolution_[kpts_[i].class_id].Ldet.ptr<float>(y + step) + x - step)));

        Dxs = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x + step)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x - step)))
            - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y)+x - step)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y)+x + step)));

        Dys = (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y + step) + x)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y - step) + x)))
            - (1.0f / (4.0f*step))*(*(evolution_[kpts_[i].class_id + 1].Ldet.ptr<float>(y - step) + x)
            + (*(evolution_[kpts_[i].class_id - 1].Ldet.ptr<float>(y + step) + x)));

        // Solve the linear system
        *(A.ptr<float>(0)) = Dxx;
        *(A.ptr<float>(1) + 1) = Dyy;
        *(A.ptr<float>(2) + 2) = Dss;

        *(A.ptr<float>(0) + 1) = *(A.ptr<float>(1)) = Dxy;
        *(A.ptr<float>(0) + 2) = *(A.ptr<float>(2)) = Dxs;
        *(A.ptr<float>(1) + 2) = *(A.ptr<float>(2) + 1) = Dys;

        *(b.ptr<float>(0)) = -Dx;
        *(b.ptr<float>(1)) = -Dy;
        *(b.ptr<float>(2)) = -Ds;

        solve(A, b, dst, DECOMP_LU);

        if (fabs(*(dst.ptr<float>(0))) <= 1.0f && fabs(*(dst.ptr<float>(1))) <= 1.0f && fabs(*(dst.ptr<float>(2))) <= 1.0f) {
            kpts_[i].pt.x += *(dst.ptr<float>(0));
            kpts_[i].pt.y += *(dst.ptr<float>(1));
            dsc = kpts_[i].octave + (kpts_[i].angle + *(dst.ptr<float>(2))) / ((float)(options_.nsublevels));

            // In OpenCV the size of a keypoint is the diameter!!
            kpts_[i].size = 2.0f*options_.soffset*pow((float)2.0f, dsc);
            kpts_[i].angle = 0.0;
        }
        // Set the points to be deleted after the for loop
        else {
            kpts_[i].response = -1;
        }
    }

    // Clear the vector of keypoints
    kpts.clear();

    for (size_t i = 0; i < kpts_.size(); i++) {
        if (kpts_[i].response != -1) {
            kpts.push_back(kpts_[i]);
        }
    }
}
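
/* ************************************************************************* */
/**
 * @note The refinement above fits a quadratic to the determinant response around the
 * detected maximum and solves for the offset \f$\hat{\mathbf{x}} = (\delta x, \delta y, \delta s)\f$:
 * \f[
 *   \begin{pmatrix} D_{xx} & D_{xy} & D_{xs} \\
 *                   D_{xy} & D_{yy} & D_{ys} \\
 *                   D_{xs} & D_{ys} & D_{ss} \end{pmatrix}
 *   \hat{\mathbf{x}} = - \begin{pmatrix} D_x \\ D_y \\ D_s \end{pmatrix},
 * \f]
 * which is exactly the 3x3 system assembled in A and b and solved with DECOMP_LU.
 * Offsets larger than one step in any dimension mark the keypoint for removal.
 */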

/* ************************************************************************* */
class KAZE_Descriptor_Invoker : public ParallelLoopBody
{
public:
    KAZE_Descriptor_Invoker(std::vector<KeyPoint> &kpts, Mat &desc, std::vector<TEvolution>& evolution, const KAZEOptions& options)
        : kpts_(&kpts)
        , desc_(&desc)
        , evolution_(&evolution)
        , options_(options)
    {
    }

    virtual ~KAZE_Descriptor_Invoker()
    {
    }

    void operator() (const Range& range) const
    {
        std::vector<KeyPoint>&   kpts      = *kpts_;
        Mat&                     desc      = *desc_;
        std::vector<TEvolution>& evolution = *evolution_;

        for (int i = range.start; i < range.end; i++)
        {
            kpts[i].angle = 0.0;
            if (options_.upright)
            {
                kpts[i].angle = 0.0;
                if (options_.extended)
                    Get_KAZE_Upright_Descriptor_128(kpts[i], desc.ptr<float>((int)i));
                else
                    Get_KAZE_Upright_Descriptor_64(kpts[i], desc.ptr<float>((int)i));
            }
            else
            {
                KAZEFeatures::Compute_Main_Orientation(kpts[i], evolution, options_);

                if (options_.extended)
                    Get_KAZE_Descriptor_128(kpts[i], desc.ptr<float>((int)i));
                else
                    Get_KAZE_Descriptor_64(kpts[i], desc.ptr<float>((int)i));
            }
        }
    }
private:
    void Get_KAZE_Upright_Descriptor_64(const KeyPoint& kpt, float* desc) const;
    void Get_KAZE_Descriptor_64(const KeyPoint& kpt, float* desc) const;
    void Get_KAZE_Upright_Descriptor_128(const KeyPoint& kpt, float* desc) const;
    void Get_KAZE_Descriptor_128(const KeyPoint& kpt, float *desc) const;

    std::vector<KeyPoint>*    kpts_;
    Mat*                      desc_;
    std::vector<TEvolution>*  evolution_;
    KAZEOptions               options_;
};

/* ************************************************************************* */
/**
 * @brief This method computes the set of descriptors through the nonlinear scale space
 * @param kpts Vector of keypoints
 * @param desc Matrix with the feature descriptors
 */
void KAZEFeatures::Feature_Description(std::vector<KeyPoint> &kpts, Mat &desc)
{
    for (size_t i = 0; i < kpts.size(); i++)
    {
        CV_Assert(0 <= kpts[i].class_id && kpts[i].class_id < static_cast<int>(evolution_.size()));
    }

    // Allocate memory for the matrix of descriptors
    if (options_.extended == true) {
        desc = Mat::zeros((int)kpts.size(), 128, CV_32FC1);
    }
    else {
        desc = Mat::zeros((int)kpts.size(), 64, CV_32FC1);
    }

    parallel_for_(Range(0, (int)kpts.size()), KAZE_Descriptor_Invoker(kpts, desc, evolution_, options_));
}
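
/* ************************************************************************* */
/**
 * @note This class is driven through the public cv::KAZE wrapper rather than used
 * directly. A minimal usage sketch (file name and parameter choices are illustrative):
 * @code
 * #include <opencv2/features2d.hpp>
 * #include <opencv2/imgcodecs.hpp>
 *
 * int main() {
 *     cv::Mat img = cv::imread("image.png", cv::IMREAD_GRAYSCALE);
 *     if (img.empty()) return 1;
 *
 *     // extended=false -> 64-float descriptors, upright=false -> rotation invariant
 *     cv::Ptr<cv::KAZE> kaze = cv::KAZE::create(false, false);
 *     std::vector<cv::KeyPoint> keypoints;
 *     cv::Mat descriptors;
 *     kaze->detectAndCompute(img, cv::noArray(), keypoints, descriptors);
 *     return 0;
 * }
 * @endcode
 */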

/* ************************************************************************* */
/**
 * @brief This method computes the main orientation for a given keypoint
 * @param kpt Input keypoint
 * @note The orientation is computed using a similar approach as described in the
 * original SURF method. See Bay et al., Speeded Up Robust Features, ECCV 2006
 */
void KAZEFeatures::Compute_Main_Orientation(KeyPoint &kpt, const std::vector<TEvolution>& evolution_, const KAZEOptions& options)
{
    int ix = 0, iy = 0, idx = 0, s = 0, level = 0;
    float xf = 0.0, yf = 0.0, gweight = 0.0;
    vector<float> resX(109), resY(109), Ang(109);

    // Variables for computing the dominant direction
    float sumX = 0.0, sumY = 0.0, max = 0.0, ang1 = 0.0, ang2 = 0.0;

    // Get the information from the keypoint
    xf = kpt.pt.x;
    yf = kpt.pt.y;
    level = kpt.class_id;
    s = fRound(kpt.size / 2.0f);

    // Calculate derivatives responses for points within radius of 6*scale
    for (int i = -6; i <= 6; ++i) {
        for (int j = -6; j <= 6; ++j) {
            if (i*i + j*j < 36) {
                iy = fRound(yf + j*s);
                ix = fRound(xf + i*s);

                if (iy >= 0 && iy < options.img_height && ix >= 0 && ix < options.img_width) {
                    gweight = gaussian(iy - yf, ix - xf, 2.5f*s);
                    resX[idx] = gweight*(*(evolution_[level].Lx.ptr<float>(iy)+ix));
                    resY[idx] = gweight*(*(evolution_[level].Ly.ptr<float>(iy)+ix));
                }
                else {
                    resX[idx] = 0.0;
                    resY[idx] = 0.0;
                }

                Ang[idx] = getAngle(resX[idx], resY[idx]);
                ++idx;
            }
        }
    }

    // Loop slides pi/3 window around feature point
    for (ang1 = 0; ang1 < 2.0f*CV_PI; ang1 += 0.15f) {
        ang2 = (ang1 + (float)(CV_PI / 3.0) > (float)(2.0*CV_PI) ? ang1 - (float)(5.0*CV_PI / 3.0) : ang1 + (float)(CV_PI / 3.0));
        sumX = sumY = 0.f;

        for (size_t k = 0; k < Ang.size(); ++k) {
            // Get angle from the x-axis of the sample point
            const float & ang = Ang[k];

            // Determine whether the point is within the window
            if (ang1 < ang2 && ang1 < ang && ang < ang2) {
                sumX += resX[k];
                sumY += resY[k];
            }
            else if (ang2 < ang1 &&
                ((ang > 0 && ang < ang2) || (ang > ang1 && ang < (float)(2.0*CV_PI)))) {
                sumX += resX[k];
                sumY += resY[k];
            }
        }

        // if the vector produced from this window is longer than all
        // previous vectors then this forms the new dominant direction
        if (sumX*sumX + sumY*sumY > max) {
            // store largest orientation
            max = sumX*sumX + sumY*sumY;
            kpt.angle = getAngle(sumX, sumY);
        }
    }
}
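
/* ************************************************************************* */
/**
 * @note In other words, the dominant orientation is the angle of the largest summed
 * response vector over all sliding windows of width \f$\pi/3\f$:
 * \f[ \theta = \arg\!\left( \sum_{k \in W^*} (r_{x,k},\, r_{y,k}) \right), \qquad
 *     W^* = \arg\max_{W} \left\| \sum_{k \in W} (r_{x,k},\, r_{y,k}) \right\|^2, \f]
 * where \f$(r_{x,k}, r_{y,k})\f$ are the Gaussian-weighted first derivatives sampled
 * in the circular neighbourhood above.
 */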

/* ************************************************************************* */
/**
 * @brief This method computes the upright descriptor (not rotation invariant) of
 * the provided keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_64(const KeyPoint &kpt, float *desc) const
{
    float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0;
    int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
    int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int dsize = 0, scale = 0, level = 0;

    std::vector<TEvolution>& evolution = *evolution_;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    // Set the descriptor size and the sample and pattern sizes
    dsize = 64;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.pt.y;
    xf = kpt.pt.x;
    scale = fRound(kpt.size / 2.0f);
    level = kpt.class_id;

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    while (i < pattern_size) {
        j = -8;
        i = i - 4;

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {

            dx = dy = mdx = mdy = 0.0;
            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            ys = yf + (ky*scale);
            xs = xf + (kx*scale);

            for (int k = i; k < i + 9; k++) {
                for (int l = j; l < j + 9; l++) {

                    sample_y = k*scale + yf;
                    sample_x = l*scale + xf;

                    //Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);

                    y1 = (int)(sample_y - 0.5f);
                    x1 = (int)(sample_x - 0.5f);

                    checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);

                    y2 = (int)(sample_y + 0.5f);
                    x2 = (int)(sample_x + 0.5f);

                    checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    rx = gauss_s1*rx;
                    ry = gauss_s1*ry;

                    // Sum the derivatives to the cumulative descriptor
                    dx += rx;
                    dy += ry;
                    mdx += fabs(rx);
                    mdy += fabs(ry);
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);

            desc[dcount++] = dx*gauss_s2;
            desc[dcount++] = dy*gauss_s2;
            desc[dcount++] = mdx*gauss_s2;
            desc[dcount++] = mdy*gauss_s2;

            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    // convert to unit vector
    len = sqrt(len);

    for (i = 0; i < dsize; i++) {
        desc[i] /= len;
    }
}
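
/* ************************************************************************* */
/**
 * @note The rx/ry values above are just bilinear interpolations of the first
 * derivatives at the (non-integer) sample location. With \f$f_x, f_y \in [0,1)\f$ the
 * fractional offsets from the top-left neighbour, the interpolated response is
 * \f[ L_x(x,y) \approx (1-f_x)(1-f_y)\,L_x(x_1,y_1) + f_x(1-f_y)\,L_x(x_2,y_1)
 *                    + (1-f_x)f_y\,L_x(x_1,y_2) + f_x f_y\,L_x(x_2,y_2), \f]
 * and likewise for \f$L_y\f$, before the Gaussian weight gauss_s1 is applied.
 */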

/* ************************************************************************* */
/**
 * @brief This method computes the descriptor of the provided keypoint given the
 * main orientation of the keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 64. The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_64(const KeyPoint &kpt, float *desc) const
{
    float dx = 0.0, dy = 0.0, mdx = 0.0, mdy = 0.0, gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
    int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    int dsize = 0, scale = 0, level = 0;

    std::vector<TEvolution>& evolution = *evolution_;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    // Set the descriptor size and the sample and pattern sizes
    dsize = 64;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.pt.y;
    xf = kpt.pt.x;
    scale = fRound(kpt.size / 2.0f);
    angle = kpt.angle;
    level = kpt.class_id;
    co = cos(angle);
    si = sin(angle);

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    while (i < pattern_size) {

        j = -8;
        i = i - 4;

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {

            dx = dy = mdx = mdy = 0.0;
            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            xs = xf + (-kx*scale*si + ky*scale*co);
            ys = yf + (kx*scale*co + ky*scale*si);

            for (int k = i; k < i + 9; ++k) {
                for (int l = j; l < j + 9; ++l) {

                    // Get coords of sample point on the rotated axis
                    sample_y = yf + (l*scale*co + k*scale*si);
                    sample_x = xf + (-l*scale*si + k*scale*co);

                    // Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);
                    y1 = fRound(sample_y - 0.5f);
                    x1 = fRound(sample_x - 0.5f);

                    checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);

                    y2 = (int)(sample_y + 0.5f);
                    x2 = (int)(sample_x + 0.5f);

                    checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    // Get the x and y derivatives on the rotated axis
                    rry = gauss_s1*(rx*co + ry*si);
                    rrx = gauss_s1*(-rx*si + ry*co);

                    // Sum the derivatives to the cumulative descriptor
                    dx += rrx;
                    dy += rry;
                    mdx += fabs(rrx);
                    mdy += fabs(rry);
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);
            desc[dcount++] = dx*gauss_s2;
            desc[dcount++] = dy*gauss_s2;
            desc[dcount++] = mdx*gauss_s2;
            desc[dcount++] = mdy*gauss_s2;
            len += (dx*dx + dy*dy + mdx*mdx + mdy*mdy)*gauss_s2*gauss_s2;
            j += 9;
        }
        i += 9;
    }

    // convert to unit vector
    len = sqrt(len);

    for (i = 0; i < dsize; i++) {
        desc[i] /= len;
    }
}

/* ************************************************************************* */
/**
 * @brief This method computes the extended upright descriptor (not rotation invariant) of
 * the provided keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void KAZE_Descriptor_Invoker::Get_KAZE_Upright_Descriptor_128(const KeyPoint &kpt, float *desc) const
{
    float gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0;
    int x1 = 0, y1 = 0, sample_step = 0, pattern_size = 0;
    int x2 = 0, y2 = 0, kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
    float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
    int dsize = 0, scale = 0, level = 0;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    std::vector<TEvolution>& evolution = *evolution_;

    // Set the descriptor size and the sample and pattern sizes
    dsize = 128;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.pt.y;
    xf = kpt.pt.x;
    scale = fRound(kpt.size / 2.0f);
    level = kpt.class_id;

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    while (i < pattern_size) {

        j = -8;
        i = i - 4;

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {

            dxp = dxn = mdxp = mdxn = 0.0;
            dyp = dyn = mdyp = mdyn = 0.0;

            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            ys = yf + (ky*scale);
            xs = xf + (kx*scale);

            for (int k = i; k < i + 9; k++) {
                for (int l = j; l < j + 9; l++) {

                    sample_y = k*scale + yf;
                    sample_x = l*scale + xf;

                    //Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);

                    y1 = (int)(sample_y - 0.5f);
                    x1 = (int)(sample_x - 0.5f);

                    checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);

                    y2 = (int)(sample_y + 0.5f);
                    x2 = (int)(sample_x + 0.5f);

                    checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    rx = gauss_s1*rx;
                    ry = gauss_s1*ry;

                    // Sum the derivatives to the cumulative descriptor
                    if (ry >= 0.0) {
                        dxp += rx;
                        mdxp += fabs(rx);
                    }
                    else {
                        dxn += rx;
                        mdxn += fabs(rx);
                    }

                    if (rx >= 0.0) {
                        dyp += ry;
                        mdyp += fabs(ry);
                    }
                    else {
                        dyn += ry;
                        mdyn += fabs(ry);
                    }
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);

            desc[dcount++] = dxp*gauss_s2;
            desc[dcount++] = dxn*gauss_s2;
            desc[dcount++] = mdxp*gauss_s2;
            desc[dcount++] = mdxn*gauss_s2;
            desc[dcount++] = dyp*gauss_s2;
            desc[dcount++] = dyn*gauss_s2;
            desc[dcount++] = mdyp*gauss_s2;
            desc[dcount++] = mdyn*gauss_s2;

            // Store the current length^2 of the vector
            len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
                dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    // convert to unit vector
    len = sqrt(len);

    for (i = 0; i < dsize; i++) {
        desc[i] /= len;
    }
}

/* ************************************************************************* */
/**
 * @brief This method computes the extended G-SURF descriptor of the provided keypoint
 * given the main orientation of the keypoint
 * @param kpt Input keypoint
 * @param desc Descriptor vector
 * @note Rectangular grid of 24 s x 24 s. Descriptor Length 128. The descriptor is inspired
 * from Agrawal et al., CenSurE: Center Surround Extremas for Realtime Feature Detection and Matching,
 * ECCV 2008
 */
void KAZE_Descriptor_Invoker::Get_KAZE_Descriptor_128(const KeyPoint &kpt, float *desc) const
{
    float gauss_s1 = 0.0, gauss_s2 = 0.0;
    float rx = 0.0, ry = 0.0, rrx = 0.0, rry = 0.0, len = 0.0, xf = 0.0, yf = 0.0, ys = 0.0, xs = 0.0;
    float sample_x = 0.0, sample_y = 0.0, co = 0.0, si = 0.0, angle = 0.0;
    float fx = 0.0, fy = 0.0, res1 = 0.0, res2 = 0.0, res3 = 0.0, res4 = 0.0;
    float dxp = 0.0, dyp = 0.0, mdxp = 0.0, mdyp = 0.0;
    float dxn = 0.0, dyn = 0.0, mdxn = 0.0, mdyn = 0.0;
    int x1 = 0, y1 = 0, x2 = 0, y2 = 0, sample_step = 0, pattern_size = 0;
    int kx = 0, ky = 0, i = 0, j = 0, dcount = 0;
    int dsize = 0, scale = 0, level = 0;

    std::vector<TEvolution>& evolution = *evolution_;

    // Subregion centers for the 4x4 gaussian weighting
    float cx = -0.5f, cy = 0.5f;

    // Set the descriptor size and the sample and pattern sizes
    dsize = 128;
    sample_step = 5;
    pattern_size = 12;

    // Get the information from the keypoint
    yf = kpt.pt.y;
    xf = kpt.pt.x;
    scale = fRound(kpt.size / 2.0f);
    angle = kpt.angle;
    level = kpt.class_id;
    co = cos(angle);
    si = sin(angle);

    i = -8;

    // Calculate descriptor for this interest point
    // Area of size 24 s x 24 s
    while (i < pattern_size) {

        j = -8;
        i = i - 4;

        cx += 1.0f;
        cy = -0.5f;

        while (j < pattern_size) {

            dxp = dxn = mdxp = mdxn = 0.0;
            dyp = dyn = mdyp = mdyn = 0.0;

            cy += 1.0f;
            j = j - 4;

            ky = i + sample_step;
            kx = j + sample_step;

            xs = xf + (-kx*scale*si + ky*scale*co);
            ys = yf + (kx*scale*co + ky*scale*si);

            for (int k = i; k < i + 9; ++k) {
                for (int l = j; l < j + 9; ++l) {

                    // Get coords of sample point on the rotated axis
                    sample_y = yf + (l*scale*co + k*scale*si);
                    sample_x = xf + (-l*scale*si + k*scale*co);

                    // Get the gaussian weighted x and y responses
                    gauss_s1 = gaussian(xs - sample_x, ys - sample_y, 2.5f*scale);

                    y1 = fRound(sample_y - 0.5f);
                    x1 = fRound(sample_x - 0.5f);

                    checkDescriptorLimits(x1, y1, options_.img_width, options_.img_height);

                    y2 = (int)(sample_y + 0.5f);
                    x2 = (int)(sample_x + 0.5f);

                    checkDescriptorLimits(x2, y2, options_.img_width, options_.img_height);

                    fx = sample_x - x1;
                    fy = sample_y - y1;

                    res1 = *(evolution[level].Lx.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Lx.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Lx.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Lx.ptr<float>(y2)+x2);
                    rx = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    res1 = *(evolution[level].Ly.ptr<float>(y1)+x1);
                    res2 = *(evolution[level].Ly.ptr<float>(y1)+x2);
                    res3 = *(evolution[level].Ly.ptr<float>(y2)+x1);
                    res4 = *(evolution[level].Ly.ptr<float>(y2)+x2);
                    ry = (1.0f - fx)*(1.0f - fy)*res1 + fx*(1.0f - fy)*res2 + (1.0f - fx)*fy*res3 + fx*fy*res4;

                    // Get the x and y derivatives on the rotated axis
                    rry = gauss_s1*(rx*co + ry*si);
                    rrx = gauss_s1*(-rx*si + ry*co);

                    // Sum the derivatives to the cumulative descriptor
                    if (rry >= 0.0) {
                        dxp += rrx;
                        mdxp += fabs(rrx);
                    }
                    else {
                        dxn += rrx;
                        mdxn += fabs(rrx);
                    }

                    if (rrx >= 0.0) {
                        dyp += rry;
                        mdyp += fabs(rry);
                    }
                    else {
                        dyn += rry;
                        mdyn += fabs(rry);
                    }
                }
            }

            // Add the values to the descriptor vector
            gauss_s2 = gaussian(cx - 2.0f, cy - 2.0f, 1.5f);

            desc[dcount++] = dxp*gauss_s2;
            desc[dcount++] = dxn*gauss_s2;
            desc[dcount++] = mdxp*gauss_s2;
            desc[dcount++] = mdxn*gauss_s2;
            desc[dcount++] = dyp*gauss_s2;
            desc[dcount++] = dyn*gauss_s2;
            desc[dcount++] = mdyp*gauss_s2;
            desc[dcount++] = mdyn*gauss_s2;

            // Store the current length^2 of the vector
            len += (dxp*dxp + dxn*dxn + mdxp*mdxp + mdxn*mdxn +
                dyp*dyp + dyn*dyn + mdyp*mdyp + mdyn*mdyn)*gauss_s2*gauss_s2;

            j += 9;
        }

        i += 9;
    }

    // convert to unit vector
    len = sqrt(len);

    for (i = 0; i < dsize; i++) {
        desc[i] /= len;
    }
}

}