ConnectedComponents.cpp
/*
 * This file is part of ALVAR, A Library for Virtual and Augmented Reality.
 *
 * Copyright 2007-2012 VTT Technical Research Centre of Finland
 *
 * Contact: VTT Augmented Reality Team <alvar.info@vtt.fi>
 * <http://www.vtt.fi/multimedia/alvar.html>
 *
 * ALVAR is free software; you can redistribute it and/or modify it under the
 * terms of the GNU Lesser General Public License as published by the Free
 * Software Foundation; either version 2.1 of the License, or (at your option)
 * any later version.
 *
 * This library is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
 * for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with ALVAR; if not, see
 * <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html>.
 */

#include "ar_track_alvar/ConnectedComponents.h"
#include "ar_track_alvar/Draw.h"
#include <cassert>

using namespace std;

namespace alvar {

Labeling::Labeling()
{
    gray = 0;
    bw = 0;
    cam = 0;
    thresh_param1 = 31;
    thresh_param2 = 5;
}
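
// Defaults above: thresh_param1 is the neighbourhood (block) size and
// thresh_param2 the constant subtracted from the local mean in the
// cvAdaptiveThreshold calls further below.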

Labeling::~Labeling()
{
    if (gray)
        cvReleaseImage(&gray);
    if (bw)
        cvReleaseImage(&bw);
}
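
// Returns false if any point of the contour lies within two pixels of the
// image border (i.e. the blob touches the edge of the view), true otherwise.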
bool Labeling::CheckBorder(CvSeq* contour, int width, int height)
{
    bool ret = true;
    for (int i = 0; i < contour->total; ++i)
    {
        CvPoint* pt = (CvPoint*)cvGetSeqElem(contour, i);
        if ((pt->x <= 1) || (pt->x >= width-2) || (pt->y <= 1) || (pt->y >= height-2))
            ret = false;
    }
    return ret;
}

LabelingCvSeq::LabelingCvSeq() : _n_blobs(0), _min_edge(20), _min_area(25)
{
    SetOptions();
    storage = cvCreateMemStorage(0);
}

LabelingCvSeq::~LabelingCvSeq()
{
    if (storage)
        cvReleaseMemStorage(&storage);
}

void LabelingCvSeq::SetOptions(bool _detect_pose_grayscale) {
    detect_pose_grayscale = _detect_pose_grayscale;
}

void LabelingCvSeq::LabelSquares(IplImage* image, bool visualize)
{
    if (gray && ((gray->width != image->width) || (gray->height != image->height))) {
        cvReleaseImage(&gray); gray = NULL;
        if (bw) cvReleaseImage(&bw);
        bw = NULL;
    }
    if (gray == NULL) {
        gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
        gray->origin = image->origin;
        bw = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
        bw->origin = image->origin;
    }

    // Convert to grayscale and threshold
    if (image->nChannels == 4)
        cvCvtColor(image, gray, CV_RGBA2GRAY);
    else if (image->nChannels == 3)
        cvCvtColor(image, gray, CV_RGB2GRAY);
    else if (image->nChannels == 1)
        cvCopy(image, gray);
    else {
        cerr << "Unsupported image format" << endl;
    }

    cvAdaptiveThreshold(gray, bw, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, thresh_param1, thresh_param2);
    //cvThreshold(gray, bw, 127, 255, CV_THRESH_BINARY_INV);

    CvSeq* contours;
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq), storage);
    CvSeq* square_contours = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq), storage);

    cvFindContours(bw, storage, &contours, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));

    while (contours)
    {
        if (contours->total < _min_edge)
        {
            contours = contours->h_next;
            continue;
        }

        CvSeq* result = cvApproxPoly(contours, sizeof(CvContour), storage,
                                     CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.035, 0); // TODO: Parameters?

        if (result->total == 4 && CheckBorder(result, image->width, image->height) &&
            fabs(cvContourArea(result, CV_WHOLE_SEQ)) > _min_area && // TODO: check limits
            cvCheckContourConvexity(result)) // ttehop: Changed to 'contours' instead of 'result'
        {
            cvSeqPush(squares, result);
            cvSeqPush(square_contours, contours);
        }
        contours = contours->h_next;
    }

    _n_blobs = squares->total;
    blob_corners.resize(_n_blobs);

    // For every detected 4-corner blob
    for (int i = 0; i < _n_blobs; ++i)
    {
        vector<Line> fitted_lines(4);
        blob_corners[i].resize(4);
        CvSeq* sq = (CvSeq*)cvGetSeqElem(squares, i);
        CvSeq* square_contour = (CvSeq*)cvGetSeqElem(square_contours, i);

        for (int j = 0; j < 4; ++j)
        {
            CvPoint* pt0 = (CvPoint*)cvGetSeqElem(sq, j);
            CvPoint* pt1 = (CvPoint*)cvGetSeqElem(sq, (j+1)%4);
            int k0 = -1, k1 = -1;
            for (int k = 0; k < square_contour->total; k++) {
                CvPoint* pt2 = (CvPoint*)cvGetSeqElem(square_contour, k);
                if ((pt0->x == pt2->x) && (pt0->y == pt2->y)) k0 = k;
                if ((pt1->x == pt2->x) && (pt1->y == pt2->y)) k1 = k;
            }
            int len;
            if (k1 >= k0) len = k1-k0-1; // neither k0 nor k1 is included
            else len = square_contour->total-k0+k1-1;
            if (len == 0) len = 1;

            CvMat* line_data = cvCreateMat(1, len, CV_32FC2);
            for (int l = 0; l < len; l++) {
                int ll = (k0+l+1) % square_contour->total;
                CvPoint* p = (CvPoint*)cvGetSeqElem(square_contour, ll);
                CvPoint2D32f pp;
                pp.x = float(p->x);
                pp.y = float(p->y);

                // Undistort
                if (cam)
                    cam->Undistort(pp);

                CV_MAT_ELEM(*line_data, CvPoint2D32f, 0, l) = pp;
            }

            // Fit an edge line and store it in the vector of edges
            float params[4] = {0};

            // TODO: detect_pose_grayscale is still a work in progress...
            /*
            if (detect_pose_grayscale &&
                (pt0->x > 3) && (pt0->y > 3) &&
                (pt0->x < (gray->width-4)) &&
                (pt0->y < (gray->height-4)))
            {
                // ttehop: Grayscale experiment
                FitLineGray(line_data, params, gray);
            }
            */
            cvFitLine(line_data, CV_DIST_L2, 0, 0.01, 0.01, params);

            Line line = Line(params);
            if (visualize) DrawLine(image, line);
            fitted_lines[j] = line;

            cvReleaseMat(&line_data);
        }

        // Calculate the four intersection points
        for (size_t j = 0; j < 4; ++j)
        {
            PointDouble intc = Intersection(fitted_lines[j], fitted_lines[(j+1)%4]);

            // TODO: Instead, test OpenCV's sub-pixel corner finder...
            //CvPoint2D32f pt = cvPoint2D32f(intc.x, intc.y);
            //cvFindCornerSubPix(gray, &pt,
            //                   1, cvSize(3,3), cvSize(-1,-1),
            //                   cvTermCriteria(
            //                   CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,10,1e-4));

            // TODO: There is a weird systematic 0.5 pixel error that was fixed here...
            //intc.x += 0.5;
            //intc.y += 0.5;

            if (cam) cam->Distort(intc);

            // TODO: Should this always be counter-clockwise or clockwise?
            /*
            if (image->origin && j == 1) blob_corners[i][3] = intc;
            else if (image->origin && j == 3) blob_corners[i][1] = intc;
            else blob_corners[i][j] = intc;
            */
            blob_corners[i][j] = intc;
        }
        if (visualize) {
            for (size_t j = 0; j < 4; ++j) {
                PointDouble &intc = blob_corners[i][j];
                if (j == 0) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(255, 255, 255));
                if (j == 1) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(255, 0, 0));
                if (j == 2) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(0, 255, 0));
                if (j == 3) cvCircle(image, cvPoint(int(intc.x), int(intc.y)), 5, CV_RGB(0, 0, 255));
            }
        }
    }

    cvClearMemStorage(storage);
}
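
// Usage sketch (illustrative, not part of the original ALVAR sources):
// detecting square blobs in a captured frame. Assumes a valid IplImage*
// 'frame' and, optionally, a calibrated alvar::Camera 'camera'; SetCamera()
// is assumed to be the camera setter declared in ConnectedComponents.h.
/*
    LabelingCvSeq labeling;
    labeling.SetCamera(&camera);   // optional: corners are undistorted/redistorted
    labeling.LabelSquares(frame);  // pass visualize=true to draw debug overlays
    for (size_t i = 0; i < labeling.blob_corners.size(); ++i) {
        for (size_t j = 0; j < 4; ++j) {
            const PointDouble& c = labeling.blob_corners[i][j];
            printf("blob %d corner %d: (%.1f, %.1f)\n", (int)i, (int)j, c.x, c.y);
        }
    }
*/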

CvSeq* LabelingCvSeq::LabelImage(IplImage* image, int min_size, bool approx)
{
    assert(image->origin == 0); // Currently only a top-left origin is supported
    if (gray && ((gray->width != image->width) || (gray->height != image->height))) {
        cvReleaseImage(&gray); gray = NULL;
        if (bw) cvReleaseImage(&bw);
        bw = NULL;
    }
    if (gray == NULL) {
        gray = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
        gray->origin = image->origin;
        bw = cvCreateImage(cvSize(image->width, image->height), IPL_DEPTH_8U, 1);
        bw->origin = image->origin;
    }

    // Convert to grayscale and threshold
    if (image->nChannels == 4)
        cvCvtColor(image, gray, CV_RGBA2GRAY);
    else if (image->nChannels == 3)
        cvCvtColor(image, gray, CV_RGB2GRAY);
    else if (image->nChannels == 1)
        cvCopy(image, gray);
    else {
        cerr << "Unsupported image format" << endl;
    }

    cvAdaptiveThreshold(gray, bw, 255, CV_ADAPTIVE_THRESH_MEAN_C, CV_THRESH_BINARY_INV, thresh_param1, thresh_param2);

    CvSeq* contours;
    CvSeq* edges = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq), storage);
    CvSeq* squares = cvCreateSeq(0, sizeof(CvSeq), sizeof(CvSeq), storage);

    cvFindContours(bw, storage, &contours, sizeof(CvContour),
                   CV_RETR_LIST, CV_CHAIN_APPROX_NONE, cvPoint(0,0));
    //cvFindContours(bw, storage, &contours, sizeof(CvContour),
    //               CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0));

    while (contours)
    {
        if (contours->total < min_size)
        {
            contours = contours->h_next;
            continue;
        }

        if (approx)
        {
            CvSeq* result = cvApproxPoly(contours, sizeof(CvContour), storage,
                                         CV_POLY_APPROX_DP, cvContourPerimeter(contours)*0.02, 0); // TODO: Parameters?

            if (cvCheckContourConvexity(result))
            {
                cvSeqPush(squares, result);
            }
        }
        else
            cvSeqPush(squares, contours);

        contours = contours->h_next;
    }

    cvClearMemStorage(storage);

    return squares;
}
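
// Usage sketch (illustrative, not part of the original ALVAR sources):
// consuming the sequence returned by LabelImage. Each element is a CvSeq
// header pushed with cvSeqPush above, mirroring how LabelSquares reads its
// own sequences. Note that LabelImage clears its internal storage before
// returning, so the result should be read before the labeler is used again.
/*
    CvSeq* polys = labeling.LabelImage(frame, 20, true);
    for (int i = 0; i < polys->total; ++i) {
        CvSeq* poly = (CvSeq*)cvGetSeqElem(polys, i);
        for (int j = 0; j < poly->total; ++j) {
            CvPoint* pt = (CvPoint*)cvGetSeqElem(poly, j);
            // pt->x, pt->y are pixel coordinates of the polygon/contour
        }
    }
*/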

inline int round(double x) {
    return (x) >= 0 ? (int)((x)+0.5) : (int)((x)-0.5);
}

template<class T>
inline T absdiff(T c1, T c2) {
    return (c2 > c1 ? c2-c1 : c1-c2);
}
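
// Helpers for FitLineGray: round() rounds half away from zero, and absdiff()
// computes an absolute difference without underflowing for unsigned types.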

//#define SHOW_DEBUG
#ifdef SHOW_DEBUG
#include "highgui.h"
#endif
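
// FitLineGray refines the contour points before line fitting: for each point
// it samples a short window along the edge normal, weights the half-step
// offsets between neighbouring samples by their absolute gray-level
// difference, and moves the point by the weighted mean offset, i.e. towards
// the strongest local intensity gradient.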

// TODO: Should this be in LabelingCvSeq?
void FitLineGray(CvMat *line_data, float params[4], IplImage *gray) {
    // This very simple approach works...
    /*
    float *cx = &(params[2]);
    float *cy = &(params[3]);
    float *sx = &(params[0]);
    float *sy = &(params[1]);
    CvPoint2D32f *p1 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, 0, sizeof(CvPoint2D32f));
    CvPoint2D32f *p2 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, line_data->cols-1, sizeof(CvPoint2D32f));
    *cx = p1->x; *cy = p1->y;
    *sx = p2->x - p1->x; *sy = p2->y - p1->y;
    return;
    */

#ifdef SHOW_DEBUG
    IplImage *tmp = cvCreateImage(cvSize(gray->width, gray->height), IPL_DEPTH_8U, 3);
    IplImage *tmp2 = cvCreateImage(cvSize(gray->width*5, gray->height*5), IPL_DEPTH_8U, 3);
    cvCvtColor(gray, tmp, CV_GRAY2RGB);
    cvResize(tmp, tmp2, CV_INTER_NN);
#endif

    // First determine the direction of the line normal
    CvPoint2D32f *p1 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, 0, sizeof(CvPoint2D32f));
    CvPoint2D32f *p2 = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, line_data->cols-1, sizeof(CvPoint2D32f));
    double dx = +(p2->y - p1->y);
    double dy = -(p2->x - p1->x);
    if ((dx == 0) && (dy == 0)) return;
    else if (dx == 0) { dy /= dy; }
    else if (dy == 0) { dx /= dx; }
    else if (abs(dx) > abs(dy)) { dy /= dx; dx /= dx; }
    else { dx /= dy; dy /= dy; }

    // Build the normal search table
    const int win_size = 5;
    const int win_mid = win_size/2;
    const int diff_win_size = win_size-1;
    double xx[win_size], yy[win_size];
    double dxx[diff_win_size], dyy[diff_win_size];
    xx[win_mid] = 0; yy[win_mid] = 0;
    for (int i = 1; i <= win_size/2; i++) {
        xx[win_mid + i] = round(i*dx);
        xx[win_mid - i] = -xx[win_mid + i];
        yy[win_mid + i] = round(i*dy);
        yy[win_mid - i] = -yy[win_mid + i];
    }
    for (int i = 0; i < diff_win_size; i++) {
        dxx[i] = (xx[i]+xx[i+1])/2;
        dyy[i] = (yy[i]+yy[i+1])/2;
    }

    // Adjust the points
    for (int l = 0; l < line_data->cols; l++) {
        CvPoint2D32f *p = (CvPoint2D32f*)CV_MAT_ELEM_PTR_FAST(*line_data, 0, l, sizeof(CvPoint2D32f));

        double dx = 0, dy = 0, ww = 0;
        for (int i = 0; i < diff_win_size; i++) {
            unsigned char c1 = (unsigned char)gray->imageData[int((p->y+yy[i])*gray->widthStep+(p->x+xx[i]))];
            unsigned char c2 = (unsigned char)gray->imageData[int((p->y+yy[i+1])*gray->widthStep+(p->x+xx[i+1]))];
#ifdef SHOW_DEBUG
            cvCircle(tmp2, cvPoint((p->x+xx[i])*5+2, (p->y+yy[i])*5+2), 0, CV_RGB(0,0,255));
            cvCircle(tmp2, cvPoint((p->x+xx[i+1])*5+2, (p->y+yy[i+1])*5+2), 0, CV_RGB(0,0,255));
#endif
            double w = absdiff(c1, c2);
            dx += dxx[i]*w;
            dy += dyy[i]*w;
            ww += w;
        }
        if (ww > 0) {
            dx /= ww; dy /= ww;
        }
#ifdef SHOW_DEBUG
        cvLine(tmp2, cvPoint(p->x*5+2, p->y*5+2), cvPoint((p->x+dx)*5+2, (p->y+dy)*5+2), CV_RGB(0,255,0));
        p->x += float(dx); p->y += float(dy);
        cvCircle(tmp2, cvPoint(p->x*5+2, p->y*5+2), 0, CV_RGB(255,0,0));
#else
        p->x += float(dx); p->y += float(dy);
#endif
    }

#ifdef SHOW_DEBUG
    cvNamedWindow("tmp");
    cvShowImage("tmp", tmp2);
    cvWaitKey(0);
    cvReleaseImage(&tmp);
    cvReleaseImage(&tmp2);
#endif
}

} // namespace alvar