Here is the original code from MATLAB:
% Calculate each separated object area
cDist=regionprops(bwImg, 'Area');
cDist=[cDist.Area];
% Label each object
[bwImgLabeled, ~]=bwlabel(bwImg);
% Calculate min and max object size based on assumptions on the color
% checker size
maxLabelSize = prod(size(imageData)./[4 6]);
minLabelSize = prod(size(imageData)./[4 6]./10);
% Find label indices for objects that are too large or too small
remInd = find(cDist > maxLabelSize);
remInd = [remInd find(cDist < minLabelSize)];
% Remove over/undersized objects
for n=1:length(remInd)
    ri = bwImgLabeled == remInd(n);
    bwImgLabeled(ri) = 0;
end
Here is my code using OpenCV:
//regionprops(bwImg, 'Area');
// cDist=[cDist.Area]
//cv::FileStorage file("C:\\Users\\gdarmon\\Desktop\\gili.txt", cv::FileStorage::WRITE);
//file << dst;
dst.convertTo(dst, CV_8U);
// Find the contours of the binary image
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
cv::findContours(dst, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_NONE);
// Compute the moments of each contour...
std::vector<cv::Moments> mu(contours.size());
for (int i = 0; i < contours.size(); i++)
{
    mu[i] = cv::moments(contours[i], false);
}
// ...and from them the centroid of each contour
std::vector<cv::Point2f> mc(contours.size());
for (int i = 0; i < contours.size(); i++)
{
    mc[i] = cv::Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
}
Now that I have the contours, I would like to use the bwlabel function.
1. I have figured out that labeling is done in order to get 4- or 8-connected objects. Can you please explain what labeling actually is? I would appreciate any link.
2. Connected components in OpenCV: in this article some people are talking about cvBlob and some about OpenCV's cvContourArea. Can you explain the difference, and which will be better suited for my use case?
Update: here is what I have tried using cvBlobs
IplImage* img_bw = new IplImage(dst);
CBlobResult blobs;
CBlob* currentBlob;
blobs = CBlobResult(img_bw, NULL, 0);
// Exclude all white blobs smaller than the given value (80)
// The bigger the last parameter, the bigger the blobs need
// to be for inclusion
blobs.Filter(blobs, B_EXCLUDE, CBlobGetArea(), B_LESS, 80);
// Get the number of blobs discovered
int num_blobs = blobs.GetNumBlobs();
// Display the filtered blobs
IplImage* filtered = cvCreateImage(cvGetSize(img_bw), IPL_DEPTH_8U, 3);
cvMerge(img_bw, img_bw, img_bw, NULL, filtered);
for (int i = 0; i < num_blobs; i++)
{
    currentBlob = blobs.GetBlob(i);
    currentBlob->FillBlob(filtered, CV_RGB(255, 0, 0));
}
// Display the input / output windows and images
cvNamedWindow("input");
cvNamedWindow("output");
cvShowImage("input", img_bw);
cvShowImage("output", filtered);
cv::waitKey(0);
/*% Calculate min and max object size based on assumptions on the color
% checker size
maxLabelSize = prod(size(imageData)./[4 6]);
minLabelSize = prod(size(imageData)./[4 6]./10);*/
double maxLabelSize = (dst.rows/4.0) * (dst.cols/6.0);
double minLabelSize = ((dst.rows/40.0) * (dst.cols/60.0));
- I have figured out that labeling is done in order to get 4- or 8-connected objects. Can you please explain what labeling actually is? I would appreciate any link.
The clearest demonstration of what labeling actually does is in the MATLAB documentation for bwlabel. If you compare the original matrix BW to the resulting matrix L, you'll see that it takes a binary image and assigns unique labels to each connected group of 1's:
L =
1 1 1 0 0 0 0 0
1 1 1 0 2 2 0 0
1 1 1 0 2 2 0 0
1 1 1 0 0 0 3 0
1 1 1 0 0 0 3 0
1 1 1 0 0 0 3 0
1 1 1 0 0 3 3 0
1 1 1 0 0 0 0 0
Here there are three labeled components. This example looks for 4-connected components; a pixel is considered to be connected to the current pixel if it is to the left, right, above, or below it. 8-connected objects include the diagonals, which would result in labels 2 and 3 being merged for the matrix above, since the lower-right corner of object 2 and the top of object 3 are diagonally connected. The connected-component labeling algorithm is described in the Wikipedia article on connected-component labeling.
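For intuition only, here is a minimal sketch of 4-connected labeling done with a breadth-first flood fill over a binary cv::Mat. It is just an illustration of the idea; it is not how bwlabel or OpenCV implement labeling internally:
#include <opencv2/core/core.hpp>
#include <queue>

// Minimal 4-connected labeling sketch: bw is CV_8UC1 with 0 = background,
// non-zero = foreground. Returns a CV_32S image in which every connected
// component gets a unique positive label (background stays 0).
cv::Mat labelConnected4(const cv::Mat& bw)
{
    cv::Mat labels = cv::Mat::zeros(bw.size(), CV_32S);
    const int dr[4] = { -1, 1, 0, 0 };
    const int dc[4] = {  0, 0,-1, 1 };
    int nextLabel = 0;
    for (int r = 0; r < bw.rows; r++)
    {
        for (int c = 0; c < bw.cols; c++)
        {
            if (bw.at<uchar>(r, c) == 0 || labels.at<int>(r, c) != 0)
                continue;                         // background or already labeled
            ++nextLabel;                          // start a new component
            std::queue<cv::Point> q;
            q.push(cv::Point(c, r));
            labels.at<int>(r, c) = nextLabel;
            while (!q.empty())                    // flood-fill this component
            {
                cv::Point p = q.front(); q.pop();
                for (int k = 0; k < 4; k++)       // visit the 4 neighbours
                {
                    int nr = p.y + dr[k], nc = p.x + dc[k];
                    if (nr < 0 || nr >= bw.rows || nc < 0 || nc >= bw.cols)
                        continue;
                    if (bw.at<uchar>(nr, nc) != 0 && labels.at<int>(nr, nc) == 0)
                    {
                        labels.at<int>(nr, nc) = nextLabel;
                        q.push(cv::Point(nc, nr));
                    }
                }
            }
        }
    }
    return labels;
}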
2. Connected components in OpenCV: in this article some people are talking about cvBlob and some about OpenCV's cvContourArea. Can you explain the difference, and which will be better suited for my use case?
OpenCV 3.0 is out of beta and has two brand new methods: connectedComponents and connectedComponentsWithStats (see the documentation). If you're trying to replicate MATLAB's bwlabel, this is the way to go.
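The size filtering from your original MATLAB snippet maps onto connectedComponentsWithStats fairly directly. A rough sketch (it assumes dst is your binary CV_8U image and minLabelSize/maxLabelSize are the thresholds you already computed; the other names are just for illustration):
// Sketch: keep only components whose area lies in [minLabelSize, maxLabelSize],
// mimicking the MATLAB loop that zeroes out the labels listed in remInd.
cv::Mat labels, stats, centroids;
int nLabels = cv::connectedComponentsWithStats(dst, labels, stats, centroids, 8, CV_32S);

cv::Mat filtered = cv::Mat::zeros(dst.size(), CV_8UC1);
for (int label = 1; label < nLabels; label++)      // label 0 is the background
{
    int area = stats.at<int>(label, cv::CC_STAT_AREA);
    if (area >= minLabelSize && area <= maxLabelSize)
        filtered.setTo(255, labels == label);      // copy the surviving component
}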
I wrote a test program to try out connectedComponentsWithStats (complete code below), using this as my test image:
(Actually, this image is reduced from 800x600 to 400x300, but the code to generate it is included below.)
I generated the labeled image using:
int nLabels = connectedComponentsWithStats(src, labels, stats, centroids, 8, CV_32S);
The value returned in nLabels is 5. Remember that this method considers the background to be label 0.
To see what the labeled areas are, you could scale up the grayscale values from [0..nLabels-1] to [0..255], or you could assign random RGB values and create a color image (a sketch of both options follows the printout below). For this test I just printed out the values at a couple of locations that I knew were in different components.
cout << "Show label values:" << endl;
// Middle of square at top-left
int component1Pixel = labels.at<int>(150,150);
cout << "pixel at(150,150) = " << component1Pixel << endl;
// Middle of rectangle at far right
int component2Pixel = labels.at<int>(300,550);
cout << "pixel at(300,550) = " << component2Pixel << endl << endl;
Show label values:
pixel at(150,150) = 1
pixel at(300,550) = 2
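As mentioned above, here is a rough sketch of the two display options, scaling the labels to [0..255] and assigning each label a random color (it reuses the labels and nLabels variables from the call above):
// Option 1: scale the labels [0..nLabels-1] into [0..255] for display
cv::Mat labelsGray;
labels.convertTo(labelsGray, CV_8U, 255.0 / (nLabels - 1));
cv::imshow("Labels (grayscale)", labelsGray);

// Option 2: give each label a random color (label 0, the background, stays black)
std::vector<cv::Vec3b> colors(nLabels);
colors[0] = cv::Vec3b(0, 0, 0);
for (int label = 1; label < nLabels; label++)
    colors[label] = cv::Vec3b(rand() & 255, rand() & 255, rand() & 255);

cv::Mat labelsColor(labels.size(), CV_8UC3);
for (int r = 0; r < labels.rows; r++)
    for (int c = 0; c < labels.cols; c++)
        labelsColor.at<cv::Vec3b>(r, c) = colors[labels.at<int>(r, c)];
cv::imshow("Labels (color)", labelsColor);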
stats is an nLabels x 5 Mat, with one row per component (including the background) holding its left, top, width, height, and area. For this image:
stats:
(left,top,width,height,area)
[0, 0, 800, 600, 421697;
100, 100, 101, 101, 10201;
500, 150, 101, 301, 30401;
350, 246, 10, 10, 36;
225, 325, 151, 151, 17665]
You'll notice that component 0 is the full width/height of the image. Summing all of the areas, you get 480,000 = 800x600. The first 4 elements can be used to create a bounding rectangle:
Rect(Point(left,top), Size(width,height))
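For example, here is a sketch that draws the bounding box of every foreground component (it assumes the src, stats, and nLabels variables from the test program below):
// Sketch: draw each foreground component's bounding rectangle in red
cv::Mat boxes;
cv::cvtColor(src, boxes, cv::COLOR_GRAY2BGR);        // color copy to draw on
for (int label = 1; label < nLabels; label++)        // skip the background (label 0)
{
    cv::Rect box(cv::Point(stats.at<int>(label, cv::CC_STAT_LEFT),
                           stats.at<int>(label, cv::CC_STAT_TOP)),
                 cv::Size(stats.at<int>(label, cv::CC_STAT_WIDTH),
                          stats.at<int>(label, cv::CC_STAT_HEIGHT)));
    cv::rectangle(boxes, box, cv::Scalar(0, 0, 255), 2);
}
cv::imshow("Bounding boxes", boxes);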
centroids is an nLabels x 2 Mat containing the x, y coordinates of the centroid of each component:
centroids:
(x, y)
[398.8575636060963, 298.8746232484461;
150, 150;
550, 300;
354.5, 250.5;
300, 400]
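One thing to watch out for: centroids is stored as CV_64F, so read it with at<double> rather than at<int>. For example:
// Sketch: print each component's centroid (centroids is CV_64F)
for (int label = 0; label < nLabels; label++)
{
    double cx = centroids.at<double>(label, 0);   // x coordinate
    double cy = centroids.at<double>(label, 1);   // y coordinate
    cout << "centroid of label " << label << " = (" << cx << ", " << cy << ")" << endl;
}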
Finally, at some point you're probably going to want to do further processing on one of the components individually. Here I use compare to generate a new Mat, only2, that contains only the pixels from labels whose value is 2.
compare(labels, 2, only2, CMP_EQ);
compare helpfully sets these pixels to a value of 255 in the new image so you can see the results:
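Since only2 comes back as an 8-bit image with 255 where the label matched and 0 elsewhere, it can also be used directly as a mask in later calls. A small sketch:
// Sketch: use the 255/0 mask from compare() to work with component 2 alone
cv::Mat component2;
src.copyTo(component2, only2);            // copies only the pixels where only2 != 0
int area2 = cv::countNonZero(only2);      // pixel count of component 2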
Here's the complete code:
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
using namespace std;
using namespace cv;
int main(int argc, const char * argv[]) {
// Create an image
const int color_white = 255;
Mat src = Mat::zeros(600, 800, CV_8UC1);
rectangle(src, Point(100, 100), Point(200, 200), color_white, CV_FILLED);
rectangle(src, Point(500, 150), Point(600, 450), color_white, CV_FILLED);
rectangle(src, Point(350,250), Point(359,251), color_white, CV_FILLED);
rectangle(src, Point(354,246), Point(355,255), color_white, CV_FILLED);
circle(src, Point(300, 400), 75, color_white, CV_FILLED);
imshow("Original", src);
// Get connected components and stats
const int connectivity_8 = 8;
Mat labels, stats, centroids;
int nLabels = connectedComponentsWithStats(src, labels, stats, centroids, connectivity_8, CV_32S);
cout << "Number of connected components = " << nLabels << endl << endl;
cout << "Show label values:" << endl;
int component1Pixel = labels.at<int>(150,150);
cout << "pixel at(150,150) = " << component1Pixel << endl;
int component2Pixel = labels.at<int>(300,550);
cout << "pixel at(300,550) = " << component2Pixel << endl << endl;
// Statistics
cout << "Show statistics and centroids:" << endl;
cout << "stats:" << endl << "(left,top,width,height,area)" << endl << stats << endl << endl;
cout << "centroids:" << endl << "(x, y)" << endl << centroids << endl << endl;
// Print individual stats for component 1 (component 0 is background)
cout << "Component 1 stats:" << endl;
cout << "CC_STAT_LEFT = " << stats.at<int>(1,CC_STAT_LEFT) << endl;
cout << "CC_STAT_TOP = " << stats.at<int>(1,CC_STAT_TOP) << endl;
cout << "CC_STAT_WIDTH = " << stats.at<int>(1,CC_STAT_WIDTH) << endl;
cout << "CC_STAT_HEIGHT = " << stats.at<int>(1,CC_STAT_HEIGHT) << endl;
cout << "CC_STAT_AREA = " << stats.at<int>(1,CC_STAT_AREA) << endl;
// Create image with only component 2
Mat only2;
compare(labels, 2, only2, CMP_EQ);
imshow("Component 2", only2);
waitKey(0);
}