I am trying to implement automatic perspective correction in my iOS program. When I use the test image I found in the tutorial, everything works as expected, but when I take a picture myself I get back a weird result.
I am using the code found in this tutorial.
When I give it an image that looks like this:
I get this as the result:
Here is what dst gives me, in case that helps.
I am using this to call the method that contains the code:
quadSegmentation(Img, bw, dst, quad);
Can anyone tell me why I am getting so many more green lines than in the tutorial, and how I might fix this and properly crop the image so it contains only the card?
For a perspective transform you need:
source points -> coordinates of the quadrangle vertices in the source image.
destination points -> coordinates of the corresponding quadrangle vertices in the destination image.
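To show what these two point sets feed into, here is a minimal, self-contained sketch; the corner coordinates are hypothetical placeholders, not values taken from the images above:

#include <opencv2/opencv.hpp>
using namespace cv;

int main()
{
    Mat src = imread("card.jpg");

    // Four corners of the card as they appear in the photo (hypothetical values).
    std::vector<Point2f> src_pts = { {55, 90}, {480, 60}, {505, 350}, {70, 390} };
    // Where those corners should land: an axis-aligned rectangle, in the same order.
    std::vector<Point2f> dst_pts = { {0, 0}, {450, 0}, {450, 300}, {0, 300} };

    // 3x3 homography mapping src_pts onto dst_pts.
    Mat transmtx = getPerspectiveTransform(src_pts, dst_pts);
    Mat flat;
    warpPerspective(src, flat, transmtx, Size(450, 300));
    imwrite("flat.jpg", flat);
    return 0;
}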
Here we will calculate these points using contour processing.
Calculate the coordinates of the quadrangle vertices in the source image: find the largest contour, then run approxPolyDP on it, adjusting epsilon as needed to get exactly 4 co-ordinates (one way to tune epsilon is sketched after this step).
Calculate the coordinates of the corresponding quadrangle vertices in the destination image: take them from the bounding rectangle (boundingRect) of the largest contour.
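The code below uses a fixed epsilon of 5 for approxPolyDP, which is one reason you may not always get exactly 4 corners on your own photos. A common adjustment (my assumption, not part of the original answer, and approxQuad is a hypothetical helper name) is to grow epsilon until the polygon collapses to a quadrilateral:

#include <opencv2/opencv.hpp>
#include <vector>

// Sketch: increase epsilon until approxPolyDP returns exactly 4 vertices.
std::vector<cv::Point> approxQuad(const std::vector<cv::Point>& contour)
{
    std::vector<cv::Point> approx;
    for (double epsilon = 1.0; epsilon <= 50.0; epsilon += 1.0) {
        cv::approxPolyDP(contour, approx, epsilon, true);
        if (approx.size() == 4)
            break;  // quadrilateral found
    }
    return approx;  // may still have != 4 points if no epsilon in range works
}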
In the image below, the red quadrilateral marks the source points and the green rectangle the destination points.
Adjust the order of the co-ordinates and apply the perspective transform (one general way to order the corners is sketched below).
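The code below handles the ordering by swapping indices 2 and 3 when it builds quad_pts. A more general approach (my assumption, not in the original answer; orderCorners is a hypothetical helper name) is to sort the four corners geometrically; whatever convention you pick, the source and destination lists must use the same order:

#include <opencv2/opencv.hpp>
#include <algorithm>
#include <vector>

// Sketch: order four corners as top-left, top-right, bottom-right, bottom-left.
// The top-left corner has the smallest x+y and the bottom-right the largest;
// the top-right has the smallest y-x and the bottom-left the largest.
std::vector<cv::Point2f> orderCorners(const std::vector<cv::Point2f>& pts)
{
    CV_Assert(pts.size() == 4);
    auto bySum  = [](const cv::Point2f& a, const cv::Point2f& b) { return a.x + a.y < b.x + b.y; };
    auto byDiff = [](const cv::Point2f& a, const cv::Point2f& b) { return a.y - a.x < b.y - b.x; };

    std::vector<cv::Point2f> ordered(4);
    ordered[0] = *std::min_element(pts.begin(), pts.end(), bySum);  // top-left
    ordered[1] = *std::min_element(pts.begin(), pts.end(), byDiff); // top-right
    ordered[2] = *std::max_element(pts.begin(), pts.end(), bySum);  // bottom-right
    ordered[3] = *std::max_element(pts.begin(), pts.end(), byDiff); // bottom-left
    return ordered;
}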
See the final result
Code
#include <opencv2/opencv.hpp>
#include <iostream>

using namespace cv;
using namespace std;

int main()
{
    Mat src = imread("card.jpg");
    Mat thr;
    cvtColor(src, thr, CV_BGR2GRAY);
    threshold(thr, thr, 70, 255, CV_THRESH_BINARY);

    vector<vector<Point> > contours;  // vector for storing contours
    vector<Vec4i> hierarchy;
    int largest_contour_index = 0;
    double largest_area = 0;          // contourArea returns a double
    Mat dst(src.rows, src.cols, CV_8UC1, Scalar::all(0));  // create destination image

    // Find the contours in the thresholded image.
    findContours(thr.clone(), contours, hierarchy, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE);
    for (int i = 0; i < (int)contours.size(); i++) {
        double a = contourArea(contours[i], false);  // area of this contour
        if (a > largest_area) {
            largest_area = a;
            largest_contour_index = i;  // store the index of the largest contour
        }
    }
    drawContours(dst, contours, largest_contour_index, Scalar(255, 255, 255), CV_FILLED, 8, hierarchy);

    // Approximate the largest contour with a polygon; epsilon = 5 here.
    vector<vector<Point> > contours_poly(1);
    approxPolyDP(Mat(contours[largest_contour_index]), contours_poly[0], 5, true);
    Rect boundRect = boundingRect(contours[largest_contour_index]);

    if (contours_poly[0].size() == 4) {
        std::vector<Point2f> quad_pts;
        std::vector<Point2f> squre_pts;
        // Source corners; indices 2 and 3 are swapped so the order matches
        // the destination corners below.
        quad_pts.push_back(Point2f(contours_poly[0][0].x, contours_poly[0][0].y));
        quad_pts.push_back(Point2f(contours_poly[0][1].x, contours_poly[0][1].y));
        quad_pts.push_back(Point2f(contours_poly[0][3].x, contours_poly[0][3].y));
        quad_pts.push_back(Point2f(contours_poly[0][2].x, contours_poly[0][2].y));
        // Destination corners: the axis-aligned bounding rectangle.
        squre_pts.push_back(Point2f(boundRect.x, boundRect.y));
        squre_pts.push_back(Point2f(boundRect.x, boundRect.y + boundRect.height));
        squre_pts.push_back(Point2f(boundRect.x + boundRect.width, boundRect.y));
        squre_pts.push_back(Point2f(boundRect.x + boundRect.width, boundRect.y + boundRect.height));

        Mat transmtx = getPerspectiveTransform(quad_pts, squre_pts);
        Mat transformed = Mat::zeros(src.rows, src.cols, CV_8UC3);
        warpPerspective(src, transformed, transmtx, src.size());

        // Draw the detected quadrilateral (red) and the bounding rectangle
        // (green) for visualization.
        Point P1 = contours_poly[0][0];
        Point P2 = contours_poly[0][1];
        Point P3 = contours_poly[0][2];
        Point P4 = contours_poly[0][3];
        line(src, P1, P2, Scalar(0, 0, 255), 1, CV_AA, 0);
        line(src, P2, P3, Scalar(0, 0, 255), 1, CV_AA, 0);
        line(src, P3, P4, Scalar(0, 0, 255), 1, CV_AA, 0);
        line(src, P4, P1, Scalar(0, 0, 255), 1, CV_AA, 0);
        rectangle(src, boundRect, Scalar(0, 255, 0), 1, 8, 0);
        rectangle(transformed, boundRect, Scalar(0, 255, 0), 1, 8, 0);

        imshow("quadrilateral", transformed);
        imshow("thr", thr);
        imshow("dst", dst);
        imshow("src", src);
        imwrite("result1.jpg", dst);
        imwrite("result2.jpg", src);
        imwrite("result3.jpg", transformed);
        waitKey();
    }
    else
        cout << "Make sure that you are getting 4 corners using approxPolyDP..." << endl;

    return 0;
}