I have the cameraMatrix and the distCoeff needed to undistort an image or a vector of points. Now I'd like to distort them back. Is it possible with OpenCV?
I remember reading something about it on Stack Overflow, but I cannot find it now.
EDIT: I found the way to do it in this answer. It is also in the OpenCV developer zone (in this issue).
But my results are not quite correct: there is an error of roughly 2-4 pixels. Probably there is something wrong in my code, because everything seems fine in the unit test of the answer I linked. Maybe type casting from float to double, or something else I cannot see.
Here is my test case:
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

void distortPoints(const std::vector<cv::Point2d> & src, std::vector<cv::Point2d> & dst,
                   const cv::Mat & cameraMatrix, const cv::Mat & distorsionMatrix)
{
  dst.clear();
  double fx = cameraMatrix.at<double>(0, 0);
  double fy = cameraMatrix.at<double>(1, 1);
  double ux = cameraMatrix.at<double>(0, 2);
  double uy = cameraMatrix.at<double>(1, 2);

  double k1 = distorsionMatrix.at<double>(0, 0);
  double k2 = distorsionMatrix.at<double>(0, 1);
  double p1 = distorsionMatrix.at<double>(0, 2);
  double p2 = distorsionMatrix.at<double>(0, 3);
  double k3 = distorsionMatrix.at<double>(0, 4);

  for (unsigned int i = 0; i < src.size(); i++)
  {
    const cv::Point2d & p = src[i];
    double x = p.x;
    double y = p.y;
    double xCorrected, yCorrected;

    // Step 1: apply the distortion model (input is in normalized coordinates)
    {
      double r2 = x * x + y * y;

      // radial distortion (note: the k3 term uses r^6, i.e. r2 cubed)
      xCorrected = x * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
      yCorrected = y * (1. + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);

      // tangential distortion
      // The "Learning OpenCV" book is wrong here!
      // Incorrect equations from the "Learning OpenCV" book:
      // xCorrected = xCorrected + (2. * p1 * y + p2 * (r2 + 2. * x * x));
      // yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x);
      // Correct formulae found at: http://www.vision.caltech.edu/bouguetj/calib_doc/htmls/parameters.html
      xCorrected = xCorrected + (2. * p1 * x * y + p2 * (r2 + 2. * x * x));
      yCorrected = yCorrected + (p1 * (r2 + 2. * y * y) + 2. * p2 * x * y);
    }

    // Step 2: ideal (normalized) coordinates => pixel coordinates
    {
      xCorrected = xCorrected * fx + ux;
      yCorrected = yCorrected * fy + uy;
    }

    dst.push_back(cv::Point2d(xCorrected, yCorrected));
  }
}
int main(int /*argc*/, char** /*argv*/)
{
  cout << "OpenCV version: " << CV_MAJOR_VERSION << " " << CV_MINOR_VERSION << endl; // 2 4

  Mat cameraMatrix = (Mat_<double>(3, 3) << 1600, 0, 789, 0, 1600, 650, 0, 0, 1);
  Mat distorsion = (Mat_<double>(5, 1) << -0.48, 0, 0, 0, 0);
  cout << "camera matrix: " << cameraMatrix << endl;
  cout << "distortion coefficients: " << distorsion << endl;

  // the starting points
  std::vector<Point2f> original_pts;
  original_pts.push_back(Point2f(23, 358));
  original_pts.push_back(Point2f(8, 357));
  original_pts.push_back(Point2f(12, 342));
  original_pts.push_back(Point2f(27, 343));
  original_pts.push_back(Point2f(7, 350));
  original_pts.push_back(Point2f(-8, 349));
  original_pts.push_back(Point2f(-4, 333));
  original_pts.push_back(Point2f(12, 334));
  Mat original_m = Mat(original_pts);

  // undistort
  Mat undistorted_m;
  undistortPoints(original_m, undistorted_m, cameraMatrix, distorsion);
  cout << "undistort points" << undistorted_m << endl;

  // back to a vector of points
  vector<cv::Point2d> undistorted_points;
  for (size_t i = 0; i < original_pts.size(); ++i)
  {
    Point2d p;
    p.x = undistorted_m.at<float>((int)i, 0);
    p.y = undistorted_m.at<float>((int)i, 1);
    undistorted_points.push_back(p);

    // NOTE THAT HERE THERE IS AN APPROXIMATION
    // WHAT IS IT? STD::COUT? CASTING TO FLOAT?
    cout << undistorted_points[i] << endl;
  }

  vector<cv::Point2d> redistorted_points;
  distortPoints(undistorted_points, redistorted_points, cameraMatrix, distorsion);
  cout << redistorted_points << endl;

  for (size_t i = 0; i < original_pts.size(); ++i)
  {
    cout << original_pts[i] << endl;
    cout << redistorted_points[i] << endl;
    Point2d o;
    o.x = original_pts[i].x;
    o.y = original_pts[i].y;
    Point2d dist = redistorted_points[i] - o;
    double norm = sqrt(dist.dot(dist));
    std::cout << "distance = " << norm << std::endl;
    cout << endl;
  }

  return 0;
}
And here is my output:
OpenCV version: 2 4
camera matrix: [1600, 0, 789;
0, 1600, 650;
0, 0, 1]
distortion coefficients: [-0.48; 0; 0; 0; 0]
undistort points[-0.59175861, -0.22557901; -0.61276215, -0.22988389; -0.61078846, -0.24211435; -0.58972651, -0.23759322; -0.61597037, -0.23630577; -0.63910204, -0.24136727; -0.63765121, -0.25489968; -0.61291695, -0.24926868]
[-0.591759, -0.225579]
[-0.612762, -0.229884]
[-0.610788, -0.242114]
[-0.589727, -0.237593]
[-0.61597, -0.236306]
[-0.639102, -0.241367]
[-0.637651, -0.2549]
[-0.612917, -0.249269]
[24.45809095301274, 358.5558144841519; 10.15042938413364, 357.806737955385; 14.23419751024494, 342.8856229036298; 28.51642501095819, 343.610956960508; 9.353743900129871, 350.9029663678638; -4.488033489615646, 350.326357275197; -0.3050714463695385, 334.477016554487; 14.41516474594289, 334.9822130217053]
[23, 358]
[24.4581, 358.556]
distance = 1.56044
[8, 357]
[10.1504, 357.807]
distance = 2.29677
[12, 342]
[14.2342, 342.886]
distance = 2.40332
[27, 343]
[28.5164, 343.611]
distance = 1.63487
[7, 350]
[9.35374, 350.903]
distance = 2.521
[-8, 349]
[-4.48803, 350.326]
distance = 3.75408
[-4, 333]
[-0.305071, 334.477]
distance = 3.97921
[12, 334]
[14.4152, 334.982]
distance = 2.60725
You can easily distort your points back using cv::projectPoints:
cv::Mat rVec(3, 1, cv::DataType<double>::type); // Rotation vector
rVec.at<double>(0) = 0;
rVec.at<double>(1) = 0;
rVec.at<double>(2) = 0;

cv::Mat tVec(3, 1, cv::DataType<double>::type); // Translation vector
tVec.at<double>(0) = 0;
tVec.at<double>(1) = 0;
tVec.at<double>(2) = 0;

cv::projectPoints(points, rVec, tVec, cameraMatrix, distCoeffs, result);
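Note that projectPoints expects 3D object points, so the normalized coordinates returned by undistortPoints have to be lifted to the z = 1 plane first. Below is a minimal, self-contained sketch of the full round trip, reusing the camera matrix and distortion coefficients from the question (names like objectPoints are mine, not from the original answer):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <iostream>
#include <vector>

int main()
{
  cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 1600, 0, 789, 0, 1600, 650, 0, 0, 1);
  cv::Mat distCoeffs = (cv::Mat_<double>(5, 1) << -0.48, 0, 0, 0, 0);

  // A distorted pixel from the question's test case.
  std::vector<cv::Point2d> pixels;
  pixels.push_back(cv::Point2d(23, 358));

  // Undistort to normalized image coordinates ...
  std::vector<cv::Point2d> normalized;
  cv::undistortPoints(pixels, normalized, cameraMatrix, distCoeffs);

  // ... lift them onto the z = 1 plane ...
  std::vector<cv::Point3d> objectPoints;
  for (size_t i = 0; i < normalized.size(); ++i)
    objectPoints.push_back(cv::Point3d(normalized[i].x, normalized[i].y, 1.0));

  // ... and re-project with zero rotation and translation, which
  // re-applies the distortion model.
  std::vector<cv::Point2d> redistorted;
  cv::projectPoints(objectPoints, cv::Vec3d(0, 0, 0), cv::Vec3d(0, 0, 0),
                    cameraMatrix, distCoeffs, redistorted);

  std::cout << redistorted[0] << std::endl; // should be close to (23, 358)
  return 0;
}

Working in Point2d throughout also avoids the float round-off that the question suspects: there, undistortPoints returned CV_32F because the input was a Mat of Point2f.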
PS: OpenCV 3 added a dedicated distort function (cv::fisheye::distortPoints, though note it is for the fisheye camera model).
If you multiply all the distortion coefficients by -1, you can then pass them to undistort or undistortPoints: you effectively apply the inverse distortion, which brings the points back to their distorted positions.
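A rough sketch of that trick, assuming the camera matrix and coefficients from the question and a point given in undistorted pixel coordinates. Keep in mind the trick is only a first-order approximation, so expect small residual errors with strong distortion:

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>
#include <vector>

int main()
{
  cv::Mat cameraMatrix = (cv::Mat_<double>(3, 3) << 1600, 0, 789, 0, 1600, 650, 0, 0, 1);
  cv::Mat distCoeffs = (cv::Mat_<double>(5, 1) << -0.48, 0, 0, 0, 0);

  // A point in the undistorted image, in pixels.
  std::vector<cv::Point2d> undistortedPixels;
  undistortedPixels.push_back(cv::Point2d(24.46, 358.56));

  // Flip the sign of every coefficient ...
  cv::Mat negated = -1.0 * distCoeffs;

  // ... and "undistort" with them. Passing P = cameraMatrix keeps the
  // result in pixel coordinates; the output is approximately the
  // original distorted position.
  std::vector<cv::Point2d> redistorted;
  cv::undistortPoints(undistortedPixels, redistorted, cameraMatrix,
                      negated, cv::noArray(), cameraMatrix);

  std::cout << redistorted[0] << std::endl;
  return 0;
}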
The initUndistortRectifyMap linked in one of the answers of the question you mention does indeed do what you want. Since it is used by remap to build the full undistorted image, it gives, for each location in the destination (undistorted) image, where to find the corresponding pixel in the distorted image, so that its color can be used. So it really is an f(undistorted) = distorted map.
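For illustration, here is a short sketch of that lookup, assuming the camera matrix, distortion coefficients, and image size are available (distortViaMap is a made-up name; in real code you would build the maps once, not per point):

#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>

// Look up the distorted position of an integer undistorted pixel in the
// maps built by initUndistortRectifyMap.
cv::Point2f distortViaMap(const cv::Point& undistortedPixel,
                          const cv::Mat& cameraMatrix,
                          const cv::Mat& distCoeffs,
                          const cv::Size& imageSize)
{
  cv::Mat mapX, mapY;
  // Identity rectification (R empty) and the same camera matrix for the
  // new projection, so the maps encode only the lens distortion.
  cv::initUndistortRectifyMap(cameraMatrix, distCoeffs, cv::Mat(),
                              cameraMatrix, imageSize, CV_32FC1, mapX, mapY);

  // mapX/mapY hold, for each undistorted (x, y), the corresponding
  // position in the distorted image: f(undistorted) = distorted.
  return cv::Point2f(mapX.at<float>(undistortedPixel.y, undistortedPixel.x),
                     mapY.at<float>(undistortedPixel.y, undistortedPixel.x));
}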
However, using this map only allows for input positions that are integer and within the image rectangle. Thankfully, the documentation gives the full equations.
They are mostly what you have, except that there is a preliminary step that you are missing. Here is my version (it is C# but should be the same):
public PointF Distort(PointF point)
{
    // To relative (normalized) coordinates <- this is the step you are missing.
    // fx, fy, cx, cy come from the camera matrix; k1, k2, k3, p1, p2 are
    // the distortion coefficients.
    double x = (point.X - cx) / fx;
    double y = (point.Y - cy) / fy;

    double r2 = x * x + y * y;

    // Radial distortion
    double xDistort = x * (1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);
    double yDistort = y * (1 + k1 * r2 + k2 * r2 * r2 + k3 * r2 * r2 * r2);

    // Tangential distortion
    xDistort = xDistort + (2 * p1 * x * y + p2 * (r2 + 2 * x * x));
    yDistort = yDistort + (p1 * (r2 + 2 * y * y) + 2 * p2 * x * y);

    // Back to absolute (pixel) coordinates.
    xDistort = xDistort * fx + cx;
    yDistort = yDistort * fy + cy;

    return new PointF((float)xDistort, (float)yDistort);
}