I have multiple images taken simultaneously, pointing in the same direction from roughly the same location. However, there is still a slight offset between them because the cameras were not in exactly the same place when the pictures were taken. I'm looking for a way to calculate the optimal translation/shear/skew/rotation to apply to one image so that it overlays another (almost) perfectly.
The images are in a .raw format that I am reading in 16 bits at a time.
My employer (who is not a programmer; I'm an intern, by the way) suggested taking a portion of the source image (away from the edges) and brute-force searching the other image for a same-sized portion whose data values correlate highly with it. I'm hoping there is a less wasteful algorithm.
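For reference, that brute-force correlation search is essentially template matching, which OpenCV already implements efficiently as matchTemplate. A minimal sketch of the idea, assuming 8-bit single-channel images and a pure translation between them (the findOffset helper and the central-patch choice are purely illustrative):
Code:
#include <opencv2/opencv.hpp>
using namespace cv;

// Illustrative helper: estimate the shift of dstImage relative to srcImage
// by correlating a central patch of the source over the destination.
Point findOffset(const Mat& srcImage, const Mat& dstImage)
{
    // Take a patch from the middle of the source (away from the edges)
    Rect roi(srcImage.cols / 4, srcImage.rows / 4,
             srcImage.cols / 2, srcImage.rows / 2);
    Mat patch = srcImage(roi);

    // Slide the patch over the destination, scoring every position with
    // normalized cross-correlation
    Mat response;
    matchTemplate(dstImage, patch, response, CV_TM_CCOEFF_NORMED);

    // The best-scoring location gives the translation between the two images
    Point maxLoc;
    minMaxLoc(response, 0, 0, 0, &maxLoc);
    return maxLoc - roi.tl();
}
Note that this only recovers a translation; it cannot account for the shear/skew/rotation part of the problem.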
Here is a short piece of code that does what you want (I use OpenCV 2.2):
Code:
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;

// srcImage and dstImage are your two input images (8-bit, single-channel for SURF)
// Detect interest points on each image that can be put in correspondence
Ptr<FeatureDetector> detector = new SurfFeatureDetector(2000); // 2000 = Hessian threshold
vector<KeyPoint> srcFeatures; // Detected key points on first image
vector<KeyPoint> dstFeatures;
detector->detect(srcImage,srcFeatures);
detector->detect(dstImage,dstFeatures);
// Extract descriptors of the features
SurfDescriptorExtractor extractor;
Mat srcDescriptors, dstDescriptors;
extractor.compute(srcImage, srcFeatures, srcDescriptors);
extractor.compute(dstImage, dstFeatures, dstDescriptors);
// Match descriptors of 2 images (find pairs of corresponding points)
BruteForceMatcher<L2<float> > matcher; // note the "> >" for pre-C++11 compilers; FlannBasedMatcher is better
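// (Sketch of that alternative: the FLANN-based matcher is a drop-in
//  replacement here and scales better to large descriptor sets.)
// FlannBasedMatcher matcher;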
vector<DMatch> matches;
matcher.match(srcDescriptors, dstDescriptors, matches);
// Extract pairs of points
vector<int> pairOfsrcKP(matches.size()), pairOfdstKP(matches.size());
for (size_t i = 0; i < matches.size(); i++) {
    pairOfsrcKP[i] = matches[i].queryIdx; // index into srcFeatures
    pairOfdstKP[i] = matches[i].trainIdx; // index into dstFeatures
}
vector<Point2f> sPoints;
KeyPoint::convert(srcFeatures, sPoints, pairOfsrcKP);
vector<Point2f> dPoints;
KeyPoint::convert(dstFeatures, dPoints, pairOfdstKP);
// Matched pairs of 2D points; these will be used to compute the homography
Mat src2Dfeatures;
Mat dst2Dfeatures;
Mat(sPoints).copyTo(src2Dfeatures);
Mat(dPoints).copyTo(dst2Dfeatures);
// Calculate homography
vector<uchar> outlierMask;
Mat H = findHomography(src2Dfeatures, dst2Dfeatures, outlierMask, RANSAC, 3); // 3 = reprojection threshold in pixels
// Show the result (only for debug)
if (debug) {
    Mat outimg;
    drawMatches(srcImage, srcFeatures, dstImage, dstFeatures, matches, outimg,
                Scalar::all(-1), Scalar::all(-1),
                reinterpret_cast<const vector<char>&>(outlierMask)); // draw inlier matches only
    imshow("Matches: src image (left) to dst (right)", outimg);
    waitKey(0);
}
// Now you have the resulting homography: warping srcImage by H aligns it with dstImage. Apply H using the code below
Mat alignedSrcImage;
warpPerspective(srcImage, alignedSrcImage, H, dstImage.size(), INTER_LINEAR, BORDER_CONSTANT);
Mat alignedDstImageToSrc;
warpPerspective(dstImage, alignedDstImageToSrc, H.inv(), srcImage.size(), INTER_LINEAR, BORDER_CONSTANT);
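Since your input is 16-bit .raw data: SURF expects an 8-bit single-channel image, so you have to get the raw samples into a CV_8U Mat first. A minimal sketch, assuming the width and height are known and the file holds one unsigned 16-bit sample per pixel (loadRaw16, rawPath, width, and height are placeholder names):
Code:
#include <opencv2/opencv.hpp>
#include <fstream>
#include <string>
#include <vector>
using namespace cv;

// Placeholder helper: read width*height unsigned 16-bit samples, scale to 8 bits
Mat loadRaw16(const std::string& rawPath, int width, int height)
{
    std::vector<unsigned short> buffer(width * height);
    std::ifstream file(rawPath.c_str(), std::ios::binary);
    file.read(reinterpret_cast<char*>(&buffer[0]),
              buffer.size() * sizeof(unsigned short));

    Mat img16(height, width, CV_16UC1, &buffer[0]); // wraps buffer, no copy
    Mat img8;
    img16.convertTo(img8, CV_8UC1, 255.0 / 65535.0); // allocates its own data
    return img8;
}
If your sensor only fills, say, 12 of the 16 bits, adjust the scale factor accordingly.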