OpenMP是一種應用於多處理器程序設計的并行編程處理方案,它提供了對并行編程的高層抽象,只需要在程序中添加簡單的指令,就能夠編寫高效的并行程序,而不用關心具體的并行實現細節,降低了并行編程的難度和復雜度。也正由於OpenMP的簡單易用性,它並不適合需要復雜的線程間同步和互斥的場合。
OpenCV中使用Sift或Surf特點進行圖象拼接的算法,需要分別對兩幅或多幅圖象進行特點提取和特點描寫,之後再進行圖象特點點的配對、圖象變換等操作。不同圖象的特點提取和描寫的工作是全部進程中最耗費時間的,也是相互獨立運行的,可以使用OpenMP進行加速。
以下是不使用OpenMP加速的Sift圖象拼接原程序:
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "omp.h"

#include <algorithm>
#include <iostream>
#include <vector>

using namespace cv;

// Maps a point from the source image into the destination image through a
// 3x3 homogeneous transform matrix (performs the perspective divide).
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);

int main(int argc, char *argv[])
{
	// omp_get_wtime() returns double; storing it in a float loses precision.
	double startTime = omp_get_wtime();

	Mat image01 = imread("Test01.jpg");
	Mat image02 = imread("Test02.jpg");
	// Guard against missing/unreadable input files: imread returns an empty
	// Mat on failure and every later step would crash on it.
	if (image01.empty() || image02.empty())
	{
		std::cout << "Failed to load input images" << std::endl;
		return -1;
	}
	imshow("拼接圖象1", image01);
	imshow("拼接圖象2", image02);

	// Convert to grayscale. imread() loads images in BGR channel order, so
	// CV_BGR2GRAY (not CV_RGB2GRAY) applies the luma weights to the correct
	// channels.
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);
	cvtColor(image02, image2, CV_BGR2GRAY);

	// Detect SIFT keypoints (800 = detector threshold).
	SiftFeatureDetector siftDetector(800);
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	// Compute descriptors for the keypoints, needed for matching below.
	SiftDescriptorExtractor siftDescriptor;
	Mat imageDesc1, imageDesc2;
	siftDescriptor.compute(image1, keyPoint1, imageDesc1);
	siftDescriptor.compute(image2, keyPoint2, imageDesc2);

	double endTime = omp_get_wtime();
	std::cout << "不使用OpenMP加速消耗時間: " << endTime - startTime << std::endl;

	// Match descriptors between the two images.
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end()); // ascending match distance

	// Keep up to the 10 strongest matches; clamp to the number actually found
	// to avoid out-of-range access, and bail out if there are too few for
	// findHomography (which needs at least 4 point pairs).
	const int topN = (std::min)(10, (int)matchePoints.size());
	if (topN < 4)
	{
		std::cout << "Not enough matches to estimate a homography" << std::endl;
		return -1;
	}
	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < topN; i++)
	{
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
	}

	// Estimate the 3x3 projective mapping from image 1 to image 2 (RANSAC),
	// then pre-multiply by a translation of image01.cols so the warped image
	// is shifted right and leaves room for image 2 on the canvas.
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
	Mat adjustHomo = adjustMat * homo;

	// Locate the strongest match both in the warped image and in the base
	// image; this anchors where the two images are joined.
	Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
	originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
	basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

	// Warp image 1 onto the stitching canvas.
	Mat imageTransform1;
	warpPerspective(image01, imageTransform1, adjustHomo, Size(image02.cols + image01.cols + 110, image02.rows));

	// Blend the overlap region to the left of the strongest match so the seam
	// fades smoothly instead of showing an abrupt transition.
	Mat image1Overlap, image2Overlap;
	image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
	image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
	Mat image1ROICopy = image1Overlap.clone(); // unblended copy of image 1's overlap
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			// Linear ramp: image 2's weight grows with distance across the overlap.
			double weight = (double)j / image1Overlap.cols;
			for (int c = 0; c < 3; c++)
			{
				image1Overlap.at<Vec3b>(i, j)[c] =
					(1 - weight) * image1ROICopy.at<Vec3b>(i, j)[c] + weight * image2Overlap.at<Vec3b>(i, j)[c];
			}
		}
	}

	// Copy the non-overlapping part of image 2 straight onto the canvas.
	Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));
	ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows)));

	namedWindow("拼接結果", 0);
	imshow("拼接結果", imageTransform1);
	imwrite("D:\\拼接結果.jpg", imageTransform1);
	waitKey();
	return 0;
}

// Maps a point through a 3x3 homogeneous transform, dividing by the third
// (w) coordinate to return Euclidean image coordinates.
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri)
{
	Mat originelP, targetP;
	originelP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
	targetP = transformMaxtri * originelP;
	float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
	float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
	return Point2f(x, y);
}
圖象1:
圖象2:
拼接結果 :
在我的機器上,不使用OpenMP時平均耗時4.7s。
使用OpenMP也很簡單,VS 內置了對OpenMP的支持。在項目上右鍵->屬性->配置屬性->C/C++->語言->OpenMP支持里選擇是:
以后在程序中加入OpenMP的頭文件“omp.h”就能夠了:
#include "highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "omp.h"

#include <algorithm>
#include <iostream>
#include <vector>

using namespace cv;

// Maps a point from the source image into the destination image through a
// 3x3 homogeneous transform matrix (performs the perspective divide).
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);

int main(int argc, char *argv[])
{
	// omp_get_wtime() returns double; storing it in a float loses precision.
	double startTime = omp_get_wtime();

	Mat image01, image02;
	Mat image1, image2;
	vector<KeyPoint> keyPoint1, keyPoint2;
	Mat imageDesc1, imageDesc2;

	// Feature extraction for the two images is independent, so each runs in
	// its own OpenMP section. Each section owns its detector/extractor so no
	// mutable state is shared between the two threads. HighGUI (imshow) is
	// not thread-safe, so display is deferred until after the parallel region.
#pragma omp parallel sections
	{
#pragma omp section
		{
			image01 = imread("Test01.jpg");
			if (!image01.empty())
			{
				// imread() loads BGR, so CV_BGR2GRAY (not CV_RGB2GRAY) is correct.
				cvtColor(image01, image1, CV_BGR2GRAY);
				SiftFeatureDetector siftDetector(800); // detector threshold
				siftDetector.detect(image1, keyPoint1);
				SiftDescriptorExtractor siftDescriptor;
				siftDescriptor.compute(image1, keyPoint1, imageDesc1);
			}
		}
#pragma omp section
		{
			image02 = imread("Test02.jpg");
			if (!image02.empty())
			{
				cvtColor(image02, image2, CV_BGR2GRAY);
				SiftFeatureDetector siftDetector(800);
				siftDetector.detect(image2, keyPoint2);
				SiftDescriptorExtractor siftDescriptor;
				siftDescriptor.compute(image2, keyPoint2, imageDesc2);
			}
		}
	}

	// Guard against missing/unreadable input files.
	if (image01.empty() || image02.empty())
	{
		std::cout << "Failed to load input images" << std::endl;
		return -1;
	}
	imshow("拼接圖象1", image01);
	imshow("拼接圖象2", image02);

	double endTime = omp_get_wtime();
	std::cout << "使用OpenMP加速消耗時間: " << endTime - startTime << std::endl;

	// Match descriptors between the two images.
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end()); // ascending match distance

	// Keep up to the 10 strongest matches; clamp to the number actually found
	// to avoid out-of-range access, and bail out if there are too few for
	// findHomography (which needs at least 4 point pairs).
	const int topN = (std::min)(10, (int)matchePoints.size());
	if (topN < 4)
	{
		std::cout << "Not enough matches to estimate a homography" << std::endl;
		return -1;
	}
	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < topN; i++)
	{
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
	}

	// Estimate the 3x3 projective mapping from image 1 to image 2 (RANSAC),
	// then pre-multiply by a translation of image01.cols so the warped image
	// is shifted right and leaves room for image 2 on the canvas.
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
	Mat adjustHomo = adjustMat * homo;

	// Locate the strongest match both in the warped image and in the base
	// image; this anchors where the two images are joined.
	Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
	originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
	basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

	// Warp image 1 onto the stitching canvas.
	Mat imageTransform1;
	warpPerspective(image01, imageTransform1, adjustHomo, Size(image02.cols + image01.cols + 110, image02.rows));

	// Blend the overlap region to the left of the strongest match so the seam
	// fades smoothly instead of showing an abrupt transition.
	Mat image1Overlap, image2Overlap;
	image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
	image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
	Mat image1ROICopy = image1Overlap.clone(); // unblended copy of image 1's overlap
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			// Linear ramp: image 2's weight grows with distance across the overlap.
			double weight = (double)j / image1Overlap.cols;
			for (int c = 0; c < 3; c++)
			{
				image1Overlap.at<Vec3b>(i, j)[c] =
					(1 - weight) * image1ROICopy.at<Vec3b>(i, j)[c] + weight * image2Overlap.at<Vec3b>(i, j)[c];
			}
		}
	}

	// Copy the non-overlapping part of image 2 straight onto the canvas.
	Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));
	ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows)));

	namedWindow("拼接結果", 0);
	imshow("拼接結果", imageTransform1);
	imwrite("D:\\拼接結果.jpg", imageTransform1);
	waitKey();
	return 0;
}

// Maps a point through a 3x3 homogeneous transform, dividing by the third
// (w) coordinate to return Euclidean image coordinates.
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri)
{
	Mat originelP, targetP;
	originelP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);
	targetP = transformMaxtri * originelP;
	float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
	float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
	return Point2f(x, y);
}
OpenMP中for制導指令用於迭代計算的任務分配,sections制導指令用於非迭代計算的任務分配,每一個#pragma omp section語句會引導一個線程。在上邊的程序中相當於是兩個線程分別執行兩幅圖象的特點提取和描寫操作。使用OpenMP後平均耗時2.5s,速度差不多提升了一倍。