

Applying OpenMP Parallel Programming: Accelerating an OpenCV Image-Stitching Algorithm

Source: 程序员人生 | Published: 2017-02-23

OpenMP is a parallel programming solution for multiprocessor systems. It offers a high-level abstraction of parallelism: by adding simple directives to a program, you can write efficient parallel code without worrying about the details of the underlying implementation, which greatly lowers the difficulty and complexity of parallel programming. Precisely because of this ease of use, however, OpenMP is not well suited to situations that require complex inter-thread synchronization and mutual exclusion.
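For instance, a loop whose iterations are independent can be parallelized with a single directive. The following is a minimal, self-contained sketch (my illustration, not from the original article) that sums an array with #pragma omp parallel for; the reduction clause merges each thread's partial sum without a data race:

#include <omp.h>
#include <cstdio>

int main()
{
	const int N = 1000000;
	static double data[N];
	for (int i = 0; i < N; i++) data[i] = 0.5;

	double sum = 0.0;
	// The iterations are split across the thread team; reduction(+:sum)
	// combines the per-thread partial sums safely at the end.
#pragma omp parallel for reduction(+:sum)
	for (int i = 0; i < N; i++)
		sum += data[i];

	printf("sum = %f, up to %d threads\n", sum, omp_get_max_threads());
	return 0;
}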


OpenCV's SIFT/SURF-based image stitching first extracts and describes features in each of the two (or more) input images, then matches the feature points, transforms the images, and so on. Feature extraction and description is the most time-consuming part of the whole pipeline, and since each image is processed independently, it can be accelerated with OpenMP.


Here is the original SIFT stitching program, without OpenMP acceleration:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include "omp.h"

using namespace cv;

// Map a point in the source image to its position in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);

int main(int argc, char *argv[])
{
	float startTime = omp_get_wtime();

	Mat image01 = imread("Test01.jpg");
	Mat image02 = imread("Test02.jpg");
	imshow("拼接圖象1", image01);
	imshow("拼接圖象2", image02);

	// Convert to grayscale
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);  // imread loads BGR, so convert from BGR, not RGB
	cvtColor(image02, image2, CV_BGR2GRAY);

	// Detect keypoints
	SiftFeatureDetector siftDetector(800);  // keep the 800 strongest features
	vector<KeyPoint> keyPoint1, keyPoint2;
	siftDetector.detect(image1, keyPoint1);
	siftDetector.detect(image2, keyPoint2);

	// Compute descriptors for the keypoints, in preparation for matching
	SiftDescriptorExtractor siftDescriptor;
	Mat imageDesc1, imageDesc2;
	siftDescriptor.compute(image1, keyPoint1, imageDesc1);
	siftDescriptor.compute(image2, keyPoint2, imageDesc2);

	float endTime = omp_get_wtime();
	std::cout << "不使用OpenMP加速消耗時間: " << endTime - startTime << std::endl;
	// Match the feature points and keep the best pairs
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end()); // sort matches by ascending distance
	// keep the top N (here 10) best matches
	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < 10; i++)
	{
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
	}

	// Compute the 3x3 homography that maps image 1 onto image 2
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
	Mat adjustHomo = adjustMat*homo;

	// Locate the strongest match in the source image and in the warped image; it anchors the stitching seam
	Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
	originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
	basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

	// Image registration: warp image 1 into image 2's frame
	Mat imageTransform1;
	warpPerspective(image01, imageTransform1, adjustMat*homo, Size(image02.cols + image01.cols + 110, image02.rows));

	// Blend the overlap region to the left of the strongest match so the seam transitions smoothly, without abrupt changes
	Mat image1Overlap, image2Overlap; // overlapping parts of image 1 and image 2
	image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
	image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
	Mat image1ROICopy = image1Overlap.clone();  // keep a copy of image 1's overlap region
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			double weight;
			weight = (double)j / image1Overlap.cols;  // blend weight grows with distance across the overlap
			image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[0] + weight*image2Overlap.at<Vec3b>(i, j)[0];
			image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[1] + weight*image2Overlap.at<Vec3b>(i, j)[1];
			image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[2] + weight*image2Overlap.at<Vec3b>(i, j)[2];
		}
	}
	Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));  // non-overlapping part of image 2
	ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows))); // append the non-overlapping part directly
	namedWindow("Result", 0);
	imshow("Result", imageTransform1);
	imwrite("D:\\result.jpg", imageTransform1);
	waitKey();
	return 0;
}

// Map a point in the source image to its position in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri)
{
	Mat originelP, targetP;
	originelP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);  // homogeneous coordinates (x, y, 1)
	targetP = transformMaxtri*originelP;
	// Perspective division: normalize by the third (w) component
	float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
	float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
	return Point2f(x, y);
}
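For reference, getTransformPoint is simply the homography applied in homogeneous coordinates followed by a perspective division. With H the 3x3 matrix passed in (adjustHomo above):

    (x', y', w') = H * (x, y, 1),    target point = (x'/w', y'/w')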


Image 1: [figure]

Image 2: [figure]

Stitching result: [figure]



Without OpenMP, the program takes about 4.7 s on average on my machine.


Enabling OpenMP is also simple: Visual Studio has built-in support for it. Right-click the project and choose Properties -> Configuration Properties -> C/C++ -> Language -> OpenMP Support, then select Yes (screenshot omitted). On GCC/Clang toolchains the equivalent switch is -fopenmp.
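A quick way to confirm the switch took effect is a minimal probe (again a sketch of mine, not from the original article); the compiler defines the _OPENMP macro only when OpenMP support is enabled:

#include <cstdio>
#ifdef _OPENMP
#include <omp.h>   // only needed when OpenMP is enabled
#endif

int main()
{
#ifdef _OPENMP
	printf("OpenMP enabled, up to %d threads\n", omp_get_max_threads());
#else
	printf("OpenMP is NOT enabled\n");
#endif
	return 0;
}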



After that, just include the OpenMP header omp.h in the program:

#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include "omp.h"

using namespace cv;

// Map a point in the source image to its position in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri);

int main(int argc, char *argv[])
{
	float startTime = omp_get_wtime();

	Mat image01, image02;
	Mat image1, image2;
	vector<KeyPoint> keyPoint1, keyPoint2;
	Mat imageDesc1, imageDesc2;
	SiftFeatureDetector siftDetector(800);  // keep the 800 strongest features
	SiftDescriptorExtractor siftDescriptor;
	// Use OpenMP's sections directive to run the two per-image pipelines on separate threads
#pragma omp parallel sections  
	{
#pragma omp section  
		{
			image01 = imread("Test01.jpg");
			imshow("Image 1", image01);
			// Convert to grayscale
			cvtColor(image01, image1, CV_BGR2GRAY);
			// Detect keypoints
			siftDetector.detect(image1, keyPoint1);
			// Compute descriptors, in preparation for matching
			siftDescriptor.compute(image1, keyPoint1, imageDesc1);
		}
#pragma omp section  
		{
			image02 = imread("Test02.jpg");
			imshow("Image 2", image02);
			cvtColor(image02, image2, CV_BGR2GRAY);
			siftDetector.detect(image2, keyPoint2);
			siftDescriptor.compute(image2, keyPoint2, imageDesc2);
		}
	}
	float endTime = omp_get_wtime();
	std::cout << "使用OpenMP加速消耗時間: " << endTime - startTime << std::endl;

	// Match the feature points and keep the best pairs
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;
	matcher.match(imageDesc1, imageDesc2, matchePoints, Mat());
	sort(matchePoints.begin(), matchePoints.end()); // sort matches by ascending distance
	// keep the top N (here 10) best matches
	vector<Point2f> imagePoints1, imagePoints2;
	for (int i = 0; i < 10; i++)
	{
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);
	}

	// Compute the 3x3 homography that maps image 1 onto image 2
	Mat homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	Mat adjustMat = (Mat_<double>(3, 3) << 1.0, 0, image01.cols, 0, 1.0, 0, 0, 0, 1.0);
	Mat adjustHomo = adjustMat*homo;

	// Locate the strongest match in the source image and in the warped image; it anchors the stitching seam
	Point2f originalLinkPoint, targetLinkPoint, basedImagePoint;
	originalLinkPoint = keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint = getTransformPoint(originalLinkPoint, adjustHomo);
	basedImagePoint = keyPoint2[matchePoints[0].trainIdx].pt;

	// Image registration: warp image 1 into image 2's frame
	Mat imageTransform1;
	warpPerspective(image01, imageTransform1, adjustMat*homo, Size(image02.cols + image01.cols + 110, image02.rows));

	// Blend the overlap region to the left of the strongest match so the seam transitions smoothly, without abrupt changes
	Mat image1Overlap, image2Overlap; // overlapping parts of image 1 and image 2
	image1Overlap = imageTransform1(Rect(Point(targetLinkPoint.x - basedImagePoint.x, 0), Point(targetLinkPoint.x, image02.rows)));
	image2Overlap = image02(Rect(0, 0, image1Overlap.cols, image1Overlap.rows));
	Mat image1ROICopy = image1Overlap.clone();  // keep a copy of image 1's overlap region
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			double weight;
			weight = (double)j / image1Overlap.cols;  // blend weight grows with distance across the overlap
			image1Overlap.at<Vec3b>(i, j)[0] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[0] + weight*image2Overlap.at<Vec3b>(i, j)[0];
			image1Overlap.at<Vec3b>(i, j)[1] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[1] + weight*image2Overlap.at<Vec3b>(i, j)[1];
			image1Overlap.at<Vec3b>(i, j)[2] = (1 - weight)*image1ROICopy.at<Vec3b>(i, j)[2] + weight*image2Overlap.at<Vec3b>(i, j)[2];
		}
	}
	Mat ROIMat = image02(Rect(Point(image1Overlap.cols, 0), Point(image02.cols, image02.rows)));  // non-overlapping part of image 2
	ROIMat.copyTo(Mat(imageTransform1, Rect(targetLinkPoint.x, 0, ROIMat.cols, image02.rows))); // append the non-overlapping part directly
	namedWindow("Result", 0);
	imshow("Result", imageTransform1);
	imwrite("D:\\result.jpg", imageTransform1);
	waitKey();
	return 0;
}

// Map a point in the source image to its position in the target image after the matrix transform
Point2f getTransformPoint(const Point2f originalPoint, const Mat &transformMaxtri)
{
	Mat originelP, targetP;
	originelP = (Mat_<double>(3, 1) << originalPoint.x, originalPoint.y, 1.0);  // homogeneous coordinates (x, y, 1)
	targetP = transformMaxtri*originelP;
	// Perspective division: normalize by the third (w) component
	float x = targetP.at<double>(0, 0) / targetP.at<double>(2, 0);
	float y = targetP.at<double>(1, 0) / targetP.at<double>(2, 0);
	return Point2f(x, y);
}


In OpenMP, the for directive distributes iterative work across threads, while the sections directive distributes non-iterative tasks; each #pragma omp section block is executed by its own thread. In the program above, this amounts to two threads performing feature extraction and description for the two images in parallel. With OpenMP the average time drops to about 2.5 s, roughly doubling the speed. (One caveat: HighGUI calls such as imshow are not guaranteed to be thread-safe, so in more robust code the display calls belong outside the parallel sections.)
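The for directive could be applied to this program as well: the pixel-blending loop above has fully independent iterations, so its rows can be handed out to the thread team. The following is a sketch of mine, not part of the original program; it assumes the same image1Overlap, image2Overlap, and image1ROICopy Mats defined above:

	// Distribute the independent rows of the blend across threads
#pragma omp parallel for
	for (int i = 0; i < image1Overlap.rows; i++)
	{
		for (int j = 0; j < image1Overlap.cols; j++)
		{
			double weight = (double)j / image1Overlap.cols;  // blend weight grows across the overlap
			for (int c = 0; c < 3; c++)
				image1Overlap.at<Vec3b>(i, j)[c] = saturate_cast<uchar>(
					(1 - weight) * image1ROICopy.at<Vec3b>(i, j)[c]
					+ weight * image2Overlap.at<Vec3b>(i, j)[c]);
		}
	}

For a loop this light the gain is likely modest, since memory bandwidth rather than arithmetic dominates, but it illustrates the division of labor: for handles iterative work, sections handles coarse task-level parallelism.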

